python_code (stringlengths 0–1.8M) | repo_name (stringclasses, 7 values) | file_path (stringlengths 5–99)
---|---|---|
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
*/
/*
* Authors: Dave Airlie <[email protected]>
*/
#include <linux/pci.h>
#include <drm/drm_managed.h>
#include <drm/drm_print.h>
#include "ast_drv.h"
static u32 ast_get_vram_size(struct ast_device *ast)
{
u8 jreg;
u32 vram_size;
vram_size = AST_VIDMEM_DEFAULT_SIZE;
jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xaa, 0xff);
switch (jreg & 3) {
case 0:
vram_size = AST_VIDMEM_SIZE_8M;
break;
case 1:
vram_size = AST_VIDMEM_SIZE_16M;
break;
case 2:
vram_size = AST_VIDMEM_SIZE_32M;
break;
case 3:
vram_size = AST_VIDMEM_SIZE_64M;
break;
}
jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x99, 0xff);
switch (jreg & 0x03) {
case 1:
vram_size -= 0x100000;
break;
case 2:
vram_size -= 0x200000;
break;
case 3:
vram_size -= 0x400000;
break;
}
return vram_size;
}
int ast_mm_init(struct ast_device *ast)
{
struct drm_device *dev = &ast->base;
struct pci_dev *pdev = to_pci_dev(dev->dev);
resource_size_t base, size;
u32 vram_size;
base = pci_resource_start(pdev, 0);
size = pci_resource_len(pdev, 0);
/* Don't fail on errors, but performance might be reduced. */
devm_arch_io_reserve_memtype_wc(dev->dev, base, size);
devm_arch_phys_wc_add(dev->dev, base, size);
vram_size = ast_get_vram_size(ast);
ast->vram = devm_ioremap_wc(dev->dev, base, vram_size);
if (!ast->vram)
return -ENOMEM;
ast->vram_base = base;
ast->vram_size = vram_size;
ast->vram_fb_available = vram_size;
return 0;
}
| linux-master | drivers/gpu/drm/ast/ast_mm.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
*/
/*
* Authors: Dave Airlie <[email protected]>
*/
#include <linux/module.h>
#include <linux/pci.h>
#include <drm/drm_aperture.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_generic.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_module.h>
#include <drm/drm_probe_helper.h>
#include "ast_drv.h"
static int ast_modeset = -1;
MODULE_PARM_DESC(modeset, "Disable/Enable modesetting");
module_param_named(modeset, ast_modeset, int, 0400);
/*
* DRM driver
*/
DEFINE_DRM_GEM_FOPS(ast_fops);
static const struct drm_driver ast_driver = {
.driver_features = DRIVER_ATOMIC |
DRIVER_GEM |
DRIVER_MODESET,
.fops = &ast_fops,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = DRIVER_DATE,
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
.patchlevel = DRIVER_PATCHLEVEL,
DRM_GEM_SHMEM_DRIVER_OPS
};
/*
* PCI driver
*/
#define PCI_VENDOR_ASPEED 0x1a03
#define AST_VGA_DEVICE(id, info) { \
.class = PCI_BASE_CLASS_DISPLAY << 16, \
.class_mask = 0xff0000, \
.vendor = PCI_VENDOR_ASPEED, \
.device = id, \
.subvendor = PCI_ANY_ID, \
.subdevice = PCI_ANY_ID, \
.driver_data = (unsigned long) info }
static const struct pci_device_id ast_pciidlist[] = {
AST_VGA_DEVICE(PCI_CHIP_AST2000, NULL),
AST_VGA_DEVICE(PCI_CHIP_AST2100, NULL),
{0, 0, 0},
};
MODULE_DEVICE_TABLE(pci, ast_pciidlist);
static int ast_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct ast_device *ast;
struct drm_device *dev;
int ret;
ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &ast_driver);
if (ret)
return ret;
ret = pcim_enable_device(pdev);
if (ret)
return ret;
ast = ast_device_create(&ast_driver, pdev, ent->driver_data);
if (IS_ERR(ast))
return PTR_ERR(ast);
dev = &ast->base;
ret = drm_dev_register(dev, ent->driver_data);
if (ret)
return ret;
drm_fbdev_generic_setup(dev, 32);
return 0;
}
static void ast_pci_remove(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
drm_dev_unregister(dev);
drm_atomic_helper_shutdown(dev);
}
static int ast_drm_freeze(struct drm_device *dev)
{
int error;
error = drm_mode_config_helper_suspend(dev);
if (error)
return error;
pci_save_state(to_pci_dev(dev->dev));
return 0;
}
static int ast_drm_thaw(struct drm_device *dev)
{
ast_post_gpu(dev);
return drm_mode_config_helper_resume(dev);
}
static int ast_drm_resume(struct drm_device *dev)
{
if (pci_enable_device(to_pci_dev(dev->dev)))
return -EIO;
return ast_drm_thaw(dev);
}
static int ast_pm_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *ddev = pci_get_drvdata(pdev);
int error;
error = ast_drm_freeze(ddev);
if (error)
return error;
pci_disable_device(pdev);
pci_set_power_state(pdev, PCI_D3hot);
return 0;
}
static int ast_pm_resume(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *ddev = pci_get_drvdata(pdev);
return ast_drm_resume(ddev);
}
static int ast_pm_freeze(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *ddev = pci_get_drvdata(pdev);
return ast_drm_freeze(ddev);
}
static int ast_pm_thaw(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *ddev = pci_get_drvdata(pdev);
return ast_drm_thaw(ddev);
}
static int ast_pm_poweroff(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *ddev = pci_get_drvdata(pdev);
return ast_drm_freeze(ddev);
}
static const struct dev_pm_ops ast_pm_ops = {
.suspend = ast_pm_suspend,
.resume = ast_pm_resume,
.freeze = ast_pm_freeze,
.thaw = ast_pm_thaw,
.poweroff = ast_pm_poweroff,
.restore = ast_pm_resume,
};
static struct pci_driver ast_pci_driver = {
.name = DRIVER_NAME,
.id_table = ast_pciidlist,
.probe = ast_pci_probe,
.remove = ast_pci_remove,
.driver.pm = &ast_pm_ops,
};
drm_module_pci_driver_if_modeset(ast_pci_driver, ast_modeset);
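/*
 * drm_module_pci_driver_if_modeset() registers the PCI driver only when
 * modesetting is allowed (see drm_module.h), so loading with ast.modeset=0,
 * or booting with nomodeset and the default parameter, keeps the driver
 * from binding.
 */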
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");
| linux-master | drivers/gpu/drm/ast/ast_drv.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
*/
/*
* Authors: Dave Airlie <[email protected]>
*/
#include <linux/delay.h>
#include <linux/pci.h>
#include <drm/drm_print.h>
#include "ast_dram_tables.h"
#include "ast_drv.h"
static void ast_post_chip_2300(struct drm_device *dev);
static void ast_post_chip_2500(struct drm_device *dev);
static const u8 extreginfo[] = { 0x0f, 0x04, 0x1c, 0xff };
static const u8 extreginfo_ast2300[] = { 0x0f, 0x04, 0x1f, 0xff };
static void
ast_set_def_ext_reg(struct drm_device *dev)
{
struct ast_device *ast = to_ast_device(dev);
u8 i, index, reg;
const u8 *ext_reg_info;
/* reset scratch */
for (i = 0x81; i <= 0x9f; i++)
ast_set_index_reg(ast, AST_IO_CRTC_PORT, i, 0x00);
if (IS_AST_GEN4(ast) || IS_AST_GEN5(ast) || IS_AST_GEN6(ast))
ext_reg_info = extreginfo_ast2300;
else
ext_reg_info = extreginfo;
index = 0xa0;
while (*ext_reg_info != 0xff) {
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, index, 0x00, *ext_reg_info);
index++;
ext_reg_info++;
}
/* disable standard IO/MEM decode if secondary */
/* ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa1, 0xff, 0x3); */
/* Set Ext. Default */
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x8c, 0x00, 0x01);
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x00, 0x00);
/* Enable RAMDAC for A1 */
reg = 0x04;
if (IS_AST_GEN4(ast) || IS_AST_GEN5(ast) || IS_AST_GEN6(ast))
reg |= 0x20;
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb6, 0xff, reg);
}
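/*
 * ast_mindwm()/ast_moutdwm() access SoC memory-controller registers through
 * the PCI-to-AHB (P2A) window: the upper 16 address bits are programmed into
 * MMIO offset 0xf004, the window is enabled via 0xf000, and the register is
 * then read or written at 0x10000 plus the lower 16 address bits.
 */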
u32 ast_mindwm(struct ast_device *ast, u32 r)
{
uint32_t data;
ast_write32(ast, 0xf004, r & 0xffff0000);
ast_write32(ast, 0xf000, 0x1);
do {
data = ast_read32(ast, 0xf004) & 0xffff0000;
} while (data != (r & 0xffff0000));
return ast_read32(ast, 0x10000 + (r & 0x0000ffff));
}
void ast_moutdwm(struct ast_device *ast, u32 r, u32 v)
{
uint32_t data;
ast_write32(ast, 0xf004, r & 0xffff0000);
ast_write32(ast, 0xf000, 0x1);
do {
data = ast_read32(ast, 0xf004) & 0xffff0000;
} while (data != (r & 0xffff0000));
ast_write32(ast, 0x10000 + (r & 0x0000ffff), v);
}
/*
* AST2100/2150 DLL CBR Setting
*/
#define CBR_SIZE_AST2150 ((16 << 10) - 1)
#define CBR_PASSNUM_AST2150 5
#define CBR_THRESHOLD_AST2150 10
#define CBR_THRESHOLD2_AST2150 10
#define TIMEOUT_AST2150 5000000
#define CBR_PATNUM_AST2150 8
static const u32 pattern_AST2150[14] = {
0xFF00FF00,
0xCC33CC33,
0xAA55AA55,
0xFFFE0001,
0x683501FE,
0x0F1929B0,
0x2D0B4346,
0x60767F02,
0x6FBE36A6,
0x3A253035,
0x3019686D,
0x41C6167E,
0x620152BF,
0x20F050E0
};
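/*
 * The AST2100/2150 calibration helpers below drive the memory-controller
 * test engine at 0x1e6e0070: a data-generation pattern is selected via
 * 'datagen', the burst test is started, and the completion bit is polled.
 * mmctestburst2_ast2150() returns the fail bit (0 on pass, 1 on fail), or
 * 0xffffffff on timeout.
 */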
static u32 mmctestburst2_ast2150(struct ast_device *ast, u32 datagen)
{
u32 data, timeout;
ast_moutdwm(ast, 0x1e6e0070, 0x00000000);
ast_moutdwm(ast, 0x1e6e0070, 0x00000001 | (datagen << 3));
timeout = 0;
do {
data = ast_mindwm(ast, 0x1e6e0070) & 0x40;
if (++timeout > TIMEOUT_AST2150) {
ast_moutdwm(ast, 0x1e6e0070, 0x00000000);
return 0xffffffff;
}
} while (!data);
ast_moutdwm(ast, 0x1e6e0070, 0x00000000);
ast_moutdwm(ast, 0x1e6e0070, 0x00000003 | (datagen << 3));
timeout = 0;
do {
data = ast_mindwm(ast, 0x1e6e0070) & 0x40;
if (++timeout > TIMEOUT_AST2150) {
ast_moutdwm(ast, 0x1e6e0070, 0x00000000);
return 0xffffffff;
}
} while (!data);
data = (ast_mindwm(ast, 0x1e6e0070) & 0x80) >> 7;
ast_moutdwm(ast, 0x1e6e0070, 0x00000000);
return data;
}
#if 0 /* unused in DDX driver - here for completeness */
static u32 mmctestsingle2_ast2150(struct ast_device *ast, u32 datagen)
{
u32 data, timeout;
ast_moutdwm(ast, 0x1e6e0070, 0x00000000);
ast_moutdwm(ast, 0x1e6e0070, 0x00000005 | (datagen << 3));
timeout = 0;
do {
data = ast_mindwm(ast, 0x1e6e0070) & 0x40;
if (++timeout > TIMEOUT_AST2150) {
ast_moutdwm(ast, 0x1e6e0070, 0x00000000);
return 0xffffffff;
}
} while (!data);
data = (ast_mindwm(ast, 0x1e6e0070) & 0x80) >> 7;
ast_moutdwm(ast, 0x1e6e0070, 0x00000000);
return data;
}
#endif
static int cbrtest_ast2150(struct ast_device *ast)
{
int i;
for (i = 0; i < 8; i++)
if (mmctestburst2_ast2150(ast, i))
return 0;
return 1;
}
static int cbrscan_ast2150(struct ast_device *ast, int busw)
{
u32 patcnt, loop;
for (patcnt = 0; patcnt < CBR_PATNUM_AST2150; patcnt++) {
ast_moutdwm(ast, 0x1e6e007c, pattern_AST2150[patcnt]);
for (loop = 0; loop < CBR_PASSNUM_AST2150; loop++) {
if (cbrtest_ast2150(ast))
break;
}
if (loop == CBR_PASSNUM_AST2150)
return 0;
}
return 1;
}
static void cbrdlli_ast2150(struct ast_device *ast, int busw)
{
u32 dll_min[4], dll_max[4], dlli, data, passcnt;
cbr_start:
dll_min[0] = dll_min[1] = dll_min[2] = dll_min[3] = 0xff;
dll_max[0] = dll_max[1] = dll_max[2] = dll_max[3] = 0x0;
passcnt = 0;
for (dlli = 0; dlli < 100; dlli++) {
ast_moutdwm(ast, 0x1e6e0068, dlli | (dlli << 8) | (dlli << 16) | (dlli << 24));
data = cbrscan_ast2150(ast, busw);
if (data != 0) {
if (data & 0x1) {
if (dll_min[0] > dlli)
dll_min[0] = dlli;
if (dll_max[0] < dlli)
dll_max[0] = dlli;
}
passcnt++;
} else if (passcnt >= CBR_THRESHOLD_AST2150)
goto cbr_start;
}
if (dll_max[0] == 0 || (dll_max[0]-dll_min[0]) < CBR_THRESHOLD_AST2150)
goto cbr_start;
dlli = dll_min[0] + (((dll_max[0] - dll_min[0]) * 7) >> 4);
ast_moutdwm(ast, 0x1e6e0068, dlli | (dlli << 8) | (dlli << 16) | (dlli << 24));
}
static void ast_init_dram_reg(struct drm_device *dev)
{
struct ast_device *ast = to_ast_device(dev);
u8 j;
u32 data, temp, i;
const struct ast_dramstruct *dram_reg_info;
j = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff);
if ((j & 0x80) == 0) { /* VGA only */
if (IS_AST_GEN1(ast)) {
dram_reg_info = ast2000_dram_table_data;
ast_write32(ast, 0xf004, 0x1e6e0000);
ast_write32(ast, 0xf000, 0x1);
ast_write32(ast, 0x10100, 0xa8);
do {
;
} while (ast_read32(ast, 0x10100) != 0xa8);
} else { /* GEN2/GEN3 */
if (ast->chip == AST2100 || ast->chip == AST2200)
dram_reg_info = ast2100_dram_table_data;
else
dram_reg_info = ast1100_dram_table_data;
ast_write32(ast, 0xf004, 0x1e6e0000);
ast_write32(ast, 0xf000, 0x1);
ast_write32(ast, 0x12000, 0x1688A8A8);
do {
;
} while (ast_read32(ast, 0x12000) != 0x01);
ast_write32(ast, 0x10000, 0xfc600309);
do {
;
} while (ast_read32(ast, 0x10000) != 0x01);
}
while (dram_reg_info->index != 0xffff) {
if (dram_reg_info->index == 0xff00) {/* delay fn */
for (i = 0; i < 15; i++)
udelay(dram_reg_info->data);
} else if (dram_reg_info->index == 0x4 && !IS_AST_GEN1(ast)) {
data = dram_reg_info->data;
if (ast->dram_type == AST_DRAM_1Gx16)
data = 0x00000d89;
else if (ast->dram_type == AST_DRAM_1Gx32)
data = 0x00000c8d;
temp = ast_read32(ast, 0x12070);
temp &= 0xc;
temp <<= 2;
ast_write32(ast, 0x10000 + dram_reg_info->index, data | temp);
} else
ast_write32(ast, 0x10000 + dram_reg_info->index, dram_reg_info->data);
dram_reg_info++;
}
/* AST 2100/2150 DRAM calibration */
data = ast_read32(ast, 0x10120);
if (data == 0x5061) { /* 266Mhz */
data = ast_read32(ast, 0x10004);
if (data & 0x40)
cbrdlli_ast2150(ast, 16); /* 16 bits */
else
cbrdlli_ast2150(ast, 32); /* 32 bits */
}
switch (AST_GEN(ast)) {
case 1:
temp = ast_read32(ast, 0x10140);
ast_write32(ast, 0x10140, temp | 0x40);
break;
case 2:
case 3:
temp = ast_read32(ast, 0x1200c);
ast_write32(ast, 0x1200c, temp & 0xfffffffd);
temp = ast_read32(ast, 0x12040);
ast_write32(ast, 0x12040, temp | 0x40);
break;
default:
break;
}
}
/* wait ready */
do {
j = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff);
} while ((j & 0x40) == 0);
}
void ast_post_gpu(struct drm_device *dev)
{
struct ast_device *ast = to_ast_device(dev);
ast_set_def_ext_reg(dev);
if (IS_AST_GEN7(ast)) {
if (ast->tx_chip_types & AST_TX_ASTDP_BIT)
ast_dp_launch(dev);
} else if (ast->config_mode == ast_use_p2a) {
if (IS_AST_GEN6(ast))
ast_post_chip_2500(dev);
else if (IS_AST_GEN5(ast) || IS_AST_GEN4(ast))
ast_post_chip_2300(dev);
else
ast_init_dram_reg(dev);
ast_init_3rdtx(dev);
} else {
if (ast->tx_chip_types & AST_TX_SIL164_BIT)
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa3, 0xcf, 0x80); /* Enable DVO */
}
}
/* AST 2300 DRAM settings */
#define AST_DDR3 0
#define AST_DDR2 1
struct ast2300_dram_param {
u32 dram_type;
u32 dram_chipid;
u32 dram_freq;
u32 vram_size;
u32 odt;
u32 wodt;
u32 rodt;
u32 dram_config;
u32 reg_PERIOD;
u32 reg_MADJ;
u32 reg_SADJ;
u32 reg_MRS;
u32 reg_EMRS;
u32 reg_AC1;
u32 reg_AC2;
u32 reg_DQSIC;
u32 reg_DRV;
u32 reg_IOZ;
u32 reg_DQIDLY;
u32 reg_FREQ;
u32 madj_max;
u32 dll2_finetune_step;
};
/*
* DQSI DLL CBR Setting
*/
#define CBR_SIZE0 ((1 << 10) - 1)
#define CBR_SIZE1 ((4 << 10) - 1)
#define CBR_SIZE2 ((64 << 10) - 1)
#define CBR_PASSNUM 5
#define CBR_PASSNUM2 5
#define CBR_THRESHOLD 10
#define CBR_THRESHOLD2 10
#define TIMEOUT 5000000
#define CBR_PATNUM 8
static const u32 pattern[8] = {
0xFF00FF00,
0xCC33CC33,
0xAA55AA55,
0x88778877,
0x92CC4D6E,
0x543D3CDE,
0xF1E843C7,
0x7C61D253
};
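/*
 * mmc_test()/mmc_test2() are the AST2300+ variants of the test-engine
 * helpers: mmc_test() returns true only if the selected test completes
 * without the fail bit set, while mmc_test2() returns the per-lane fail
 * mask read back from 0x1e6e0078 (or 0xffffffff on timeout).
 */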
static bool mmc_test(struct ast_device *ast, u32 datagen, u8 test_ctl)
{
u32 data, timeout;
ast_moutdwm(ast, 0x1e6e0070, 0x00000000);
ast_moutdwm(ast, 0x1e6e0070, (datagen << 3) | test_ctl);
timeout = 0;
do {
data = ast_mindwm(ast, 0x1e6e0070) & 0x3000;
if (data & 0x2000)
return false;
if (++timeout > TIMEOUT) {
ast_moutdwm(ast, 0x1e6e0070, 0x00000000);
return false;
}
} while (!data);
ast_moutdwm(ast, 0x1e6e0070, 0x0);
return true;
}
static u32 mmc_test2(struct ast_device *ast, u32 datagen, u8 test_ctl)
{
u32 data, timeout;
ast_moutdwm(ast, 0x1e6e0070, 0x00000000);
ast_moutdwm(ast, 0x1e6e0070, (datagen << 3) | test_ctl);
timeout = 0;
do {
data = ast_mindwm(ast, 0x1e6e0070) & 0x1000;
if (++timeout > TIMEOUT) {
ast_moutdwm(ast, 0x1e6e0070, 0x0);
return 0xffffffff;
}
} while (!data);
data = ast_mindwm(ast, 0x1e6e0078);
data = (data | (data >> 16)) & 0xffff;
ast_moutdwm(ast, 0x1e6e0070, 0x00000000);
return data;
}
static bool mmc_test_burst(struct ast_device *ast, u32 datagen)
{
return mmc_test(ast, datagen, 0xc1);
}
static u32 mmc_test_burst2(struct ast_device *ast, u32 datagen)
{
return mmc_test2(ast, datagen, 0x41);
}
static bool mmc_test_single(struct ast_device *ast, u32 datagen)
{
return mmc_test(ast, datagen, 0xc5);
}
static u32 mmc_test_single2(struct ast_device *ast, u32 datagen)
{
return mmc_test2(ast, datagen, 0x05);
}
static bool mmc_test_single_2500(struct ast_device *ast, u32 datagen)
{
return mmc_test(ast, datagen, 0x85);
}
static int cbr_test(struct ast_device *ast)
{
u32 data;
int i;
data = mmc_test_single2(ast, 0);
if ((data & 0xff) && (data & 0xff00))
return 0;
for (i = 0; i < 8; i++) {
data = mmc_test_burst2(ast, i);
if ((data & 0xff) && (data & 0xff00))
return 0;
}
if (!data)
return 3;
else if (data & 0xff)
return 2;
return 1;
}
static int cbr_scan(struct ast_device *ast)
{
u32 data, data2, patcnt, loop;
data2 = 3;
for (patcnt = 0; patcnt < CBR_PATNUM; patcnt++) {
ast_moutdwm(ast, 0x1e6e007c, pattern[patcnt]);
for (loop = 0; loop < CBR_PASSNUM2; loop++) {
if ((data = cbr_test(ast)) != 0) {
data2 &= data;
if (!data2)
return 0;
break;
}
}
if (loop == CBR_PASSNUM2)
return 0;
}
return data2;
}
static u32 cbr_test2(struct ast_device *ast)
{
u32 data;
data = mmc_test_burst2(ast, 0);
if (data == 0xffff)
return 0;
data |= mmc_test_single2(ast, 0);
if (data == 0xffff)
return 0;
return ~data & 0xffff;
}
static u32 cbr_scan2(struct ast_device *ast)
{
u32 data, data2, patcnt, loop;
data2 = 0xffff;
for (patcnt = 0; patcnt < CBR_PATNUM; patcnt++) {
ast_moutdwm(ast, 0x1e6e007c, pattern[patcnt]);
for (loop = 0; loop < CBR_PASSNUM2; loop++) {
if ((data = cbr_test2(ast)) != 0) {
data2 &= data;
if (!data2)
return 0;
break;
}
}
if (loop == CBR_PASSNUM2)
return 0;
}
return data2;
}
static bool cbr_test3(struct ast_device *ast)
{
if (!mmc_test_burst(ast, 0))
return false;
if (!mmc_test_single(ast, 0))
return false;
return true;
}
static bool cbr_scan3(struct ast_device *ast)
{
u32 patcnt, loop;
for (patcnt = 0; patcnt < CBR_PATNUM; patcnt++) {
ast_moutdwm(ast, 0x1e6e007c, pattern[patcnt]);
for (loop = 0; loop < 2; loop++) {
if (cbr_test3(ast))
break;
}
if (loop == 2)
return false;
}
return true;
}
static bool finetuneDQI_L(struct ast_device *ast, struct ast2300_dram_param *param)
{
u32 gold_sadj[2], dllmin[16], dllmax[16], dlli, data, cnt, mask, passcnt, retry = 0;
bool status = false;
FINETUNE_START:
for (cnt = 0; cnt < 16; cnt++) {
dllmin[cnt] = 0xff;
dllmax[cnt] = 0x0;
}
passcnt = 0;
for (dlli = 0; dlli < 76; dlli++) {
ast_moutdwm(ast, 0x1E6E0068, 0x00001400 | (dlli << 16) | (dlli << 24));
ast_moutdwm(ast, 0x1E6E0074, CBR_SIZE1);
data = cbr_scan2(ast);
if (data != 0) {
mask = 0x00010001;
for (cnt = 0; cnt < 16; cnt++) {
if (data & mask) {
if (dllmin[cnt] > dlli) {
dllmin[cnt] = dlli;
}
if (dllmax[cnt] < dlli) {
dllmax[cnt] = dlli;
}
}
mask <<= 1;
}
passcnt++;
} else if (passcnt >= CBR_THRESHOLD2) {
break;
}
}
gold_sadj[0] = 0x0;
passcnt = 0;
for (cnt = 0; cnt < 16; cnt++) {
if ((dllmax[cnt] > dllmin[cnt]) && ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD2)) {
gold_sadj[0] += dllmin[cnt];
passcnt++;
}
}
if (retry++ > 10)
goto FINETUNE_DONE;
if (passcnt != 16) {
goto FINETUNE_START;
}
status = true;
FINETUNE_DONE:
gold_sadj[0] = gold_sadj[0] >> 4;
gold_sadj[1] = gold_sadj[0];
data = 0;
for (cnt = 0; cnt < 8; cnt++) {
data >>= 3;
if ((dllmax[cnt] > dllmin[cnt]) && ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD2)) {
dlli = dllmin[cnt];
if (gold_sadj[0] >= dlli) {
dlli = ((gold_sadj[0] - dlli) * 19) >> 5;
if (dlli > 3) {
dlli = 3;
}
} else {
dlli = ((dlli - gold_sadj[0]) * 19) >> 5;
if (dlli > 4) {
dlli = 4;
}
dlli = (8 - dlli) & 0x7;
}
data |= dlli << 21;
}
}
ast_moutdwm(ast, 0x1E6E0080, data);
data = 0;
for (cnt = 8; cnt < 16; cnt++) {
data >>= 3;
if ((dllmax[cnt] > dllmin[cnt]) && ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD2)) {
dlli = dllmin[cnt];
if (gold_sadj[1] >= dlli) {
dlli = ((gold_sadj[1] - dlli) * 19) >> 5;
if (dlli > 3) {
dlli = 3;
} else {
dlli = (dlli - 1) & 0x7;
}
} else {
dlli = ((dlli - gold_sadj[1]) * 19) >> 5;
dlli += 1;
if (dlli > 4) {
dlli = 4;
}
dlli = (8 - dlli) & 0x7;
}
data |= dlli << 21;
}
}
ast_moutdwm(ast, 0x1E6E0084, data);
return status;
} /* finetuneDQI_L */
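/*
 * finetuneDQSI() sweeps the DQ input delay (0..31) and the DQS input phase
 * (0/1) while running the CBR test across the DLL delay range, records the
 * passing window for each combination, and programs the combination with
 * the widest margin into MCR18.
 */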
static void finetuneDQSI(struct ast_device *ast)
{
u32 dlli, dqsip, dqidly;
u32 reg_mcr18, reg_mcr0c, passcnt[2], diff;
u32 g_dqidly, g_dqsip, g_margin, g_side;
u16 pass[32][2][2];
char tag[2][76];
/* Disable DQI CBR */
reg_mcr0c = ast_mindwm(ast, 0x1E6E000C);
reg_mcr18 = ast_mindwm(ast, 0x1E6E0018);
reg_mcr18 &= 0x0000ffff;
ast_moutdwm(ast, 0x1E6E0018, reg_mcr18);
for (dlli = 0; dlli < 76; dlli++) {
tag[0][dlli] = 0x0;
tag[1][dlli] = 0x0;
}
for (dqidly = 0; dqidly < 32; dqidly++) {
pass[dqidly][0][0] = 0xff;
pass[dqidly][0][1] = 0x0;
pass[dqidly][1][0] = 0xff;
pass[dqidly][1][1] = 0x0;
}
for (dqidly = 0; dqidly < 32; dqidly++) {
passcnt[0] = passcnt[1] = 0;
for (dqsip = 0; dqsip < 2; dqsip++) {
ast_moutdwm(ast, 0x1E6E000C, 0);
ast_moutdwm(ast, 0x1E6E0018, reg_mcr18 | (dqidly << 16) | (dqsip << 23));
ast_moutdwm(ast, 0x1E6E000C, reg_mcr0c);
for (dlli = 0; dlli < 76; dlli++) {
ast_moutdwm(ast, 0x1E6E0068, 0x00001300 | (dlli << 16) | (dlli << 24));
ast_moutdwm(ast, 0x1E6E0070, 0);
ast_moutdwm(ast, 0x1E6E0074, CBR_SIZE0);
if (cbr_scan3(ast)) {
if (dlli == 0)
break;
passcnt[dqsip]++;
tag[dqsip][dlli] = 'P';
if (dlli < pass[dqidly][dqsip][0])
pass[dqidly][dqsip][0] = (u16) dlli;
if (dlli > pass[dqidly][dqsip][1])
pass[dqidly][dqsip][1] = (u16) dlli;
} else if (passcnt[dqsip] >= 5)
break;
else {
pass[dqidly][dqsip][0] = 0xff;
pass[dqidly][dqsip][1] = 0x0;
}
}
}
if (passcnt[0] == 0 && passcnt[1] == 0)
dqidly++;
}
/* Search margin */
g_dqidly = g_dqsip = g_margin = g_side = 0;
for (dqidly = 0; dqidly < 32; dqidly++) {
for (dqsip = 0; dqsip < 2; dqsip++) {
if (pass[dqidly][dqsip][0] > pass[dqidly][dqsip][1])
continue;
diff = pass[dqidly][dqsip][1] - pass[dqidly][dqsip][0];
if ((diff+2) < g_margin)
continue;
passcnt[0] = passcnt[1] = 0;
for (dlli = pass[dqidly][dqsip][0]; dlli > 0 && tag[dqsip][dlli] != 0; dlli--, passcnt[0]++);
for (dlli = pass[dqidly][dqsip][1]; dlli < 76 && tag[dqsip][dlli] != 0; dlli++, passcnt[1]++);
if (passcnt[0] > passcnt[1])
passcnt[0] = passcnt[1];
passcnt[1] = 0;
if (passcnt[0] > g_side)
passcnt[1] = passcnt[0] - g_side;
if (diff > (g_margin+1) && (passcnt[1] > 0 || passcnt[0] > 8)) {
g_margin = diff;
g_dqidly = dqidly;
g_dqsip = dqsip;
g_side = passcnt[0];
} else if (passcnt[1] > 1 && g_side < 8) {
if (diff > g_margin)
g_margin = diff;
g_dqidly = dqidly;
g_dqsip = dqsip;
g_side = passcnt[0];
}
}
}
reg_mcr18 = reg_mcr18 | (g_dqidly << 16) | (g_dqsip << 23);
ast_moutdwm(ast, 0x1E6E0018, reg_mcr18);
}
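/*
 * cbr_dll2() runs the DQSI/DQI fine tuning above and then sweeps the DLL
 * delay once more, recording the passing window of both byte lanes; the
 * midpoint of each window is programmed as the final DLL setting.
 */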
static bool cbr_dll2(struct ast_device *ast, struct ast2300_dram_param *param)
{
u32 dllmin[2], dllmax[2], dlli, data, passcnt, retry = 0;
bool status = false;
finetuneDQSI(ast);
if (finetuneDQI_L(ast, param) == false)
return status;
CBR_START2:
dllmin[0] = dllmin[1] = 0xff;
dllmax[0] = dllmax[1] = 0x0;
passcnt = 0;
for (dlli = 0; dlli < 76; dlli++) {
ast_moutdwm(ast, 0x1E6E0068, 0x00001300 | (dlli << 16) | (dlli << 24));
ast_moutdwm(ast, 0x1E6E0074, CBR_SIZE2);
data = cbr_scan(ast);
if (data != 0) {
if (data & 0x1) {
if (dllmin[0] > dlli) {
dllmin[0] = dlli;
}
if (dllmax[0] < dlli) {
dllmax[0] = dlli;
}
}
if (data & 0x2) {
if (dllmin[1] > dlli) {
dllmin[1] = dlli;
}
if (dllmax[1] < dlli) {
dllmax[1] = dlli;
}
}
passcnt++;
} else if (passcnt >= CBR_THRESHOLD) {
break;
}
}
if (retry++ > 10)
goto CBR_DONE2;
if (dllmax[0] == 0 || (dllmax[0]-dllmin[0]) < CBR_THRESHOLD) {
goto CBR_START2;
}
if (dllmax[1] == 0 || (dllmax[1]-dllmin[1]) < CBR_THRESHOLD) {
goto CBR_START2;
}
status = true;
CBR_DONE2:
dlli = (dllmin[1] + dllmax[1]) >> 1;
dlli <<= 8;
dlli += (dllmin[0] + dllmax[0]) >> 1;
ast_moutdwm(ast, 0x1E6E0068, ast_mindwm(ast, 0x1E720058) | (dlli << 16));
return status;
} /* CBRDLL2 */
static void get_ddr3_info(struct ast_device *ast, struct ast2300_dram_param *param)
{
u32 trap, trap_AC2, trap_MRS;
ast_moutdwm(ast, 0x1E6E2000, 0x1688A8A8);
/* Get trap info */
trap = (ast_mindwm(ast, 0x1E6E2070) >> 25) & 0x3;
trap_AC2 = 0x00020000 + (trap << 16);
trap_AC2 |= 0x00300000 + ((trap & 0x2) << 19);
trap_MRS = 0x00000010 + (trap << 4);
trap_MRS |= ((trap & 0x2) << 18);
param->reg_MADJ = 0x00034C4C;
param->reg_SADJ = 0x00001800;
param->reg_DRV = 0x000000F0;
param->reg_PERIOD = param->dram_freq;
param->rodt = 0;
switch (param->dram_freq) {
case 336:
ast_moutdwm(ast, 0x1E6E2020, 0x0190);
param->wodt = 0;
param->reg_AC1 = 0x22202725;
param->reg_AC2 = 0xAA007613 | trap_AC2;
param->reg_DQSIC = 0x000000BA;
param->reg_MRS = 0x04001400 | trap_MRS;
param->reg_EMRS = 0x00000000;
param->reg_IOZ = 0x00000023;
param->reg_DQIDLY = 0x00000074;
param->reg_FREQ = 0x00004DC0;
param->madj_max = 96;
param->dll2_finetune_step = 3;
switch (param->dram_chipid) {
default:
case AST_DRAM_512Mx16:
case AST_DRAM_1Gx16:
param->reg_AC2 = 0xAA007613 | trap_AC2;
break;
case AST_DRAM_2Gx16:
param->reg_AC2 = 0xAA00761C | trap_AC2;
break;
case AST_DRAM_4Gx16:
param->reg_AC2 = 0xAA007636 | trap_AC2;
break;
}
break;
default:
case 396:
ast_moutdwm(ast, 0x1E6E2020, 0x03F1);
param->wodt = 1;
param->reg_AC1 = 0x33302825;
param->reg_AC2 = 0xCC009617 | trap_AC2;
param->reg_DQSIC = 0x000000E2;
param->reg_MRS = 0x04001600 | trap_MRS;
param->reg_EMRS = 0x00000000;
param->reg_IOZ = 0x00000034;
param->reg_DRV = 0x000000FA;
param->reg_DQIDLY = 0x00000089;
param->reg_FREQ = 0x00005040;
param->madj_max = 96;
param->dll2_finetune_step = 4;
switch (param->dram_chipid) {
default:
case AST_DRAM_512Mx16:
case AST_DRAM_1Gx16:
param->reg_AC2 = 0xCC009617 | trap_AC2;
break;
case AST_DRAM_2Gx16:
param->reg_AC2 = 0xCC009622 | trap_AC2;
break;
case AST_DRAM_4Gx16:
param->reg_AC2 = 0xCC00963F | trap_AC2;
break;
}
break;
case 408:
ast_moutdwm(ast, 0x1E6E2020, 0x01F0);
param->wodt = 1;
param->reg_AC1 = 0x33302825;
param->reg_AC2 = 0xCC009617 | trap_AC2;
param->reg_DQSIC = 0x000000E2;
param->reg_MRS = 0x04001600 | trap_MRS;
param->reg_EMRS = 0x00000000;
param->reg_IOZ = 0x00000023;
param->reg_DRV = 0x000000FA;
param->reg_DQIDLY = 0x00000089;
param->reg_FREQ = 0x000050C0;
param->madj_max = 96;
param->dll2_finetune_step = 4;
switch (param->dram_chipid) {
default:
case AST_DRAM_512Mx16:
case AST_DRAM_1Gx16:
param->reg_AC2 = 0xCC009617 | trap_AC2;
break;
case AST_DRAM_2Gx16:
param->reg_AC2 = 0xCC009622 | trap_AC2;
break;
case AST_DRAM_4Gx16:
param->reg_AC2 = 0xCC00963F | trap_AC2;
break;
}
break;
case 456:
ast_moutdwm(ast, 0x1E6E2020, 0x0230);
param->wodt = 0;
param->reg_AC1 = 0x33302926;
param->reg_AC2 = 0xCD44961A;
param->reg_DQSIC = 0x000000FC;
param->reg_MRS = 0x00081830;
param->reg_EMRS = 0x00000000;
param->reg_IOZ = 0x00000045;
param->reg_DQIDLY = 0x00000097;
param->reg_FREQ = 0x000052C0;
param->madj_max = 88;
param->dll2_finetune_step = 4;
break;
case 504:
ast_moutdwm(ast, 0x1E6E2020, 0x0270);
param->wodt = 1;
param->reg_AC1 = 0x33302926;
param->reg_AC2 = 0xDE44A61D;
param->reg_DQSIC = 0x00000117;
param->reg_MRS = 0x00081A30;
param->reg_EMRS = 0x00000000;
param->reg_IOZ = 0x070000BB;
param->reg_DQIDLY = 0x000000A0;
param->reg_FREQ = 0x000054C0;
param->madj_max = 79;
param->dll2_finetune_step = 4;
break;
case 528:
ast_moutdwm(ast, 0x1E6E2020, 0x0290);
param->wodt = 1;
param->rodt = 1;
param->reg_AC1 = 0x33302926;
param->reg_AC2 = 0xEF44B61E;
param->reg_DQSIC = 0x00000125;
param->reg_MRS = 0x00081A30;
param->reg_EMRS = 0x00000040;
param->reg_DRV = 0x000000F5;
param->reg_IOZ = 0x00000023;
param->reg_DQIDLY = 0x00000088;
param->reg_FREQ = 0x000055C0;
param->madj_max = 76;
param->dll2_finetune_step = 3;
break;
case 576:
ast_moutdwm(ast, 0x1E6E2020, 0x0140);
param->reg_MADJ = 0x00136868;
param->reg_SADJ = 0x00004534;
param->wodt = 1;
param->rodt = 1;
param->reg_AC1 = 0x33302A37;
param->reg_AC2 = 0xEF56B61E;
param->reg_DQSIC = 0x0000013F;
param->reg_MRS = 0x00101A50;
param->reg_EMRS = 0x00000040;
param->reg_DRV = 0x000000FA;
param->reg_IOZ = 0x00000023;
param->reg_DQIDLY = 0x00000078;
param->reg_FREQ = 0x000057C0;
param->madj_max = 136;
param->dll2_finetune_step = 3;
break;
case 600:
ast_moutdwm(ast, 0x1E6E2020, 0x02E1);
param->reg_MADJ = 0x00136868;
param->reg_SADJ = 0x00004534;
param->wodt = 1;
param->rodt = 1;
param->reg_AC1 = 0x32302A37;
param->reg_AC2 = 0xDF56B61F;
param->reg_DQSIC = 0x0000014D;
param->reg_MRS = 0x00101A50;
param->reg_EMRS = 0x00000004;
param->reg_DRV = 0x000000F5;
param->reg_IOZ = 0x00000023;
param->reg_DQIDLY = 0x00000078;
param->reg_FREQ = 0x000058C0;
param->madj_max = 132;
param->dll2_finetune_step = 3;
break;
case 624:
ast_moutdwm(ast, 0x1E6E2020, 0x0160);
param->reg_MADJ = 0x00136868;
param->reg_SADJ = 0x00004534;
param->wodt = 1;
param->rodt = 1;
param->reg_AC1 = 0x32302A37;
param->reg_AC2 = 0xEF56B621;
param->reg_DQSIC = 0x0000015A;
param->reg_MRS = 0x02101A50;
param->reg_EMRS = 0x00000004;
param->reg_DRV = 0x000000F5;
param->reg_IOZ = 0x00000034;
param->reg_DQIDLY = 0x00000078;
param->reg_FREQ = 0x000059C0;
param->madj_max = 128;
param->dll2_finetune_step = 3;
break;
} /* switch freq */
switch (param->dram_chipid) {
case AST_DRAM_512Mx16:
param->dram_config = 0x130;
break;
default:
case AST_DRAM_1Gx16:
param->dram_config = 0x131;
break;
case AST_DRAM_2Gx16:
param->dram_config = 0x132;
break;
case AST_DRAM_4Gx16:
param->dram_config = 0x133;
break;
} /* switch size */
switch (param->vram_size) {
default:
case AST_VIDMEM_SIZE_8M:
param->dram_config |= 0x00;
break;
case AST_VIDMEM_SIZE_16M:
param->dram_config |= 0x04;
break;
case AST_VIDMEM_SIZE_32M:
param->dram_config |= 0x08;
break;
case AST_VIDMEM_SIZE_64M:
param->dram_config |= 0x0c;
break;
}
}
static void ddr3_init(struct ast_device *ast, struct ast2300_dram_param *param)
{
u32 data, data2, retry = 0;
ddr3_init_start:
ast_moutdwm(ast, 0x1E6E0000, 0xFC600309);
ast_moutdwm(ast, 0x1E6E0018, 0x00000100);
ast_moutdwm(ast, 0x1E6E0024, 0x00000000);
ast_moutdwm(ast, 0x1E6E0034, 0x00000000);
udelay(10);
ast_moutdwm(ast, 0x1E6E0064, param->reg_MADJ);
ast_moutdwm(ast, 0x1E6E0068, param->reg_SADJ);
udelay(10);
ast_moutdwm(ast, 0x1E6E0064, param->reg_MADJ | 0xC0000);
udelay(10);
ast_moutdwm(ast, 0x1E6E0004, param->dram_config);
ast_moutdwm(ast, 0x1E6E0008, 0x90040f);
ast_moutdwm(ast, 0x1E6E0010, param->reg_AC1);
ast_moutdwm(ast, 0x1E6E0014, param->reg_AC2);
ast_moutdwm(ast, 0x1E6E0020, param->reg_DQSIC);
ast_moutdwm(ast, 0x1E6E0080, 0x00000000);
ast_moutdwm(ast, 0x1E6E0084, 0x00000000);
ast_moutdwm(ast, 0x1E6E0088, param->reg_DQIDLY);
ast_moutdwm(ast, 0x1E6E0018, 0x4000A170);
ast_moutdwm(ast, 0x1E6E0018, 0x00002370);
ast_moutdwm(ast, 0x1E6E0038, 0x00000000);
ast_moutdwm(ast, 0x1E6E0040, 0xFF444444);
ast_moutdwm(ast, 0x1E6E0044, 0x22222222);
ast_moutdwm(ast, 0x1E6E0048, 0x22222222);
ast_moutdwm(ast, 0x1E6E004C, 0x00000002);
ast_moutdwm(ast, 0x1E6E0050, 0x80000000);
ast_moutdwm(ast, 0x1E6E0050, 0x00000000);
ast_moutdwm(ast, 0x1E6E0054, 0);
ast_moutdwm(ast, 0x1E6E0060, param->reg_DRV);
ast_moutdwm(ast, 0x1E6E006C, param->reg_IOZ);
ast_moutdwm(ast, 0x1E6E0070, 0x00000000);
ast_moutdwm(ast, 0x1E6E0074, 0x00000000);
ast_moutdwm(ast, 0x1E6E0078, 0x00000000);
ast_moutdwm(ast, 0x1E6E007C, 0x00000000);
/* Wait for MCLK2X to lock to MCLK */
do {
data = ast_mindwm(ast, 0x1E6E001C);
} while (!(data & 0x08000000));
data = ast_mindwm(ast, 0x1E6E001C);
data = (data >> 8) & 0xff;
while ((data & 0x08) || ((data & 0x7) < 2) || (data < 4)) {
data2 = (ast_mindwm(ast, 0x1E6E0064) & 0xfff3ffff) + 4;
if ((data2 & 0xff) > param->madj_max) {
break;
}
ast_moutdwm(ast, 0x1E6E0064, data2);
if (data2 & 0x00100000) {
data2 = ((data2 & 0xff) >> 3) + 3;
} else {
data2 = ((data2 & 0xff) >> 2) + 5;
}
data = ast_mindwm(ast, 0x1E6E0068) & 0xffff00ff;
data2 += data & 0xff;
data = data | (data2 << 8);
ast_moutdwm(ast, 0x1E6E0068, data);
udelay(10);
ast_moutdwm(ast, 0x1E6E0064, ast_mindwm(ast, 0x1E6E0064) | 0xC0000);
udelay(10);
data = ast_mindwm(ast, 0x1E6E0018) & 0xfffff1ff;
ast_moutdwm(ast, 0x1E6E0018, data);
data = data | 0x200;
ast_moutdwm(ast, 0x1E6E0018, data);
do {
data = ast_mindwm(ast, 0x1E6E001C);
} while (!(data & 0x08000000));
data = ast_mindwm(ast, 0x1E6E001C);
data = (data >> 8) & 0xff;
}
ast_moutdwm(ast, 0x1E720058, ast_mindwm(ast, 0x1E6E0068) & 0xffff);
data = ast_mindwm(ast, 0x1E6E0018) | 0xC00;
ast_moutdwm(ast, 0x1E6E0018, data);
ast_moutdwm(ast, 0x1E6E0034, 0x00000001);
ast_moutdwm(ast, 0x1E6E000C, 0x00000040);
udelay(50);
/* Mode Register Setting */
ast_moutdwm(ast, 0x1E6E002C, param->reg_MRS | 0x100);
ast_moutdwm(ast, 0x1E6E0030, param->reg_EMRS);
ast_moutdwm(ast, 0x1E6E0028, 0x00000005);
ast_moutdwm(ast, 0x1E6E0028, 0x00000007);
ast_moutdwm(ast, 0x1E6E0028, 0x00000003);
ast_moutdwm(ast, 0x1E6E0028, 0x00000001);
ast_moutdwm(ast, 0x1E6E002C, param->reg_MRS);
ast_moutdwm(ast, 0x1E6E000C, 0x00005C08);
ast_moutdwm(ast, 0x1E6E0028, 0x00000001);
ast_moutdwm(ast, 0x1E6E000C, 0x00005C01);
data = 0;
if (param->wodt) {
data = 0x300;
}
if (param->rodt) {
data = data | 0x3000 | ((param->reg_AC2 & 0x60000) >> 3);
}
ast_moutdwm(ast, 0x1E6E0034, data | 0x3);
/* Calibrate the DQSI delay */
if ((cbr_dll2(ast, param) == false) && (retry++ < 10))
goto ddr3_init_start;
ast_moutdwm(ast, 0x1E6E0120, param->reg_FREQ);
/* ECC Memory Initialization */
#ifdef ECC
ast_moutdwm(ast, 0x1E6E007C, 0x00000000);
ast_moutdwm(ast, 0x1E6E0070, 0x221);
do {
data = ast_mindwm(ast, 0x1E6E0070);
} while (!(data & 0x00001000));
ast_moutdwm(ast, 0x1E6E0070, 0x00000000);
ast_moutdwm(ast, 0x1E6E0050, 0x80000000);
ast_moutdwm(ast, 0x1E6E0050, 0x00000000);
#endif
}
static void get_ddr2_info(struct ast_device *ast, struct ast2300_dram_param *param)
{
u32 trap, trap_AC2, trap_MRS;
ast_moutdwm(ast, 0x1E6E2000, 0x1688A8A8);
/* Get trap info */
trap = (ast_mindwm(ast, 0x1E6E2070) >> 25) & 0x3;
trap_AC2 = (trap << 20) | (trap << 16);
trap_AC2 += 0x00110000;
trap_MRS = 0x00000040 | (trap << 4);
param->reg_MADJ = 0x00034C4C;
param->reg_SADJ = 0x00001800;
param->reg_DRV = 0x000000F0;
param->reg_PERIOD = param->dram_freq;
param->rodt = 0;
switch (param->dram_freq) {
case 264:
ast_moutdwm(ast, 0x1E6E2020, 0x0130);
param->wodt = 0;
param->reg_AC1 = 0x11101513;
param->reg_AC2 = 0x78117011;
param->reg_DQSIC = 0x00000092;
param->reg_MRS = 0x00000842;
param->reg_EMRS = 0x00000000;
param->reg_DRV = 0x000000F0;
param->reg_IOZ = 0x00000034;
param->reg_DQIDLY = 0x0000005A;
param->reg_FREQ = 0x00004AC0;
param->madj_max = 138;
param->dll2_finetune_step = 3;
break;
case 336:
ast_moutdwm(ast, 0x1E6E2020, 0x0190);
param->wodt = 1;
param->reg_AC1 = 0x22202613;
param->reg_AC2 = 0xAA009016 | trap_AC2;
param->reg_DQSIC = 0x000000BA;
param->reg_MRS = 0x00000A02 | trap_MRS;
param->reg_EMRS = 0x00000040;
param->reg_DRV = 0x000000FA;
param->reg_IOZ = 0x00000034;
param->reg_DQIDLY = 0x00000074;
param->reg_FREQ = 0x00004DC0;
param->madj_max = 96;
param->dll2_finetune_step = 3;
switch (param->dram_chipid) {
default:
case AST_DRAM_512Mx16:
param->reg_AC2 = 0xAA009012 | trap_AC2;
break;
case AST_DRAM_1Gx16:
param->reg_AC2 = 0xAA009016 | trap_AC2;
break;
case AST_DRAM_2Gx16:
param->reg_AC2 = 0xAA009023 | trap_AC2;
break;
case AST_DRAM_4Gx16:
param->reg_AC2 = 0xAA00903B | trap_AC2;
break;
}
break;
default:
case 396:
ast_moutdwm(ast, 0x1E6E2020, 0x03F1);
param->wodt = 1;
param->rodt = 0;
param->reg_AC1 = 0x33302714;
param->reg_AC2 = 0xCC00B01B | trap_AC2;
param->reg_DQSIC = 0x000000E2;
param->reg_MRS = 0x00000C02 | trap_MRS;
param->reg_EMRS = 0x00000040;
param->reg_DRV = 0x000000FA;
param->reg_IOZ = 0x00000034;
param->reg_DQIDLY = 0x00000089;
param->reg_FREQ = 0x00005040;
param->madj_max = 96;
param->dll2_finetune_step = 4;
switch (param->dram_chipid) {
case AST_DRAM_512Mx16:
param->reg_AC2 = 0xCC00B016 | trap_AC2;
break;
default:
case AST_DRAM_1Gx16:
param->reg_AC2 = 0xCC00B01B | trap_AC2;
break;
case AST_DRAM_2Gx16:
param->reg_AC2 = 0xCC00B02B | trap_AC2;
break;
case AST_DRAM_4Gx16:
param->reg_AC2 = 0xCC00B03F | trap_AC2;
break;
}
break;
case 408:
ast_moutdwm(ast, 0x1E6E2020, 0x01F0);
param->wodt = 1;
param->rodt = 0;
param->reg_AC1 = 0x33302714;
param->reg_AC2 = 0xCC00B01B | trap_AC2;
param->reg_DQSIC = 0x000000E2;
param->reg_MRS = 0x00000C02 | trap_MRS;
param->reg_EMRS = 0x00000040;
param->reg_DRV = 0x000000FA;
param->reg_IOZ = 0x00000034;
param->reg_DQIDLY = 0x00000089;
param->reg_FREQ = 0x000050C0;
param->madj_max = 96;
param->dll2_finetune_step = 4;
switch (param->dram_chipid) {
case AST_DRAM_512Mx16:
param->reg_AC2 = 0xCC00B016 | trap_AC2;
break;
default:
case AST_DRAM_1Gx16:
param->reg_AC2 = 0xCC00B01B | trap_AC2;
break;
case AST_DRAM_2Gx16:
param->reg_AC2 = 0xCC00B02B | trap_AC2;
break;
case AST_DRAM_4Gx16:
param->reg_AC2 = 0xCC00B03F | trap_AC2;
break;
}
break;
case 456:
ast_moutdwm(ast, 0x1E6E2020, 0x0230);
param->wodt = 0;
param->reg_AC1 = 0x33302815;
param->reg_AC2 = 0xCD44B01E;
param->reg_DQSIC = 0x000000FC;
param->reg_MRS = 0x00000E72;
param->reg_EMRS = 0x00000000;
param->reg_DRV = 0x00000000;
param->reg_IOZ = 0x00000034;
param->reg_DQIDLY = 0x00000097;
param->reg_FREQ = 0x000052C0;
param->madj_max = 88;
param->dll2_finetune_step = 3;
break;
case 504:
ast_moutdwm(ast, 0x1E6E2020, 0x0261);
param->wodt = 1;
param->rodt = 1;
param->reg_AC1 = 0x33302815;
param->reg_AC2 = 0xDE44C022;
param->reg_DQSIC = 0x00000117;
param->reg_MRS = 0x00000E72;
param->reg_EMRS = 0x00000040;
param->reg_DRV = 0x0000000A;
param->reg_IOZ = 0x00000045;
param->reg_DQIDLY = 0x000000A0;
param->reg_FREQ = 0x000054C0;
param->madj_max = 79;
param->dll2_finetune_step = 3;
break;
case 528:
ast_moutdwm(ast, 0x1E6E2020, 0x0120);
param->wodt = 1;
param->rodt = 1;
param->reg_AC1 = 0x33302815;
param->reg_AC2 = 0xEF44D024;
param->reg_DQSIC = 0x00000125;
param->reg_MRS = 0x00000E72;
param->reg_EMRS = 0x00000004;
param->reg_DRV = 0x000000F9;
param->reg_IOZ = 0x00000045;
param->reg_DQIDLY = 0x000000A7;
param->reg_FREQ = 0x000055C0;
param->madj_max = 76;
param->dll2_finetune_step = 3;
break;
case 552:
ast_moutdwm(ast, 0x1E6E2020, 0x02A1);
param->wodt = 1;
param->rodt = 1;
param->reg_AC1 = 0x43402915;
param->reg_AC2 = 0xFF44E025;
param->reg_DQSIC = 0x00000132;
param->reg_MRS = 0x00000E72;
param->reg_EMRS = 0x00000040;
param->reg_DRV = 0x0000000A;
param->reg_IOZ = 0x00000045;
param->reg_DQIDLY = 0x000000AD;
param->reg_FREQ = 0x000056C0;
param->madj_max = 76;
param->dll2_finetune_step = 3;
break;
case 576:
ast_moutdwm(ast, 0x1E6E2020, 0x0140);
param->wodt = 1;
param->rodt = 1;
param->reg_AC1 = 0x43402915;
param->reg_AC2 = 0xFF44E027;
param->reg_DQSIC = 0x0000013F;
param->reg_MRS = 0x00000E72;
param->reg_EMRS = 0x00000004;
param->reg_DRV = 0x000000F5;
param->reg_IOZ = 0x00000045;
param->reg_DQIDLY = 0x000000B3;
param->reg_FREQ = 0x000057C0;
param->madj_max = 76;
param->dll2_finetune_step = 3;
break;
}
switch (param->dram_chipid) {
case AST_DRAM_512Mx16:
param->dram_config = 0x100;
break;
default:
case AST_DRAM_1Gx16:
param->dram_config = 0x121;
break;
case AST_DRAM_2Gx16:
param->dram_config = 0x122;
break;
case AST_DRAM_4Gx16:
param->dram_config = 0x123;
break;
} /* switch size */
switch (param->vram_size) {
default:
case AST_VIDMEM_SIZE_8M:
param->dram_config |= 0x00;
break;
case AST_VIDMEM_SIZE_16M:
param->dram_config |= 0x04;
break;
case AST_VIDMEM_SIZE_32M:
param->dram_config |= 0x08;
break;
case AST_VIDMEM_SIZE_64M:
param->dram_config |= 0x0c;
break;
}
}
static void ddr2_init(struct ast_device *ast, struct ast2300_dram_param *param)
{
u32 data, data2, retry = 0;
ddr2_init_start:
ast_moutdwm(ast, 0x1E6E0000, 0xFC600309);
ast_moutdwm(ast, 0x1E6E0018, 0x00000100);
ast_moutdwm(ast, 0x1E6E0024, 0x00000000);
ast_moutdwm(ast, 0x1E6E0064, param->reg_MADJ);
ast_moutdwm(ast, 0x1E6E0068, param->reg_SADJ);
udelay(10);
ast_moutdwm(ast, 0x1E6E0064, param->reg_MADJ | 0xC0000);
udelay(10);
ast_moutdwm(ast, 0x1E6E0004, param->dram_config);
ast_moutdwm(ast, 0x1E6E0008, 0x90040f);
ast_moutdwm(ast, 0x1E6E0010, param->reg_AC1);
ast_moutdwm(ast, 0x1E6E0014, param->reg_AC2);
ast_moutdwm(ast, 0x1E6E0020, param->reg_DQSIC);
ast_moutdwm(ast, 0x1E6E0080, 0x00000000);
ast_moutdwm(ast, 0x1E6E0084, 0x00000000);
ast_moutdwm(ast, 0x1E6E0088, param->reg_DQIDLY);
ast_moutdwm(ast, 0x1E6E0018, 0x4000A130);
ast_moutdwm(ast, 0x1E6E0018, 0x00002330);
ast_moutdwm(ast, 0x1E6E0038, 0x00000000);
ast_moutdwm(ast, 0x1E6E0040, 0xFF808000);
ast_moutdwm(ast, 0x1E6E0044, 0x88848466);
ast_moutdwm(ast, 0x1E6E0048, 0x44440008);
ast_moutdwm(ast, 0x1E6E004C, 0x00000000);
ast_moutdwm(ast, 0x1E6E0050, 0x80000000);
ast_moutdwm(ast, 0x1E6E0050, 0x00000000);
ast_moutdwm(ast, 0x1E6E0054, 0);
ast_moutdwm(ast, 0x1E6E0060, param->reg_DRV);
ast_moutdwm(ast, 0x1E6E006C, param->reg_IOZ);
ast_moutdwm(ast, 0x1E6E0070, 0x00000000);
ast_moutdwm(ast, 0x1E6E0074, 0x00000000);
ast_moutdwm(ast, 0x1E6E0078, 0x00000000);
ast_moutdwm(ast, 0x1E6E007C, 0x00000000);
/* Wait for MCLK2X to lock to MCLK */
do {
data = ast_mindwm(ast, 0x1E6E001C);
} while (!(data & 0x08000000));
data = ast_mindwm(ast, 0x1E6E001C);
data = (data >> 8) & 0xff;
while ((data & 0x08) || ((data & 0x7) < 2) || (data < 4)) {
data2 = (ast_mindwm(ast, 0x1E6E0064) & 0xfff3ffff) + 4;
if ((data2 & 0xff) > param->madj_max) {
break;
}
ast_moutdwm(ast, 0x1E6E0064, data2);
if (data2 & 0x00100000) {
data2 = ((data2 & 0xff) >> 3) + 3;
} else {
data2 = ((data2 & 0xff) >> 2) + 5;
}
data = ast_mindwm(ast, 0x1E6E0068) & 0xffff00ff;
data2 += data & 0xff;
data = data | (data2 << 8);
ast_moutdwm(ast, 0x1E6E0068, data);
udelay(10);
ast_moutdwm(ast, 0x1E6E0064, ast_mindwm(ast, 0x1E6E0064) | 0xC0000);
udelay(10);
data = ast_mindwm(ast, 0x1E6E0018) & 0xfffff1ff;
ast_moutdwm(ast, 0x1E6E0018, data);
data = data | 0x200;
ast_moutdwm(ast, 0x1E6E0018, data);
do {
data = ast_mindwm(ast, 0x1E6E001C);
} while (!(data & 0x08000000));
data = ast_mindwm(ast, 0x1E6E001C);
data = (data >> 8) & 0xff;
}
ast_moutdwm(ast, 0x1E720058, ast_mindwm(ast, 0x1E6E0008) & 0xffff);
data = ast_mindwm(ast, 0x1E6E0018) | 0xC00;
ast_moutdwm(ast, 0x1E6E0018, data);
ast_moutdwm(ast, 0x1E6E0034, 0x00000001);
ast_moutdwm(ast, 0x1E6E000C, 0x00000000);
udelay(50);
/* Mode Register Setting */
ast_moutdwm(ast, 0x1E6E002C, param->reg_MRS | 0x100);
ast_moutdwm(ast, 0x1E6E0030, param->reg_EMRS);
ast_moutdwm(ast, 0x1E6E0028, 0x00000005);
ast_moutdwm(ast, 0x1E6E0028, 0x00000007);
ast_moutdwm(ast, 0x1E6E0028, 0x00000003);
ast_moutdwm(ast, 0x1E6E0028, 0x00000001);
ast_moutdwm(ast, 0x1E6E000C, 0x00005C08);
ast_moutdwm(ast, 0x1E6E002C, param->reg_MRS);
ast_moutdwm(ast, 0x1E6E0028, 0x00000001);
ast_moutdwm(ast, 0x1E6E0030, param->reg_EMRS | 0x380);
ast_moutdwm(ast, 0x1E6E0028, 0x00000003);
ast_moutdwm(ast, 0x1E6E0030, param->reg_EMRS);
ast_moutdwm(ast, 0x1E6E0028, 0x00000003);
ast_moutdwm(ast, 0x1E6E000C, 0x7FFF5C01);
data = 0;
if (param->wodt) {
data = 0x500;
}
if (param->rodt) {
data = data | 0x3000 | ((param->reg_AC2 & 0x60000) >> 3);
}
ast_moutdwm(ast, 0x1E6E0034, data | 0x3);
ast_moutdwm(ast, 0x1E6E0120, param->reg_FREQ);
/* Calibrate the DQSI delay */
if ((cbr_dll2(ast, param) == false) && (retry++ < 10))
goto ddr2_init_start;
/* ECC Memory Initialization */
#ifdef ECC
ast_moutdwm(ast, 0x1E6E007C, 0x00000000);
ast_moutdwm(ast, 0x1E6E0070, 0x221);
do {
data = ast_mindwm(ast, 0x1E6E0070);
} while (!(data & 0x00001000));
ast_moutdwm(ast, 0x1E6E0070, 0x00000000);
ast_moutdwm(ast, 0x1E6E0050, 0x80000000);
ast_moutdwm(ast, 0x1E6E0050, 0x00000000);
#endif
}
static void ast_post_chip_2300(struct drm_device *dev)
{
struct ast_device *ast = to_ast_device(dev);
struct ast2300_dram_param param;
u32 temp;
u8 reg;
reg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff);
if ((reg & 0x80) == 0) {/* vga only */
ast_write32(ast, 0xf004, 0x1e6e0000);
ast_write32(ast, 0xf000, 0x1);
ast_write32(ast, 0x12000, 0x1688a8a8);
do {
;
} while (ast_read32(ast, 0x12000) != 0x1);
ast_write32(ast, 0x10000, 0xfc600309);
do {
;
} while (ast_read32(ast, 0x10000) != 0x1);
/* Slow down CPU/AHB CLK in VGA only mode */
temp = ast_read32(ast, 0x12008);
temp |= 0x73;
ast_write32(ast, 0x12008, temp);
param.dram_freq = 396;
param.dram_type = AST_DDR3;
temp = ast_mindwm(ast, 0x1e6e2070);
if (temp & 0x01000000)
param.dram_type = AST_DDR2;
switch (temp & 0x18000000) {
case 0:
param.dram_chipid = AST_DRAM_512Mx16;
break;
default:
case 0x08000000:
param.dram_chipid = AST_DRAM_1Gx16;
break;
case 0x10000000:
param.dram_chipid = AST_DRAM_2Gx16;
break;
case 0x18000000:
param.dram_chipid = AST_DRAM_4Gx16;
break;
}
switch (temp & 0x0c) {
default:
case 0x00:
param.vram_size = AST_VIDMEM_SIZE_8M;
break;
case 0x04:
param.vram_size = AST_VIDMEM_SIZE_16M;
break;
case 0x08:
param.vram_size = AST_VIDMEM_SIZE_32M;
break;
case 0x0c:
param.vram_size = AST_VIDMEM_SIZE_64M;
break;
}
if (param.dram_type == AST_DDR3) {
get_ddr3_info(ast, &param);
ddr3_init(ast, &param);
} else {
get_ddr2_info(ast, &param);
ddr2_init(ast, &param);
}
temp = ast_mindwm(ast, 0x1e6e2040);
ast_moutdwm(ast, 0x1e6e2040, temp | 0x40);
}
/* wait ready */
do {
reg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff);
} while ((reg & 0x40) == 0);
}
static bool cbr_test_2500(struct ast_device *ast)
{
ast_moutdwm(ast, 0x1E6E0074, 0x0000FFFF);
ast_moutdwm(ast, 0x1E6E007C, 0xFF00FF00);
if (!mmc_test_burst(ast, 0))
return false;
if (!mmc_test_single_2500(ast, 0))
return false;
return true;
}
static bool ddr_test_2500(struct ast_device *ast)
{
ast_moutdwm(ast, 0x1E6E0074, 0x0000FFFF);
ast_moutdwm(ast, 0x1E6E007C, 0xFF00FF00);
if (!mmc_test_burst(ast, 0))
return false;
if (!mmc_test_burst(ast, 1))
return false;
if (!mmc_test_burst(ast, 2))
return false;
if (!mmc_test_burst(ast, 3))
return false;
if (!mmc_test_single_2500(ast, 0))
return false;
return true;
}
static void ddr_init_common_2500(struct ast_device *ast)
{
ast_moutdwm(ast, 0x1E6E0034, 0x00020080);
ast_moutdwm(ast, 0x1E6E0008, 0x2003000F);
ast_moutdwm(ast, 0x1E6E0038, 0x00000FFF);
ast_moutdwm(ast, 0x1E6E0040, 0x88448844);
ast_moutdwm(ast, 0x1E6E0044, 0x24422288);
ast_moutdwm(ast, 0x1E6E0048, 0x22222222);
ast_moutdwm(ast, 0x1E6E004C, 0x22222222);
ast_moutdwm(ast, 0x1E6E0050, 0x80000000);
ast_moutdwm(ast, 0x1E6E0208, 0x00000000);
ast_moutdwm(ast, 0x1E6E0218, 0x00000000);
ast_moutdwm(ast, 0x1E6E0220, 0x00000000);
ast_moutdwm(ast, 0x1E6E0228, 0x00000000);
ast_moutdwm(ast, 0x1E6E0230, 0x00000000);
ast_moutdwm(ast, 0x1E6E02A8, 0x00000000);
ast_moutdwm(ast, 0x1E6E02B0, 0x00000000);
ast_moutdwm(ast, 0x1E6E0240, 0x86000000);
ast_moutdwm(ast, 0x1E6E0244, 0x00008600);
ast_moutdwm(ast, 0x1E6E0248, 0x80000000);
ast_moutdwm(ast, 0x1E6E024C, 0x80808080);
}
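/*
 * ddr_phy_init_2500() starts the DDR PHY/DFI initialization via MCR60 and
 * polls until it completes; if the PHY reports an error in MCR300 the
 * sequence is restarted.
 */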
static void ddr_phy_init_2500(struct ast_device *ast)
{
u32 data, pass, timecnt;
pass = 0;
ast_moutdwm(ast, 0x1E6E0060, 0x00000005);
while (!pass) {
for (timecnt = 0; timecnt < TIMEOUT; timecnt++) {
data = ast_mindwm(ast, 0x1E6E0060) & 0x1;
if (!data)
break;
}
if (timecnt != TIMEOUT) {
data = ast_mindwm(ast, 0x1E6E0300) & 0x000A0000;
if (!data)
pass = 1;
}
if (!pass) {
ast_moutdwm(ast, 0x1E6E0060, 0x00000000);
udelay(10); /* delay 10 us */
ast_moutdwm(ast, 0x1E6E0060, 0x00000005);
}
}
ast_moutdwm(ast, 0x1E6E0060, 0x00000006);
}
/*
* Check DRAM Size
* 1Gb : 0x80000000 ~ 0x87FFFFFF
* 2Gb : 0x80000000 ~ 0x8FFFFFFF
* 4Gb : 0x80000000 ~ 0x9FFFFFFF
* 8Gb : 0x80000000 ~ 0xBFFFFFFF
*/
static void check_dram_size_2500(struct ast_device *ast, u32 tRFC)
{
u32 reg_04, reg_14;
reg_04 = ast_mindwm(ast, 0x1E6E0004) & 0xfffffffc;
reg_14 = ast_mindwm(ast, 0x1E6E0014) & 0xffffff00;
ast_moutdwm(ast, 0xA0100000, 0x41424344);
ast_moutdwm(ast, 0x90100000, 0x35363738);
ast_moutdwm(ast, 0x88100000, 0x292A2B2C);
ast_moutdwm(ast, 0x80100000, 0x1D1E1F10);
/* Check 8Gbit */
if (ast_mindwm(ast, 0xA0100000) == 0x41424344) {
reg_04 |= 0x03;
reg_14 |= (tRFC >> 24) & 0xFF;
/* Check 4Gbit */
} else if (ast_mindwm(ast, 0x90100000) == 0x35363738) {
reg_04 |= 0x02;
reg_14 |= (tRFC >> 16) & 0xFF;
/* Check 2Gbit */
} else if (ast_mindwm(ast, 0x88100000) == 0x292A2B2C) {
reg_04 |= 0x01;
reg_14 |= (tRFC >> 8) & 0xFF;
} else {
reg_14 |= tRFC & 0xFF;
}
ast_moutdwm(ast, 0x1E6E0004, reg_04);
ast_moutdwm(ast, 0x1E6E0014, reg_14);
}
static void enable_cache_2500(struct ast_device *ast)
{
u32 reg_04, data;
reg_04 = ast_mindwm(ast, 0x1E6E0004);
ast_moutdwm(ast, 0x1E6E0004, reg_04 | 0x1000);
do
data = ast_mindwm(ast, 0x1E6E0004);
while (!(data & 0x80000));
ast_moutdwm(ast, 0x1E6E0004, reg_04 | 0x400);
}
static void set_mpll_2500(struct ast_device *ast)
{
u32 addr, data, param;
/* Reset MMC */
ast_moutdwm(ast, 0x1E6E0000, 0xFC600309);
ast_moutdwm(ast, 0x1E6E0034, 0x00020080);
for (addr = 0x1e6e0004; addr < 0x1e6e0090;) {
ast_moutdwm(ast, addr, 0x0);
addr += 4;
}
ast_moutdwm(ast, 0x1E6E0034, 0x00020000);
ast_moutdwm(ast, 0x1E6E2000, 0x1688A8A8);
data = ast_mindwm(ast, 0x1E6E2070) & 0x00800000;
if (data) {
/* CLKIN = 25MHz */
param = 0x930023E0;
ast_moutdwm(ast, 0x1E6E2160, 0x00011320);
} else {
/* CLKIN = 24MHz */
param = 0x93002400;
}
ast_moutdwm(ast, 0x1E6E2020, param);
udelay(100);
}
static void reset_mmc_2500(struct ast_device *ast)
{
ast_moutdwm(ast, 0x1E78505C, 0x00000004);
ast_moutdwm(ast, 0x1E785044, 0x00000001);
ast_moutdwm(ast, 0x1E785048, 0x00004755);
ast_moutdwm(ast, 0x1E78504C, 0x00000013);
mdelay(100);
ast_moutdwm(ast, 0x1E785054, 0x00000077);
ast_moutdwm(ast, 0x1E6E0000, 0xFC600309);
}
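/*
 * ddr3_init_2500() and ddr4_init_2500() program the AST2500 memory
 * controller and DDR PHY from the timing tables in ast_dram_tables.h, run
 * the PHY initialization, probe the installed DRAM size and enable the
 * controller cache. The DDR4 path additionally trains the PHY and DRAM
 * Vref settings by sweeping them against the CBR test.
 */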
static void ddr3_init_2500(struct ast_device *ast, const u32 *ddr_table)
{
ast_moutdwm(ast, 0x1E6E0004, 0x00000303);
ast_moutdwm(ast, 0x1E6E0010, ddr_table[REGIDX_010]);
ast_moutdwm(ast, 0x1E6E0014, ddr_table[REGIDX_014]);
ast_moutdwm(ast, 0x1E6E0018, ddr_table[REGIDX_018]);
ast_moutdwm(ast, 0x1E6E0020, ddr_table[REGIDX_020]); /* MODEREG4/6 */
ast_moutdwm(ast, 0x1E6E0024, ddr_table[REGIDX_024]); /* MODEREG5 */
ast_moutdwm(ast, 0x1E6E002C, ddr_table[REGIDX_02C] | 0x100); /* MODEREG0/2 */
ast_moutdwm(ast, 0x1E6E0030, ddr_table[REGIDX_030]); /* MODEREG1/3 */
/* DDR PHY Setting */
ast_moutdwm(ast, 0x1E6E0200, 0x02492AAE);
ast_moutdwm(ast, 0x1E6E0204, 0x00001001);
ast_moutdwm(ast, 0x1E6E020C, 0x55E00B0B);
ast_moutdwm(ast, 0x1E6E0210, 0x20000000);
ast_moutdwm(ast, 0x1E6E0214, ddr_table[REGIDX_214]);
ast_moutdwm(ast, 0x1E6E02E0, ddr_table[REGIDX_2E0]);
ast_moutdwm(ast, 0x1E6E02E4, ddr_table[REGIDX_2E4]);
ast_moutdwm(ast, 0x1E6E02E8, ddr_table[REGIDX_2E8]);
ast_moutdwm(ast, 0x1E6E02EC, ddr_table[REGIDX_2EC]);
ast_moutdwm(ast, 0x1E6E02F0, ddr_table[REGIDX_2F0]);
ast_moutdwm(ast, 0x1E6E02F4, ddr_table[REGIDX_2F4]);
ast_moutdwm(ast, 0x1E6E02F8, ddr_table[REGIDX_2F8]);
ast_moutdwm(ast, 0x1E6E0290, 0x00100008);
ast_moutdwm(ast, 0x1E6E02C0, 0x00000006);
/* Controller Setting */
ast_moutdwm(ast, 0x1E6E0034, 0x00020091);
/* Wait DDR PHY init done */
ddr_phy_init_2500(ast);
ast_moutdwm(ast, 0x1E6E0120, ddr_table[REGIDX_PLL]);
ast_moutdwm(ast, 0x1E6E000C, 0x42AA5C81);
ast_moutdwm(ast, 0x1E6E0034, 0x0001AF93);
check_dram_size_2500(ast, ddr_table[REGIDX_RFC]);
enable_cache_2500(ast);
ast_moutdwm(ast, 0x1E6E001C, 0x00000008);
ast_moutdwm(ast, 0x1E6E0038, 0xFFFFFF00);
}
static void ddr4_init_2500(struct ast_device *ast, const u32 *ddr_table)
{
u32 data, data2, pass, retrycnt;
u32 ddr_vref, phy_vref;
u32 min_ddr_vref = 0, min_phy_vref = 0;
u32 max_ddr_vref = 0, max_phy_vref = 0;
ast_moutdwm(ast, 0x1E6E0004, 0x00000313);
ast_moutdwm(ast, 0x1E6E0010, ddr_table[REGIDX_010]);
ast_moutdwm(ast, 0x1E6E0014, ddr_table[REGIDX_014]);
ast_moutdwm(ast, 0x1E6E0018, ddr_table[REGIDX_018]);
ast_moutdwm(ast, 0x1E6E0020, ddr_table[REGIDX_020]); /* MODEREG4/6 */
ast_moutdwm(ast, 0x1E6E0024, ddr_table[REGIDX_024]); /* MODEREG5 */
ast_moutdwm(ast, 0x1E6E002C, ddr_table[REGIDX_02C] | 0x100); /* MODEREG0/2 */
ast_moutdwm(ast, 0x1E6E0030, ddr_table[REGIDX_030]); /* MODEREG1/3 */
/* DDR PHY Setting */
ast_moutdwm(ast, 0x1E6E0200, 0x42492AAE);
ast_moutdwm(ast, 0x1E6E0204, 0x09002000);
ast_moutdwm(ast, 0x1E6E020C, 0x55E00B0B);
ast_moutdwm(ast, 0x1E6E0210, 0x20000000);
ast_moutdwm(ast, 0x1E6E0214, ddr_table[REGIDX_214]);
ast_moutdwm(ast, 0x1E6E02E0, ddr_table[REGIDX_2E0]);
ast_moutdwm(ast, 0x1E6E02E4, ddr_table[REGIDX_2E4]);
ast_moutdwm(ast, 0x1E6E02E8, ddr_table[REGIDX_2E8]);
ast_moutdwm(ast, 0x1E6E02EC, ddr_table[REGIDX_2EC]);
ast_moutdwm(ast, 0x1E6E02F0, ddr_table[REGIDX_2F0]);
ast_moutdwm(ast, 0x1E6E02F4, ddr_table[REGIDX_2F4]);
ast_moutdwm(ast, 0x1E6E02F8, ddr_table[REGIDX_2F8]);
ast_moutdwm(ast, 0x1E6E0290, 0x00100008);
ast_moutdwm(ast, 0x1E6E02C4, 0x3C183C3C);
ast_moutdwm(ast, 0x1E6E02C8, 0x00631E0E);
/* Controller Setting */
ast_moutdwm(ast, 0x1E6E0034, 0x0001A991);
/* Train PHY Vref first */
pass = 0;
for (retrycnt = 0; retrycnt < 4 && pass == 0; retrycnt++) {
max_phy_vref = 0x0;
pass = 0;
ast_moutdwm(ast, 0x1E6E02C0, 0x00001C06);
for (phy_vref = 0x40; phy_vref < 0x80; phy_vref++) {
ast_moutdwm(ast, 0x1E6E000C, 0x00000000);
ast_moutdwm(ast, 0x1E6E0060, 0x00000000);
ast_moutdwm(ast, 0x1E6E02CC, phy_vref | (phy_vref << 8));
/* Fire DFI Init */
ddr_phy_init_2500(ast);
ast_moutdwm(ast, 0x1E6E000C, 0x00005C01);
if (cbr_test_2500(ast)) {
pass++;
data = ast_mindwm(ast, 0x1E6E03D0);
data2 = data >> 8;
data = data & 0xff;
if (data > data2)
data = data2;
if (max_phy_vref < data) {
max_phy_vref = data;
min_phy_vref = phy_vref;
}
} else if (pass > 0)
break;
}
}
ast_moutdwm(ast, 0x1E6E02CC, min_phy_vref | (min_phy_vref << 8));
/* Train DDR Vref next */
pass = 0;
for (retrycnt = 0; retrycnt < 4 && pass == 0; retrycnt++) {
min_ddr_vref = 0xFF;
max_ddr_vref = 0x0;
pass = 0;
for (ddr_vref = 0x00; ddr_vref < 0x40; ddr_vref++) {
ast_moutdwm(ast, 0x1E6E000C, 0x00000000);
ast_moutdwm(ast, 0x1E6E0060, 0x00000000);
ast_moutdwm(ast, 0x1E6E02C0, 0x00000006 | (ddr_vref << 8));
/* Fire DFI Init */
ddr_phy_init_2500(ast);
ast_moutdwm(ast, 0x1E6E000C, 0x00005C01);
if (cbr_test_2500(ast)) {
pass++;
if (min_ddr_vref > ddr_vref)
min_ddr_vref = ddr_vref;
if (max_ddr_vref < ddr_vref)
max_ddr_vref = ddr_vref;
} else if (pass != 0)
break;
}
}
ast_moutdwm(ast, 0x1E6E000C, 0x00000000);
ast_moutdwm(ast, 0x1E6E0060, 0x00000000);
ddr_vref = (min_ddr_vref + max_ddr_vref + 1) >> 1;
ast_moutdwm(ast, 0x1E6E02C0, 0x00000006 | (ddr_vref << 8));
/* Wait DDR PHY init done */
ddr_phy_init_2500(ast);
ast_moutdwm(ast, 0x1E6E0120, ddr_table[REGIDX_PLL]);
ast_moutdwm(ast, 0x1E6E000C, 0x42AA5C81);
ast_moutdwm(ast, 0x1E6E0034, 0x0001AF93);
check_dram_size_2500(ast, ddr_table[REGIDX_RFC]);
enable_cache_2500(ast);
ast_moutdwm(ast, 0x1E6E001C, 0x00000008);
ast_moutdwm(ast, 0x1E6E0038, 0xFFFFFF00);
}
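/*
 * ast_dram_init_2500() repeats the MPLL setup, MMC reset and DDR3/DDR4
 * initialization until ddr_test_2500() passes, giving up after five
 * attempts.
 */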
static bool ast_dram_init_2500(struct ast_device *ast)
{
u32 data;
u32 max_tries = 5;
do {
if (max_tries-- == 0)
return false;
set_mpll_2500(ast);
reset_mmc_2500(ast);
ddr_init_common_2500(ast);
data = ast_mindwm(ast, 0x1E6E2070);
if (data & 0x01000000)
ddr4_init_2500(ast, ast2500_ddr4_1600_timing_table);
else
ddr3_init_2500(ast, ast2500_ddr3_1600_timing_table);
} while (!ddr_test_2500(ast));
ast_moutdwm(ast, 0x1E6E2040, ast_mindwm(ast, 0x1E6E2040) | 0x41);
/* Patch code */
data = ast_mindwm(ast, 0x1E6E200C) & 0xF9FFFFFF;
ast_moutdwm(ast, 0x1E6E200C, data | 0x10000000);
return true;
}
void ast_patch_ahb_2500(struct ast_device *ast)
{
u32 data;
/* Clear bus lock condition */
ast_moutdwm(ast, 0x1e600000, 0xAEED1A03);
ast_moutdwm(ast, 0x1e600084, 0x00010000);
ast_moutdwm(ast, 0x1e600088, 0x00000000);
ast_moutdwm(ast, 0x1e6e2000, 0x1688A8A8);
data = ast_mindwm(ast, 0x1e6e2070);
if (data & 0x08000000) { /* check fast reset */
/*
* If "Fast restet" is enabled for ARM-ICE debugger,
* then WDT needs to enable, that
* WDT04 is WDT#1 Reload reg.
* WDT08 is WDT#1 counter restart reg to avoid system deadlock
* WDT0C is WDT#1 control reg
* [6:5]:= 01:Full chip
* [4]:= 1:1MHz clock source
* [1]:= 1:WDT will be cleeared and disabled after timeout occurs
* [0]:= 1:WDT enable
*/
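/*
* The three writes below arm WDT#1 per the register map above:
* reload value 0x10, a counter-restart write of 0x4755, and control
* value 0x33 (0b0110011: full-chip reset, 1MHz clock source, clear
* after timeout, enable).
*/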
ast_moutdwm(ast, 0x1E785004, 0x00000010);
ast_moutdwm(ast, 0x1E785008, 0x00004755);
ast_moutdwm(ast, 0x1E78500c, 0x00000033);
udelay(1000);
}
do {
ast_moutdwm(ast, 0x1e6e2000, 0x1688A8A8);
data = ast_mindwm(ast, 0x1e6e2000);
} while (data != 1);
ast_moutdwm(ast, 0x1e6e207c, 0x08000000); /* clear fast reset */
}
void ast_post_chip_2500(struct drm_device *dev)
{
struct ast_device *ast = to_ast_device(dev);
u32 temp;
u8 reg;
reg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff);
if ((reg & AST_VRAM_INIT_STATUS_MASK) == 0) {/* vga only */
/* Clear bus lock condition */
ast_patch_ahb_2500(ast);
/* Disable watchdog */
ast_moutdwm(ast, 0x1E78502C, 0x00000000);
ast_moutdwm(ast, 0x1E78504C, 0x00000000);
/*
* Reset USB port to work around the USB unknown-device issue
* SCU90 is Multi-function Pin Control #5
* [29]:= 1:Enable USB2.0 Host port#1 (the mutually shared USB2.0 hub
* port).
* SCU94 is Multi-function Pin Control #6
* [14:13]:= 1x:USB2.0 Host2 controller
* SCU70 is Hardware Strap reg
* [23]:= 1:CLKIN is 25MHz and USBCK1 = 24/48 MHz (determined by
* [18]: 0(24)/1(48) MHz)
* SCU7C is Write clear reg to SCU70
* [23]:= write 1 and then SCU70[23] will be cleared to 0.
*/
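/*
* 0x20000000 sets SCU90[29] and 0x00004000 sets SCU94[14], matching
* the bit descriptions above.
*/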
ast_moutdwm(ast, 0x1E6E2090, 0x20000000);
ast_moutdwm(ast, 0x1E6E2094, 0x00004000);
if (ast_mindwm(ast, 0x1E6E2070) & 0x00800000) {
ast_moutdwm(ast, 0x1E6E207C, 0x00800000);
mdelay(100);
ast_moutdwm(ast, 0x1E6E2070, 0x00800000);
}
/* Modify eSPI reset pin */
temp = ast_mindwm(ast, 0x1E6E2070);
if (temp & 0x02000000)
ast_moutdwm(ast, 0x1E6E207C, 0x00004000);
/* Slow down CPU/AHB CLK in VGA only mode */
temp = ast_read32(ast, 0x12008);
temp |= 0x73;
ast_write32(ast, 0x12008, temp);
if (!ast_dram_init_2500(ast))
drm_err(dev, "DRAM init failed !\n");
temp = ast_mindwm(ast, 0x1e6e2040);
ast_moutdwm(ast, 0x1e6e2040, temp | 0x40);
}
/* wait ready */
do {
reg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff);
} while ((reg & 0x40) == 0);
}
| linux-master | drivers/gpu/drm/ast/ast_post.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2021, ASPEED Technology Inc.
// Authors: KuoHsiang Chou <[email protected]>
#include <linux/firmware.h>
#include <linux/delay.h>
#include <drm/drm_print.h>
#include "ast_drv.h"
bool ast_astdp_is_connected(struct ast_device *ast)
{
if (!ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xD1, ASTDP_MCU_FW_EXECUTING))
return false;
if (!ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xDF, ASTDP_HPD))
return false;
if (!ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xDC, ASTDP_LINK_SUCCESS))
return false;
return true;
}
int ast_astdp_read_edid(struct drm_device *dev, u8 *ediddata)
{
struct ast_device *ast = to_ast_device(dev);
u8 i = 0, j = 0;
/*
* CRD1[b5]: DP MCU FW is executing
* CRDC[b0]: DP link success
* CRDF[b0]: DP HPD
* CRE5[b0]: Host reading EDID process is done
*/
if (!(ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xD1, ASTDP_MCU_FW_EXECUTING) &&
ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xDC, ASTDP_LINK_SUCCESS) &&
ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xDF, ASTDP_HPD) &&
ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xE5,
ASTDP_HOST_EDID_READ_DONE_MASK))) {
goto err_astdp_edid_not_ready;
}
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xE5, (u8) ~ASTDP_HOST_EDID_READ_DONE_MASK,
0x00);
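/*
* Fetch the EDID as 32 chunks of 4 bytes (read back through
* CRD8..CRDB), i.e. one full 128-byte EDID 1.3 block.
*/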
for (i = 0; i < 32; i++) {
/*
* CRE4[7:0]: Read-Pointer for EDID (unit: 4 bytes); valid range: 0~64
*/
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xE4,
ASTDP_AND_CLEAR_MASK, (u8)i);
j = 0;
/*
* CRD7[b0]: valid flag for EDID
* CRD6[b0]: mirror read pointer for EDID
*/
while ((ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xD7,
ASTDP_EDID_VALID_FLAG_MASK) != 0x01) ||
(ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xD6,
ASTDP_EDID_READ_POINTER_MASK) != i)) {
/*
* Delays get longer with each retry.
* 1. The delay is typically 2 loops when the user opens "Display
* Settings" from the right-click menu.
* 2. The delays are often much longer when the system resumes from S3/S4.
*/
mdelay(j+1);
if (!(ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xD1,
ASTDP_MCU_FW_EXECUTING) &&
ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xDC,
ASTDP_LINK_SUCCESS) &&
ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xDF, ASTDP_HPD))) {
goto err_astdp_jump_out_loop_of_edid;
}
j++;
if (j > 200)
goto err_astdp_jump_out_loop_of_edid;
}
*(ediddata) = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT,
0xD8, ASTDP_EDID_READ_DATA_MASK);
*(ediddata + 1) = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xD9,
ASTDP_EDID_READ_DATA_MASK);
*(ediddata + 2) = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xDA,
ASTDP_EDID_READ_DATA_MASK);
*(ediddata + 3) = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xDB,
ASTDP_EDID_READ_DATA_MASK);
if (i == 31) {
/*
* For a 128-byte EDID 1.3 block:
* 1. Add the value of byte 126 to byte 127.
* Byte 127 is the checksum; the sum of all 128 bytes must
* equal 0 (mod 256).
* 2. Set byte 126 to 0.
* Byte 126 is the number of extension blocks to follow;
* 0 means no extensions.
*/
*(ediddata + 3) = *(ediddata + 3) + *(ediddata + 2);
*(ediddata + 2) = 0;
}
ediddata += 4;
}
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xE5, (u8) ~ASTDP_HOST_EDID_READ_DONE_MASK,
ASTDP_HOST_EDID_READ_DONE);
return 0;
err_astdp_jump_out_loop_of_edid:
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xE5,
(u8) ~ASTDP_HOST_EDID_READ_DONE_MASK,
ASTDP_HOST_EDID_READ_DONE);
return (~(j+256) + 1);
err_astdp_edid_not_ready:
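/*
* Each check below returns the two's-complement negation of the index
* of the register that was not ready, e.g. (~0xD1 + 1) == -0xD1.
*/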
if (!(ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xD1, ASTDP_MCU_FW_EXECUTING)))
return (~0xD1 + 1);
if (!(ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xDC, ASTDP_LINK_SUCCESS)))
return (~0xDC + 1);
if (!(ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xDF, ASTDP_HPD)))
return (~0xDF + 1);
if (!(ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xE5, ASTDP_HOST_EDID_READ_DONE_MASK)))
return (~0xE5 + 1);
return 0;
}
/*
* Launch Aspeed DP
*/
void ast_dp_launch(struct drm_device *dev)
{
u32 i = 0;
u8 bDPExecute = 1;
struct ast_device *ast = to_ast_device(dev);
// Wait one second then timeout.
while (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xD1, ASTDP_MCU_FW_EXECUTING) !=
ASTDP_MCU_FW_EXECUTING) {
i++;
// wait 100 ms
msleep(100);
if (i >= 10) {
// DP would not be ready.
bDPExecute = 0;
break;
}
}
if (!bDPExecute)
drm_err(dev, "Wait DPMCU executing timeout\n");
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xE5,
(u8) ~ASTDP_HOST_EDID_READ_DONE_MASK,
ASTDP_HOST_EDID_READ_DONE);
}
void ast_dp_power_on_off(struct drm_device *dev, bool on)
{
struct ast_device *ast = to_ast_device(dev);
// Read and Turn off DP PHY sleep
u8 bE3 = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xE3, AST_DP_VIDEO_ENABLE);
// Turn on DP PHY sleep
if (!on)
bE3 |= AST_DP_PHY_SLEEP;
// DP Power on/off
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xE3, (u8) ~AST_DP_PHY_SLEEP, bE3);
}
void ast_dp_set_on_off(struct drm_device *dev, bool on)
{
struct ast_device *ast = to_ast_device(dev);
u8 video_on_off = on;
// Video On/Off
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xE3, (u8) ~AST_DP_VIDEO_ENABLE, on);
// If DP plug in and link successful then check video on / off status
if (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xDC, ASTDP_LINK_SUCCESS) &&
ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xDF, ASTDP_HPD)) {
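/*
* The current video-enable state is mirrored in bit 4 of CRDF, so
* shift the requested state up by 4 before polling for it.
*/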
video_on_off <<= 4;
while (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xDF,
ASTDP_MIRROR_VIDEO_ENABLE) != video_on_off) {
// wait 1 ms
mdelay(1);
}
}
}
void ast_dp_set_mode(struct drm_crtc *crtc, struct ast_vbios_mode_info *vbios_mode)
{
struct ast_device *ast = to_ast_device(crtc->dev);
u32 ulRefreshRateIndex;
u8 ModeIdx;
ulRefreshRateIndex = vbios_mode->enh_table->refresh_rate_index - 1;
switch (crtc->mode.crtc_hdisplay) {
case 320:
ModeIdx = ASTDP_320x240_60;
break;
case 400:
ModeIdx = ASTDP_400x300_60;
break;
case 512:
ModeIdx = ASTDP_512x384_60;
break;
case 640:
ModeIdx = (ASTDP_640x480_60 + (u8) ulRefreshRateIndex);
break;
case 800:
ModeIdx = (ASTDP_800x600_56 + (u8) ulRefreshRateIndex);
break;
case 1024:
ModeIdx = (ASTDP_1024x768_60 + (u8) ulRefreshRateIndex);
break;
case 1152:
ModeIdx = ASTDP_1152x864_75;
break;
case 1280:
if (crtc->mode.crtc_vdisplay == 800)
ModeIdx = (ASTDP_1280x800_60_RB - (u8) ulRefreshRateIndex);
else // 1024
ModeIdx = (ASTDP_1280x1024_60 + (u8) ulRefreshRateIndex);
break;
case 1360:
case 1366:
ModeIdx = ASTDP_1366x768_60;
break;
case 1440:
ModeIdx = (ASTDP_1440x900_60_RB - (u8) ulRefreshRateIndex);
break;
case 1600:
if (crtc->mode.crtc_vdisplay == 900)
ModeIdx = (ASTDP_1600x900_60_RB - (u8) ulRefreshRateIndex);
else //1200
ModeIdx = ASTDP_1600x1200_60;
break;
case 1680:
ModeIdx = (ASTDP_1680x1050_60_RB - (u8) ulRefreshRateIndex);
break;
case 1920:
if (crtc->mode.crtc_vdisplay == 1080)
ModeIdx = ASTDP_1920x1080_60;
else //1200
ModeIdx = ASTDP_1920x1200_60;
break;
default:
return;
}
/*
* CRE0[7:0]: MISC0 ((0x00: 18-bpp) or (0x20: 24-bpp)
* CRE1[7:0]: MISC1 (default: 0x00)
* CRE2[7:0]: video format index (0x00 ~ 0x20 or 0x40 ~ 0x50)
*/
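/*
* Example: a 1920x1080@60 request programs CRE0 = ASTDP_MISC0_24bpp
* (24-bpp per the mapping above), CRE1 = ASTDP_MISC1 and
* CRE2 = ASTDP_1920x1080_60.
*/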
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xE0, ASTDP_AND_CLEAR_MASK,
ASTDP_MISC0_24bpp);
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xE1, ASTDP_AND_CLEAR_MASK, ASTDP_MISC1);
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xE2, ASTDP_AND_CLEAR_MASK, ModeIdx);
}
| linux-master | drivers/gpu/drm/ast/ast_dp.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
*/
/*
* Authors: Dave Airlie <[email protected]>
*/
#include <linux/pci.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem.h>
#include <drm/drm_managed.h>
#include "ast_drv.h"
static bool ast_is_vga_enabled(struct drm_device *dev)
{
struct ast_device *ast = to_ast_device(dev);
u8 ch;
ch = ast_io_read8(ast, AST_IO_VGA_ENABLE_PORT);
return !!(ch & 0x01);
}
static void ast_enable_vga(struct drm_device *dev)
{
struct ast_device *ast = to_ast_device(dev);
ast_io_write8(ast, AST_IO_VGA_ENABLE_PORT, 0x01);
ast_io_write8(ast, AST_IO_MISC_PORT_WRITE, 0x01);
}
/*
* Run this function as part of the HW device cleanup; not
* when the DRM device gets released.
*/
static void ast_enable_mmio_release(void *data)
{
struct ast_device *ast = data;
/* enable standard VGA decode */
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa1, 0x04);
}
static int ast_enable_mmio(struct ast_device *ast)
{
struct drm_device *dev = &ast->base;
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa1, 0x06);
return devm_add_action_or_reset(dev->dev, ast_enable_mmio_release, ast);
}
static void ast_open_key(struct ast_device *ast)
{
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x80, 0xA8);
}
static int ast_device_config_init(struct ast_device *ast)
{
struct drm_device *dev = &ast->base;
struct pci_dev *pdev = to_pci_dev(dev->dev);
struct device_node *np = dev->dev->of_node;
uint32_t scu_rev = 0xffffffff;
u32 data;
u8 jregd0, jregd1;
/*
* Find configuration mode and read SCU revision
*/
ast->config_mode = ast_use_defaults;
/* Check if we have device-tree properties */
if (np && !of_property_read_u32(np, "aspeed,scu-revision-id", &data)) {
/* We do, disable P2A access */
ast->config_mode = ast_use_dt;
scu_rev = data;
} else if (pdev->device == PCI_CHIP_AST2000) { // Not all families have a P2A bridge
/*
* The BMC will set SCU 0x40 D[12] to 1 if the P2A bridge
* is disabled. We force using P2A if the VGA-only mode bit
* D[7] is set.
*/
jregd0 = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff);
jregd1 = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd1, 0xff);
if (!(jregd0 & 0x80) || !(jregd1 & 0x10)) {
/*
* We have a P2A bridge and it is enabled.
*/
/* Patch AST2500/AST2510 */
if ((pdev->revision & 0xf0) == 0x40) {
if (!(jregd0 & AST_VRAM_INIT_STATUS_MASK))
ast_patch_ahb_2500(ast);
}
/* Double check that it's actually working */
data = ast_read32(ast, 0xf004);
if ((data != 0xffffffff) && (data != 0x00)) {
ast->config_mode = ast_use_p2a;
/* Read SCU7c (silicon revision register) */
ast_write32(ast, 0xf004, 0x1e6e0000);
ast_write32(ast, 0xf000, 0x1);
scu_rev = ast_read32(ast, 0x1207c);
}
}
}
switch (ast->config_mode) {
case ast_use_defaults:
drm_info(dev, "Using default configuration\n");
break;
case ast_use_dt:
drm_info(dev, "Using device-tree for configuration\n");
break;
case ast_use_p2a:
drm_info(dev, "Using P2A bridge for configuration\n");
break;
}
/*
* Identify chipset
*/
if (pdev->revision >= 0x50) {
ast->chip = AST2600;
drm_info(dev, "AST 2600 detected\n");
} else if (pdev->revision >= 0x40) {
switch (scu_rev & 0x300) {
case 0x0100:
ast->chip = AST2510;
drm_info(dev, "AST 2510 detected\n");
break;
default:
ast->chip = AST2500;
drm_info(dev, "AST 2500 detected\n");
}
} else if (pdev->revision >= 0x30) {
switch (scu_rev & 0x300) {
case 0x0100:
ast->chip = AST1400;
drm_info(dev, "AST 1400 detected\n");
break;
default:
ast->chip = AST2400;
drm_info(dev, "AST 2400 detected\n");
}
} else if (pdev->revision >= 0x20) {
switch (scu_rev & 0x300) {
case 0x0000:
ast->chip = AST1300;
drm_info(dev, "AST 1300 detected\n");
break;
default:
ast->chip = AST2300;
drm_info(dev, "AST 2300 detected\n");
break;
}
} else if (pdev->revision >= 0x10) {
switch (scu_rev & 0x0300) {
case 0x0200:
ast->chip = AST1100;
drm_info(dev, "AST 1100 detected\n");
break;
case 0x0100:
ast->chip = AST2200;
drm_info(dev, "AST 2200 detected\n");
break;
case 0x0000:
ast->chip = AST2150;
drm_info(dev, "AST 2150 detected\n");
break;
default:
ast->chip = AST2100;
drm_info(dev, "AST 2100 detected\n");
break;
}
} else {
ast->chip = AST2000;
drm_info(dev, "AST 2000 detected\n");
}
return 0;
}
static void ast_detect_widescreen(struct ast_device *ast)
{
u8 jreg;
/* Check if we support wide screen */
switch (AST_GEN(ast)) {
case 1:
ast->support_wide_screen = false;
break;
default:
jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff);
if (!(jreg & 0x80))
ast->support_wide_screen = true;
else if (jreg & 0x01)
ast->support_wide_screen = true;
else {
ast->support_wide_screen = false;
if (ast->chip == AST1300)
ast->support_wide_screen = true;
if (ast->chip == AST1400)
ast->support_wide_screen = true;
if (ast->chip == AST2510)
ast->support_wide_screen = true;
if (IS_AST_GEN7(ast))
ast->support_wide_screen = true;
}
break;
}
}
static void ast_detect_tx_chip(struct ast_device *ast, bool need_post)
{
struct drm_device *dev = &ast->base;
u8 jreg;
/* Check 3rd Tx option (digital output afaik) */
ast->tx_chip_types |= AST_TX_NONE_BIT;
/*
* VGACRA3 is the Enhanced Color Mode Register; check whether DVO is
* already enabled and, in that case, assume we have a SIL164 TMDS
* transmitter.
*
* Don't make that assumption if the chip wasn't enabled and
* is at power-on reset, otherwise we'll incorrectly "detect" a
* SIL164 when there is none.
*/
if (!need_post) {
jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa3, 0xff);
if (jreg & 0x80)
ast->tx_chip_types = AST_TX_SIL164_BIT;
}
if (IS_AST_GEN4(ast) || IS_AST_GEN5(ast) || IS_AST_GEN6(ast)) {
/*
* On AST GEN4+, look at the configuration set by the SoC in
* the SOC scratch register #1 bits 11:8 (interestingly marked
* as "reserved" in the spec)
*/
jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd1, 0xff);
switch (jreg) {
case 0x04:
ast->tx_chip_types = AST_TX_SIL164_BIT;
break;
case 0x08:
ast->dp501_fw_addr = drmm_kzalloc(dev, 32*1024, GFP_KERNEL);
if (ast->dp501_fw_addr) {
/* backup firmware */
if (ast_backup_fw(dev, ast->dp501_fw_addr, 32*1024)) {
drmm_kfree(dev, ast->dp501_fw_addr);
ast->dp501_fw_addr = NULL;
}
}
fallthrough;
case 0x0c:
ast->tx_chip_types = AST_TX_DP501_BIT;
}
} else if (IS_AST_GEN7(ast)) {
if (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xD1, TX_TYPE_MASK) ==
ASTDP_DPMCU_TX) {
ast->tx_chip_types = AST_TX_ASTDP_BIT;
ast_dp_launch(&ast->base);
}
}
/* Print stuff for diagnostic purposes */
if (ast->tx_chip_types & AST_TX_NONE_BIT)
drm_info(dev, "Using analog VGA\n");
if (ast->tx_chip_types & AST_TX_SIL164_BIT)
drm_info(dev, "Using Sil164 TMDS transmitter\n");
if (ast->tx_chip_types & AST_TX_DP501_BIT)
drm_info(dev, "Using DP501 DisplayPort transmitter\n");
if (ast->tx_chip_types & AST_TX_ASTDP_BIT)
drm_info(dev, "Using ASPEED DisplayPort transmitter\n");
}
static int ast_get_dram_info(struct drm_device *dev)
{
struct device_node *np = dev->dev->of_node;
struct ast_device *ast = to_ast_device(dev);
uint32_t mcr_cfg, mcr_scu_mpll, mcr_scu_strap;
uint32_t denum, num, div, ref_pll, dsel;
switch (ast->config_mode) {
case ast_use_dt:
/*
* If some properties are missing, use reasonable
* defaults for GEN5
*/
if (of_property_read_u32(np, "aspeed,mcr-configuration",
&mcr_cfg))
mcr_cfg = 0x00000577;
if (of_property_read_u32(np, "aspeed,mcr-scu-mpll",
&mcr_scu_mpll))
mcr_scu_mpll = 0x000050C0;
if (of_property_read_u32(np, "aspeed,mcr-scu-strap",
&mcr_scu_strap))
mcr_scu_strap = 0;
break;
case ast_use_p2a:
ast_write32(ast, 0xf004, 0x1e6e0000);
ast_write32(ast, 0xf000, 0x1);
mcr_cfg = ast_read32(ast, 0x10004);
mcr_scu_mpll = ast_read32(ast, 0x10120);
mcr_scu_strap = ast_read32(ast, 0x10170);
break;
case ast_use_defaults:
default:
ast->dram_bus_width = 16;
ast->dram_type = AST_DRAM_1Gx16;
if (IS_AST_GEN6(ast))
ast->mclk = 800;
else
ast->mclk = 396;
return 0;
}
if (mcr_cfg & 0x40)
ast->dram_bus_width = 16;
else
ast->dram_bus_width = 32;
if (IS_AST_GEN6(ast)) {
switch (mcr_cfg & 0x03) {
case 0:
ast->dram_type = AST_DRAM_1Gx16;
break;
default:
case 1:
ast->dram_type = AST_DRAM_2Gx16;
break;
case 2:
ast->dram_type = AST_DRAM_4Gx16;
break;
case 3:
ast->dram_type = AST_DRAM_8Gx16;
break;
}
} else if (IS_AST_GEN4(ast) || IS_AST_GEN5(ast)) {
switch (mcr_cfg & 0x03) {
case 0:
ast->dram_type = AST_DRAM_512Mx16;
break;
default:
case 1:
ast->dram_type = AST_DRAM_1Gx16;
break;
case 2:
ast->dram_type = AST_DRAM_2Gx16;
break;
case 3:
ast->dram_type = AST_DRAM_4Gx16;
break;
}
} else {
switch (mcr_cfg & 0x0c) {
case 0:
case 4:
ast->dram_type = AST_DRAM_512Mx16;
break;
case 8:
if (mcr_cfg & 0x40)
ast->dram_type = AST_DRAM_1Gx16;
else
ast->dram_type = AST_DRAM_512Mx32;
break;
case 0xc:
ast->dram_type = AST_DRAM_1Gx32;
break;
}
}
if (mcr_scu_strap & 0x2000)
ref_pll = 14318;
else
ref_pll = 12000;
denum = mcr_scu_mpll & 0x1f;
num = (mcr_scu_mpll & 0x3fe0) >> 5;
dsel = (mcr_scu_mpll & 0xc000) >> 14;
switch (dsel) {
case 3:
div = 0x4;
break;
case 2:
case 1:
div = 0x2;
break;
default:
div = 0x1;
break;
}
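/*
* Illustrative example (register values are made up, not from real
* hardware): with a 12 MHz reference (ref_pll = 12000), num = 130,
* denum = 1 and div = 1, mclk = 12000 * 132 / (3 * 1000) = 528 MHz.
*/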
ast->mclk = ref_pll * (num + 2) / ((denum + 2) * (div * 1000));
return 0;
}
struct ast_device *ast_device_create(const struct drm_driver *drv,
struct pci_dev *pdev,
unsigned long flags)
{
struct drm_device *dev;
struct ast_device *ast;
bool need_post = false;
int ret = 0;
ast = devm_drm_dev_alloc(&pdev->dev, drv, struct ast_device, base);
if (IS_ERR(ast))
return ast;
dev = &ast->base;
pci_set_drvdata(pdev, dev);
ret = drmm_mutex_init(dev, &ast->ioregs_lock);
if (ret)
return ERR_PTR(ret);
ast->regs = pcim_iomap(pdev, 1, 0);
if (!ast->regs)
return ERR_PTR(-EIO);
/*
* From the AST2500 onwards, MMIO is enabled by default and should be
* used to stay compatible with Arm.
*/
if (pdev->revision >= 0x40) {
ast->ioregs = ast->regs + AST_IO_MM_OFFSET;
} else if (!(pci_resource_flags(pdev, 2) & IORESOURCE_IO)) {
drm_info(dev, "platform has no IO space, trying MMIO\n");
ast->ioregs = ast->regs + AST_IO_MM_OFFSET;
}
/* "map" IO regs if the above hasn't done so already */
if (!ast->ioregs) {
ast->ioregs = pcim_iomap(pdev, 2, 0);
if (!ast->ioregs)
return ERR_PTR(-EIO);
}
if (!ast_is_vga_enabled(dev)) {
drm_info(dev, "VGA not enabled on entry, requesting chip POST\n");
need_post = true;
}
/*
* If VGA isn't enabled, we need to enable now or subsequent
* access to the scratch registers will fail.
*/
if (need_post)
ast_enable_vga(dev);
/* Enable extended register access */
ast_open_key(ast);
ret = ast_enable_mmio(ast);
if (ret)
return ERR_PTR(ret);
ret = ast_device_config_init(ast);
if (ret)
return ERR_PTR(ret);
ast_detect_widescreen(ast);
ast_detect_tx_chip(ast, need_post);
ret = ast_get_dram_info(dev);
if (ret)
return ERR_PTR(ret);
drm_info(dev, "dram MCLK=%u Mhz type=%d bus_width=%d\n",
ast->mclk, ast->dram_type, ast->dram_bus_width);
if (need_post)
ast_post_gpu(dev);
ret = ast_mm_init(ast);
if (ret)
return ERR_PTR(ret);
/* map reserved buffer */
ast->dp501_fw_buf = NULL;
if (ast->vram_size < pci_resource_len(pdev, 0)) {
ast->dp501_fw_buf = pci_iomap_range(pdev, 0, ast->vram_size, 0);
if (!ast->dp501_fw_buf)
drm_info(dev, "failed to map reserved buffer!\n");
}
ret = ast_mode_config_init(ast);
if (ret)
return ERR_PTR(ret);
return ast;
}
| linux-master | drivers/gpu/drm/ast/ast_main.c |
// SPDX-License-Identifier: GPL-2.0+
// Copyright 2018 IBM Corporation
#include <drm/drm_atomic_helper.h>
#include <drm/drm_connector.h>
#include <drm/drm_edid.h>
#include <drm/drm_probe_helper.h>
#include "aspeed_gfx.h"
static int aspeed_gfx_get_modes(struct drm_connector *connector)
{
return drm_add_modes_noedid(connector, 800, 600);
}
static const struct
drm_connector_helper_funcs aspeed_gfx_connector_helper_funcs = {
.get_modes = aspeed_gfx_get_modes,
};
static const struct drm_connector_funcs aspeed_gfx_connector_funcs = {
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = drm_connector_cleanup,
.reset = drm_atomic_helper_connector_reset,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
int aspeed_gfx_create_output(struct drm_device *drm)
{
struct aspeed_gfx *priv = to_aspeed_gfx(drm);
int ret;
priv->connector.dpms = DRM_MODE_DPMS_OFF;
priv->connector.polled = 0;
drm_connector_helper_add(&priv->connector,
&aspeed_gfx_connector_helper_funcs);
ret = drm_connector_init(drm, &priv->connector,
&aspeed_gfx_connector_funcs,
DRM_MODE_CONNECTOR_Unknown);
return ret;
}
| linux-master | drivers/gpu/drm/aspeed/aspeed_gfx_out.c |
// SPDX-License-Identifier: GPL-2.0+
// Copyright 2018 IBM Corporation
#include <linux/clk.h>
#include <linux/reset.h>
#include <linux/regmap.h>
#include <drm/drm_device.h>
#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_panel.h>
#include <drm/drm_simple_kms_helper.h>
#include <drm/drm_vblank.h>
#include "aspeed_gfx.h"
static struct aspeed_gfx *
drm_pipe_to_aspeed_gfx(struct drm_simple_display_pipe *pipe)
{
return container_of(pipe, struct aspeed_gfx, pipe);
}
static int aspeed_gfx_set_pixel_fmt(struct aspeed_gfx *priv, u32 *bpp)
{
struct drm_crtc *crtc = &priv->pipe.crtc;
struct drm_device *drm = crtc->dev;
const u32 format = crtc->primary->state->fb->format->format;
u32 ctrl1;
ctrl1 = readl(priv->base + CRT_CTRL1);
ctrl1 &= ~CRT_CTRL_COLOR_MASK;
switch (format) {
case DRM_FORMAT_RGB565:
dev_dbg(drm->dev, "Setting up RGB565 mode\n");
ctrl1 |= CRT_CTRL_COLOR_RGB565;
*bpp = 16;
break;
case DRM_FORMAT_XRGB8888:
dev_dbg(drm->dev, "Setting up XRGB8888 mode\n");
ctrl1 |= CRT_CTRL_COLOR_XRGB8888;
*bpp = 32;
break;
default:
dev_err(drm->dev, "Unhandled pixel format %08x\n", format);
return -EINVAL;
}
writel(ctrl1, priv->base + CRT_CTRL1);
return 0;
}
static void aspeed_gfx_enable_controller(struct aspeed_gfx *priv)
{
u32 ctrl1 = readl(priv->base + CRT_CTRL1);
u32 ctrl2 = readl(priv->base + CRT_CTRL2);
/* Set DAC source for display output to Graphics CRT (GFX) */
regmap_update_bits(priv->scu, priv->dac_reg, BIT(16), BIT(16));
writel(ctrl1 | CRT_CTRL_EN, priv->base + CRT_CTRL1);
writel(ctrl2 | CRT_CTRL_DAC_EN, priv->base + CRT_CTRL2);
}
static void aspeed_gfx_disable_controller(struct aspeed_gfx *priv)
{
u32 ctrl1 = readl(priv->base + CRT_CTRL1);
u32 ctrl2 = readl(priv->base + CRT_CTRL2);
writel(ctrl1 & ~CRT_CTRL_EN, priv->base + CRT_CTRL1);
writel(ctrl2 & ~CRT_CTRL_DAC_EN, priv->base + CRT_CTRL2);
regmap_update_bits(priv->scu, priv->dac_reg, BIT(16), 0);
}
static void aspeed_gfx_crtc_mode_set_nofb(struct aspeed_gfx *priv)
{
struct drm_display_mode *m = &priv->pipe.crtc.state->adjusted_mode;
u32 ctrl1, d_offset, t_count, bpp;
int err;
err = aspeed_gfx_set_pixel_fmt(priv, &bpp);
if (err)
return;
#if 0
/* TODO: we have only been able to test with the 40MHz USB clock. The
* clock is fixed, so we cannot adjust it here. */
clk_set_rate(priv->pixel_clk, m->crtc_clock * 1000);
#endif
ctrl1 = readl(priv->base + CRT_CTRL1);
ctrl1 &= ~(CRT_CTRL_INTERLACED |
CRT_CTRL_HSYNC_NEGATIVE |
CRT_CTRL_VSYNC_NEGATIVE);
if (m->flags & DRM_MODE_FLAG_INTERLACE)
ctrl1 |= CRT_CTRL_INTERLACED;
if (!(m->flags & DRM_MODE_FLAG_PHSYNC))
ctrl1 |= CRT_CTRL_HSYNC_NEGATIVE;
if (!(m->flags & DRM_MODE_FLAG_PVSYNC))
ctrl1 |= CRT_CTRL_VSYNC_NEGATIVE;
writel(ctrl1, priv->base + CRT_CTRL1);
/* Horizontal timing */
writel(CRT_H_TOTAL(m->htotal - 1) | CRT_H_DE(m->hdisplay - 1),
priv->base + CRT_HORIZ0);
writel(CRT_H_RS_START(m->hsync_start - 1) | CRT_H_RS_END(m->hsync_end),
priv->base + CRT_HORIZ1);
/* Vertical timing */
writel(CRT_V_TOTAL(m->vtotal - 1) | CRT_V_DE(m->vdisplay - 1),
priv->base + CRT_VERT0);
writel(CRT_V_RS_START(m->vsync_start) | CRT_V_RS_END(m->vsync_end),
priv->base + CRT_VERT1);
/*
* Display Offset: address difference between consecutive scan lines
* Terminal Count: memory size of one scan line
*/
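/*
* Example: an 800-pixel-wide XRGB8888 scanline gives
* d_offset = 800 * 32 / 8 = 3200 bytes and, with scan_line_max = 128,
* t_count = DIV_ROUND_UP(800 * 32, 128) = 200.
*/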
d_offset = m->hdisplay * bpp / 8;
t_count = DIV_ROUND_UP(m->hdisplay * bpp, priv->scan_line_max);
writel(CRT_DISP_OFFSET(d_offset) | CRT_TERM_COUNT(t_count),
priv->base + CRT_OFFSET);
/*
* Threshold: FIFO thresholds of refill and stop (16 byte chunks
* per line, rounded up)
*/
writel(priv->throd_val, priv->base + CRT_THROD);
}
static void aspeed_gfx_pipe_enable(struct drm_simple_display_pipe *pipe,
struct drm_crtc_state *crtc_state,
struct drm_plane_state *plane_state)
{
struct aspeed_gfx *priv = drm_pipe_to_aspeed_gfx(pipe);
struct drm_crtc *crtc = &pipe->crtc;
aspeed_gfx_crtc_mode_set_nofb(priv);
aspeed_gfx_enable_controller(priv);
drm_crtc_vblank_on(crtc);
}
static void aspeed_gfx_pipe_disable(struct drm_simple_display_pipe *pipe)
{
struct aspeed_gfx *priv = drm_pipe_to_aspeed_gfx(pipe);
struct drm_crtc *crtc = &pipe->crtc;
drm_crtc_vblank_off(crtc);
aspeed_gfx_disable_controller(priv);
}
static void aspeed_gfx_pipe_update(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *plane_state)
{
struct aspeed_gfx *priv = drm_pipe_to_aspeed_gfx(pipe);
struct drm_crtc *crtc = &pipe->crtc;
struct drm_framebuffer *fb = pipe->plane.state->fb;
struct drm_pending_vblank_event *event;
struct drm_gem_dma_object *gem;
spin_lock_irq(&crtc->dev->event_lock);
event = crtc->state->event;
if (event) {
crtc->state->event = NULL;
if (drm_crtc_vblank_get(crtc) == 0)
drm_crtc_arm_vblank_event(crtc, event);
else
drm_crtc_send_vblank_event(crtc, event);
}
spin_unlock_irq(&crtc->dev->event_lock);
if (!fb)
return;
gem = drm_fb_dma_get_gem_obj(fb, 0);
if (!gem)
return;
writel(gem->dma_addr, priv->base + CRT_ADDR);
}
static int aspeed_gfx_enable_vblank(struct drm_simple_display_pipe *pipe)
{
struct aspeed_gfx *priv = drm_pipe_to_aspeed_gfx(pipe);
u32 reg = readl(priv->base + CRT_CTRL1);
/* Clear pending VBLANK IRQ */
writel(reg | CRT_CTRL_VERTICAL_INTR_STS, priv->base + CRT_CTRL1);
reg |= CRT_CTRL_VERTICAL_INTR_EN;
writel(reg, priv->base + CRT_CTRL1);
return 0;
}
static void aspeed_gfx_disable_vblank(struct drm_simple_display_pipe *pipe)
{
struct aspeed_gfx *priv = drm_pipe_to_aspeed_gfx(pipe);
u32 reg = readl(priv->base + CRT_CTRL1);
reg &= ~CRT_CTRL_VERTICAL_INTR_EN;
writel(reg, priv->base + CRT_CTRL1);
/* Clear pending VBLANK IRQ */
writel(reg | CRT_CTRL_VERTICAL_INTR_STS, priv->base + CRT_CTRL1);
}
static const struct drm_simple_display_pipe_funcs aspeed_gfx_funcs = {
.enable = aspeed_gfx_pipe_enable,
.disable = aspeed_gfx_pipe_disable,
.update = aspeed_gfx_pipe_update,
.enable_vblank = aspeed_gfx_enable_vblank,
.disable_vblank = aspeed_gfx_disable_vblank,
};
static const uint32_t aspeed_gfx_formats[] = {
DRM_FORMAT_XRGB8888,
DRM_FORMAT_RGB565,
};
int aspeed_gfx_create_pipe(struct drm_device *drm)
{
struct aspeed_gfx *priv = to_aspeed_gfx(drm);
return drm_simple_display_pipe_init(drm, &priv->pipe, &aspeed_gfx_funcs,
aspeed_gfx_formats,
ARRAY_SIZE(aspeed_gfx_formats),
NULL,
&priv->connector);
}
| linux-master | drivers/gpu/drm/aspeed/aspeed_gfx_crtc.c |
// SPDX-License-Identifier: GPL-2.0+
// Copyright 2018 IBM Corporation
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/irq.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/reset.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_device.h>
#include <drm/drm_fbdev_dma.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_module.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
#include <drm/drm_vblank.h>
#include <drm/drm_drv.h>
#include "aspeed_gfx.h"
/**
* DOC: ASPEED GFX Driver
*
* This driver is for the ASPEED BMC SoC's 'GFX' display hardware, also called
* the 'SOC Display Controller' in the datasheet. This driver runs on the ARM
* based BMC systems, unlike the ast driver which runs on a host CPU and is for
* a PCIe graphics device.
*
* The AST2500 supports a total of 3 output paths:
*
* 1. VGA output, which can be routed to the DAC and/or the DVO
* interface.
*
* 2. Graphics CRT output, which can be routed to the DAC and/or the
* DVO interface.
*
* 3. Video input from DVO, which can be used for video-engine capture
* or DAC display output.
*
* Output options are selected in SCU2C.
*
* The "VGA mode" device is the PCI attached controller. The "Graphics CRT"
* is the ARM's internal display controller.
*
* The driver only supports a simple configuration consisting of a 40MHz
* pixel clock, fixed by hardware limitations, and the VGA output path.
*
* The driver was written with the 'AST2500 Software Programming Guide' v17,
* which is available under NDA from ASPEED.
*/
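/*
* A minimal, illustrative devicetree fragment for this driver; the node
* address, clock/reset/interrupt specifiers and phandle names below are
* placeholders, not taken from a real board file:
*
*	gfx: display@1e6e6000 {
*		compatible = "aspeed,ast2500-gfx", "syscon";
*		reg = <0x1e6e6000 0x1000>;
*		clocks = <&syscon ...>;
*		resets = <&syscon ...>;
*		interrupts = <...>;
*		memory-region = <&gfx_memory>;
*		syscon = <&syscon>;
*	};
*/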
struct aspeed_gfx_config {
u32 dac_reg; /* DAC register in SCU */
u32 int_clear_reg; /* Interrupt clear register */
u32 vga_scratch_reg; /* VGA scratch register in SCU */
u32 throd_val; /* Default threshold setting */
u32 scan_line_max; /* Max memory size of one scan line */
};
static const struct aspeed_gfx_config ast2400_config = {
.dac_reg = 0x2c,
.int_clear_reg = 0x60,
.vga_scratch_reg = 0x50,
.throd_val = CRT_THROD_LOW(0x1e) | CRT_THROD_HIGH(0x12),
.scan_line_max = 64,
};
static const struct aspeed_gfx_config ast2500_config = {
.dac_reg = 0x2c,
.int_clear_reg = 0x60,
.vga_scratch_reg = 0x50,
.throd_val = CRT_THROD_LOW(0x24) | CRT_THROD_HIGH(0x3c),
.scan_line_max = 128,
};
static const struct aspeed_gfx_config ast2600_config = {
.dac_reg = 0xc0,
.int_clear_reg = 0x68,
.vga_scratch_reg = 0x50,
.throd_val = CRT_THROD_LOW(0x50) | CRT_THROD_HIGH(0x70),
.scan_line_max = 128,
};
static const struct of_device_id aspeed_gfx_match[] = {
{ .compatible = "aspeed,ast2400-gfx", .data = &ast2400_config },
{ .compatible = "aspeed,ast2500-gfx", .data = &ast2500_config },
{ .compatible = "aspeed,ast2600-gfx", .data = &ast2600_config },
{ },
};
MODULE_DEVICE_TABLE(of, aspeed_gfx_match);
static const struct drm_mode_config_funcs aspeed_gfx_mode_config_funcs = {
.fb_create = drm_gem_fb_create,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
};
static int aspeed_gfx_setup_mode_config(struct drm_device *drm)
{
int ret;
ret = drmm_mode_config_init(drm);
if (ret)
return ret;
drm->mode_config.min_width = 0;
drm->mode_config.min_height = 0;
drm->mode_config.max_width = 800;
drm->mode_config.max_height = 600;
drm->mode_config.funcs = &aspeed_gfx_mode_config_funcs;
return ret;
}
static irqreturn_t aspeed_gfx_irq_handler(int irq, void *data)
{
struct drm_device *drm = data;
struct aspeed_gfx *priv = to_aspeed_gfx(drm);
u32 reg;
reg = readl(priv->base + CRT_CTRL1);
if (reg & CRT_CTRL_VERTICAL_INTR_STS) {
drm_crtc_handle_vblank(&priv->pipe.crtc);
writel(reg, priv->base + priv->int_clr_reg);
return IRQ_HANDLED;
}
return IRQ_NONE;
}
static int aspeed_gfx_load(struct drm_device *drm)
{
struct platform_device *pdev = to_platform_device(drm->dev);
struct aspeed_gfx *priv = to_aspeed_gfx(drm);
struct device_node *np = pdev->dev.of_node;
const struct aspeed_gfx_config *config;
const struct of_device_id *match;
struct resource *res;
int ret;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
priv->base = devm_ioremap_resource(drm->dev, res);
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
match = of_match_device(aspeed_gfx_match, &pdev->dev);
if (!match)
return -EINVAL;
config = match->data;
priv->dac_reg = config->dac_reg;
priv->int_clr_reg = config->int_clear_reg;
priv->vga_scratch_reg = config->vga_scratch_reg;
priv->throd_val = config->throd_val;
priv->scan_line_max = config->scan_line_max;
priv->scu = syscon_regmap_lookup_by_phandle(np, "syscon");
if (IS_ERR(priv->scu)) {
priv->scu = syscon_regmap_lookup_by_compatible("aspeed,ast2500-scu");
if (IS_ERR(priv->scu)) {
dev_err(&pdev->dev, "failed to find SCU regmap\n");
return PTR_ERR(priv->scu);
}
}
ret = of_reserved_mem_device_init(drm->dev);
if (ret) {
dev_err(&pdev->dev,
"failed to initialize reserved mem: %d\n", ret);
return ret;
}
ret = dma_set_mask_and_coherent(drm->dev, DMA_BIT_MASK(32));
if (ret) {
dev_err(&pdev->dev, "failed to set DMA mask: %d\n", ret);
return ret;
}
priv->rst = devm_reset_control_get_exclusive(&pdev->dev, NULL);
if (IS_ERR(priv->rst)) {
dev_err(&pdev->dev,
"missing or invalid reset controller device tree entry");
return PTR_ERR(priv->rst);
}
reset_control_deassert(priv->rst);
priv->clk = devm_clk_get(drm->dev, NULL);
if (IS_ERR(priv->clk)) {
dev_err(&pdev->dev,
"missing or invalid clk device tree entry");
return PTR_ERR(priv->clk);
}
clk_prepare_enable(priv->clk);
/* Sanitize control registers */
writel(0, priv->base + CRT_CTRL1);
writel(0, priv->base + CRT_CTRL2);
ret = aspeed_gfx_setup_mode_config(drm);
if (ret < 0)
return ret;
ret = drm_vblank_init(drm, 1);
if (ret < 0) {
dev_err(drm->dev, "Failed to initialise vblank\n");
return ret;
}
ret = aspeed_gfx_create_output(drm);
if (ret < 0) {
dev_err(drm->dev, "Failed to create outputs\n");
return ret;
}
ret = aspeed_gfx_create_pipe(drm);
if (ret < 0) {
dev_err(drm->dev, "Cannot setup simple display pipe\n");
return ret;
}
ret = devm_request_irq(drm->dev, platform_get_irq(pdev, 0),
aspeed_gfx_irq_handler, 0, "aspeed gfx", drm);
if (ret < 0) {
dev_err(drm->dev, "Failed to install IRQ handler\n");
return ret;
}
drm_mode_config_reset(drm);
return 0;
}
static void aspeed_gfx_unload(struct drm_device *drm)
{
drm_kms_helper_poll_fini(drm);
}
DEFINE_DRM_GEM_DMA_FOPS(fops);
static const struct drm_driver aspeed_gfx_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
DRM_GEM_DMA_DRIVER_OPS,
.fops = &fops,
.name = "aspeed-gfx-drm",
.desc = "ASPEED GFX DRM",
.date = "20180319",
.major = 1,
.minor = 0,
};
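/*
* The dac_mux attribute exposes the DAC source-select field, SCU
* bits [17:16]; userspace writes a value of 0-3, e.g. (the sysfs path
* is illustrative and platform dependent):
*   echo 3 > /sys/bus/platform/devices/<gfx-device>/dac_mux
*/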
static ssize_t dac_mux_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct aspeed_gfx *priv = dev_get_drvdata(dev);
u32 val;
int rc;
rc = kstrtou32(buf, 0, &val);
if (rc)
return rc;
if (val > 3)
return -EINVAL;
rc = regmap_update_bits(priv->scu, priv->dac_reg, 0x30000, val << 16);
if (rc < 0)
return 0;
return count;
}
static ssize_t dac_mux_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct aspeed_gfx *priv = dev_get_drvdata(dev);
u32 reg;
int rc;
rc = regmap_read(priv->scu, priv->dac_reg, ®);
if (rc)
return rc;
return sprintf(buf, "%u\n", (reg >> 16) & 0x3);
}
static DEVICE_ATTR_RW(dac_mux);
static ssize_t
vga_pw_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct aspeed_gfx *priv = dev_get_drvdata(dev);
u32 reg;
int rc;
rc = regmap_read(priv->scu, priv->vga_scratch_reg, ®);
if (rc)
return rc;
return sprintf(buf, "%u\n", reg);
}
static DEVICE_ATTR_RO(vga_pw);
static struct attribute *aspeed_sysfs_entries[] = {
&dev_attr_vga_pw.attr,
&dev_attr_dac_mux.attr,
NULL,
};
static struct attribute_group aspeed_sysfs_attr_group = {
.attrs = aspeed_sysfs_entries,
};
static int aspeed_gfx_probe(struct platform_device *pdev)
{
struct aspeed_gfx *priv;
int ret;
priv = devm_drm_dev_alloc(&pdev->dev, &aspeed_gfx_driver,
struct aspeed_gfx, drm);
if (IS_ERR(priv))
return PTR_ERR(priv);
ret = aspeed_gfx_load(&priv->drm);
if (ret)
return ret;
platform_set_drvdata(pdev, priv);
ret = sysfs_create_group(&pdev->dev.kobj, &aspeed_sysfs_attr_group);
if (ret)
return ret;
ret = drm_dev_register(&priv->drm, 0);
if (ret)
goto err_unload;
drm_fbdev_dma_setup(&priv->drm, 32);
return 0;
err_unload:
sysfs_remove_group(&pdev->dev.kobj, &aspeed_sysfs_attr_group);
aspeed_gfx_unload(&priv->drm);
return ret;
}
static void aspeed_gfx_remove(struct platform_device *pdev)
{
struct drm_device *drm = platform_get_drvdata(pdev);
sysfs_remove_group(&pdev->dev.kobj, &aspeed_sysfs_attr_group);
drm_dev_unregister(drm);
aspeed_gfx_unload(drm);
}
static struct platform_driver aspeed_gfx_platform_driver = {
.probe = aspeed_gfx_probe,
.remove_new = aspeed_gfx_remove,
.driver = {
.name = "aspeed_gfx",
.of_match_table = aspeed_gfx_match,
},
};
drm_module_platform_driver(aspeed_gfx_platform_driver);
MODULE_AUTHOR("Joel Stanley <[email protected]>");
MODULE_DESCRIPTION("ASPEED BMC DRM/KMS driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/gpu/drm/aspeed/aspeed_gfx_drv.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2011 Texas Instruments Incorporated - https://www.ti.com/
* Author: Rob Clark <[email protected]>
*/
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/sort.h>
#include <linux/sys_soc.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_bridge_connector.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_panel.h>
#include <drm/drm_prime.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
#include "omap_dmm_tiler.h"
#include "omap_drv.h"
#include "omap_fbdev.h"
#define DRIVER_NAME MODULE_NAME
#define DRIVER_DESC "OMAP DRM"
#define DRIVER_DATE "20110917"
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
#define DRIVER_PATCHLEVEL 0
/*
* mode config funcs
*/
/* Notes about mapping DSS and DRM entities:
* CRTC: overlay
* encoder: manager.. with some extension to allow one primary CRTC
* and zero or more video CRTC's to be mapped to one encoder?
* connector: dssdev.. manager can be attached/detached from different
* devices
*/
static void omap_atomic_wait_for_completion(struct drm_device *dev,
struct drm_atomic_state *old_state)
{
struct drm_crtc_state *new_crtc_state;
struct drm_crtc *crtc;
unsigned int i;
int ret;
for_each_new_crtc_in_state(old_state, crtc, new_crtc_state, i) {
if (!new_crtc_state->active)
continue;
ret = omap_crtc_wait_pending(crtc);
if (!ret)
dev_warn(dev->dev,
"atomic complete timeout (pipe %u)!\n", i);
}
}
static void omap_atomic_commit_tail(struct drm_atomic_state *old_state)
{
struct drm_device *dev = old_state->dev;
struct omap_drm_private *priv = dev->dev_private;
bool fence_cookie = dma_fence_begin_signalling();
dispc_runtime_get(priv->dispc);
/* Apply the atomic update. */
drm_atomic_helper_commit_modeset_disables(dev, old_state);
if (priv->omaprev != 0x3430) {
/* With the current dss dispc implementation we have to enable
* the new modeset before we can commit planes. The dispc ovl
* configuration relies on the video mode configuration being
* written into the HW when the ovl configuration is
* calculated.
*
* This approach is not ideal because after a mode change the
* plane update is executed only after the first vblank
* interrupt. The dispc implementation should be fixed so that
* it is able to use uncommitted drm state information.
*/
drm_atomic_helper_commit_modeset_enables(dev, old_state);
omap_atomic_wait_for_completion(dev, old_state);
drm_atomic_helper_commit_planes(dev, old_state, 0);
} else {
/*
* OMAP3 DSS seems to have issues with the work-around above,
* resulting in endless sync lost errors if a crtc is enabled without
* a plane. For now, skip the WA for OMAP3.
*/
drm_atomic_helper_commit_planes(dev, old_state, 0);
drm_atomic_helper_commit_modeset_enables(dev, old_state);
}
drm_atomic_helper_commit_hw_done(old_state);
dma_fence_end_signalling(fence_cookie);
/*
* Wait for completion of the page flips to ensure that old buffers
* can't be touched by the hardware anymore before cleaning up planes.
*/
omap_atomic_wait_for_completion(dev, old_state);
drm_atomic_helper_cleanup_planes(dev, old_state);
dispc_runtime_put(priv->dispc);
}
static int drm_atomic_state_normalized_zpos_cmp(const void *a, const void *b)
{
const struct drm_plane_state *sa = *(struct drm_plane_state **)a;
const struct drm_plane_state *sb = *(struct drm_plane_state **)b;
if (sa->normalized_zpos != sb->normalized_zpos)
return sa->normalized_zpos - sb->normalized_zpos;
else
return sa->plane->base.id - sb->plane->base.id;
}
/*
* This replaces the drm_atomic_normalize_zpos to handle the dual overlay case.
*
* Since both halves need to appear side by side, the zpos is
* recalculated when dealing with dual-overlay cases so that the other
* planes' zpos stays consistent.
*/
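/*
* Example: if the sorted plane states are P0, P1 and P2 and P1 is a
* dual-overlay plane, P0 keeps normalized zpos 0, P1 gets 1 (its second
* half implicitly occupies 2) and P2 is pushed to 3 by the 'inc' bump
* below.
*/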
static int omap_atomic_update_normalize_zpos(struct drm_device *dev,
struct drm_atomic_state *state)
{
struct drm_crtc *crtc;
struct drm_crtc_state *old_state, *new_state;
struct drm_plane *plane;
int c, i, n, inc;
int total_planes = dev->mode_config.num_total_plane;
struct drm_plane_state **states;
int ret = 0;
states = kmalloc_array(total_planes, sizeof(*states), GFP_KERNEL);
if (!states)
return -ENOMEM;
for_each_oldnew_crtc_in_state(state, crtc, old_state, new_state, c) {
if (old_state->plane_mask == new_state->plane_mask &&
!new_state->zpos_changed)
continue;
/* Reset plane increment and index value for every crtc */
n = 0;
/*
* Normalization process might create new states for planes
* which normalized_zpos has to be recalculated.
*/
drm_for_each_plane_mask(plane, dev, new_state->plane_mask) {
struct drm_plane_state *plane_state =
drm_atomic_get_plane_state(new_state->state,
plane);
if (IS_ERR(plane_state)) {
ret = PTR_ERR(plane_state);
goto done;
}
states[n++] = plane_state;
}
sort(states, n, sizeof(*states),
drm_atomic_state_normalized_zpos_cmp, NULL);
for (i = 0, inc = 0; i < n; i++) {
plane = states[i]->plane;
states[i]->normalized_zpos = i + inc;
DRM_DEBUG_ATOMIC("[PLANE:%d:%s] updated normalized zpos value %d\n",
plane->base.id, plane->name,
states[i]->normalized_zpos);
if (is_omap_plane_dual_overlay(states[i]))
inc++;
}
new_state->zpos_changed = true;
}
done:
kfree(states);
return ret;
}
static int omap_atomic_check(struct drm_device *dev,
struct drm_atomic_state *state)
{
int ret;
ret = drm_atomic_helper_check(dev, state);
if (ret)
return ret;
if (dev->mode_config.normalize_zpos) {
ret = omap_atomic_update_normalize_zpos(dev, state);
if (ret)
return ret;
}
return 0;
}
static const struct drm_mode_config_helper_funcs omap_mode_config_helper_funcs = {
.atomic_commit_tail = omap_atomic_commit_tail,
};
static const struct drm_mode_config_funcs omap_mode_config_funcs = {
.fb_create = omap_framebuffer_create,
.atomic_check = omap_atomic_check,
.atomic_commit = drm_atomic_helper_commit,
};
/* Global/shared object state funcs */
/*
* This is a helper that returns the private state currently in operation.
* Note that this would return the "old_state" if called in the atomic check
* path, and the "new_state" after the atomic swap has been done.
*/
struct omap_global_state *
omap_get_existing_global_state(struct omap_drm_private *priv)
{
return to_omap_global_state(priv->glob_obj.state);
}
/*
* This acquires the modeset lock set aside for global state and
* creates a duplicated private object state.
*/
struct omap_global_state *__must_check
omap_get_global_state(struct drm_atomic_state *s)
{
struct omap_drm_private *priv = s->dev->dev_private;
struct drm_private_state *priv_state;
priv_state = drm_atomic_get_private_obj_state(s, &priv->glob_obj);
if (IS_ERR(priv_state))
return ERR_CAST(priv_state);
return to_omap_global_state(priv_state);
}
static struct drm_private_state *
omap_global_duplicate_state(struct drm_private_obj *obj)
{
struct omap_global_state *state;
state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
if (!state)
return NULL;
__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
return &state->base;
}
static void omap_global_destroy_state(struct drm_private_obj *obj,
struct drm_private_state *state)
{
struct omap_global_state *omap_state = to_omap_global_state(state);
kfree(omap_state);
}
static const struct drm_private_state_funcs omap_global_state_funcs = {
.atomic_duplicate_state = omap_global_duplicate_state,
.atomic_destroy_state = omap_global_destroy_state,
};
static int omap_global_obj_init(struct drm_device *dev)
{
struct omap_drm_private *priv = dev->dev_private;
struct omap_global_state *state;
state = kzalloc(sizeof(*state), GFP_KERNEL);
if (!state)
return -ENOMEM;
drm_atomic_private_obj_init(dev, &priv->glob_obj, &state->base,
&omap_global_state_funcs);
return 0;
}
static void omap_global_obj_fini(struct omap_drm_private *priv)
{
drm_atomic_private_obj_fini(&priv->glob_obj);
}
static void omap_disconnect_pipelines(struct drm_device *ddev)
{
struct omap_drm_private *priv = ddev->dev_private;
unsigned int i;
for (i = 0; i < priv->num_pipes; i++) {
struct omap_drm_pipeline *pipe = &priv->pipes[i];
omapdss_device_disconnect(NULL, pipe->output);
omapdss_device_put(pipe->output);
pipe->output = NULL;
}
memset(&priv->channels, 0, sizeof(priv->channels));
priv->num_pipes = 0;
}
static int omap_connect_pipelines(struct drm_device *ddev)
{
struct omap_drm_private *priv = ddev->dev_private;
struct omap_dss_device *output = NULL;
int r;
for_each_dss_output(output) {
r = omapdss_device_connect(priv->dss, NULL, output);
if (r == -EPROBE_DEFER) {
omapdss_device_put(output);
return r;
} else if (r) {
dev_warn(output->dev, "could not connect output %s\n",
output->name);
} else {
struct omap_drm_pipeline *pipe;
pipe = &priv->pipes[priv->num_pipes++];
pipe->output = omapdss_device_get(output);
if (priv->num_pipes == ARRAY_SIZE(priv->pipes)) {
/* To balance the 'for_each_dss_output' loop */
omapdss_device_put(output);
break;
}
}
}
return 0;
}
static int omap_compare_pipelines(const void *a, const void *b)
{
const struct omap_drm_pipeline *pipe1 = a;
const struct omap_drm_pipeline *pipe2 = b;
if (pipe1->alias_id > pipe2->alias_id)
return 1;
else if (pipe1->alias_id < pipe2->alias_id)
return -1;
return 0;
}
static int omap_modeset_init_properties(struct drm_device *dev)
{
struct omap_drm_private *priv = dev->dev_private;
unsigned int num_planes = dispc_get_num_ovls(priv->dispc);
priv->zorder_prop = drm_property_create_range(dev, 0, "zorder", 0,
num_planes - 1);
if (!priv->zorder_prop)
return -ENOMEM;
return 0;
}
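/*
* The display ID is the "display" DT alias of the last bridge in the
* chain; omap_modeset_init() falls back to the probe order when no
* alias is defined.
*/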
static int omap_display_id(struct omap_dss_device *output)
{
struct device_node *node = NULL;
if (output->bridge) {
struct drm_bridge *bridge = output->bridge;
while (drm_bridge_get_next_bridge(bridge))
bridge = drm_bridge_get_next_bridge(bridge);
node = bridge->of_node;
}
return node ? of_alias_get_id(node, "display") : -ENODEV;
}
static int omap_modeset_init(struct drm_device *dev)
{
struct omap_drm_private *priv = dev->dev_private;
int num_ovls = dispc_get_num_ovls(priv->dispc);
int num_mgrs = dispc_get_num_mgrs(priv->dispc);
unsigned int i;
int ret;
u32 plane_crtc_mask;
if (!omapdss_stack_is_ready())
return -EPROBE_DEFER;
ret = omap_modeset_init_properties(dev);
if (ret < 0)
return ret;
/*
* This function creates exactly one connector, encoder, crtc,
* and primary plane for each connected dss-device. Each
* connector->encoder->crtc chain is expected to be separate
* and each crtc is connect to a single dss-channel. If the
* configuration does not match the expectations or exceeds
* the available resources, the configuration is rejected.
*/
ret = omap_connect_pipelines(dev);
if (ret < 0)
return ret;
if (priv->num_pipes > num_mgrs || priv->num_pipes > num_ovls) {
dev_err(dev->dev, "%s(): Too many connected displays\n",
__func__);
return -EINVAL;
}
/* Create all planes first. They can all be put to any CRTC. */
plane_crtc_mask = (1 << priv->num_pipes) - 1;
for (i = 0; i < num_ovls; i++) {
enum drm_plane_type type = i < priv->num_pipes
? DRM_PLANE_TYPE_PRIMARY
: DRM_PLANE_TYPE_OVERLAY;
struct drm_plane *plane;
if (WARN_ON(priv->num_planes >= ARRAY_SIZE(priv->planes)))
return -EINVAL;
plane = omap_plane_init(dev, i, type, plane_crtc_mask);
if (IS_ERR(plane))
return PTR_ERR(plane);
priv->planes[priv->num_planes++] = plane;
}
/*
* Create the encoders, attach the bridges and get the pipeline alias
* IDs.
*/
for (i = 0; i < priv->num_pipes; i++) {
struct omap_drm_pipeline *pipe = &priv->pipes[i];
int id;
pipe->encoder = omap_encoder_init(dev, pipe->output);
if (!pipe->encoder)
return -ENOMEM;
if (pipe->output->bridge) {
ret = drm_bridge_attach(pipe->encoder,
pipe->output->bridge, NULL,
DRM_BRIDGE_ATTACH_NO_CONNECTOR);
if (ret < 0)
return ret;
}
id = omap_display_id(pipe->output);
pipe->alias_id = id >= 0 ? id : i;
}
/* Sort the pipelines by DT aliases. */
sort(priv->pipes, priv->num_pipes, sizeof(priv->pipes[0]),
omap_compare_pipelines, NULL);
/*
* Populate the pipeline lookup table by DISPC channel. Only one display
* is allowed per channel.
*/
for (i = 0; i < priv->num_pipes; ++i) {
struct omap_drm_pipeline *pipe = &priv->pipes[i];
enum omap_channel channel = pipe->output->dispc_channel;
if (WARN_ON(priv->channels[channel] != NULL))
return -EINVAL;
priv->channels[channel] = pipe;
}
/* Create the connectors and CRTCs. */
for (i = 0; i < priv->num_pipes; i++) {
struct omap_drm_pipeline *pipe = &priv->pipes[i];
struct drm_encoder *encoder = pipe->encoder;
struct drm_crtc *crtc;
pipe->connector = drm_bridge_connector_init(dev, encoder);
if (IS_ERR(pipe->connector)) {
dev_err(priv->dev,
"unable to create bridge connector for %s\n",
pipe->output->name);
return PTR_ERR(pipe->connector);
}
drm_connector_attach_encoder(pipe->connector, encoder);
crtc = omap_crtc_init(dev, pipe, priv->planes[i]);
if (IS_ERR(crtc))
return PTR_ERR(crtc);
encoder->possible_crtcs = 1 << i;
pipe->crtc = crtc;
}
DBG("registered %u planes, %u crtcs/encoders/connectors\n",
priv->num_planes, priv->num_pipes);
dev->mode_config.min_width = 8;
dev->mode_config.min_height = 2;
/*
* Note: these values are used for multiple independent things:
* connector mode filtering, buffer sizes, crtc sizes...
* Use big enough values here to cover all use cases, and do more
* specific checking in the respective code paths.
*/
dev->mode_config.max_width = 8192;
dev->mode_config.max_height = 8192;
/* We want the zpos to be normalized */
dev->mode_config.normalize_zpos = true;
dev->mode_config.funcs = &omap_mode_config_funcs;
dev->mode_config.helper_private = &omap_mode_config_helper_funcs;
drm_mode_config_reset(dev);
omap_drm_irq_install(dev);
return 0;
}
static void omap_modeset_fini(struct drm_device *ddev)
{
omap_drm_irq_uninstall(ddev);
drm_mode_config_cleanup(ddev);
}
/*
* drm ioctl funcs
*/
static int ioctl_get_param(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct omap_drm_private *priv = dev->dev_private;
struct drm_omap_param *args = data;
DBG("%p: param=%llu", dev, args->param);
switch (args->param) {
case OMAP_PARAM_CHIPSET_ID:
args->value = priv->omaprev;
break;
default:
DBG("unknown parameter %lld", args->param);
return -EINVAL;
}
return 0;
}
#define OMAP_BO_USER_MASK 0x00ffffff /* flags settable by userspace */
static int ioctl_gem_new(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_omap_gem_new *args = data;
u32 flags = args->flags & OMAP_BO_USER_MASK;
VERB("%p:%p: size=0x%08x, flags=%08x", dev, file_priv,
args->size.bytes, flags);
return omap_gem_new_handle(dev, file_priv, args->size, flags,
&args->handle);
}
static int ioctl_gem_info(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_omap_gem_info *args = data;
struct drm_gem_object *obj;
int ret = 0;
VERB("%p:%p: handle=%d", dev, file_priv, args->handle);
obj = drm_gem_object_lookup(file_priv, args->handle);
if (!obj)
return -ENOENT;
args->size = omap_gem_mmap_size(obj);
args->offset = omap_gem_mmap_offset(obj);
drm_gem_object_put(obj);
return ret;
}
static const struct drm_ioctl_desc ioctls[DRM_COMMAND_END - DRM_COMMAND_BASE] = {
DRM_IOCTL_DEF_DRV(OMAP_GET_PARAM, ioctl_get_param,
DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(OMAP_SET_PARAM, drm_invalid_op,
DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(OMAP_GEM_NEW, ioctl_gem_new,
DRM_RENDER_ALLOW),
/* Deprecated, to be removed. */
DRM_IOCTL_DEF_DRV(OMAP_GEM_CPU_PREP, drm_noop,
DRM_RENDER_ALLOW),
/* Deprecated, to be removed. */
DRM_IOCTL_DEF_DRV(OMAP_GEM_CPU_FINI, drm_noop,
DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(OMAP_GEM_INFO, ioctl_gem_info,
DRM_RENDER_ALLOW),
};
/*
* drm driver funcs
*/
static int dev_open(struct drm_device *dev, struct drm_file *file)
{
file->driver_priv = NULL;
DBG("open: dev=%p, file=%p", dev, file);
return 0;
}
DEFINE_DRM_GEM_FOPS(omapdriver_fops);
static const struct drm_driver omap_drm_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM |
DRIVER_ATOMIC | DRIVER_RENDER,
.open = dev_open,
#ifdef CONFIG_DEBUG_FS
.debugfs_init = omap_debugfs_init,
#endif
.gem_prime_import = omap_gem_prime_import,
.dumb_create = omap_gem_dumb_create,
.dumb_map_offset = omap_gem_dumb_map_offset,
.ioctls = ioctls,
.num_ioctls = DRM_OMAP_NUM_IOCTLS,
.fops = &omapdriver_fops,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = DRIVER_DATE,
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
.patchlevel = DRIVER_PATCHLEVEL,
};
static const struct soc_device_attribute omapdrm_soc_devices[] = {
{ .family = "OMAP3", .data = (void *)0x3430 },
{ .family = "OMAP4", .data = (void *)0x4430 },
{ .family = "OMAP5", .data = (void *)0x5430 },
{ .family = "DRA7", .data = (void *)0x0752 },
{ /* sentinel */ }
};
static int omapdrm_init(struct omap_drm_private *priv, struct device *dev)
{
const struct soc_device_attribute *soc;
struct dss_pdata *pdata = dev->platform_data;
struct drm_device *ddev;
int ret;
DBG("%s", dev_name(dev));
if (drm_firmware_drivers_only())
return -ENODEV;
/* Allocate and initialize the DRM device. */
ddev = drm_dev_alloc(&omap_drm_driver, dev);
if (IS_ERR(ddev))
return PTR_ERR(ddev);
priv->ddev = ddev;
ddev->dev_private = priv;
priv->dev = dev;
priv->dss = pdata->dss;
priv->dispc = dispc_get_dispc(priv->dss);
priv->dss->mgr_ops_priv = priv;
soc = soc_device_match(omapdrm_soc_devices);
priv->omaprev = soc ? (uintptr_t)soc->data : 0;
priv->wq = alloc_ordered_workqueue("omapdrm", 0);
mutex_init(&priv->list_lock);
INIT_LIST_HEAD(&priv->obj_list);
/* Get memory bandwidth limits */
priv->max_bandwidth = dispc_get_memory_bandwidth_limit(priv->dispc);
omap_gem_init(ddev);
drm_mode_config_init(ddev);
ret = omap_global_obj_init(ddev);
if (ret)
goto err_gem_deinit;
ret = omap_hwoverlays_init(priv);
if (ret)
goto err_free_priv_obj;
ret = omap_modeset_init(ddev);
if (ret) {
dev_err(priv->dev, "omap_modeset_init failed: ret=%d\n", ret);
goto err_free_overlays;
}
/* Initialize vblank handling, start with all CRTCs disabled. */
ret = drm_vblank_init(ddev, priv->num_pipes);
if (ret) {
dev_err(priv->dev, "could not init vblank\n");
goto err_cleanup_modeset;
}
drm_kms_helper_poll_init(ddev);
/*
* Register the DRM device with the core and the connectors with
* sysfs.
*/
ret = drm_dev_register(ddev, 0);
if (ret)
goto err_cleanup_helpers;
omap_fbdev_setup(ddev);
return 0;
err_cleanup_helpers:
drm_kms_helper_poll_fini(ddev);
err_cleanup_modeset:
omap_modeset_fini(ddev);
err_free_overlays:
omap_hwoverlays_destroy(priv);
err_free_priv_obj:
omap_global_obj_fini(priv);
err_gem_deinit:
drm_mode_config_cleanup(ddev);
omap_gem_deinit(ddev);
destroy_workqueue(priv->wq);
omap_disconnect_pipelines(ddev);
drm_dev_put(ddev);
return ret;
}
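/*
 * Tear down everything set up by omapdrm_init(), in roughly the reverse
 * order: unregister the device, stop polling, shut down the atomic state,
 * then release the KMS, GEM and workqueue resources before dropping the
 * last device reference.
 */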
static void omapdrm_cleanup(struct omap_drm_private *priv)
{
struct drm_device *ddev = priv->ddev;
DBG("");
drm_dev_unregister(ddev);
drm_kms_helper_poll_fini(ddev);
drm_atomic_helper_shutdown(ddev);
omap_modeset_fini(ddev);
omap_hwoverlays_destroy(priv);
omap_global_obj_fini(priv);
drm_mode_config_cleanup(ddev);
omap_gem_deinit(ddev);
destroy_workqueue(priv->wq);
omap_disconnect_pipelines(ddev);
drm_dev_put(ddev);
}
static int pdev_probe(struct platform_device *pdev)
{
struct omap_drm_private *priv;
int ret;
ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (ret) {
dev_err(&pdev->dev, "Failed to set the DMA mask\n");
return ret;
}
/* Allocate and initialize the driver private structure. */
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
platform_set_drvdata(pdev, priv);
ret = omapdrm_init(priv, &pdev->dev);
if (ret < 0)
kfree(priv);
return ret;
}
static void pdev_remove(struct platform_device *pdev)
{
struct omap_drm_private *priv = platform_get_drvdata(pdev);
omapdrm_cleanup(priv);
kfree(priv);
}
#ifdef CONFIG_PM_SLEEP
static int omap_drm_suspend(struct device *dev)
{
struct omap_drm_private *priv = dev_get_drvdata(dev);
struct drm_device *drm_dev = priv->ddev;
return drm_mode_config_helper_suspend(drm_dev);
}
static int omap_drm_resume(struct device *dev)
{
struct omap_drm_private *priv = dev_get_drvdata(dev);
struct drm_device *drm_dev = priv->ddev;
drm_mode_config_helper_resume(drm_dev);
return omap_gem_resume(drm_dev);
}
#endif
static SIMPLE_DEV_PM_OPS(omapdrm_pm_ops, omap_drm_suspend, omap_drm_resume);
static struct platform_driver pdev = {
.driver = {
.name = "omapdrm",
.pm = &omapdrm_pm_ops,
},
.probe = pdev_probe,
.remove_new = pdev_remove,
};
static struct platform_driver * const drivers[] = {
&omap_dmm_driver,
&pdev,
};
static int __init omap_drm_init(void)
{
int r;
DBG("init");
r = omap_dss_init();
if (r)
return r;
r = platform_register_drivers(drivers, ARRAY_SIZE(drivers));
if (r) {
omap_dss_exit();
return r;
}
return 0;
}
static void __exit omap_drm_fini(void)
{
DBG("fini");
platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
omap_dss_exit();
}
module_init(omap_drm_init);
module_exit(omap_drm_fini);
MODULE_AUTHOR("Rob Clark <[email protected]>");
MODULE_AUTHOR("Tomi Valkeinen <[email protected]>");
MODULE_DESCRIPTION("OMAP DRM Display Driver");
MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_LICENSE("GPL v2");
| linux-master | drivers/gpu/drm/omapdrm/omap_drv.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2011 Texas Instruments Incorporated - https://www.ti.com/
* Author: Rob Clark <[email protected]>
*/
#include <linux/dma-mapping.h>
#include <linux/seq_file.h>
#include <linux/shmem_fs.h>
#include <linux/spinlock.h>
#include <linux/pfn_t.h>
#include <drm/drm_prime.h>
#include <drm/drm_vma_manager.h>
#include "omap_drv.h"
#include "omap_dmm_tiler.h"
/*
* GEM buffer object implementation.
*/
/* note: we use upper 8 bits of flags for driver-internal flags: */
#define OMAP_BO_MEM_DMA_API 0x01000000 /* memory allocated with the dma_alloc_* API */
#define OMAP_BO_MEM_SHMEM 0x02000000 /* memory allocated through shmem backing */
#define OMAP_BO_MEM_DMABUF 0x08000000 /* memory imported from a dmabuf */
struct omap_gem_object {
struct drm_gem_object base;
struct list_head mm_list;
u32 flags;
/** width/height for tiled formats (rounded up to slot boundaries) */
u16 width, height;
/** roll applied when mapping to DMM */
u32 roll;
/** protects pin_cnt, block, pages, dma_addrs and vaddr */
struct mutex lock;
/**
* dma_addr contains the buffer DMA address. It is valid for
*
* - buffers allocated through the DMA mapping API (with the
* OMAP_BO_MEM_DMA_API flag set)
*
* - buffers imported from dmabuf (with the OMAP_BO_MEM_DMABUF flag set)
* if they are physically contiguous (when sgt->orig_nents == 1)
*
* - buffers mapped through the TILER when pin_cnt is not zero, in which
* case the DMA address points to the TILER aperture
*
* Physically contiguous buffers have their DMA address equal to the
* physical address as we don't remap those buffers through the TILER.
*
* Buffers mapped to the TILER have their DMA address pointing to the
* TILER aperture. As TILER mappings are refcounted (through pin_cnt)
* the DMA address must be accessed through omap_gem_pin() to ensure
* that the mapping won't disappear unexpectedly. References must be
* released with omap_gem_unpin().
*/
dma_addr_t dma_addr;
/**
* # of users
*/
refcount_t pin_cnt;
/**
	 * If the buffer has been imported from a dmabuf the OMAP_BO_MEM_DMABUF flag
* is set and the sgt field is valid.
*/
struct sg_table *sgt;
/**
* tiler block used when buffer is remapped in DMM/TILER.
*/
struct tiler_block *block;
/**
* Array of backing pages, if allocated. Note that pages are never
* allocated for buffers originally allocated from contiguous memory
*/
struct page **pages;
/** addresses corresponding to pages in above array */
dma_addr_t *dma_addrs;
/**
* Virtual address, if mapped.
*/
void *vaddr;
};
#define to_omap_bo(x) container_of(x, struct omap_gem_object, base)
/* To deal with userspace mmap'ings of 2d tiled buffers, which (a) are
* not necessarily pinned in TILER all the time, and (b) when they are
* they are not necessarily page aligned, we reserve one or more small
* regions in each of the 2d containers to use as a user-GART where we
* can create a second page-aligned mapping of parts of the buffer
* being accessed from userspace.
*
* Note that we could optimize slightly when we know that multiple
* tiler containers are backed by the same PAT.. but I'll leave that
* for later..
*/
#define NUM_USERGART_ENTRIES 2
struct omap_drm_usergart_entry {
struct tiler_block *block; /* the reserved tiler block */
dma_addr_t dma_addr;
struct drm_gem_object *obj; /* the current pinned obj */
pgoff_t obj_pgoff; /* page offset of obj currently
mapped in */
};
struct omap_drm_usergart {
struct omap_drm_usergart_entry entry[NUM_USERGART_ENTRIES];
int height; /* height in rows */
int height_shift; /* ilog2(height in rows) */
int slot_shift; /* ilog2(width per slot) */
int stride_pfn; /* stride in pages */
int last; /* index of last used entry */
};
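/*
 * Worked example (the exact numbers depend on the TILER slot geometry
 * reported by the DMM driver): if a slot for a given format spans 64 rows,
 * each usergart region is one 4KiB page wide by 64 rows tall. A single CPU
 * fault then maps in 'height' (64) pages, and stride_pfn is the distance,
 * in pages, between two vertically adjacent pages in the TILER aperture.
 */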
/* -----------------------------------------------------------------------------
* Helpers
*/
/** get mmap offset */
u64 omap_gem_mmap_offset(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
int ret;
size_t size;
/* Make it mmapable */
size = omap_gem_mmap_size(obj);
ret = drm_gem_create_mmap_offset_size(obj, size);
if (ret) {
dev_err(dev->dev, "could not allocate mmap offset\n");
return 0;
}
return drm_vma_node_offset_addr(&obj->vma_node);
}
static bool omap_gem_is_contiguous(struct omap_gem_object *omap_obj)
{
if (omap_obj->flags & OMAP_BO_MEM_DMA_API)
return true;
if ((omap_obj->flags & OMAP_BO_MEM_DMABUF) && omap_obj->sgt->nents == 1)
return true;
return false;
}
/* -----------------------------------------------------------------------------
* Eviction
*/
static void omap_gem_evict_entry(struct drm_gem_object *obj,
enum tiler_fmt fmt, struct omap_drm_usergart_entry *entry)
{
struct omap_gem_object *omap_obj = to_omap_bo(obj);
struct omap_drm_private *priv = obj->dev->dev_private;
int n = priv->usergart[fmt].height;
size_t size = PAGE_SIZE * n;
loff_t off = omap_gem_mmap_offset(obj) +
(entry->obj_pgoff << PAGE_SHIFT);
const int m = DIV_ROUND_UP(omap_obj->width << fmt, PAGE_SIZE);
if (m > 1) {
int i;
		/* if stride > PAGE_SIZE then sparse mapping: */
for (i = n; i > 0; i--) {
unmap_mapping_range(obj->dev->anon_inode->i_mapping,
off, PAGE_SIZE, 1);
off += PAGE_SIZE * m;
}
} else {
unmap_mapping_range(obj->dev->anon_inode->i_mapping,
off, size, 1);
}
entry->obj = NULL;
}
/* Evict a buffer from usergart, if it is mapped there */
static void omap_gem_evict(struct drm_gem_object *obj)
{
struct omap_gem_object *omap_obj = to_omap_bo(obj);
struct omap_drm_private *priv = obj->dev->dev_private;
if (omap_obj->flags & OMAP_BO_TILED_MASK) {
enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
int i;
for (i = 0; i < NUM_USERGART_ENTRIES; i++) {
struct omap_drm_usergart_entry *entry =
&priv->usergart[fmt].entry[i];
if (entry->obj == obj)
omap_gem_evict_entry(obj, fmt, entry);
}
}
}
/* -----------------------------------------------------------------------------
* Page Management
*/
/*
* Ensure backing pages are allocated. Must be called with the omap_obj.lock
* held.
*/
static int omap_gem_attach_pages(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
struct omap_gem_object *omap_obj = to_omap_bo(obj);
struct page **pages;
int npages = obj->size >> PAGE_SHIFT;
int i, ret;
dma_addr_t *addrs;
lockdep_assert_held(&omap_obj->lock);
/*
* If not using shmem (in which case backing pages don't need to be
* allocated) or if pages are already allocated we're done.
*/
if (!(omap_obj->flags & OMAP_BO_MEM_SHMEM) || omap_obj->pages)
return 0;
pages = drm_gem_get_pages(obj);
if (IS_ERR(pages)) {
dev_err(obj->dev->dev, "could not get pages: %ld\n", PTR_ERR(pages));
return PTR_ERR(pages);
}
/* for non-cached buffers, ensure the new pages are clean because
* DSS, GPU, etc. are not cache coherent:
*/
if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) {
addrs = kmalloc_array(npages, sizeof(*addrs), GFP_KERNEL);
if (!addrs) {
ret = -ENOMEM;
goto free_pages;
}
for (i = 0; i < npages; i++) {
addrs[i] = dma_map_page(dev->dev, pages[i],
0, PAGE_SIZE, DMA_TO_DEVICE);
if (dma_mapping_error(dev->dev, addrs[i])) {
dev_warn(dev->dev,
"%s: failed to map page\n", __func__);
for (i = i - 1; i >= 0; --i) {
dma_unmap_page(dev->dev, addrs[i],
PAGE_SIZE, DMA_TO_DEVICE);
}
ret = -ENOMEM;
goto free_addrs;
}
}
} else {
addrs = kcalloc(npages, sizeof(*addrs), GFP_KERNEL);
if (!addrs) {
ret = -ENOMEM;
goto free_pages;
}
}
omap_obj->dma_addrs = addrs;
omap_obj->pages = pages;
return 0;
free_addrs:
kfree(addrs);
free_pages:
drm_gem_put_pages(obj, pages, true, false);
return ret;
}
/* Release backing pages. Must be called with the omap_obj.lock held. */
static void omap_gem_detach_pages(struct drm_gem_object *obj)
{
struct omap_gem_object *omap_obj = to_omap_bo(obj);
unsigned int npages = obj->size >> PAGE_SHIFT;
unsigned int i;
lockdep_assert_held(&omap_obj->lock);
for (i = 0; i < npages; i++) {
if (omap_obj->dma_addrs[i])
dma_unmap_page(obj->dev->dev, omap_obj->dma_addrs[i],
PAGE_SIZE, DMA_TO_DEVICE);
}
kfree(omap_obj->dma_addrs);
omap_obj->dma_addrs = NULL;
drm_gem_put_pages(obj, omap_obj->pages, true, false);
omap_obj->pages = NULL;
}
/* get buffer flags */
u32 omap_gem_flags(struct drm_gem_object *obj)
{
return to_omap_bo(obj)->flags;
}
/** get mmap size */
size_t omap_gem_mmap_size(struct drm_gem_object *obj)
{
struct omap_gem_object *omap_obj = to_omap_bo(obj);
size_t size = obj->size;
if (omap_obj->flags & OMAP_BO_TILED_MASK) {
/* for tiled buffers, the virtual size has stride rounded up
* to 4kb.. (to hide the fact that row n+1 might start 16kb or
* 32kb later!). But we don't back the entire buffer with
* pages, only the valid picture part.. so need to adjust for
* this in the size used to mmap and generate mmap offset
*/
size = tiler_vsize(gem2fmt(omap_obj->flags),
omap_obj->width, omap_obj->height);
}
return size;
}
/* -----------------------------------------------------------------------------
* Fault Handling
*/
/* Normal handling for the case of faulting in non-tiled buffers */
static vm_fault_t omap_gem_fault_1d(struct drm_gem_object *obj,
struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct omap_gem_object *omap_obj = to_omap_bo(obj);
unsigned long pfn;
pgoff_t pgoff;
/* We don't use vmf->pgoff since that has the fake offset: */
pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
if (omap_obj->pages) {
omap_gem_cpu_sync_page(obj, pgoff);
pfn = page_to_pfn(omap_obj->pages[pgoff]);
} else {
BUG_ON(!omap_gem_is_contiguous(omap_obj));
pfn = (omap_obj->dma_addr >> PAGE_SHIFT) + pgoff;
}
VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
pfn, pfn << PAGE_SHIFT);
return vmf_insert_mixed(vma, vmf->address,
__pfn_to_pfn_t(pfn, PFN_DEV));
}
/* Special handling for the case of faulting in 2d tiled buffers */
static vm_fault_t omap_gem_fault_2d(struct drm_gem_object *obj,
struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct omap_gem_object *omap_obj = to_omap_bo(obj);
struct omap_drm_private *priv = obj->dev->dev_private;
struct omap_drm_usergart_entry *entry;
enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
struct page *pages[64]; /* XXX is this too much to have on stack? */
unsigned long pfn;
pgoff_t pgoff, base_pgoff;
unsigned long vaddr;
int i, err, slots;
vm_fault_t ret = VM_FAULT_NOPAGE;
/*
* Note the height of the slot is also equal to the number of pages
	 * that need to be mapped in to fill a 4kb wide CPU page. If the slot
* height is 64, then 64 pages fill a 4kb wide by 64 row region.
*/
const int n = priv->usergart[fmt].height;
const int n_shift = priv->usergart[fmt].height_shift;
/*
* If buffer width in bytes > PAGE_SIZE then the virtual stride is
	 * rounded up to the next multiple of PAGE_SIZE.. this needs to be taken
* into account in some of the math, so figure out virtual stride
* in pages
*/
const int m = DIV_ROUND_UP(omap_obj->width << fmt, PAGE_SIZE);
/* We don't use vmf->pgoff since that has the fake offset: */
pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
/*
* Actual address we start mapping at is rounded down to previous slot
* boundary in the y direction:
*/
base_pgoff = round_down(pgoff, m << n_shift);
/* figure out buffer width in slots */
slots = omap_obj->width >> priv->usergart[fmt].slot_shift;
vaddr = vmf->address - ((pgoff - base_pgoff) << PAGE_SHIFT);
entry = &priv->usergart[fmt].entry[priv->usergart[fmt].last];
/* evict previous buffer using this usergart entry, if any: */
if (entry->obj)
omap_gem_evict_entry(entry->obj, fmt, entry);
entry->obj = obj;
entry->obj_pgoff = base_pgoff;
/* now convert base_pgoff to phys offset from virt offset: */
base_pgoff = (base_pgoff >> n_shift) * slots;
/* for wider-than 4k.. figure out which part of the slot-row we want: */
if (m > 1) {
int off = pgoff % m;
entry->obj_pgoff += off;
base_pgoff /= m;
slots = min(slots - (off << n_shift), n);
base_pgoff += off << n_shift;
vaddr += off << PAGE_SHIFT;
}
/*
* Map in pages. Beyond the valid pixel part of the buffer, we set
* pages[i] to NULL to get a dummy page mapped in.. if someone
* reads/writes it they will get random/undefined content, but at
* least it won't be corrupting whatever other random page used to
* be mapped in, or other undefined behavior.
*/
memcpy(pages, &omap_obj->pages[base_pgoff],
sizeof(struct page *) * slots);
memset(pages + slots, 0,
sizeof(struct page *) * (n - slots));
err = tiler_pin(entry->block, pages, ARRAY_SIZE(pages), 0, true);
if (err) {
ret = vmf_error(err);
dev_err(obj->dev->dev, "failed to pin: %d\n", err);
return ret;
}
pfn = entry->dma_addr >> PAGE_SHIFT;
VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
pfn, pfn << PAGE_SHIFT);
for (i = n; i > 0; i--) {
ret = vmf_insert_mixed(vma,
vaddr, __pfn_to_pfn_t(pfn, PFN_DEV));
if (ret & VM_FAULT_ERROR)
break;
pfn += priv->usergart[fmt].stride_pfn;
vaddr += PAGE_SIZE * m;
}
/* simple round-robin: */
priv->usergart[fmt].last = (priv->usergart[fmt].last + 1)
% NUM_USERGART_ENTRIES;
return ret;
}
/**
* omap_gem_fault - pagefault handler for GEM objects
* @vmf: fault detail
*
* Invoked when a fault occurs on an mmap of a GEM managed area. GEM
* does most of the work for us including the actual map/unmap calls
* but we need to do the actual page work.
*
* The VMA was set up by GEM. In doing so it also ensured that the
* vma->vm_private_data points to the GEM object that is backing this
* mapping.
*/
static vm_fault_t omap_gem_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct drm_gem_object *obj = vma->vm_private_data;
struct omap_gem_object *omap_obj = to_omap_bo(obj);
int err;
vm_fault_t ret;
/* Make sure we don't parallel update on a fault, nor move or remove
* something from beneath our feet
*/
mutex_lock(&omap_obj->lock);
/* if a shmem backed object, make sure we have pages attached now */
err = omap_gem_attach_pages(obj);
if (err) {
ret = vmf_error(err);
goto fail;
}
/* where should we do corresponding put_pages().. we are mapping
* the original page, rather than thru a GART, so we can't rely
* on eviction to trigger this. But munmap() or all mappings should
* probably trigger put_pages()?
*/
if (omap_obj->flags & OMAP_BO_TILED_MASK)
ret = omap_gem_fault_2d(obj, vma, vmf);
else
ret = omap_gem_fault_1d(obj, vma, vmf);
fail:
mutex_unlock(&omap_obj->lock);
return ret;
}
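/*
 * Set up the page protection for userspace mappings: write-combined for
 * OMAP_BO_WC, non-cached for OMAP_BO_UNCACHED, and for cacheable shmem
 * objects the mapping is redirected to the shmem file itself so that
 * unmap_mapping_range() on the object's own address_space behaves as
 * expected (e.g. for mmap'd dmabufs).
 */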
static int omap_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
struct omap_gem_object *omap_obj = to_omap_bo(obj);
vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP | VM_IO | VM_MIXEDMAP);
if (omap_obj->flags & OMAP_BO_WC) {
vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
} else if (omap_obj->flags & OMAP_BO_UNCACHED) {
vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
} else {
/*
* We do have some private objects, at least for scanout buffers
* on hardware without DMM/TILER. But these are allocated write-
* combine
*/
if (WARN_ON(!obj->filp))
return -EINVAL;
/*
* Shunt off cached objs to shmem file so they have their own
* address_space (so unmap_mapping_range does what we want,
* in particular in the case of mmap'd dmabufs)
*/
vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node);
vma_set_file(vma, obj->filp);
vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
}
vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
return 0;
}
/* -----------------------------------------------------------------------------
* Dumb Buffers
*/
/**
* omap_gem_dumb_create - create a dumb buffer
* @file: our client file
* @dev: our device
* @args: the requested arguments copied from userspace
*
* Allocate a buffer suitable for use for a frame buffer of the
* form described by user space. Give userspace a handle by which
* to reference it.
*/
int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
struct drm_mode_create_dumb *args)
{
union omap_gem_size gsize;
args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
args->size = PAGE_ALIGN(args->pitch * args->height);
gsize = (union omap_gem_size){
.bytes = args->size,
};
return omap_gem_new_handle(dev, file, gsize,
OMAP_BO_SCANOUT | OMAP_BO_WC, &args->handle);
}
/**
* omap_gem_dumb_map_offset - create an offset for a dumb buffer
* @file: our drm client file
* @dev: drm device
* @handle: GEM handle to the object (from dumb_create)
* @offset: memory map offset placeholder
*
* Do the necessary setup to allow the mapping of the frame buffer
* into user memory. We don't have to do much here at the moment.
*/
int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
u32 handle, u64 *offset)
{
struct drm_gem_object *obj;
int ret = 0;
/* GEM does all our handle to object mapping */
obj = drm_gem_object_lookup(file, handle);
if (obj == NULL) {
ret = -ENOENT;
goto fail;
}
*offset = omap_gem_mmap_offset(obj);
drm_gem_object_put(obj);
fail:
return ret;
}
#ifdef CONFIG_DRM_FBDEV_EMULATION
/* Set scrolling position. This allows us to implement fast scrolling
* for console.
*
* Call only from non-atomic contexts.
*/
int omap_gem_roll(struct drm_gem_object *obj, u32 roll)
{
struct omap_gem_object *omap_obj = to_omap_bo(obj);
u32 npages = obj->size >> PAGE_SHIFT;
int ret = 0;
if (roll > npages) {
dev_err(obj->dev->dev, "invalid roll: %d\n", roll);
return -EINVAL;
}
omap_obj->roll = roll;
mutex_lock(&omap_obj->lock);
/* if we aren't mapped yet, we don't need to do anything */
if (omap_obj->block) {
ret = omap_gem_attach_pages(obj);
if (ret)
goto fail;
ret = tiler_pin(omap_obj->block, omap_obj->pages, npages,
roll, true);
if (ret)
dev_err(obj->dev->dev, "could not repin: %d\n", ret);
}
fail:
mutex_unlock(&omap_obj->lock);
return ret;
}
#endif
/* -----------------------------------------------------------------------------
* Memory Management & DMA Sync
*/
/*
* shmem buffers that are mapped cached are not coherent.
*
* We keep track of dirty pages using page faulting to perform cache management.
* When a page is mapped to the CPU in read/write mode the device can't access
* it and omap_obj->dma_addrs[i] is NULL. When a page is mapped to the device
* the omap_obj->dma_addrs[i] is set to the DMA address, and the page is
* unmapped from the CPU.
*/
static inline bool omap_gem_is_cached_coherent(struct drm_gem_object *obj)
{
struct omap_gem_object *omap_obj = to_omap_bo(obj);
return !((omap_obj->flags & OMAP_BO_MEM_SHMEM) &&
((omap_obj->flags & OMAP_BO_CACHE_MASK) == OMAP_BO_CACHED));
}
/* Sync the buffer for CPU access.. note pages should already be
* attached, ie. omap_gem_get_pages()
*/
void omap_gem_cpu_sync_page(struct drm_gem_object *obj, int pgoff)
{
struct drm_device *dev = obj->dev;
struct omap_gem_object *omap_obj = to_omap_bo(obj);
if (omap_gem_is_cached_coherent(obj))
return;
if (omap_obj->dma_addrs[pgoff]) {
dma_unmap_page(dev->dev, omap_obj->dma_addrs[pgoff],
PAGE_SIZE, DMA_TO_DEVICE);
omap_obj->dma_addrs[pgoff] = 0;
}
}
/* sync the buffer for DMA access */
void omap_gem_dma_sync_buffer(struct drm_gem_object *obj,
enum dma_data_direction dir)
{
struct drm_device *dev = obj->dev;
struct omap_gem_object *omap_obj = to_omap_bo(obj);
int i, npages = obj->size >> PAGE_SHIFT;
struct page **pages = omap_obj->pages;
bool dirty = false;
if (omap_gem_is_cached_coherent(obj))
return;
for (i = 0; i < npages; i++) {
if (!omap_obj->dma_addrs[i]) {
dma_addr_t addr;
addr = dma_map_page(dev->dev, pages[i], 0,
PAGE_SIZE, dir);
if (dma_mapping_error(dev->dev, addr)) {
dev_warn(dev->dev, "%s: failed to map page\n",
__func__);
break;
}
dirty = true;
omap_obj->dma_addrs[i] = addr;
}
}
if (dirty) {
unmap_mapping_range(obj->filp->f_mapping, 0,
omap_gem_mmap_size(obj), 1);
}
}
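/*
 * Reserve a 2D (tiled) or 1D block in the DMM/TILER and pin the object's
 * pages into it. Called from omap_gem_pin() with omap_obj->lock held; on
 * success omap_obj->dma_addr points into the TILER aperture and
 * omap_obj->block records the reservation for a later tiler_unpin() and
 * tiler_release().
 */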
static int omap_gem_pin_tiler(struct drm_gem_object *obj)
{
struct omap_gem_object *omap_obj = to_omap_bo(obj);
u32 npages = obj->size >> PAGE_SHIFT;
enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
struct tiler_block *block;
int ret;
BUG_ON(omap_obj->block);
if (omap_obj->flags & OMAP_BO_TILED_MASK) {
block = tiler_reserve_2d(fmt, omap_obj->width, omap_obj->height,
PAGE_SIZE);
} else {
block = tiler_reserve_1d(obj->size);
}
if (IS_ERR(block)) {
ret = PTR_ERR(block);
dev_err(obj->dev->dev, "could not remap: %d (%d)\n", ret, fmt);
goto fail;
}
/* TODO: enable async refill.. */
ret = tiler_pin(block, omap_obj->pages, npages, omap_obj->roll, true);
if (ret) {
tiler_release(block);
dev_err(obj->dev->dev, "could not pin: %d\n", ret);
goto fail;
}
omap_obj->dma_addr = tiler_ssptr(block);
omap_obj->block = block;
DBG("got dma address: %pad", &omap_obj->dma_addr);
fail:
return ret;
}
/**
* omap_gem_pin() - Pin a GEM object in memory
* @obj: the GEM object
* @dma_addr: the DMA address
*
* Pin the given GEM object in memory and fill the dma_addr pointer with the
* object's DMA address. If the buffer is not physically contiguous it will be
* remapped through the TILER to provide a contiguous view.
*
* Pins are reference-counted, calling this function multiple times is allowed
* as long the corresponding omap_gem_unpin() calls are balanced.
*
* Return 0 on success or a negative error code otherwise.
*/
int omap_gem_pin(struct drm_gem_object *obj, dma_addr_t *dma_addr)
{
struct omap_drm_private *priv = obj->dev->dev_private;
struct omap_gem_object *omap_obj = to_omap_bo(obj);
int ret = 0;
mutex_lock(&omap_obj->lock);
if (!omap_gem_is_contiguous(omap_obj)) {
if (refcount_read(&omap_obj->pin_cnt) == 0) {
refcount_set(&omap_obj->pin_cnt, 1);
ret = omap_gem_attach_pages(obj);
if (ret)
goto fail;
if (omap_obj->flags & OMAP_BO_SCANOUT) {
if (priv->has_dmm) {
ret = omap_gem_pin_tiler(obj);
if (ret)
goto fail;
}
}
} else {
refcount_inc(&omap_obj->pin_cnt);
}
}
if (dma_addr)
*dma_addr = omap_obj->dma_addr;
fail:
mutex_unlock(&omap_obj->lock);
return ret;
}
/**
* omap_gem_unpin_locked() - Unpin a GEM object from memory
* @obj: the GEM object
*
* omap_gem_unpin() without locking.
*/
static void omap_gem_unpin_locked(struct drm_gem_object *obj)
{
struct omap_drm_private *priv = obj->dev->dev_private;
struct omap_gem_object *omap_obj = to_omap_bo(obj);
int ret;
if (omap_gem_is_contiguous(omap_obj))
return;
if (refcount_dec_and_test(&omap_obj->pin_cnt)) {
if (omap_obj->sgt) {
sg_free_table(omap_obj->sgt);
kfree(omap_obj->sgt);
omap_obj->sgt = NULL;
}
if (!(omap_obj->flags & OMAP_BO_SCANOUT))
return;
if (priv->has_dmm) {
ret = tiler_unpin(omap_obj->block);
if (ret) {
dev_err(obj->dev->dev,
"could not unpin pages: %d\n", ret);
}
ret = tiler_release(omap_obj->block);
if (ret) {
dev_err(obj->dev->dev,
"could not release unmap: %d\n", ret);
}
omap_obj->dma_addr = 0;
omap_obj->block = NULL;
}
}
}
/**
* omap_gem_unpin() - Unpin a GEM object from memory
* @obj: the GEM object
*
* Unpin the given GEM object previously pinned with omap_gem_pin(). Pins are
* reference-counted, the actual unpin will only be performed when the number
* of calls to this function matches the number of calls to omap_gem_pin().
*/
void omap_gem_unpin(struct drm_gem_object *obj)
{
struct omap_gem_object *omap_obj = to_omap_bo(obj);
mutex_lock(&omap_obj->lock);
omap_gem_unpin_locked(obj);
mutex_unlock(&omap_obj->lock);
}
/* Get rotated scanout address (only valid if already pinned), at the
* specified orientation and x,y offset from top-left corner of buffer
* (only valid for tiled 2d buffers)
*/
int omap_gem_rotated_dma_addr(struct drm_gem_object *obj, u32 orient,
int x, int y, dma_addr_t *dma_addr)
{
struct omap_gem_object *omap_obj = to_omap_bo(obj);
int ret = -EINVAL;
mutex_lock(&omap_obj->lock);
if ((refcount_read(&omap_obj->pin_cnt) > 0) && omap_obj->block &&
(omap_obj->flags & OMAP_BO_TILED_MASK)) {
*dma_addr = tiler_tsptr(omap_obj->block, orient, x, y);
ret = 0;
}
mutex_unlock(&omap_obj->lock);
return ret;
}
/* Get tiler stride for the buffer (only valid for 2d tiled buffers) */
int omap_gem_tiled_stride(struct drm_gem_object *obj, u32 orient)
{
struct omap_gem_object *omap_obj = to_omap_bo(obj);
int ret = -EINVAL;
if (omap_obj->flags & OMAP_BO_TILED_MASK)
ret = tiler_stride(gem2fmt(omap_obj->flags), orient);
return ret;
}
/* if !remap, and we don't have pages backing, then fail, rather than
* increasing the pin count (which we don't really do yet anyways,
* because we don't support swapping pages back out). And 'remap'
* might not be quite the right name, but I wanted to keep it working
* similarly to omap_gem_pin(). Note though that mutex is not
 * acquired if !remap (because this can be called in atomic ctxt),
* but probably omap_gem_unpin() should be changed to work in the
* same way. If !remap, a matching omap_gem_put_pages() call is not
* required (and should not be made).
*/
int omap_gem_get_pages(struct drm_gem_object *obj, struct page ***pages,
bool remap)
{
struct omap_gem_object *omap_obj = to_omap_bo(obj);
int ret = 0;
mutex_lock(&omap_obj->lock);
if (remap) {
ret = omap_gem_attach_pages(obj);
if (ret)
goto unlock;
}
if (!omap_obj->pages) {
ret = -ENOMEM;
goto unlock;
}
*pages = omap_obj->pages;
unlock:
mutex_unlock(&omap_obj->lock);
return ret;
}
/* release pages when DMA no longer being performed */
int omap_gem_put_pages(struct drm_gem_object *obj)
{
/* do something here if we dynamically attach/detach pages.. at
* least they would no longer need to be pinned if everyone has
* released the pages..
*/
return 0;
}
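/*
 * Pin the buffer and build (or return the cached) scatter/gather table
 * describing it: a single entry for contiguous buffers, one entry per row
 * for 2D tiled buffers (using the tiler stride), otherwise one entry per
 * backing page. Callers drop their reference with omap_gem_put_sg(), e.g.
 * (sketch, mirroring the dma-buf attachment callbacks below):
 *
 *	sgt = omap_gem_get_sg(obj, dir);
 *	if (IS_ERR(sgt))
 *		return PTR_ERR(sgt);
 *	...
 *	omap_gem_put_sg(obj, sgt);
 */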
struct sg_table *omap_gem_get_sg(struct drm_gem_object *obj,
enum dma_data_direction dir)
{
struct omap_gem_object *omap_obj = to_omap_bo(obj);
dma_addr_t addr;
struct sg_table *sgt;
struct scatterlist *sg;
unsigned int count, len, stride, i;
int ret;
ret = omap_gem_pin(obj, &addr);
if (ret)
return ERR_PTR(ret);
mutex_lock(&omap_obj->lock);
sgt = omap_obj->sgt;
if (sgt)
goto out;
sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
if (!sgt) {
ret = -ENOMEM;
goto err_unpin;
}
if (addr) {
if (omap_obj->flags & OMAP_BO_TILED_MASK) {
enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
len = omap_obj->width << (int)fmt;
count = omap_obj->height;
stride = tiler_stride(fmt, 0);
} else {
len = obj->size;
count = 1;
stride = 0;
}
} else {
count = obj->size >> PAGE_SHIFT;
}
ret = sg_alloc_table(sgt, count, GFP_KERNEL);
if (ret)
goto err_free;
/* this must be after omap_gem_pin() to ensure we have pages attached */
omap_gem_dma_sync_buffer(obj, dir);
if (addr) {
for_each_sg(sgt->sgl, sg, count, i) {
sg_set_page(sg, phys_to_page(addr), len,
offset_in_page(addr));
sg_dma_address(sg) = addr;
sg_dma_len(sg) = len;
addr += stride;
}
} else {
for_each_sg(sgt->sgl, sg, count, i) {
sg_set_page(sg, omap_obj->pages[i], PAGE_SIZE, 0);
sg_dma_address(sg) = omap_obj->dma_addrs[i];
sg_dma_len(sg) = PAGE_SIZE;
}
}
omap_obj->sgt = sgt;
out:
mutex_unlock(&omap_obj->lock);
return sgt;
err_free:
kfree(sgt);
err_unpin:
mutex_unlock(&omap_obj->lock);
omap_gem_unpin(obj);
return ERR_PTR(ret);
}
void omap_gem_put_sg(struct drm_gem_object *obj, struct sg_table *sgt)
{
struct omap_gem_object *omap_obj = to_omap_bo(obj);
if (WARN_ON(omap_obj->sgt != sgt))
return;
omap_gem_unpin(obj);
}
#ifdef CONFIG_DRM_FBDEV_EMULATION
/*
* Get kernel virtual address for CPU access.. this more or less only
* exists for omap_fbdev.
*/
void *omap_gem_vaddr(struct drm_gem_object *obj)
{
struct omap_gem_object *omap_obj = to_omap_bo(obj);
void *vaddr;
int ret;
mutex_lock(&omap_obj->lock);
if (!omap_obj->vaddr) {
ret = omap_gem_attach_pages(obj);
if (ret) {
vaddr = ERR_PTR(ret);
goto unlock;
}
omap_obj->vaddr = vmap(omap_obj->pages, obj->size >> PAGE_SHIFT,
VM_MAP, pgprot_writecombine(PAGE_KERNEL));
}
vaddr = omap_obj->vaddr;
unlock:
mutex_unlock(&omap_obj->lock);
return vaddr;
}
#endif
/* -----------------------------------------------------------------------------
* Power Management
*/
#ifdef CONFIG_PM
/* re-pin objects in DMM in resume path: */
int omap_gem_resume(struct drm_device *dev)
{
struct omap_drm_private *priv = dev->dev_private;
struct omap_gem_object *omap_obj;
int ret = 0;
mutex_lock(&priv->list_lock);
list_for_each_entry(omap_obj, &priv->obj_list, mm_list) {
if (omap_obj->block) {
struct drm_gem_object *obj = &omap_obj->base;
u32 npages = obj->size >> PAGE_SHIFT;
WARN_ON(!omap_obj->pages); /* this can't happen */
ret = tiler_pin(omap_obj->block,
omap_obj->pages, npages,
omap_obj->roll, true);
if (ret) {
dev_err(dev->dev, "could not repin: %d\n", ret);
goto done;
}
}
}
done:
mutex_unlock(&priv->list_lock);
return ret;
}
#endif
/* -----------------------------------------------------------------------------
* DebugFS
*/
#ifdef CONFIG_DEBUG_FS
void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
struct omap_gem_object *omap_obj = to_omap_bo(obj);
u64 off;
off = drm_vma_node_start(&obj->vma_node);
mutex_lock(&omap_obj->lock);
seq_printf(m, "%08x: %2d (%2d) %08llx %pad (%2d) %p %4d",
omap_obj->flags, obj->name, kref_read(&obj->refcount),
off, &omap_obj->dma_addr,
refcount_read(&omap_obj->pin_cnt),
omap_obj->vaddr, omap_obj->roll);
if (omap_obj->flags & OMAP_BO_TILED_MASK) {
seq_printf(m, " %dx%d", omap_obj->width, omap_obj->height);
if (omap_obj->block) {
struct tcm_area *area = &omap_obj->block->area;
seq_printf(m, " (%dx%d, %dx%d)",
area->p0.x, area->p0.y,
area->p1.x, area->p1.y);
}
} else {
seq_printf(m, " %zu", obj->size);
}
mutex_unlock(&omap_obj->lock);
seq_printf(m, "\n");
}
void omap_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
struct omap_gem_object *omap_obj;
int count = 0;
size_t size = 0;
list_for_each_entry(omap_obj, list, mm_list) {
struct drm_gem_object *obj = &omap_obj->base;
seq_printf(m, " ");
omap_gem_describe(obj, m);
count++;
size += obj->size;
}
seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif
/* -----------------------------------------------------------------------------
* Constructor & Destructor
*/
static void omap_gem_free_object(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
struct omap_drm_private *priv = dev->dev_private;
struct omap_gem_object *omap_obj = to_omap_bo(obj);
omap_gem_evict(obj);
mutex_lock(&priv->list_lock);
list_del(&omap_obj->mm_list);
mutex_unlock(&priv->list_lock);
/*
* We own the sole reference to the object at this point, but to keep
	 * lockdep happy, we must still take omap_obj->lock to call
* omap_gem_detach_pages(). This should hardly make any difference as
* there can't be any lock contention.
*/
mutex_lock(&omap_obj->lock);
/* The object should not be pinned. */
WARN_ON(refcount_read(&omap_obj->pin_cnt) > 0);
if (omap_obj->pages) {
if (omap_obj->flags & OMAP_BO_MEM_DMABUF)
kfree(omap_obj->pages);
else
omap_gem_detach_pages(obj);
}
if (omap_obj->flags & OMAP_BO_MEM_DMA_API) {
dma_free_wc(dev->dev, obj->size, omap_obj->vaddr,
omap_obj->dma_addr);
} else if (omap_obj->vaddr) {
vunmap(omap_obj->vaddr);
} else if (obj->import_attach) {
drm_prime_gem_destroy(obj, omap_obj->sgt);
}
mutex_unlock(&omap_obj->lock);
drm_gem_object_release(obj);
mutex_destroy(&omap_obj->lock);
kfree(omap_obj);
}
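/*
 * Check that the userspace-visible flags form a valid combination: the
 * cache mode must be one of the supported values, and tiled formats are
 * only accepted when the usergart (and therefore DMM/TILER) is available.
 */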
static bool omap_gem_validate_flags(struct drm_device *dev, u32 flags)
{
struct omap_drm_private *priv = dev->dev_private;
switch (flags & OMAP_BO_CACHE_MASK) {
case OMAP_BO_CACHED:
case OMAP_BO_WC:
case OMAP_BO_CACHE_MASK:
break;
default:
return false;
}
if (flags & OMAP_BO_TILED_MASK) {
if (!priv->usergart)
return false;
switch (flags & OMAP_BO_TILED_MASK) {
case OMAP_BO_TILED_8:
case OMAP_BO_TILED_16:
case OMAP_BO_TILED_32:
break;
default:
return false;
}
}
return true;
}
static const struct vm_operations_struct omap_gem_vm_ops = {
.fault = omap_gem_fault,
.open = drm_gem_vm_open,
.close = drm_gem_vm_close,
};
static const struct drm_gem_object_funcs omap_gem_object_funcs = {
.free = omap_gem_free_object,
.export = omap_gem_prime_export,
.mmap = omap_gem_object_mmap,
.vm_ops = &omap_gem_vm_ops,
};
/* GEM buffer object constructor */
struct drm_gem_object *omap_gem_new(struct drm_device *dev,
union omap_gem_size gsize, u32 flags)
{
struct omap_drm_private *priv = dev->dev_private;
struct omap_gem_object *omap_obj;
struct drm_gem_object *obj;
struct address_space *mapping;
size_t size;
int ret;
if (!omap_gem_validate_flags(dev, flags))
return NULL;
/* Validate the flags and compute the memory and cache flags. */
if (flags & OMAP_BO_TILED_MASK) {
/*
		 * Tiled buffers are always backed by shmem pages. When they are
* scanned out, they are remapped into DMM/TILER.
*/
flags |= OMAP_BO_MEM_SHMEM;
/*
* Currently don't allow cached buffers. There is some caching
* stuff that needs to be handled better.
*/
flags &= ~(OMAP_BO_CACHED|OMAP_BO_WC|OMAP_BO_UNCACHED);
flags |= tiler_get_cpu_cache_flags();
} else if ((flags & OMAP_BO_SCANOUT) && !priv->has_dmm) {
/*
* If we don't have DMM, we must allocate scanout buffers
* from contiguous DMA memory.
*/
flags |= OMAP_BO_MEM_DMA_API;
} else if (!(flags & OMAP_BO_MEM_DMABUF)) {
/*
* All other buffers not backed by dma_buf are shmem-backed.
*/
flags |= OMAP_BO_MEM_SHMEM;
}
	/* Allocate and initialize the OMAP GEM object. */
omap_obj = kzalloc(sizeof(*omap_obj), GFP_KERNEL);
if (!omap_obj)
return NULL;
obj = &omap_obj->base;
omap_obj->flags = flags;
mutex_init(&omap_obj->lock);
if (flags & OMAP_BO_TILED_MASK) {
/*
* For tiled buffers align dimensions to slot boundaries and
* calculate size based on aligned dimensions.
*/
tiler_align(gem2fmt(flags), &gsize.tiled.width,
&gsize.tiled.height);
size = tiler_size(gem2fmt(flags), gsize.tiled.width,
gsize.tiled.height);
omap_obj->width = gsize.tiled.width;
omap_obj->height = gsize.tiled.height;
} else {
size = PAGE_ALIGN(gsize.bytes);
}
obj->funcs = &omap_gem_object_funcs;
/* Initialize the GEM object. */
if (!(flags & OMAP_BO_MEM_SHMEM)) {
drm_gem_private_object_init(dev, obj, size);
} else {
ret = drm_gem_object_init(dev, obj, size);
if (ret)
goto err_free;
mapping = obj->filp->f_mapping;
mapping_set_gfp_mask(mapping, GFP_USER | __GFP_DMA32);
}
/* Allocate memory if needed. */
if (flags & OMAP_BO_MEM_DMA_API) {
omap_obj->vaddr = dma_alloc_wc(dev->dev, size,
&omap_obj->dma_addr,
GFP_KERNEL);
if (!omap_obj->vaddr)
goto err_release;
}
mutex_lock(&priv->list_lock);
list_add(&omap_obj->mm_list, &priv->obj_list);
mutex_unlock(&priv->list_lock);
return obj;
err_release:
drm_gem_object_release(obj);
err_free:
kfree(omap_obj);
return NULL;
}
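/*
 * Wrap an imported dma-buf in a GEM object (used by omap_gem_prime_import()).
 * A single-entry sg_table is treated as physically contiguous and its DMA
 * address is used directly; otherwise a page array is built from the
 * sg_table, which requires the DMM so the buffer can later be remapped into
 * a contiguous view.
 */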
struct drm_gem_object *omap_gem_new_dmabuf(struct drm_device *dev, size_t size,
struct sg_table *sgt)
{
struct omap_drm_private *priv = dev->dev_private;
struct omap_gem_object *omap_obj;
struct drm_gem_object *obj;
union omap_gem_size gsize;
/* Without a DMM only physically contiguous buffers can be supported. */
if (sgt->orig_nents != 1 && !priv->has_dmm)
return ERR_PTR(-EINVAL);
gsize.bytes = PAGE_ALIGN(size);
obj = omap_gem_new(dev, gsize, OMAP_BO_MEM_DMABUF | OMAP_BO_WC);
if (!obj)
return ERR_PTR(-ENOMEM);
omap_obj = to_omap_bo(obj);
mutex_lock(&omap_obj->lock);
omap_obj->sgt = sgt;
if (sgt->orig_nents == 1) {
omap_obj->dma_addr = sg_dma_address(sgt->sgl);
} else {
/* Create pages list from sgt */
struct page **pages;
unsigned int npages;
unsigned int ret;
npages = DIV_ROUND_UP(size, PAGE_SIZE);
pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
if (!pages) {
omap_gem_free_object(obj);
obj = ERR_PTR(-ENOMEM);
goto done;
}
omap_obj->pages = pages;
ret = drm_prime_sg_to_page_array(sgt, pages, npages);
if (ret) {
omap_gem_free_object(obj);
obj = ERR_PTR(-ENOMEM);
goto done;
}
}
done:
mutex_unlock(&omap_obj->lock);
return obj;
}
/* convenience method to construct a GEM buffer object, and userspace handle */
int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file,
union omap_gem_size gsize, u32 flags, u32 *handle)
{
struct drm_gem_object *obj;
int ret;
obj = omap_gem_new(dev, gsize, flags);
if (!obj)
return -ENOMEM;
ret = drm_gem_handle_create(file, obj, handle);
if (ret) {
omap_gem_free_object(obj);
return ret;
}
/* drop reference from allocate - handle holds it now */
drm_gem_object_put(obj);
return 0;
}
/* -----------------------------------------------------------------------------
* Init & Cleanup
*/
/* If DMM is used, we need to set some stuff up.. */
void omap_gem_init(struct drm_device *dev)
{
struct omap_drm_private *priv = dev->dev_private;
struct omap_drm_usergart *usergart;
const enum tiler_fmt fmts[] = {
TILFMT_8BIT, TILFMT_16BIT, TILFMT_32BIT
};
int i, j;
if (!dmm_is_available()) {
/* DMM only supported on OMAP4 and later, so this isn't fatal */
dev_warn(dev->dev, "DMM not available, disable DMM support\n");
return;
}
usergart = kcalloc(3, sizeof(*usergart), GFP_KERNEL);
if (!usergart)
return;
/* reserve 4k aligned/wide regions for userspace mappings: */
for (i = 0; i < ARRAY_SIZE(fmts); i++) {
u16 h = 1, w = PAGE_SIZE >> i;
tiler_align(fmts[i], &w, &h);
		/* note: since each region is one 4kb page wide and uses the
		 * minimum number of rows, the height ends up being the same
		 * as the number of pages in the region
*/
usergart[i].height = h;
usergart[i].height_shift = ilog2(h);
usergart[i].stride_pfn = tiler_stride(fmts[i], 0) >> PAGE_SHIFT;
usergart[i].slot_shift = ilog2((PAGE_SIZE / h) >> i);
for (j = 0; j < NUM_USERGART_ENTRIES; j++) {
struct omap_drm_usergart_entry *entry;
struct tiler_block *block;
entry = &usergart[i].entry[j];
block = tiler_reserve_2d(fmts[i], w, h, PAGE_SIZE);
if (IS_ERR(block)) {
dev_err(dev->dev,
"reserve failed: %d, %d, %ld\n",
i, j, PTR_ERR(block));
return;
}
entry->dma_addr = tiler_ssptr(block);
entry->block = block;
DBG("%d:%d: %dx%d: dma_addr=%pad stride=%d", i, j, w, h,
&entry->dma_addr,
usergart[i].stride_pfn << PAGE_SHIFT);
}
}
priv->usergart = usergart;
priv->has_dmm = true;
}
void omap_gem_deinit(struct drm_device *dev)
{
struct omap_drm_private *priv = dev->dev_private;
/* I believe we can rely on there being no more outstanding GEM
* objects which could depend on usergart/dmm at this point.
*/
kfree(priv->usergart);
}
| linux-master | drivers/gpu/drm/omapdrm/omap_gem.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
* Author: Benoit Parrot <[email protected]>
*/
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include "omap_dmm_tiler.h"
#include "omap_drv.h"
/*
* overlay funcs
*/
static const char * const overlay_id_to_name[] = {
[OMAP_DSS_GFX] = "gfx",
[OMAP_DSS_VIDEO1] = "vid1",
[OMAP_DSS_VIDEO2] = "vid2",
[OMAP_DSS_VIDEO3] = "vid3",
};
/*
* Find a free overlay with the required caps and supported fourcc
*/
static struct omap_hw_overlay *
omap_plane_find_free_overlay(struct drm_device *dev, struct drm_plane *hwoverlay_to_plane[],
u32 caps, u32 fourcc)
{
struct omap_drm_private *priv = dev->dev_private;
int i;
DBG("caps: %x fourcc: %x", caps, fourcc);
for (i = 0; i < priv->num_ovls; i++) {
struct omap_hw_overlay *cur = priv->overlays[i];
DBG("%d: id: %d cur->caps: %x",
cur->idx, cur->id, cur->caps);
/* skip if already in-use */
if (hwoverlay_to_plane[cur->idx])
continue;
/* skip if doesn't support some required caps: */
if (caps & ~cur->caps)
continue;
/* check supported format */
if (!dispc_ovl_color_mode_supported(priv->dispc,
cur->id, fourcc))
continue;
return cur;
}
DBG("no match");
return NULL;
}
/*
 * Assign a new overlay to a plane with the required caps and supported fourcc.
 * If a plane needs a new overlay, the previous one should have been released
* with omap_overlay_release()
* This should be called from the plane atomic_check() in order to prepare the
* next global overlay_map to be enabled when atomic transaction is valid.
*/
int omap_overlay_assign(struct drm_atomic_state *s, struct drm_plane *plane,
u32 caps, u32 fourcc, struct omap_hw_overlay **overlay,
struct omap_hw_overlay **r_overlay)
{
/* Get the global state of the current atomic transaction */
struct omap_global_state *state = omap_get_global_state(s);
struct drm_plane **overlay_map = state->hwoverlay_to_plane;
struct omap_hw_overlay *ovl, *r_ovl;
ovl = omap_plane_find_free_overlay(s->dev, overlay_map, caps, fourcc);
if (!ovl)
return -ENOMEM;
overlay_map[ovl->idx] = plane;
*overlay = ovl;
if (r_overlay) {
r_ovl = omap_plane_find_free_overlay(s->dev, overlay_map,
caps, fourcc);
if (!r_ovl) {
overlay_map[ovl->idx] = NULL;
*overlay = NULL;
return -ENOMEM;
}
overlay_map[r_ovl->idx] = plane;
*r_overlay = r_ovl;
}
DBG("%s: assign to plane %s caps %x", ovl->name, plane->name, caps);
if (r_overlay) {
DBG("%s: assign to right of plane %s caps %x",
r_ovl->name, plane->name, caps);
}
return 0;
}
/*
 * Release an overlay from a plane when the plane becomes invisible or when it
 * needs a different overlay because the required overlay caps have changed.
* This should be called from the plane atomic_check() in order to prepare the
* next global overlay_map to be enabled when atomic transaction is valid.
*/
void omap_overlay_release(struct drm_atomic_state *s, struct omap_hw_overlay *overlay)
{
/* Get the global state of the current atomic transaction */
struct omap_global_state *state = omap_get_global_state(s);
struct drm_plane **overlay_map = state->hwoverlay_to_plane;
if (!overlay)
return;
if (WARN_ON(!overlay_map[overlay->idx]))
return;
DBG("%s: release from plane %s", overlay->name, overlay_map[overlay->idx]->name);
overlay_map[overlay->idx] = NULL;
}
/*
* Update an overlay state that was attached to a plane before the current atomic state.
* This should be called from the plane atomic_update() or atomic_disable(),
* where an overlay association to a plane could have changed between the old and current
* atomic state.
*/
void omap_overlay_update_state(struct omap_drm_private *priv,
struct omap_hw_overlay *overlay)
{
struct omap_global_state *state = omap_get_existing_global_state(priv);
struct drm_plane **overlay_map = state->hwoverlay_to_plane;
/* Check if this overlay is not used anymore, then disable it */
if (!overlay_map[overlay->idx]) {
DBG("%s: disabled", overlay->name);
/* disable the overlay */
dispc_ovl_enable(priv->dispc, overlay->id, false);
}
}
static void omap_overlay_destroy(struct omap_hw_overlay *overlay)
{
kfree(overlay);
}
static struct omap_hw_overlay *omap_overlay_init(enum omap_plane_id overlay_id,
enum omap_overlay_caps caps)
{
struct omap_hw_overlay *overlay;
overlay = kzalloc(sizeof(*overlay), GFP_KERNEL);
if (!overlay)
return ERR_PTR(-ENOMEM);
overlay->name = overlay_id_to_name[overlay_id];
overlay->id = overlay_id;
overlay->caps = caps;
return overlay;
}
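/*
 * Create one omap_hw_overlay per hardware plane reported by DISPC, in the
 * fixed GFX/VID1/VID2/VID3 order, recording each plane's capabilities for
 * later matching in omap_plane_find_free_overlay().
 */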
int omap_hwoverlays_init(struct omap_drm_private *priv)
{
static const enum omap_plane_id hw_plane_ids[] = {
OMAP_DSS_GFX, OMAP_DSS_VIDEO1,
OMAP_DSS_VIDEO2, OMAP_DSS_VIDEO3,
};
u32 num_overlays = dispc_get_num_ovls(priv->dispc);
enum omap_overlay_caps caps;
int i, ret;
for (i = 0; i < num_overlays; i++) {
struct omap_hw_overlay *overlay;
caps = dispc_ovl_get_caps(priv->dispc, hw_plane_ids[i]);
overlay = omap_overlay_init(hw_plane_ids[i], caps);
if (IS_ERR(overlay)) {
ret = PTR_ERR(overlay);
dev_err(priv->dev, "failed to construct overlay for %s (%d)\n",
overlay_id_to_name[i], ret);
omap_hwoverlays_destroy(priv);
return ret;
}
overlay->idx = priv->num_ovls;
priv->overlays[priv->num_ovls++] = overlay;
}
return 0;
}
void omap_hwoverlays_destroy(struct omap_drm_private *priv)
{
int i;
for (i = 0; i < priv->num_ovls; i++) {
omap_overlay_destroy(priv->overlays[i]);
priv->overlays[i] = NULL;
}
priv->num_ovls = 0;
}
| linux-master | drivers/gpu/drm/omapdrm/omap_overlay.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2011 Texas Instruments Incorporated - https://www.ti.com/
* Author: Rob Clark <[email protected]>
*/
#include <linux/dma-buf.h>
#include <linux/highmem.h>
#include <drm/drm_prime.h>
#include "omap_drv.h"
MODULE_IMPORT_NS(DMA_BUF);
/* -----------------------------------------------------------------------------
* DMABUF Export
*/
static struct sg_table *omap_gem_map_dma_buf(
struct dma_buf_attachment *attachment,
enum dma_data_direction dir)
{
struct drm_gem_object *obj = attachment->dmabuf->priv;
struct sg_table *sg;
sg = omap_gem_get_sg(obj, dir);
if (IS_ERR(sg))
return sg;
return sg;
}
static void omap_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
struct sg_table *sg, enum dma_data_direction dir)
{
struct drm_gem_object *obj = attachment->dmabuf->priv;
omap_gem_put_sg(obj, sg);
}
static int omap_gem_dmabuf_begin_cpu_access(struct dma_buf *buffer,
enum dma_data_direction dir)
{
struct drm_gem_object *obj = buffer->priv;
struct page **pages;
if (omap_gem_flags(obj) & OMAP_BO_TILED_MASK) {
/* TODO we would need to pin at least part of the buffer to
* get de-tiled view. For now just reject it.
*/
return -ENOMEM;
}
/* make sure we have the pages: */
return omap_gem_get_pages(obj, &pages, true);
}
static int omap_gem_dmabuf_end_cpu_access(struct dma_buf *buffer,
enum dma_data_direction dir)
{
struct drm_gem_object *obj = buffer->priv;
omap_gem_put_pages(obj);
return 0;
}
static int omap_gem_dmabuf_mmap(struct dma_buf *buffer,
struct vm_area_struct *vma)
{
struct drm_gem_object *obj = buffer->priv;
return drm_gem_mmap_obj(obj, omap_gem_mmap_size(obj), vma);
}
static const struct dma_buf_ops omap_dmabuf_ops = {
.map_dma_buf = omap_gem_map_dma_buf,
.unmap_dma_buf = omap_gem_unmap_dma_buf,
.release = drm_gem_dmabuf_release,
.begin_cpu_access = omap_gem_dmabuf_begin_cpu_access,
.end_cpu_access = omap_gem_dmabuf_end_cpu_access,
.mmap = omap_gem_dmabuf_mmap,
};
struct dma_buf *omap_gem_prime_export(struct drm_gem_object *obj, int flags)
{
DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
exp_info.ops = &omap_dmabuf_ops;
exp_info.size = omap_gem_mmap_size(obj);
exp_info.flags = flags;
exp_info.priv = obj;
exp_info.resv = obj->resv;
return drm_gem_dmabuf_export(obj->dev, &exp_info);
}
/* -----------------------------------------------------------------------------
* DMABUF Import
*/
struct drm_gem_object *omap_gem_prime_import(struct drm_device *dev,
struct dma_buf *dma_buf)
{
struct dma_buf_attachment *attach;
struct drm_gem_object *obj;
struct sg_table *sgt;
int ret;
if (dma_buf->ops == &omap_dmabuf_ops) {
obj = dma_buf->priv;
if (obj->dev == dev) {
/*
			 * Importing a dmabuf exported from our own gem increases
* refcount on gem itself instead of f_count of dmabuf.
*/
drm_gem_object_get(obj);
return obj;
}
}
attach = dma_buf_attach(dma_buf, dev->dev);
if (IS_ERR(attach))
return ERR_CAST(attach);
get_dma_buf(dma_buf);
sgt = dma_buf_map_attachment_unlocked(attach, DMA_TO_DEVICE);
if (IS_ERR(sgt)) {
ret = PTR_ERR(sgt);
goto fail_detach;
}
obj = omap_gem_new_dmabuf(dev, dma_buf->size, sgt);
if (IS_ERR(obj)) {
ret = PTR_ERR(obj);
goto fail_unmap;
}
obj->import_attach = attach;
return obj;
fail_unmap:
dma_buf_unmap_attachment_unlocked(attach, sgt, DMA_TO_DEVICE);
fail_detach:
dma_buf_detach(dma_buf, attach);
dma_buf_put(dma_buf);
return ERR_PTR(ret);
}
| linux-master | drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
 * SImple Tiler Allocator (SiTA): 2D and 1D allocation (reservation) algorithm
*
* Authors: Ravi Ramachandra <[email protected]>,
* Lajos Molnar <[email protected]>
* Andy Gross <[email protected]>
*
* Copyright (C) 2012 Texas Instruments Incorporated - https://www.ti.com/
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/bitmap.h>
#include <linux/slab.h>
#include "tcm.h"
static unsigned long mask[8];
/*
* pos position in bitmap
* w width in slots
* h height in slots
* map ptr to bitmap
* stride slots in a row
*/
static void free_slots(unsigned long pos, u16 w, u16 h,
unsigned long *map, u16 stride)
{
int i;
for (i = 0; i < h; i++, pos += stride)
bitmap_clear(map, pos, w);
}
/*
* w width in slots
* pos ptr to position
* map ptr to bitmap
* num_bits number of bits in bitmap
*/
static int r2l_b2t_1d(u16 w, unsigned long *pos, unsigned long *map,
size_t num_bits)
{
unsigned long search_count = 0;
unsigned long bit;
bool area_found = false;
*pos = num_bits - w;
while (search_count < num_bits) {
bit = find_next_bit(map, num_bits, *pos);
if (bit - *pos >= w) {
/* found a long enough free area */
bitmap_set(map, *pos, w);
area_found = true;
break;
}
search_count = num_bits - bit + w;
*pos = bit - w;
}
return (area_found) ? 0 : -ENOMEM;
}
/*
* w = width in slots
* h = height in slots
* a = align in slots (mask, 2^n-1, 0 is unaligned)
* offset = offset in bytes from 4KiB
* pos = position in bitmap for buffer
* map = bitmap ptr
* num_bits = size of bitmap
* stride = bits in one row of container
*/
static int l2r_t2b(u16 w, u16 h, u16 a, s16 offset,
unsigned long *pos, unsigned long slot_bytes,
unsigned long *map, size_t num_bits, size_t slot_stride)
{
int i;
unsigned long index;
bool area_free = false;
unsigned long slots_per_band = PAGE_SIZE / slot_bytes;
unsigned long bit_offset = (offset > 0) ? offset / slot_bytes : 0;
unsigned long curr_bit = bit_offset;
/* reset alignment to 1 if we are matching a specific offset */
/* adjust alignment - 1 to get to the format expected in bitmaps */
a = (offset > 0) ? 0 : a - 1;
/* FIXME Return error if slots_per_band > stride */
while (curr_bit < num_bits) {
*pos = bitmap_find_next_zero_area(map, num_bits, curr_bit, w,
a);
/* skip forward if we are not at right offset */
if (bit_offset > 0 && (*pos % slots_per_band != bit_offset)) {
curr_bit = ALIGN(*pos, slots_per_band) + bit_offset;
continue;
}
/* skip forward to next row if we overlap end of row */
if ((*pos % slot_stride) + w > slot_stride) {
curr_bit = ALIGN(*pos, slot_stride) + bit_offset;
continue;
}
/* TODO: Handle overlapping 4K boundaries */
		/* break out of the loop if we would go past the end of the container */
if ((*pos + slot_stride * h) > num_bits)
break;
		/* generate a mask that represents our matching pattern */
bitmap_clear(mask, 0, slot_stride);
bitmap_set(mask, (*pos % BITS_PER_LONG), w);
/* assume the area is free until we find an overlap */
area_free = true;
/* check subsequent rows to see if complete area is free */
for (i = 1; i < h; i++) {
index = *pos / BITS_PER_LONG + i * 8;
if (bitmap_intersects(&map[index], mask,
(*pos % BITS_PER_LONG) + w)) {
area_free = false;
break;
}
}
if (area_free)
break;
/* go forward past this match */
if (bit_offset > 0)
curr_bit = ALIGN(*pos, slots_per_band) + bit_offset;
else
curr_bit = *pos + a + 1;
}
if (area_free) {
/* set area as in-use. iterate over rows */
for (i = 0, index = *pos; i < h; i++, index += slot_stride)
bitmap_set(map, index, w);
}
return (area_free) ? 0 : -ENOMEM;
}
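/*
 * Reserve a 1D area: r2l_b2t_1d() scans the bitmap right-to-left,
 * bottom-to-top for num_slots consecutive free slots, and the resulting
 * linear position is converted back into container (x, y) coordinates.
 */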
static s32 sita_reserve_1d(struct tcm *tcm, u32 num_slots,
struct tcm_area *area)
{
unsigned long pos;
int ret;
spin_lock(&(tcm->lock));
ret = r2l_b2t_1d(num_slots, &pos, tcm->bitmap, tcm->map_size);
if (!ret) {
area->p0.x = pos % tcm->width;
area->p0.y = pos / tcm->width;
area->p1.x = (pos + num_slots - 1) % tcm->width;
area->p1.y = (pos + num_slots - 1) / tcm->width;
}
spin_unlock(&(tcm->lock));
return ret;
}
static s32 sita_reserve_2d(struct tcm *tcm, u16 h, u16 w, u16 align,
s16 offset, u16 slot_bytes,
struct tcm_area *area)
{
unsigned long pos;
int ret;
spin_lock(&(tcm->lock));
ret = l2r_t2b(w, h, align, offset, &pos, slot_bytes, tcm->bitmap,
tcm->map_size, tcm->width);
if (!ret) {
area->p0.x = pos % tcm->width;
area->p0.y = pos / tcm->width;
area->p1.x = area->p0.x + w - 1;
area->p1.y = area->p0.y + h - 1;
}
spin_unlock(&(tcm->lock));
return ret;
}
static void sita_deinit(struct tcm *tcm)
{
kfree(tcm);
}
static s32 sita_free(struct tcm *tcm, struct tcm_area *area)
{
unsigned long pos;
u16 w, h;
pos = area->p0.x + area->p0.y * tcm->width;
if (area->is2d) {
w = area->p1.x - area->p0.x + 1;
h = area->p1.y - area->p0.y + 1;
} else {
w = area->p1.x + area->p1.y * tcm->width - pos + 1;
h = 1;
}
spin_lock(&(tcm->lock));
free_slots(pos, w, h, tcm->bitmap, tcm->width);
spin_unlock(&(tcm->lock));
return 0;
}
struct tcm *sita_init(u16 width, u16 height)
{
struct tcm *tcm;
size_t map_size = BITS_TO_LONGS(width*height) * sizeof(unsigned long);
if (width == 0 || height == 0)
return NULL;
tcm = kzalloc(sizeof(*tcm) + map_size, GFP_KERNEL);
if (!tcm)
goto error;
/* Updating the pointers to SiTA implementation APIs */
tcm->height = height;
tcm->width = width;
tcm->reserve_2d = sita_reserve_2d;
tcm->reserve_1d = sita_reserve_1d;
tcm->free = sita_free;
tcm->deinit = sita_deinit;
spin_lock_init(&tcm->lock);
tcm->bitmap = (unsigned long *)(tcm + 1);
bitmap_clear(tcm->bitmap, 0, width*height);
tcm->map_size = width*height;
return tcm;
error:
return NULL;
}
| linux-master | drivers/gpu/drm/omapdrm/tcm-sita.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2011 Texas Instruments Incorporated - https://www.ti.com/
* Author: Rob Clark <[email protected]>
*/
#include <linux/fb.h>
#include <drm/drm_drv.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_file.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_util.h>
#include "omap_drv.h"
#include "omap_fbdev.h"
MODULE_PARM_DESC(ywrap, "Enable ywrap scrolling (omap44xx and later, default 'y')");
static bool ywrap_enabled = true;
module_param_named(ywrap, ywrap_enabled, bool, 0644);
/*
* fbdev funcs, to implement legacy fbdev interface on top of drm driver
*/
#define to_omap_fbdev(x) container_of(x, struct omap_fbdev, base)
struct omap_fbdev {
struct drm_fb_helper base;
bool ywrap_enabled;
/* for deferred dmm roll when getting called in atomic ctx */
struct work_struct work;
};
static struct drm_fb_helper *get_fb(struct fb_info *fbi);
static void pan_worker(struct work_struct *work)
{
struct omap_fbdev *fbdev = container_of(work, struct omap_fbdev, work);
struct drm_fb_helper *helper = &fbdev->base;
struct fb_info *fbi = helper->info;
struct drm_gem_object *bo = drm_gem_fb_get_obj(helper->fb, 0);
int npages;
/* DMM roll shifts in 4K pages: */
npages = fbi->fix.line_length >> PAGE_SHIFT;
omap_gem_roll(bo, fbi->var.yoffset * npages);
}
static int omap_fbdev_pan_display(struct fb_var_screeninfo *var,
struct fb_info *fbi)
{
struct drm_fb_helper *helper = get_fb(fbi);
struct omap_fbdev *fbdev = to_omap_fbdev(helper);
if (!helper)
goto fallback;
if (!fbdev->ywrap_enabled)
goto fallback;
if (drm_can_sleep()) {
pan_worker(&fbdev->work);
} else {
struct omap_drm_private *priv = helper->dev->dev_private;
queue_work(priv->wq, &fbdev->work);
}
return 0;
fallback:
return drm_fb_helper_pan_display(var, fbi);
}
static int omap_fbdev_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
struct drm_fb_helper *helper = info->par;
struct drm_framebuffer *fb = helper->fb;
struct drm_gem_object *bo = drm_gem_fb_get_obj(fb, 0);
return drm_gem_mmap_obj(bo, omap_gem_mmap_size(bo), vma);
}
static void omap_fbdev_fb_destroy(struct fb_info *info)
{
struct drm_fb_helper *helper = info->par;
struct drm_framebuffer *fb = helper->fb;
struct drm_gem_object *bo = drm_gem_fb_get_obj(fb, 0);
struct omap_fbdev *fbdev = to_omap_fbdev(helper);
DBG();
drm_fb_helper_fini(helper);
omap_gem_unpin(bo);
drm_framebuffer_remove(fb);
drm_client_release(&helper->client);
drm_fb_helper_unprepare(helper);
kfree(fbdev);
}
static const struct fb_ops omap_fb_ops = {
.owner = THIS_MODULE,
__FB_DEFAULT_DMAMEM_OPS_RDWR,
.fb_check_var = drm_fb_helper_check_var,
.fb_set_par = drm_fb_helper_set_par,
.fb_setcmap = drm_fb_helper_setcmap,
.fb_blank = drm_fb_helper_blank,
.fb_pan_display = omap_fbdev_pan_display,
__FB_DEFAULT_DMAMEM_OPS_DRAW,
.fb_ioctl = drm_fb_helper_ioctl,
.fb_mmap = omap_fbdev_fb_mmap,
.fb_destroy = omap_fbdev_fb_destroy,
};
static int omap_fbdev_create(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes)
{
struct omap_fbdev *fbdev = to_omap_fbdev(helper);
struct drm_device *dev = helper->dev;
struct omap_drm_private *priv = dev->dev_private;
struct drm_framebuffer *fb = NULL;
union omap_gem_size gsize;
struct fb_info *fbi = NULL;
struct drm_mode_fb_cmd2 mode_cmd = {0};
struct drm_gem_object *bo;
dma_addr_t dma_addr;
int ret;
sizes->surface_bpp = 32;
sizes->surface_depth = 24;
DBG("create fbdev: %dx%d@%d (%dx%d)", sizes->surface_width,
sizes->surface_height, sizes->surface_bpp,
sizes->fb_width, sizes->fb_height);
mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
sizes->surface_depth);
mode_cmd.width = sizes->surface_width;
mode_cmd.height = sizes->surface_height;
mode_cmd.pitches[0] =
DIV_ROUND_UP(mode_cmd.width * sizes->surface_bpp, 8);
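	/* e.g. a 1280 pixel wide surface at 32 bpp gives a 5120 byte pitch */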
fbdev->ywrap_enabled = priv->has_dmm && ywrap_enabled;
if (fbdev->ywrap_enabled) {
/* need to align pitch to page size if using DMM scrolling */
mode_cmd.pitches[0] = PAGE_ALIGN(mode_cmd.pitches[0]);
}
/* allocate backing bo */
gsize = (union omap_gem_size){
.bytes = PAGE_ALIGN(mode_cmd.pitches[0] * mode_cmd.height),
};
DBG("allocating %d bytes for fb %d", gsize.bytes, dev->primary->index);
bo = omap_gem_new(dev, gsize, OMAP_BO_SCANOUT | OMAP_BO_WC);
if (!bo) {
dev_err(dev->dev, "failed to allocate buffer object\n");
ret = -ENOMEM;
goto fail;
}
fb = omap_framebuffer_init(dev, &mode_cmd, &bo);
if (IS_ERR(fb)) {
dev_err(dev->dev, "failed to allocate fb\n");
/* note: if fb creation failed, we can't rely on fb destroy
* to unref the bo:
*/
drm_gem_object_put(bo);
ret = PTR_ERR(fb);
goto fail;
}
/* note: this keeps the bo pinned.. which is perhaps not ideal,
* but is needed as long as we use fb_mmap() to mmap to userspace
* (since this happens using fix.smem_start). Possibly we could
* implement our own mmap using GEM mmap support to avoid this
* (non-tiled buffer doesn't need to be pinned for fbcon to write
* to it). Then we just need to be sure that we are able to re-
	 * pin it in case of an oops.
*/
ret = omap_gem_pin(bo, &dma_addr);
if (ret) {
dev_err(dev->dev, "could not pin framebuffer\n");
ret = -ENOMEM;
goto fail;
}
fbi = drm_fb_helper_alloc_info(helper);
if (IS_ERR(fbi)) {
dev_err(dev->dev, "failed to allocate fb info\n");
ret = PTR_ERR(fbi);
goto fail;
}
DBG("fbi=%p, dev=%p", fbi, dev);
helper->fb = fb;
fbi->fbops = &omap_fb_ops;
drm_fb_helper_fill_info(fbi, helper, sizes);
fbi->flags |= FBINFO_VIRTFB;
fbi->screen_buffer = omap_gem_vaddr(bo);
fbi->screen_size = bo->size;
fbi->fix.smem_start = dma_addr;
fbi->fix.smem_len = bo->size;
/* if we have DMM, then we can use it for scrolling by just
* shuffling pages around in DMM rather than doing sw blit.
*/
if (fbdev->ywrap_enabled) {
DRM_INFO("Enabling DMM ywrap scrolling\n");
fbi->flags |= FBINFO_HWACCEL_YWRAP | FBINFO_READS_FAST;
fbi->fix.ywrapstep = 1;
}
DBG("par=%p, %dx%d", fbi->par, fbi->var.xres, fbi->var.yres);
DBG("allocated %dx%d fb", fb->width, fb->height);
return 0;
fail:
if (ret) {
if (fb)
drm_framebuffer_remove(fb);
}
return ret;
}
static const struct drm_fb_helper_funcs omap_fb_helper_funcs = {
.fb_probe = omap_fbdev_create,
};
static struct drm_fb_helper *get_fb(struct fb_info *fbi)
{
if (!fbi || strcmp(fbi->fix.id, MODULE_NAME)) {
/* these are not the fb's you're looking for */
return NULL;
}
return fbi->par;
}
/*
* struct drm_client
*/
static void omap_fbdev_client_unregister(struct drm_client_dev *client)
{
struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
if (fb_helper->info) {
drm_fb_helper_unregister_info(fb_helper);
} else {
drm_client_release(&fb_helper->client);
drm_fb_helper_unprepare(fb_helper);
kfree(fb_helper);
}
}
static int omap_fbdev_client_restore(struct drm_client_dev *client)
{
drm_fb_helper_lastclose(client->dev);
return 0;
}
static int omap_fbdev_client_hotplug(struct drm_client_dev *client)
{
struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
struct drm_device *dev = client->dev;
int ret;
if (dev->fb_helper)
return drm_fb_helper_hotplug_event(dev->fb_helper);
ret = drm_fb_helper_init(dev, fb_helper);
if (ret)
goto err_drm_err;
ret = drm_fb_helper_initial_config(fb_helper);
if (ret)
goto err_drm_fb_helper_fini;
return 0;
err_drm_fb_helper_fini:
drm_fb_helper_fini(fb_helper);
err_drm_err:
drm_err(dev, "Failed to setup fbdev emulation (ret=%d)\n", ret);
return ret;
}
static const struct drm_client_funcs omap_fbdev_client_funcs = {
.owner = THIS_MODULE,
.unregister = omap_fbdev_client_unregister,
.restore = omap_fbdev_client_restore,
.hotplug = omap_fbdev_client_hotplug,
};
void omap_fbdev_setup(struct drm_device *dev)
{
struct omap_fbdev *fbdev;
struct drm_fb_helper *helper;
int ret;
drm_WARN(dev, !dev->registered, "Device has not been registered.\n");
drm_WARN(dev, dev->fb_helper, "fb_helper is already set!\n");
fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL);
if (!fbdev)
return;
helper = &fbdev->base;
drm_fb_helper_prepare(dev, helper, 32, &omap_fb_helper_funcs);
ret = drm_client_init(dev, &helper->client, "fbdev", &omap_fbdev_client_funcs);
if (ret)
goto err_drm_client_init;
INIT_WORK(&fbdev->work, pan_worker);
drm_client_register(&helper->client);
return;
err_drm_client_init:
drm_fb_helper_unprepare(helper);
kfree(fbdev);
}
| linux-master | drivers/gpu/drm/omapdrm/omap_fbdev.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2011 Texas Instruments Incorporated - https://www.ti.com/
* Author: Rob Clark <[email protected]>
*/
#include <linux/dma-mapping.h>
#include <drm/drm_blend.h>
#include <drm/drm_modeset_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include "omap_dmm_tiler.h"
#include "omap_drv.h"
/*
* framebuffer funcs
*/
static const u32 formats[] = {
/* 16bpp [A]RGB: */
DRM_FORMAT_RGB565, /* RGB16-565 */
DRM_FORMAT_RGBX4444, /* RGB12x-4444 */
DRM_FORMAT_XRGB4444, /* xRGB12-4444 */
DRM_FORMAT_RGBA4444, /* RGBA12-4444 */
DRM_FORMAT_ARGB4444, /* ARGB16-4444 */
DRM_FORMAT_XRGB1555, /* xRGB15-1555 */
DRM_FORMAT_ARGB1555, /* ARGB16-1555 */
/* 24bpp RGB: */
DRM_FORMAT_RGB888, /* RGB24-888 */
/* 32bpp [A]RGB: */
DRM_FORMAT_RGBX8888, /* RGBx24-8888 */
DRM_FORMAT_XRGB8888, /* xRGB24-8888 */
DRM_FORMAT_RGBA8888, /* RGBA32-8888 */
DRM_FORMAT_ARGB8888, /* ARGB32-8888 */
/* YUV: */
DRM_FORMAT_NV12,
DRM_FORMAT_YUYV,
DRM_FORMAT_UYVY,
};
/* per-plane info for the fb: */
struct plane {
dma_addr_t dma_addr;
};
#define to_omap_framebuffer(x) container_of(x, struct omap_framebuffer, base)
struct omap_framebuffer {
struct drm_framebuffer base;
int pin_count;
const struct drm_format_info *format;
struct plane planes[2];
/* lock for pinning (pin_count and planes.dma_addr) */
struct mutex lock;
};
static int omap_framebuffer_dirty(struct drm_framebuffer *fb,
struct drm_file *file_priv,
unsigned flags, unsigned color,
struct drm_clip_rect *clips,
unsigned num_clips)
{
struct drm_crtc *crtc;
drm_modeset_lock_all(fb->dev);
drm_for_each_crtc(crtc, fb->dev)
omap_crtc_flush(crtc);
drm_modeset_unlock_all(fb->dev);
return 0;
}
static const struct drm_framebuffer_funcs omap_framebuffer_funcs = {
.create_handle = drm_gem_fb_create_handle,
.dirty = omap_framebuffer_dirty,
.destroy = drm_gem_fb_destroy,
};
static u32 get_linear_addr(struct drm_framebuffer *fb,
const struct drm_format_info *format, int n, int x, int y)
{
struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
struct plane *plane = &omap_fb->planes[n];
u32 offset;
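	/*
	 * Plane 0 uses full-resolution coordinates; chroma planes divide
	 * x by hsub and y by vsub to account for subsampling.
	 */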
offset = fb->offsets[n]
+ (x * format->cpp[n] / (n == 0 ? 1 : format->hsub))
+ (y * fb->pitches[n] / (n == 0 ? 1 : format->vsub));
return plane->dma_addr + offset;
}
bool omap_framebuffer_supports_rotation(struct drm_framebuffer *fb)
{
return omap_gem_flags(fb->obj[0]) & OMAP_BO_TILED_MASK;
}
/* Note: DRM rotates counter-clockwise, TILER & DSS rotates clockwise */
static u32 drm_rotation_to_tiler(unsigned int drm_rot)
{
u32 orient;
switch (drm_rot & DRM_MODE_ROTATE_MASK) {
default:
case DRM_MODE_ROTATE_0:
orient = 0;
break;
case DRM_MODE_ROTATE_90:
orient = MASK_XY_FLIP | MASK_X_INVERT;
break;
case DRM_MODE_ROTATE_180:
orient = MASK_X_INVERT | MASK_Y_INVERT;
break;
case DRM_MODE_ROTATE_270:
orient = MASK_XY_FLIP | MASK_Y_INVERT;
break;
}
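	/* reflections toggle the corresponding invert bit on top of the base rotation */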
if (drm_rot & DRM_MODE_REFLECT_X)
orient ^= MASK_X_INVERT;
if (drm_rot & DRM_MODE_REFLECT_Y)
orient ^= MASK_Y_INVERT;
return orient;
}
/* update ovl info for scanout, handles cases of multi-planar fb's, etc.
*/
void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
struct drm_plane_state *state,
struct omap_overlay_info *info,
struct omap_overlay_info *r_info)
{
struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
const struct drm_format_info *format = omap_fb->format;
u32 x, y, orient = 0;
info->fourcc = fb->format->format;
info->pos_x = state->crtc_x;
info->pos_y = state->crtc_y;
info->out_width = state->crtc_w;
info->out_height = state->crtc_h;
info->width = state->src_w >> 16;
info->height = state->src_h >> 16;
/* DSS driver wants the w & h in rotated orientation */
if (drm_rotation_90_or_270(state->rotation))
swap(info->width, info->height);
x = state->src_x >> 16;
y = state->src_y >> 16;
if (omap_gem_flags(fb->obj[0]) & OMAP_BO_TILED_MASK) {
u32 w = state->src_w >> 16;
u32 h = state->src_h >> 16;
orient = drm_rotation_to_tiler(state->rotation);
/*
* omap_gem_rotated_paddr() wants the x & y in tiler units.
* Usually tiler unit size is the same as the pixel size, except
* for YUV422 formats, for which the tiler unit size is 32 bits
* and pixel size is 16 bits.
*/
if (fb->format->format == DRM_FORMAT_UYVY ||
fb->format->format == DRM_FORMAT_YUYV) {
x /= 2;
w /= 2;
}
/* adjust x,y offset for invert: */
if (orient & MASK_Y_INVERT)
y += h - 1;
if (orient & MASK_X_INVERT)
x += w - 1;
/* Note: x and y are in TILER units, not pixels */
omap_gem_rotated_dma_addr(fb->obj[0], orient, x, y,
&info->paddr);
info->rotation_type = OMAP_DSS_ROT_TILER;
info->rotation = state->rotation ?: DRM_MODE_ROTATE_0;
/* Note: stride in TILER units, not pixels */
info->screen_width = omap_gem_tiled_stride(fb->obj[0], orient);
} else {
switch (state->rotation & DRM_MODE_ROTATE_MASK) {
case 0:
case DRM_MODE_ROTATE_0:
/* OK */
break;
default:
dev_warn(fb->dev->dev,
"rotation '%d' ignored for non-tiled fb\n",
state->rotation);
break;
}
info->paddr = get_linear_addr(fb, format, 0, x, y);
info->rotation_type = OMAP_DSS_ROT_NONE;
info->rotation = DRM_MODE_ROTATE_0;
info->screen_width = fb->pitches[0];
}
/* convert to pixels: */
info->screen_width /= format->cpp[0];
if (fb->format->format == DRM_FORMAT_NV12) {
if (info->rotation_type == OMAP_DSS_ROT_TILER) {
WARN_ON(!(omap_gem_flags(fb->obj[1]) & OMAP_BO_TILED_MASK));
omap_gem_rotated_dma_addr(fb->obj[1], orient, x/2, y/2,
&info->p_uv_addr);
} else {
info->p_uv_addr = get_linear_addr(fb, format, 1, x, y);
}
} else {
info->p_uv_addr = 0;
}
if (r_info) {
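		/*
		 * Dual-overlay case: split the source and destination in
		 * half, the left half stays in 'info' and the right half
		 * goes to 'r_info' (odd widths are balanced for YUV below).
		 */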
info->width /= 2;
info->out_width /= 2;
*r_info = *info;
if (fb->format->is_yuv) {
if (info->width & 1) {
info->width++;
r_info->width--;
}
if (info->out_width & 1) {
info->out_width++;
r_info->out_width--;
}
}
r_info->pos_x = info->pos_x + info->out_width;
r_info->paddr = get_linear_addr(fb, format, 0,
x + info->width, y);
if (fb->format->format == DRM_FORMAT_NV12) {
r_info->p_uv_addr =
get_linear_addr(fb, format, 1,
x + info->width, y);
}
}
}
/* pin, prepare for scanout: */
int omap_framebuffer_pin(struct drm_framebuffer *fb)
{
struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
int ret, i, n = fb->format->num_planes;
mutex_lock(&omap_fb->lock);
if (omap_fb->pin_count > 0) {
omap_fb->pin_count++;
mutex_unlock(&omap_fb->lock);
return 0;
}
for (i = 0; i < n; i++) {
struct plane *plane = &omap_fb->planes[i];
ret = omap_gem_pin(fb->obj[i], &plane->dma_addr);
if (ret)
goto fail;
omap_gem_dma_sync_buffer(fb->obj[i], DMA_TO_DEVICE);
}
omap_fb->pin_count++;
mutex_unlock(&omap_fb->lock);
return 0;
fail:
for (i--; i >= 0; i--) {
struct plane *plane = &omap_fb->planes[i];
omap_gem_unpin(fb->obj[i]);
plane->dma_addr = 0;
}
mutex_unlock(&omap_fb->lock);
return ret;
}
/* unpin, no longer being scanned out: */
void omap_framebuffer_unpin(struct drm_framebuffer *fb)
{
struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
int i, n = fb->format->num_planes;
mutex_lock(&omap_fb->lock);
omap_fb->pin_count--;
if (omap_fb->pin_count > 0) {
mutex_unlock(&omap_fb->lock);
return;
}
for (i = 0; i < n; i++) {
struct plane *plane = &omap_fb->planes[i];
omap_gem_unpin(fb->obj[i]);
plane->dma_addr = 0;
}
mutex_unlock(&omap_fb->lock);
}
#ifdef CONFIG_DEBUG_FS
void omap_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m)
{
int i, n = fb->format->num_planes;
seq_printf(m, "fb: %dx%d@%4.4s\n", fb->width, fb->height,
(char *)&fb->format->format);
for (i = 0; i < n; i++) {
seq_printf(m, " %d: offset=%d pitch=%d, obj: ",
				i, fb->offsets[i], fb->pitches[i]);
omap_gem_describe(fb->obj[i], m);
}
}
#endif
struct drm_framebuffer *omap_framebuffer_create(struct drm_device *dev,
struct drm_file *file, const struct drm_mode_fb_cmd2 *mode_cmd)
{
const struct drm_format_info *info = drm_get_format_info(dev,
mode_cmd);
unsigned int num_planes = info->num_planes;
struct drm_gem_object *bos[4];
struct drm_framebuffer *fb;
int i;
for (i = 0; i < num_planes; i++) {
bos[i] = drm_gem_object_lookup(file, mode_cmd->handles[i]);
if (!bos[i]) {
fb = ERR_PTR(-ENOENT);
goto error;
}
}
fb = omap_framebuffer_init(dev, mode_cmd, bos);
if (IS_ERR(fb))
goto error;
return fb;
error:
while (--i >= 0)
drm_gem_object_put(bos[i]);
return fb;
}
struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos)
{
const struct drm_format_info *format = NULL;
struct omap_framebuffer *omap_fb = NULL;
struct drm_framebuffer *fb = NULL;
unsigned int pitch = mode_cmd->pitches[0];
int ret, i;
DBG("create framebuffer: dev=%p, mode_cmd=%p (%dx%d@%4.4s)",
dev, mode_cmd, mode_cmd->width, mode_cmd->height,
(char *)&mode_cmd->pixel_format);
format = drm_get_format_info(dev, mode_cmd);
for (i = 0; i < ARRAY_SIZE(formats); i++) {
if (formats[i] == mode_cmd->pixel_format)
break;
}
if (!format || i == ARRAY_SIZE(formats)) {
dev_dbg(dev->dev, "unsupported pixel format: %4.4s\n",
(char *)&mode_cmd->pixel_format);
ret = -EINVAL;
goto fail;
}
omap_fb = kzalloc(sizeof(*omap_fb), GFP_KERNEL);
if (!omap_fb) {
ret = -ENOMEM;
goto fail;
}
fb = &omap_fb->base;
omap_fb->format = format;
mutex_init(&omap_fb->lock);
/*
	 * The code below assumes that no format uses more than two planes, and
* that the two planes of multiplane formats need the same number of
* bytes per pixel.
*/
if (format->num_planes == 2 && pitch != mode_cmd->pitches[1]) {
dev_dbg(dev->dev, "pitches differ between planes 0 and 1\n");
ret = -EINVAL;
goto fail;
}
if (pitch % format->cpp[0]) {
dev_dbg(dev->dev,
"buffer pitch (%u bytes) is not a multiple of pixel size (%u bytes)\n",
pitch, format->cpp[0]);
ret = -EINVAL;
goto fail;
}
for (i = 0; i < format->num_planes; i++) {
struct plane *plane = &omap_fb->planes[i];
unsigned int vsub = i == 0 ? 1 : format->vsub;
unsigned int size;
size = pitch * mode_cmd->height / vsub;
if (size > omap_gem_mmap_size(bos[i]) - mode_cmd->offsets[i]) {
dev_dbg(dev->dev,
"provided buffer object is too small! %zu < %d\n",
bos[i]->size - mode_cmd->offsets[i], size);
ret = -EINVAL;
goto fail;
}
fb->obj[i] = bos[i];
plane->dma_addr = 0;
}
drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd);
ret = drm_framebuffer_init(dev, fb, &omap_framebuffer_funcs);
if (ret) {
dev_err(dev->dev, "framebuffer init failed: %d\n", ret);
goto fail;
}
DBG("create: FB ID: %d (%p)", fb->base.id, fb);
return fb;
fail:
kfree(omap_fb);
return ERR_PTR(ret);
}
| linux-master | drivers/gpu/drm/omapdrm/omap_fb.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2011 Texas Instruments Incorporated - https://www.ti.com/
* Author: Rob Clark <[email protected]>
*/
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include "omap_dmm_tiler.h"
#include "omap_drv.h"
/*
* plane funcs
*/
#define to_omap_plane_state(x) container_of(x, struct omap_plane_state, base)
struct omap_plane_state {
/* Must be first. */
struct drm_plane_state base;
struct omap_hw_overlay *overlay;
struct omap_hw_overlay *r_overlay; /* right overlay */
};
#define to_omap_plane(x) container_of(x, struct omap_plane, base)
struct omap_plane {
struct drm_plane base;
enum omap_plane_id id;
};
bool is_omap_plane_dual_overlay(struct drm_plane_state *state)
{
struct omap_plane_state *omap_state = to_omap_plane_state(state);
return !!omap_state->r_overlay;
}
static int omap_plane_prepare_fb(struct drm_plane *plane,
struct drm_plane_state *new_state)
{
if (!new_state->fb)
return 0;
drm_gem_plane_helper_prepare_fb(plane, new_state);
return omap_framebuffer_pin(new_state->fb);
}
static void omap_plane_cleanup_fb(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
if (old_state->fb)
omap_framebuffer_unpin(old_state->fb);
}
static void omap_plane_atomic_update(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct omap_drm_private *priv = plane->dev->dev_private;
struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
plane);
struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
plane);
struct omap_plane_state *new_omap_state;
struct omap_plane_state *old_omap_state;
struct omap_overlay_info info, r_info;
enum omap_plane_id ovl_id, r_ovl_id;
int ret;
bool dual_ovl;
new_omap_state = to_omap_plane_state(new_state);
old_omap_state = to_omap_plane_state(old_state);
dual_ovl = is_omap_plane_dual_overlay(new_state);
/* Cleanup previously held overlay if needed */
if (old_omap_state->overlay)
omap_overlay_update_state(priv, old_omap_state->overlay);
if (old_omap_state->r_overlay)
omap_overlay_update_state(priv, old_omap_state->r_overlay);
if (!new_omap_state->overlay) {
DBG("[PLANE:%d:%s] no overlay attached", plane->base.id, plane->name);
return;
}
ovl_id = new_omap_state->overlay->id;
DBG("%s, crtc=%p fb=%p", plane->name, new_state->crtc,
new_state->fb);
memset(&info, 0, sizeof(info));
info.rotation_type = OMAP_DSS_ROT_NONE;
info.rotation = DRM_MODE_ROTATE_0;
info.global_alpha = new_state->alpha >> 8;
info.zorder = new_state->normalized_zpos;
if (new_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI)
info.pre_mult_alpha = 1;
else
info.pre_mult_alpha = 0;
info.color_encoding = new_state->color_encoding;
info.color_range = new_state->color_range;
r_info = info;
/* update scanout: */
omap_framebuffer_update_scanout(new_state->fb, new_state, &info,
dual_ovl ? &r_info : NULL);
DBG("%s: %dx%d -> %dx%d (%d)",
new_omap_state->overlay->name, info.width, info.height,
info.out_width, info.out_height, info.screen_width);
DBG("%d,%d %pad %pad", info.pos_x, info.pos_y,
&info.paddr, &info.p_uv_addr);
if (dual_ovl) {
r_ovl_id = new_omap_state->r_overlay->id;
/*
		 * If the current plane uses 2 hw planes, the very next
		 * zorder is used by the r_overlay, so we just use the
		 * main overlay zorder + 1.
*/
r_info.zorder = info.zorder + 1;
DBG("%s: %dx%d -> %dx%d (%d)",
new_omap_state->r_overlay->name,
r_info.width, r_info.height,
r_info.out_width, r_info.out_height, r_info.screen_width);
DBG("%d,%d %pad %pad", r_info.pos_x, r_info.pos_y,
&r_info.paddr, &r_info.p_uv_addr);
}
/* and finally, update omapdss: */
ret = dispc_ovl_setup(priv->dispc, ovl_id, &info,
omap_crtc_timings(new_state->crtc), false,
omap_crtc_channel(new_state->crtc));
if (ret) {
dev_err(plane->dev->dev, "Failed to setup plane %s\n",
plane->name);
dispc_ovl_enable(priv->dispc, ovl_id, false);
return;
}
dispc_ovl_enable(priv->dispc, ovl_id, true);
if (dual_ovl) {
ret = dispc_ovl_setup(priv->dispc, r_ovl_id, &r_info,
omap_crtc_timings(new_state->crtc), false,
omap_crtc_channel(new_state->crtc));
if (ret) {
dev_err(plane->dev->dev, "Failed to setup plane right-overlay %s\n",
plane->name);
dispc_ovl_enable(priv->dispc, r_ovl_id, false);
dispc_ovl_enable(priv->dispc, ovl_id, false);
return;
}
dispc_ovl_enable(priv->dispc, r_ovl_id, true);
}
}
static void omap_plane_atomic_disable(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct omap_drm_private *priv = plane->dev->dev_private;
struct omap_plane *omap_plane = to_omap_plane(plane);
struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
plane);
struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
plane);
struct omap_plane_state *new_omap_state;
struct omap_plane_state *old_omap_state;
new_omap_state = to_omap_plane_state(new_state);
old_omap_state = to_omap_plane_state(old_state);
if (!old_omap_state->overlay)
return;
new_state->rotation = DRM_MODE_ROTATE_0;
new_state->zpos = plane->type == DRM_PLANE_TYPE_PRIMARY ? 0 : omap_plane->id;
omap_overlay_update_state(priv, old_omap_state->overlay);
new_omap_state->overlay = NULL;
if (is_omap_plane_dual_overlay(old_state)) {
omap_overlay_update_state(priv, old_omap_state->r_overlay);
new_omap_state->r_overlay = NULL;
}
}
#define FRAC_16_16(mult, div) (((mult) << 16) / (div))
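/* 16.16 fixed point, e.g. FRAC_16_16(1, 8) == 0x2000 == 1/8 */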
static int omap_plane_atomic_check(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
plane);
struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state,
plane);
struct omap_drm_private *priv = plane->dev->dev_private;
struct omap_plane_state *omap_state = to_omap_plane_state(new_plane_state);
struct omap_global_state *omap_overlay_global_state;
struct drm_crtc_state *crtc_state;
bool new_r_hw_overlay = false;
bool new_hw_overlay = false;
u32 max_width, max_height;
struct drm_crtc *crtc;
u16 width, height;
u32 caps = 0;
u32 fourcc;
int ret;
omap_overlay_global_state = omap_get_global_state(state);
if (IS_ERR(omap_overlay_global_state))
return PTR_ERR(omap_overlay_global_state);
dispc_ovl_get_max_size(priv->dispc, &width, &height);
max_width = width << 16;
max_height = height << 16;
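	/* keep the limits in 16.16 fixed point so they compare directly against src_w/src_h */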
crtc = new_plane_state->crtc ? new_plane_state->crtc : plane->state->crtc;
if (!crtc)
return 0;
crtc_state = drm_atomic_get_existing_crtc_state(state, crtc);
/* we should have a crtc state if the plane is attached to a crtc */
if (WARN_ON(!crtc_state))
return 0;
/*
* Note: these are just sanity checks to filter out totally bad scaling
* factors. The real limits must be calculated case by case, and
* unfortunately we currently do those checks only at the commit
* phase in dispc.
*/
ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state,
FRAC_16_16(1, 8), FRAC_16_16(8, 1),
true, true);
if (ret)
return ret;
DBG("%s: visible %d -> %d", plane->name,
old_plane_state->visible, new_plane_state->visible);
if (!new_plane_state->visible) {
omap_overlay_release(state, omap_state->overlay);
omap_overlay_release(state, omap_state->r_overlay);
omap_state->overlay = NULL;
omap_state->r_overlay = NULL;
return 0;
}
if (new_plane_state->crtc_x < 0 || new_plane_state->crtc_y < 0)
return -EINVAL;
if (new_plane_state->crtc_x + new_plane_state->crtc_w > crtc_state->adjusted_mode.hdisplay)
return -EINVAL;
if (new_plane_state->crtc_y + new_plane_state->crtc_h > crtc_state->adjusted_mode.vdisplay)
return -EINVAL;
/* Make sure dimensions are within bounds. */
if (new_plane_state->src_h > max_height || new_plane_state->crtc_h > height)
return -EINVAL;
if (new_plane_state->src_w > max_width || new_plane_state->crtc_w > width) {
bool is_fourcc_yuv = new_plane_state->fb->format->is_yuv;
if (is_fourcc_yuv && (((new_plane_state->src_w >> 16) / 2 & 1) ||
new_plane_state->crtc_w / 2 & 1)) {
/*
			 * When calculating the split overlay width,
			 * an odd result means we need to adjust the
			 * individual widths by +/- 1, so make sure it fits.
*/
if (new_plane_state->src_w <= ((2 * width - 1) << 16) &&
new_plane_state->crtc_w <= (2 * width - 1))
new_r_hw_overlay = true;
else
return -EINVAL;
} else {
if (new_plane_state->src_w <= (2 * max_width) &&
new_plane_state->crtc_w <= (2 * width))
new_r_hw_overlay = true;
else
return -EINVAL;
}
}
if (new_plane_state->rotation != DRM_MODE_ROTATE_0 &&
!omap_framebuffer_supports_rotation(new_plane_state->fb))
return -EINVAL;
if ((new_plane_state->src_w >> 16) != new_plane_state->crtc_w ||
(new_plane_state->src_h >> 16) != new_plane_state->crtc_h)
caps |= OMAP_DSS_OVL_CAP_SCALE;
fourcc = new_plane_state->fb->format->format;
/*
* (re)allocate hw overlay if we don't have one or
* there is a caps mismatch
*/
if (!omap_state->overlay || (caps & ~omap_state->overlay->caps)) {
new_hw_overlay = true;
} else {
/* check supported format */
if (!dispc_ovl_color_mode_supported(priv->dispc, omap_state->overlay->id,
fourcc))
new_hw_overlay = true;
}
/*
* check if we need two overlays and only have 1 or
* if we had 2 overlays but will only need 1
*/
if ((new_r_hw_overlay && !omap_state->r_overlay) ||
(!new_r_hw_overlay && omap_state->r_overlay))
new_hw_overlay = true;
if (new_hw_overlay) {
struct omap_hw_overlay *old_ovl = omap_state->overlay;
struct omap_hw_overlay *old_r_ovl = omap_state->r_overlay;
struct omap_hw_overlay *new_ovl = NULL;
struct omap_hw_overlay *new_r_ovl = NULL;
omap_overlay_release(state, old_ovl);
omap_overlay_release(state, old_r_ovl);
ret = omap_overlay_assign(state, plane, caps, fourcc, &new_ovl,
new_r_hw_overlay ? &new_r_ovl : NULL);
if (ret) {
DBG("%s: failed to assign hw_overlay", plane->name);
omap_state->overlay = NULL;
omap_state->r_overlay = NULL;
return ret;
}
omap_state->overlay = new_ovl;
if (new_r_hw_overlay)
omap_state->r_overlay = new_r_ovl;
else
omap_state->r_overlay = NULL;
}
DBG("plane: %s overlay_id: %d", plane->name, omap_state->overlay->id);
if (omap_state->r_overlay)
DBG("plane: %s r_overlay_id: %d", plane->name, omap_state->r_overlay->id);
return 0;
}
static const struct drm_plane_helper_funcs omap_plane_helper_funcs = {
.prepare_fb = omap_plane_prepare_fb,
.cleanup_fb = omap_plane_cleanup_fb,
.atomic_check = omap_plane_atomic_check,
.atomic_update = omap_plane_atomic_update,
.atomic_disable = omap_plane_atomic_disable,
};
static void omap_plane_destroy(struct drm_plane *plane)
{
struct omap_plane *omap_plane = to_omap_plane(plane);
DBG("%s", plane->name);
drm_plane_cleanup(plane);
kfree(omap_plane);
}
/* helper to install properties which are common to planes and crtcs */
void omap_plane_install_properties(struct drm_plane *plane,
struct drm_mode_object *obj)
{
struct drm_device *dev = plane->dev;
struct omap_drm_private *priv = dev->dev_private;
if (priv->has_dmm) {
if (!plane->rotation_property)
drm_plane_create_rotation_property(plane,
DRM_MODE_ROTATE_0,
DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270 |
DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y);
/* Attach the rotation property also to the crtc object */
if (plane->rotation_property && obj != &plane->base)
drm_object_attach_property(obj, plane->rotation_property,
DRM_MODE_ROTATE_0);
}
drm_object_attach_property(obj, priv->zorder_prop, 0);
}
static void omap_plane_reset(struct drm_plane *plane)
{
struct omap_plane_state *omap_state;
if (plane->state)
drm_atomic_helper_plane_destroy_state(plane, plane->state);
omap_state = kzalloc(sizeof(*omap_state), GFP_KERNEL);
if (!omap_state)
return;
__drm_atomic_helper_plane_reset(plane, &omap_state->base);
}
static struct drm_plane_state *
omap_plane_atomic_duplicate_state(struct drm_plane *plane)
{
struct omap_plane_state *state, *current_state;
if (WARN_ON(!plane->state))
return NULL;
current_state = to_omap_plane_state(plane->state);
state = kmalloc(sizeof(*state), GFP_KERNEL);
if (!state)
return NULL;
__drm_atomic_helper_plane_duplicate_state(plane, &state->base);
state->overlay = current_state->overlay;
state->r_overlay = current_state->r_overlay;
return &state->base;
}
static void omap_plane_atomic_print_state(struct drm_printer *p,
const struct drm_plane_state *state)
{
struct omap_plane_state *omap_state = to_omap_plane_state(state);
if (omap_state->overlay)
drm_printf(p, "\toverlay=%s (caps=0x%x)\n",
omap_state->overlay->name,
omap_state->overlay->caps);
else
drm_printf(p, "\toverlay=None\n");
if (omap_state->r_overlay)
drm_printf(p, "\tr_overlay=%s (caps=0x%x)\n",
omap_state->r_overlay->name,
omap_state->r_overlay->caps);
else
drm_printf(p, "\tr_overlay=None\n");
}
static int omap_plane_atomic_set_property(struct drm_plane *plane,
struct drm_plane_state *state,
struct drm_property *property,
u64 val)
{
struct omap_drm_private *priv = plane->dev->dev_private;
if (property == priv->zorder_prop)
state->zpos = val;
else
return -EINVAL;
return 0;
}
static int omap_plane_atomic_get_property(struct drm_plane *plane,
const struct drm_plane_state *state,
struct drm_property *property,
u64 *val)
{
struct omap_drm_private *priv = plane->dev->dev_private;
if (property == priv->zorder_prop)
*val = state->zpos;
else
return -EINVAL;
return 0;
}
static const struct drm_plane_funcs omap_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.reset = omap_plane_reset,
.destroy = omap_plane_destroy,
.atomic_duplicate_state = omap_plane_atomic_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
.atomic_set_property = omap_plane_atomic_set_property,
.atomic_get_property = omap_plane_atomic_get_property,
.atomic_print_state = omap_plane_atomic_print_state,
};
static bool omap_plane_supports_yuv(struct drm_plane *plane)
{
struct omap_drm_private *priv = plane->dev->dev_private;
struct omap_plane *omap_plane = to_omap_plane(plane);
const u32 *formats = dispc_ovl_get_color_modes(priv->dispc, omap_plane->id);
u32 i;
for (i = 0; formats[i]; i++)
if (formats[i] == DRM_FORMAT_YUYV ||
formats[i] == DRM_FORMAT_UYVY ||
formats[i] == DRM_FORMAT_NV12)
return true;
return false;
}
/* initialize plane */
struct drm_plane *omap_plane_init(struct drm_device *dev,
int idx, enum drm_plane_type type,
u32 possible_crtcs)
{
struct omap_drm_private *priv = dev->dev_private;
unsigned int num_planes = dispc_get_num_ovls(priv->dispc);
struct drm_plane *plane;
struct omap_plane *omap_plane;
unsigned int zpos;
int ret;
u32 nformats;
const u32 *formats;
if (WARN_ON(idx >= num_planes))
return ERR_PTR(-EINVAL);
omap_plane = kzalloc(sizeof(*omap_plane), GFP_KERNEL);
if (!omap_plane)
return ERR_PTR(-ENOMEM);
omap_plane->id = idx;
DBG("%d: type=%d", omap_plane->id, type);
DBG(" crtc_mask: 0x%04x", possible_crtcs);
formats = dispc_ovl_get_color_modes(priv->dispc, omap_plane->id);
for (nformats = 0; formats[nformats]; ++nformats)
;
plane = &omap_plane->base;
ret = drm_universal_plane_init(dev, plane, possible_crtcs,
&omap_plane_funcs, formats,
nformats, NULL, type, NULL);
if (ret < 0)
goto error;
drm_plane_helper_add(plane, &omap_plane_helper_funcs);
omap_plane_install_properties(plane, &plane->base);
/*
* Set the zpos default depending on whether we are a primary or overlay
* plane.
*/
if (plane->type == DRM_PLANE_TYPE_PRIMARY)
zpos = 0;
else
zpos = omap_plane->id;
drm_plane_create_zpos_property(plane, zpos, 0, num_planes - 1);
drm_plane_create_alpha_property(plane);
drm_plane_create_blend_mode_property(plane, BIT(DRM_MODE_BLEND_PREMULTI) |
BIT(DRM_MODE_BLEND_COVERAGE));
if (omap_plane_supports_yuv(plane))
drm_plane_create_color_properties(plane,
BIT(DRM_COLOR_YCBCR_BT601) |
BIT(DRM_COLOR_YCBCR_BT709),
BIT(DRM_COLOR_YCBCR_FULL_RANGE) |
BIT(DRM_COLOR_YCBCR_LIMITED_RANGE),
DRM_COLOR_YCBCR_BT601,
DRM_COLOR_YCBCR_FULL_RANGE);
return plane;
error:
dev_err(dev->dev, "%s(): could not create plane: %d\n",
__func__, omap_plane->id);
kfree(omap_plane);
return NULL;
}
| linux-master | drivers/gpu/drm/omapdrm/omap_plane.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2011 Texas Instruments Incorporated - https://www.ti.com/
* Author: Rob Clark <[email protected]>
*/
#include <drm/drm_vblank.h>
#include "omap_drv.h"
struct omap_irq_wait {
struct list_head node;
wait_queue_head_t wq;
u32 irqmask;
int count;
};
/* call with wait_lock and dispc runtime held */
static void omap_irq_update(struct drm_device *dev)
{
struct omap_drm_private *priv = dev->dev_private;
struct omap_irq_wait *wait;
u32 irqmask = priv->irq_mask;
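	/* enable the static mask plus whatever the active waiters are waiting for */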
assert_spin_locked(&priv->wait_lock);
list_for_each_entry(wait, &priv->wait_list, node)
irqmask |= wait->irqmask;
DBG("irqmask=%08x", irqmask);
dispc_write_irqenable(priv->dispc, irqmask);
}
static void omap_irq_wait_handler(struct omap_irq_wait *wait)
{
wait->count--;
wake_up(&wait->wq);
}
struct omap_irq_wait *omap_irq_wait_init(struct drm_device *dev,
u32 irqmask, int count)
{
struct omap_drm_private *priv = dev->dev_private;
struct omap_irq_wait *wait = kzalloc(sizeof(*wait), GFP_KERNEL);
unsigned long flags;
init_waitqueue_head(&wait->wq);
wait->irqmask = irqmask;
wait->count = count;
spin_lock_irqsave(&priv->wait_lock, flags);
list_add(&wait->node, &priv->wait_list);
omap_irq_update(dev);
spin_unlock_irqrestore(&priv->wait_lock, flags);
return wait;
}
int omap_irq_wait(struct drm_device *dev, struct omap_irq_wait *wait,
unsigned long timeout)
{
struct omap_drm_private *priv = dev->dev_private;
unsigned long flags;
int ret;
ret = wait_event_timeout(wait->wq, (wait->count <= 0), timeout);
spin_lock_irqsave(&priv->wait_lock, flags);
list_del(&wait->node);
omap_irq_update(dev);
spin_unlock_irqrestore(&priv->wait_lock, flags);
kfree(wait);
return ret == 0 ? -1 : 0;
}
int omap_irq_enable_framedone(struct drm_crtc *crtc, bool enable)
{
struct drm_device *dev = crtc->dev;
struct omap_drm_private *priv = dev->dev_private;
unsigned long flags;
enum omap_channel channel = omap_crtc_channel(crtc);
int framedone_irq =
dispc_mgr_get_framedone_irq(priv->dispc, channel);
DBG("dev=%p, crtc=%u, enable=%d", dev, channel, enable);
spin_lock_irqsave(&priv->wait_lock, flags);
if (enable)
priv->irq_mask |= framedone_irq;
else
priv->irq_mask &= ~framedone_irq;
omap_irq_update(dev);
spin_unlock_irqrestore(&priv->wait_lock, flags);
return 0;
}
/**
* omap_irq_enable_vblank - enable vblank interrupt events
* @crtc: DRM CRTC
*
* Enable vblank interrupts for @crtc. If the device doesn't have
* a hardware vblank counter, this routine should be a no-op, since
* interrupts will have to stay on to keep the count accurate.
*
* RETURNS
* Zero on success, appropriate errno if the given @crtc's vblank
* interrupt cannot be enabled.
*/
int omap_irq_enable_vblank(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct omap_drm_private *priv = dev->dev_private;
unsigned long flags;
enum omap_channel channel = omap_crtc_channel(crtc);
DBG("dev=%p, crtc=%u", dev, channel);
spin_lock_irqsave(&priv->wait_lock, flags);
priv->irq_mask |= dispc_mgr_get_vsync_irq(priv->dispc,
channel);
omap_irq_update(dev);
spin_unlock_irqrestore(&priv->wait_lock, flags);
return 0;
}
/**
* omap_irq_disable_vblank - disable vblank interrupt events
* @crtc: DRM CRTC
*
* Disable vblank interrupts for @crtc. If the device doesn't have
* a hardware vblank counter, this routine should be a no-op, since
* interrupts will have to stay on to keep the count accurate.
*/
void omap_irq_disable_vblank(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct omap_drm_private *priv = dev->dev_private;
unsigned long flags;
enum omap_channel channel = omap_crtc_channel(crtc);
DBG("dev=%p, crtc=%u", dev, channel);
spin_lock_irqsave(&priv->wait_lock, flags);
priv->irq_mask &= ~dispc_mgr_get_vsync_irq(priv->dispc,
channel);
omap_irq_update(dev);
spin_unlock_irqrestore(&priv->wait_lock, flags);
}
static void omap_irq_fifo_underflow(struct omap_drm_private *priv,
u32 irqstatus)
{
static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL,
DEFAULT_RATELIMIT_BURST);
static const struct {
const char *name;
u32 mask;
} sources[] = {
{ "gfx", DISPC_IRQ_GFX_FIFO_UNDERFLOW },
{ "vid1", DISPC_IRQ_VID1_FIFO_UNDERFLOW },
{ "vid2", DISPC_IRQ_VID2_FIFO_UNDERFLOW },
{ "vid3", DISPC_IRQ_VID3_FIFO_UNDERFLOW },
};
const u32 mask = DISPC_IRQ_GFX_FIFO_UNDERFLOW
| DISPC_IRQ_VID1_FIFO_UNDERFLOW
| DISPC_IRQ_VID2_FIFO_UNDERFLOW
| DISPC_IRQ_VID3_FIFO_UNDERFLOW;
unsigned int i;
spin_lock(&priv->wait_lock);
irqstatus &= priv->irq_mask & mask;
spin_unlock(&priv->wait_lock);
if (!irqstatus)
return;
if (!__ratelimit(&_rs))
return;
DRM_ERROR("FIFO underflow on ");
for (i = 0; i < ARRAY_SIZE(sources); ++i) {
if (sources[i].mask & irqstatus)
pr_cont("%s ", sources[i].name);
}
pr_cont("(0x%08x)\n", irqstatus);
}
static void omap_irq_ocp_error_handler(struct drm_device *dev,
u32 irqstatus)
{
if (!(irqstatus & DISPC_IRQ_OCP_ERR))
return;
dev_err_ratelimited(dev->dev, "OCP error\n");
}
static irqreturn_t omap_irq_handler(int irq, void *arg)
{
struct drm_device *dev = (struct drm_device *) arg;
struct omap_drm_private *priv = dev->dev_private;
struct omap_irq_wait *wait, *n;
unsigned long flags;
unsigned int id;
u32 irqstatus;
irqstatus = dispc_read_irqstatus(priv->dispc);
dispc_clear_irqstatus(priv->dispc, irqstatus);
dispc_read_irqstatus(priv->dispc); /* flush posted write */
VERB("irqs: %08x", irqstatus);
for (id = 0; id < priv->num_pipes; id++) {
struct drm_crtc *crtc = priv->pipes[id].crtc;
enum omap_channel channel = omap_crtc_channel(crtc);
if (irqstatus & dispc_mgr_get_vsync_irq(priv->dispc, channel)) {
drm_handle_vblank(dev, id);
omap_crtc_vblank_irq(crtc);
}
if (irqstatus & dispc_mgr_get_sync_lost_irq(priv->dispc, channel))
omap_crtc_error_irq(crtc, irqstatus);
if (irqstatus & dispc_mgr_get_framedone_irq(priv->dispc, channel))
omap_crtc_framedone_irq(crtc, irqstatus);
}
omap_irq_ocp_error_handler(dev, irqstatus);
omap_irq_fifo_underflow(priv, irqstatus);
spin_lock_irqsave(&priv->wait_lock, flags);
list_for_each_entry_safe(wait, n, &priv->wait_list, node) {
if (wait->irqmask & irqstatus)
omap_irq_wait_handler(wait);
}
spin_unlock_irqrestore(&priv->wait_lock, flags);
return IRQ_HANDLED;
}
static const u32 omap_underflow_irqs[] = {
[OMAP_DSS_GFX] = DISPC_IRQ_GFX_FIFO_UNDERFLOW,
[OMAP_DSS_VIDEO1] = DISPC_IRQ_VID1_FIFO_UNDERFLOW,
[OMAP_DSS_VIDEO2] = DISPC_IRQ_VID2_FIFO_UNDERFLOW,
[OMAP_DSS_VIDEO3] = DISPC_IRQ_VID3_FIFO_UNDERFLOW,
};
int omap_drm_irq_install(struct drm_device *dev)
{
struct omap_drm_private *priv = dev->dev_private;
unsigned int num_mgrs = dispc_get_num_mgrs(priv->dispc);
unsigned int max_planes;
unsigned int i;
int ret;
spin_lock_init(&priv->wait_lock);
INIT_LIST_HEAD(&priv->wait_list);
priv->irq_mask = DISPC_IRQ_OCP_ERR;
max_planes = min(ARRAY_SIZE(priv->planes),
ARRAY_SIZE(omap_underflow_irqs));
for (i = 0; i < max_planes; ++i) {
if (priv->planes[i])
priv->irq_mask |= omap_underflow_irqs[i];
}
for (i = 0; i < num_mgrs; ++i)
priv->irq_mask |= dispc_mgr_get_sync_lost_irq(priv->dispc, i);
dispc_runtime_get(priv->dispc);
dispc_clear_irqstatus(priv->dispc, 0xffffffff);
dispc_runtime_put(priv->dispc);
ret = dispc_request_irq(priv->dispc, omap_irq_handler, dev);
if (ret < 0)
return ret;
priv->irq_enabled = true;
return 0;
}
void omap_drm_irq_uninstall(struct drm_device *dev)
{
struct omap_drm_private *priv = dev->dev_private;
if (!priv->irq_enabled)
return;
priv->irq_enabled = false;
dispc_free_irq(priv->dispc, dev);
}
| linux-master | drivers/gpu/drm/omapdrm/omap_irq.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2011 Texas Instruments Incorporated - https://www.ti.com/
* Author: Rob Clark <[email protected]>
*/
#include <linux/seq_file.h>
#include <drm/drm_crtc.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_file.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_framebuffer.h>
#include "omap_drv.h"
#include "omap_dmm_tiler.h"
#ifdef CONFIG_DEBUG_FS
static int gem_show(struct seq_file *m, void *arg)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
struct omap_drm_private *priv = dev->dev_private;
seq_printf(m, "All Objects:\n");
mutex_lock(&priv->list_lock);
omap_gem_describe_objects(&priv->obj_list, m);
mutex_unlock(&priv->list_lock);
return 0;
}
static int mm_show(struct seq_file *m, void *arg)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
struct drm_printer p = drm_seq_file_printer(m);
drm_mm_print(&dev->vma_offset_manager->vm_addr_space_mm, &p);
return 0;
}
#ifdef CONFIG_DRM_FBDEV_EMULATION
static int fb_show(struct seq_file *m, void *arg)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
struct drm_fb_helper *helper = dev->fb_helper;
struct drm_framebuffer *fb;
seq_printf(m, "fbcon ");
omap_framebuffer_describe(helper->fb, m);
mutex_lock(&dev->mode_config.fb_lock);
list_for_each_entry(fb, &dev->mode_config.fb_list, head) {
if (fb == helper->fb)
continue;
seq_printf(m, "user ");
omap_framebuffer_describe(fb, m);
}
mutex_unlock(&dev->mode_config.fb_lock);
return 0;
}
#endif
/* list of debugfs files that are applicable to all devices */
static struct drm_info_list omap_debugfs_list[] = {
{"gem", gem_show, 0},
{"mm", mm_show, 0},
#ifdef CONFIG_DRM_FBDEV_EMULATION
{"fb", fb_show, 0},
#endif
};
/* list of debugfs files that are specific to devices with dmm/tiler */
static struct drm_info_list omap_dmm_debugfs_list[] = {
{"tiler_map", tiler_map_show, 0},
};
void omap_debugfs_init(struct drm_minor *minor)
{
drm_debugfs_create_files(omap_debugfs_list,
ARRAY_SIZE(omap_debugfs_list),
minor->debugfs_root, minor);
if (dmm_is_available())
drm_debugfs_create_files(omap_dmm_debugfs_list,
ARRAY_SIZE(omap_dmm_debugfs_list),
minor->debugfs_root, minor);
}
#endif
| linux-master | drivers/gpu/drm/omapdrm/omap_debugfs.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2011 Texas Instruments Incorporated - https://www.ti.com/
* Author: Rob Clark <[email protected]>
*/
#include <linux/math64.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_mode.h>
#include <drm/drm_vblank.h>
#include "omap_drv.h"
#define to_omap_crtc_state(x) container_of(x, struct omap_crtc_state, base)
struct omap_crtc_state {
/* Must be first. */
struct drm_crtc_state base;
/* Shadow values for legacy userspace support. */
unsigned int rotation;
unsigned int zpos;
bool manually_updated;
};
#define to_omap_crtc(x) container_of(x, struct omap_crtc, base)
struct omap_crtc {
struct drm_crtc base;
const char *name;
struct omap_drm_pipeline *pipe;
enum omap_channel channel;
struct videomode vm;
bool ignore_digit_sync_lost;
bool enabled;
bool pending;
wait_queue_head_t pending_wait;
struct drm_pending_vblank_event *event;
struct delayed_work update_work;
void (*framedone_handler)(void *);
void *framedone_handler_data;
};
/* -----------------------------------------------------------------------------
* Helper Functions
*/
struct videomode *omap_crtc_timings(struct drm_crtc *crtc)
{
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
return &omap_crtc->vm;
}
enum omap_channel omap_crtc_channel(struct drm_crtc *crtc)
{
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
return omap_crtc->channel;
}
static bool omap_crtc_is_pending(struct drm_crtc *crtc)
{
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
unsigned long flags;
bool pending;
spin_lock_irqsave(&crtc->dev->event_lock, flags);
pending = omap_crtc->pending;
spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
return pending;
}
int omap_crtc_wait_pending(struct drm_crtc *crtc)
{
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
/*
* Timeout is set to a "sufficiently" high value, which should cover
* a single frame refresh even on slower displays.
*/
return wait_event_timeout(omap_crtc->pending_wait,
!omap_crtc_is_pending(crtc),
msecs_to_jiffies(250));
}
/* -----------------------------------------------------------------------------
* DSS Manager Functions
*/
/*
* Manager-ops, callbacks from output when they need to configure
* the upstream part of the video pipe.
*/
void omap_crtc_dss_start_update(struct omap_drm_private *priv,
enum omap_channel channel)
{
dispc_mgr_enable(priv->dispc, channel, true);
}
/* Called only from the encoder enable/disable and suspend/resume handlers. */
void omap_crtc_set_enabled(struct drm_crtc *crtc, bool enable)
{
struct omap_crtc_state *omap_state = to_omap_crtc_state(crtc->state);
struct drm_device *dev = crtc->dev;
struct omap_drm_private *priv = dev->dev_private;
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
enum omap_channel channel = omap_crtc->channel;
struct omap_irq_wait *wait;
u32 framedone_irq, vsync_irq;
int ret;
if (WARN_ON(omap_crtc->enabled == enable))
return;
if (omap_state->manually_updated) {
omap_irq_enable_framedone(crtc, enable);
omap_crtc->enabled = enable;
return;
}
if (omap_crtc->pipe->output->type == OMAP_DISPLAY_TYPE_HDMI) {
dispc_mgr_enable(priv->dispc, channel, enable);
omap_crtc->enabled = enable;
return;
}
if (omap_crtc->channel == OMAP_DSS_CHANNEL_DIGIT) {
/*
* Digit output produces some sync lost interrupts during the
* first frame when enabling, so we need to ignore those.
*/
omap_crtc->ignore_digit_sync_lost = true;
}
framedone_irq = dispc_mgr_get_framedone_irq(priv->dispc,
channel);
vsync_irq = dispc_mgr_get_vsync_irq(priv->dispc, channel);
if (enable) {
wait = omap_irq_wait_init(dev, vsync_irq, 1);
} else {
/*
* When we disable the digit output, we need to wait for
* FRAMEDONE to know that DISPC has finished with the output.
*
* OMAP2/3 does not have FRAMEDONE irq for digit output, and in
* that case we need to use vsync interrupt, and wait for both
* even and odd frames.
*/
if (framedone_irq)
wait = omap_irq_wait_init(dev, framedone_irq, 1);
else
wait = omap_irq_wait_init(dev, vsync_irq, 2);
}
dispc_mgr_enable(priv->dispc, channel, enable);
omap_crtc->enabled = enable;
ret = omap_irq_wait(dev, wait, msecs_to_jiffies(100));
if (ret) {
dev_err(dev->dev, "%s: timeout waiting for %s\n",
omap_crtc->name, enable ? "enable" : "disable");
}
if (omap_crtc->channel == OMAP_DSS_CHANNEL_DIGIT) {
omap_crtc->ignore_digit_sync_lost = false;
/* make sure the irq handler sees the value above */
mb();
}
}
int omap_crtc_dss_enable(struct omap_drm_private *priv, enum omap_channel channel)
{
struct drm_crtc *crtc = priv->channels[channel]->crtc;
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
dispc_mgr_set_timings(priv->dispc, omap_crtc->channel,
&omap_crtc->vm);
omap_crtc_set_enabled(&omap_crtc->base, true);
return 0;
}
void omap_crtc_dss_disable(struct omap_drm_private *priv, enum omap_channel channel)
{
struct drm_crtc *crtc = priv->channels[channel]->crtc;
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
omap_crtc_set_enabled(&omap_crtc->base, false);
}
void omap_crtc_dss_set_timings(struct omap_drm_private *priv,
enum omap_channel channel,
const struct videomode *vm)
{
struct drm_crtc *crtc = priv->channels[channel]->crtc;
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
DBG("%s", omap_crtc->name);
omap_crtc->vm = *vm;
}
void omap_crtc_dss_set_lcd_config(struct omap_drm_private *priv,
enum omap_channel channel,
const struct dss_lcd_mgr_config *config)
{
struct drm_crtc *crtc = priv->channels[channel]->crtc;
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
DBG("%s", omap_crtc->name);
dispc_mgr_set_lcd_config(priv->dispc, omap_crtc->channel,
config);
}
int omap_crtc_dss_register_framedone(
struct omap_drm_private *priv, enum omap_channel channel,
void (*handler)(void *), void *data)
{
struct drm_crtc *crtc = priv->channels[channel]->crtc;
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
struct drm_device *dev = omap_crtc->base.dev;
if (omap_crtc->framedone_handler)
return -EBUSY;
dev_dbg(dev->dev, "register framedone %s", omap_crtc->name);
omap_crtc->framedone_handler = handler;
omap_crtc->framedone_handler_data = data;
return 0;
}
void omap_crtc_dss_unregister_framedone(
struct omap_drm_private *priv, enum omap_channel channel,
void (*handler)(void *), void *data)
{
struct drm_crtc *crtc = priv->channels[channel]->crtc;
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
struct drm_device *dev = omap_crtc->base.dev;
dev_dbg(dev->dev, "unregister framedone %s", omap_crtc->name);
WARN_ON(omap_crtc->framedone_handler != handler);
WARN_ON(omap_crtc->framedone_handler_data != data);
omap_crtc->framedone_handler = NULL;
omap_crtc->framedone_handler_data = NULL;
}
/* -----------------------------------------------------------------------------
* Setup, Flush and Page Flip
*/
void omap_crtc_error_irq(struct drm_crtc *crtc, u32 irqstatus)
{
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
if (omap_crtc->ignore_digit_sync_lost) {
irqstatus &= ~DISPC_IRQ_SYNC_LOST_DIGIT;
if (!irqstatus)
return;
}
DRM_ERROR_RATELIMITED("%s: errors: %08x\n", omap_crtc->name, irqstatus);
}
void omap_crtc_vblank_irq(struct drm_crtc *crtc)
{
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
struct drm_device *dev = omap_crtc->base.dev;
struct omap_drm_private *priv = dev->dev_private;
bool pending;
spin_lock(&crtc->dev->event_lock);
/*
* If the dispc is busy we're racing the flush operation. Try again on
* the next vblank interrupt.
*/
if (dispc_mgr_go_busy(priv->dispc, omap_crtc->channel)) {
spin_unlock(&crtc->dev->event_lock);
return;
}
/* Send the vblank event if one has been requested. */
if (omap_crtc->event) {
drm_crtc_send_vblank_event(crtc, omap_crtc->event);
omap_crtc->event = NULL;
}
pending = omap_crtc->pending;
omap_crtc->pending = false;
spin_unlock(&crtc->dev->event_lock);
if (pending)
drm_crtc_vblank_put(crtc);
/* Wake up omap_atomic_complete. */
wake_up(&omap_crtc->pending_wait);
DBG("%s: apply done", omap_crtc->name);
}
void omap_crtc_framedone_irq(struct drm_crtc *crtc, uint32_t irqstatus)
{
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
if (!omap_crtc->framedone_handler)
return;
omap_crtc->framedone_handler(omap_crtc->framedone_handler_data);
spin_lock(&crtc->dev->event_lock);
/* Send the vblank event if one has been requested. */
if (omap_crtc->event) {
drm_crtc_send_vblank_event(crtc, omap_crtc->event);
omap_crtc->event = NULL;
}
omap_crtc->pending = false;
spin_unlock(&crtc->dev->event_lock);
/* Wake up omap_atomic_complete. */
wake_up(&omap_crtc->pending_wait);
}
void omap_crtc_flush(struct drm_crtc *crtc)
{
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
struct omap_crtc_state *omap_state = to_omap_crtc_state(crtc->state);
if (!omap_state->manually_updated)
return;
if (!delayed_work_pending(&omap_crtc->update_work))
schedule_delayed_work(&omap_crtc->update_work, 0);
}
static void omap_crtc_manual_display_update(struct work_struct *data)
{
struct omap_crtc *omap_crtc =
container_of(data, struct omap_crtc, update_work.work);
struct omap_dss_device *dssdev = omap_crtc->pipe->output;
struct drm_device *dev = omap_crtc->base.dev;
int ret;
if (!dssdev || !dssdev->dsi_ops || !dssdev->dsi_ops->update)
return;
ret = dssdev->dsi_ops->update(dssdev);
if (ret < 0) {
spin_lock_irq(&dev->event_lock);
omap_crtc->pending = false;
spin_unlock_irq(&dev->event_lock);
wake_up(&omap_crtc->pending_wait);
}
}
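/*
 * Convert a sign-magnitude S31.32 CTM coefficient to the s2.8 format used by
 * the CPR matrix: drop the sign bit, shift the magnitude down from 32 to 8
 * fractional bits, clamp it to 9 bits (e.g. 1.0, i.e. 1ULL << 32, maps to
 * 0x100), then re-apply the sign.
 */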
static s16 omap_crtc_s31_32_to_s2_8(s64 coef)
{
u64 sign_bit = 1ULL << 63;
u64 cbits = (u64)coef;
s16 ret = clamp_val(((cbits & ~sign_bit) >> 24), 0, 0x1ff);
if (cbits & sign_bit)
ret = -ret;
return ret;
}
static void omap_crtc_cpr_coefs_from_ctm(const struct drm_color_ctm *ctm,
struct omap_dss_cpr_coefs *cpr)
{
cpr->rr = omap_crtc_s31_32_to_s2_8(ctm->matrix[0]);
cpr->rg = omap_crtc_s31_32_to_s2_8(ctm->matrix[1]);
cpr->rb = omap_crtc_s31_32_to_s2_8(ctm->matrix[2]);
cpr->gr = omap_crtc_s31_32_to_s2_8(ctm->matrix[3]);
cpr->gg = omap_crtc_s31_32_to_s2_8(ctm->matrix[4]);
cpr->gb = omap_crtc_s31_32_to_s2_8(ctm->matrix[5]);
cpr->br = omap_crtc_s31_32_to_s2_8(ctm->matrix[6]);
cpr->bg = omap_crtc_s31_32_to_s2_8(ctm->matrix[7]);
cpr->bb = omap_crtc_s31_32_to_s2_8(ctm->matrix[8]);
}
static void omap_crtc_write_crtc_properties(struct drm_crtc *crtc)
{
struct omap_drm_private *priv = crtc->dev->dev_private;
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
struct omap_overlay_manager_info info;
memset(&info, 0, sizeof(info));
info.default_color = 0x000000;
info.trans_enabled = false;
info.partial_alpha_enabled = false;
if (crtc->state->ctm) {
struct drm_color_ctm *ctm = crtc->state->ctm->data;
info.cpr_enable = true;
omap_crtc_cpr_coefs_from_ctm(ctm, &info.cpr_coefs);
} else {
info.cpr_enable = false;
}
dispc_mgr_setup(priv->dispc, omap_crtc->channel, &info);
}
/* -----------------------------------------------------------------------------
* CRTC Functions
*/
static void omap_crtc_destroy(struct drm_crtc *crtc)
{
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
DBG("%s", omap_crtc->name);
drm_crtc_cleanup(crtc);
kfree(omap_crtc);
}
static void omap_crtc_arm_event(struct drm_crtc *crtc)
{
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
WARN_ON(omap_crtc->pending);
omap_crtc->pending = true;
if (crtc->state->event) {
omap_crtc->event = crtc->state->event;
crtc->state->event = NULL;
}
}
static void omap_crtc_atomic_enable(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct omap_drm_private *priv = crtc->dev->dev_private;
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
struct omap_crtc_state *omap_state = to_omap_crtc_state(crtc->state);
int ret;
DBG("%s", omap_crtc->name);
dispc_runtime_get(priv->dispc);
	/* manually updated display will not trigger vsync irq */
if (omap_state->manually_updated)
return;
drm_crtc_vblank_on(crtc);
ret = drm_crtc_vblank_get(crtc);
WARN_ON(ret != 0);
spin_lock_irq(&crtc->dev->event_lock);
omap_crtc_arm_event(crtc);
spin_unlock_irq(&crtc->dev->event_lock);
}
static void omap_crtc_atomic_disable(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct omap_drm_private *priv = crtc->dev->dev_private;
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
struct drm_device *dev = crtc->dev;
DBG("%s", omap_crtc->name);
spin_lock_irq(&crtc->dev->event_lock);
if (crtc->state->event) {
drm_crtc_send_vblank_event(crtc, crtc->state->event);
crtc->state->event = NULL;
}
spin_unlock_irq(&crtc->dev->event_lock);
cancel_delayed_work(&omap_crtc->update_work);
if (!omap_crtc_wait_pending(crtc))
dev_warn(dev->dev, "manual display update did not finish!");
drm_crtc_vblank_off(crtc);
dispc_runtime_put(priv->dispc);
}
static enum drm_mode_status omap_crtc_mode_valid(struct drm_crtc *crtc,
const struct drm_display_mode *mode)
{
struct omap_drm_private *priv = crtc->dev->dev_private;
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
struct videomode vm = {0};
int r;
drm_display_mode_to_videomode(mode, &vm);
/*
* DSI might not call this, since the supplied mode is not a
* valid DISPC mode. DSI will calculate and configure the
* proper DISPC mode later.
*/
if (omap_crtc->pipe->output->type != OMAP_DISPLAY_TYPE_DSI) {
r = dispc_mgr_check_timings(priv->dispc,
omap_crtc->channel,
&vm);
if (r)
return r;
}
/* Check for bandwidth limit */
if (priv->max_bandwidth) {
/*
* Estimation for the bandwidth need of a given mode with one
* full screen plane:
* bandwidth = resolution * 32bpp * (pclk / (vtotal * htotal))
* ^^ Refresh rate ^^
*
* The interlaced mode is taken into account by using the
* pixelclock in the calculation.
*
* The equation is rearranged for 64bit arithmetic.
*/
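		/*
		 * For example, a 1920x1080@60 mode (148.5 MHz pixel clock,
		 * 2200x1125 total) works out to roughly
		 * 148500000 * 1920 * 1080 * 4 / (2200 * 1125) ~= 498 MB/s.
		 */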
uint64_t bandwidth = mode->clock * 1000;
unsigned int bpp = 4;
bandwidth = bandwidth * mode->hdisplay * mode->vdisplay * bpp;
bandwidth = div_u64(bandwidth, mode->htotal * mode->vtotal);
/*
* Reject modes which would need more bandwidth if used with one
* full resolution plane (most common use case).
*/
if (priv->max_bandwidth < bandwidth)
return MODE_BAD;
}
return MODE_OK;
}
static void omap_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
struct drm_display_mode *mode = &crtc->state->adjusted_mode;
DBG("%s: set mode: " DRM_MODE_FMT,
omap_crtc->name, DRM_MODE_ARG(mode));
drm_display_mode_to_videomode(mode, &omap_crtc->vm);
}
static bool omap_crtc_is_manually_updated(struct drm_crtc *crtc)
{
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
struct omap_dss_device *dssdev = omap_crtc->pipe->output;
if (!dssdev || !dssdev->dsi_ops || !dssdev->dsi_ops->is_video_mode)
return false;
if (dssdev->dsi_ops->is_video_mode(dssdev))
return false;
DBG("detected manually updated display!");
return true;
}
static int omap_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
crtc);
struct drm_plane_state *pri_state;
if (crtc_state->color_mgmt_changed && crtc_state->degamma_lut) {
unsigned int length = crtc_state->degamma_lut->length /
sizeof(struct drm_color_lut);
if (length < 2)
return -EINVAL;
}
pri_state = drm_atomic_get_new_plane_state(state,
crtc->primary);
if (pri_state) {
struct omap_crtc_state *omap_crtc_state =
to_omap_crtc_state(crtc_state);
/* Mirror new values for zpos and rotation in omap_crtc_state */
omap_crtc_state->zpos = pri_state->zpos;
omap_crtc_state->rotation = pri_state->rotation;
/* Check if this CRTC is for a manually updated display */
omap_crtc_state->manually_updated = omap_crtc_is_manually_updated(crtc);
}
return 0;
}
static void omap_crtc_atomic_begin(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
}
static void omap_crtc_atomic_flush(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct omap_drm_private *priv = crtc->dev->dev_private;
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
struct omap_crtc_state *omap_crtc_state = to_omap_crtc_state(crtc->state);
int ret;
if (crtc->state->color_mgmt_changed) {
struct drm_color_lut *lut = NULL;
unsigned int length = 0;
if (crtc->state->degamma_lut) {
lut = (struct drm_color_lut *)
crtc->state->degamma_lut->data;
length = crtc->state->degamma_lut->length /
sizeof(*lut);
}
dispc_mgr_set_gamma(priv->dispc, omap_crtc->channel,
lut, length);
}
omap_crtc_write_crtc_properties(crtc);
/* Only flush the CRTC if it is currently enabled. */
if (!omap_crtc->enabled)
return;
DBG("%s: GO", omap_crtc->name);
if (omap_crtc_state->manually_updated) {
/* send new image for page flips and modeset changes */
spin_lock_irq(&crtc->dev->event_lock);
omap_crtc_flush(crtc);
omap_crtc_arm_event(crtc);
spin_unlock_irq(&crtc->dev->event_lock);
return;
}
ret = drm_crtc_vblank_get(crtc);
WARN_ON(ret != 0);
spin_lock_irq(&crtc->dev->event_lock);
dispc_mgr_go(priv->dispc, omap_crtc->channel);
omap_crtc_arm_event(crtc);
spin_unlock_irq(&crtc->dev->event_lock);
}
static int omap_crtc_atomic_set_property(struct drm_crtc *crtc,
struct drm_crtc_state *state,
struct drm_property *property,
u64 val)
{
struct omap_drm_private *priv = crtc->dev->dev_private;
struct drm_plane_state *plane_state;
/*
* Delegate property set to the primary plane. Get the plane state and
* set the property directly, the shadow copy will be assigned in the
* omap_crtc_atomic_check callback. This way updates to plane state will
* always be mirrored in the crtc state correctly.
*/
plane_state = drm_atomic_get_plane_state(state->state, crtc->primary);
if (IS_ERR(plane_state))
return PTR_ERR(plane_state);
if (property == crtc->primary->rotation_property)
plane_state->rotation = val;
else if (property == priv->zorder_prop)
plane_state->zpos = val;
else
return -EINVAL;
return 0;
}
static int omap_crtc_atomic_get_property(struct drm_crtc *crtc,
const struct drm_crtc_state *state,
struct drm_property *property,
u64 *val)
{
struct omap_drm_private *priv = crtc->dev->dev_private;
struct omap_crtc_state *omap_state = to_omap_crtc_state(state);
if (property == crtc->primary->rotation_property)
*val = omap_state->rotation;
else if (property == priv->zorder_prop)
*val = omap_state->zpos;
else
return -EINVAL;
return 0;
}
static void omap_crtc_reset(struct drm_crtc *crtc)
{
struct omap_crtc_state *state;
if (crtc->state)
__drm_atomic_helper_crtc_destroy_state(crtc->state);
kfree(crtc->state);
state = kzalloc(sizeof(*state), GFP_KERNEL);
if (state)
__drm_atomic_helper_crtc_reset(crtc, &state->base);
}
static struct drm_crtc_state *
omap_crtc_duplicate_state(struct drm_crtc *crtc)
{
struct omap_crtc_state *state, *current_state;
if (WARN_ON(!crtc->state))
return NULL;
current_state = to_omap_crtc_state(crtc->state);
state = kmalloc(sizeof(*state), GFP_KERNEL);
if (!state)
return NULL;
__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
state->zpos = current_state->zpos;
state->rotation = current_state->rotation;
state->manually_updated = current_state->manually_updated;
return &state->base;
}
static const struct drm_crtc_funcs omap_crtc_funcs = {
.reset = omap_crtc_reset,
.set_config = drm_atomic_helper_set_config,
.destroy = omap_crtc_destroy,
.page_flip = drm_atomic_helper_page_flip,
.atomic_duplicate_state = omap_crtc_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
.atomic_set_property = omap_crtc_atomic_set_property,
.atomic_get_property = omap_crtc_atomic_get_property,
.enable_vblank = omap_irq_enable_vblank,
.disable_vblank = omap_irq_disable_vblank,
};
static const struct drm_crtc_helper_funcs omap_crtc_helper_funcs = {
.mode_set_nofb = omap_crtc_mode_set_nofb,
.atomic_check = omap_crtc_atomic_check,
.atomic_begin = omap_crtc_atomic_begin,
.atomic_flush = omap_crtc_atomic_flush,
.atomic_enable = omap_crtc_atomic_enable,
.atomic_disable = omap_crtc_atomic_disable,
.mode_valid = omap_crtc_mode_valid,
};
/* -----------------------------------------------------------------------------
* Init and Cleanup
*/
static const char *channel_names[] = {
[OMAP_DSS_CHANNEL_LCD] = "lcd",
[OMAP_DSS_CHANNEL_DIGIT] = "tv",
[OMAP_DSS_CHANNEL_LCD2] = "lcd2",
[OMAP_DSS_CHANNEL_LCD3] = "lcd3",
};
/* initialize crtc */
struct drm_crtc *omap_crtc_init(struct drm_device *dev,
struct omap_drm_pipeline *pipe,
struct drm_plane *plane)
{
struct omap_drm_private *priv = dev->dev_private;
struct drm_crtc *crtc = NULL;
struct omap_crtc *omap_crtc;
enum omap_channel channel;
int ret;
channel = pipe->output->dispc_channel;
DBG("%s", channel_names[channel]);
omap_crtc = kzalloc(sizeof(*omap_crtc), GFP_KERNEL);
if (!omap_crtc)
return ERR_PTR(-ENOMEM);
crtc = &omap_crtc->base;
init_waitqueue_head(&omap_crtc->pending_wait);
omap_crtc->pipe = pipe;
omap_crtc->channel = channel;
omap_crtc->name = channel_names[channel];
/*
* We want to refresh manually updated displays from dirty callback,
* which is called quite often (e.g. for each drawn line). This will
* be used to do the display update asynchronously to avoid blocking
* the rendering process and merges multiple dirty calls into one
* update if they arrive very fast. We also call this function for
* atomic display updates (e.g. for page flips), which means we do
* not need extra locking. Atomic updates should be synchronous, but
* need to wait for the framedone interrupt anyways.
*/
INIT_DELAYED_WORK(&omap_crtc->update_work,
omap_crtc_manual_display_update);
ret = drm_crtc_init_with_planes(dev, crtc, plane, NULL,
&omap_crtc_funcs, NULL);
if (ret < 0) {
dev_err(dev->dev, "%s(): could not init crtc for: %s\n",
__func__, pipe->output->name);
kfree(omap_crtc);
return ERR_PTR(ret);
}
drm_crtc_helper_add(crtc, &omap_crtc_helper_funcs);
/* The dispc API adapts to whatever size, but the HW supports a
* 256-element gamma table for LCDs and a 1024-element table for
* OMAP_DSS_CHANNEL_DIGIT. The X server assumes 256-element gamma
* tables, so let's use that. The size of the HW gamma table can be
* extracted with dispc_mgr_gamma_size(); if it returns 0, the
* gamma table is not supported.
*/
if (dispc_mgr_gamma_size(priv->dispc, channel)) {
unsigned int gamma_lut_size = 256;
drm_crtc_enable_color_mgmt(crtc, gamma_lut_size, true, 0);
drm_mode_crtc_set_gamma_size(crtc, gamma_lut_size);
}
omap_plane_install_properties(crtc->primary, &crtc->base);
return crtc;
}
| linux-master | drivers/gpu/drm/omapdrm/omap_crtc.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2011 Texas Instruments Incorporated - https://www.ti.com/
* Author: Rob Clark <[email protected]>
*/
#include <linux/list.h>
#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_edid.h>
#include "omap_drv.h"
/*
* encoder funcs
*/
#define to_omap_encoder(x) container_of(x, struct omap_encoder, base)
/* The encoder and connector both map to the same dssdev: the encoder
* handles the 'active' parts, i.e. anything that modifies the state
* of the hw, and the connector handles the 'read-only' parts, like
* detecting the connection and reading the EDID.
*/
struct omap_encoder {
struct drm_encoder base;
struct omap_dss_device *output;
};
static void omap_encoder_destroy(struct drm_encoder *encoder)
{
struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
drm_encoder_cleanup(encoder);
kfree(omap_encoder);
}
static const struct drm_encoder_funcs omap_encoder_funcs = {
.destroy = omap_encoder_destroy,
};
static void omap_encoder_update_videomode_flags(struct videomode *vm,
u32 bus_flags)
{
if (!(vm->flags & (DISPLAY_FLAGS_DE_LOW |
DISPLAY_FLAGS_DE_HIGH))) {
if (bus_flags & DRM_BUS_FLAG_DE_LOW)
vm->flags |= DISPLAY_FLAGS_DE_LOW;
else if (bus_flags & DRM_BUS_FLAG_DE_HIGH)
vm->flags |= DISPLAY_FLAGS_DE_HIGH;
}
if (!(vm->flags & (DISPLAY_FLAGS_PIXDATA_POSEDGE |
DISPLAY_FLAGS_PIXDATA_NEGEDGE))) {
if (bus_flags & DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE)
vm->flags |= DISPLAY_FLAGS_PIXDATA_POSEDGE;
else if (bus_flags & DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE)
vm->flags |= DISPLAY_FLAGS_PIXDATA_NEGEDGE;
}
if (!(vm->flags & (DISPLAY_FLAGS_SYNC_POSEDGE |
DISPLAY_FLAGS_SYNC_NEGEDGE))) {
if (bus_flags & DRM_BUS_FLAG_SYNC_DRIVE_POSEDGE)
vm->flags |= DISPLAY_FLAGS_SYNC_POSEDGE;
else if (bus_flags & DRM_BUS_FLAG_SYNC_DRIVE_NEGEDGE)
vm->flags |= DISPLAY_FLAGS_SYNC_NEGEDGE;
}
}
static void omap_encoder_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
struct omap_dss_device *output = omap_encoder->output;
struct drm_device *dev = encoder->dev;
struct drm_connector *connector;
struct drm_bridge *bridge;
struct videomode vm = { 0 };
u32 bus_flags;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
if (connector->encoder == encoder)
break;
}
drm_display_mode_to_videomode(adjusted_mode, &vm);
/*
* HACK: This fixes the vm flags.
* struct drm_display_mode does not contain the VSYNC/HSYNC/DE flags and
* they get lost when converting back and forth between struct
* drm_display_mode and struct videomode. The hack below goes and
* fetches the missing flags.
*
* A better solution is to use DRM's bus-flags through the whole driver.
*/
for (bridge = output->bridge; bridge;
bridge = drm_bridge_get_next_bridge(bridge)) {
if (!bridge->timings)
continue;
bus_flags = bridge->timings->input_bus_flags;
omap_encoder_update_videomode_flags(&vm, bus_flags);
}
bus_flags = connector->display_info.bus_flags;
omap_encoder_update_videomode_flags(&vm, bus_flags);
/* Set timings for all devices in the display pipeline. */
dss_mgr_set_timings(output, &vm);
}
static const struct drm_encoder_helper_funcs omap_encoder_helper_funcs = {
.mode_set = omap_encoder_mode_set,
};
/* initialize encoder */
struct drm_encoder *omap_encoder_init(struct drm_device *dev,
struct omap_dss_device *output)
{
struct drm_encoder *encoder = NULL;
struct omap_encoder *omap_encoder;
omap_encoder = kzalloc(sizeof(*omap_encoder), GFP_KERNEL);
if (!omap_encoder)
goto fail;
omap_encoder->output = output;
encoder = &omap_encoder->base;
drm_encoder_init(dev, encoder, &omap_encoder_funcs,
DRM_MODE_ENCODER_TMDS, NULL);
drm_encoder_helper_add(encoder, &omap_encoder_helper_funcs);
return encoder;
fail:
if (encoder)
omap_encoder_destroy(encoder);
return NULL;
}
| linux-master | drivers/gpu/drm/omapdrm/omap_encoder.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* DMM IOMMU driver support functions for TI OMAP processors.
*
* Copyright (C) 2011 Texas Instruments Incorporated - https://www.ti.com/
* Author: Rob Clark <[email protected]>
* Andy Gross <[email protected]>
*/
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h> /* platform_device() */
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include "omap_dmm_tiler.h"
#include "omap_dmm_priv.h"
#define DMM_DRIVER_NAME "dmm"
/* mappings for associating views to luts */
static struct tcm *containers[TILFMT_NFORMATS];
static struct dmm *omap_dmm;
#if defined(CONFIG_OF)
static const struct of_device_id dmm_of_match[];
#endif
/* global spinlock for protecting lists */
static DEFINE_SPINLOCK(list_lock);
/* Geometry table */
#define GEOM(xshift, yshift, bytes_per_pixel) { \
.x_shft = (xshift), \
.y_shft = (yshift), \
.cpp = (bytes_per_pixel), \
.slot_w = 1 << (SLOT_WIDTH_BITS - (xshift)), \
.slot_h = 1 << (SLOT_HEIGHT_BITS - (yshift)), \
}
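/*
* Illustrative expansion of the macro above: GEOM(0, 1, 2), as used for
* TILFMT_16BIT, yields cpp = 2, slot_w = 1 << SLOT_WIDTH_BITS and
* slot_h = 1 << (SLOT_HEIGHT_BITS - 1).
*/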
static const struct {
u32 x_shft; /* unused X-bits (as part of bpp) */
u32 y_shft; /* unused Y-bits (as part of bpp) */
u32 cpp; /* bytes/chars per pixel */
u32 slot_w; /* width of each slot (in pixels) */
u32 slot_h; /* height of each slot (in pixels) */
} geom[TILFMT_NFORMATS] = {
[TILFMT_8BIT] = GEOM(0, 0, 1),
[TILFMT_16BIT] = GEOM(0, 1, 2),
[TILFMT_32BIT] = GEOM(1, 1, 4),
[TILFMT_PAGE] = GEOM(SLOT_WIDTH_BITS, SLOT_HEIGHT_BITS, 1),
};
/* lookup table for registers w/ per-engine instances */
static const u32 reg[][4] = {
[PAT_STATUS] = {DMM_PAT_STATUS__0, DMM_PAT_STATUS__1,
DMM_PAT_STATUS__2, DMM_PAT_STATUS__3},
[PAT_DESCR] = {DMM_PAT_DESCR__0, DMM_PAT_DESCR__1,
DMM_PAT_DESCR__2, DMM_PAT_DESCR__3},
};
static int dmm_dma_copy(struct dmm *dmm, dma_addr_t src, dma_addr_t dst)
{
struct dma_async_tx_descriptor *tx;
enum dma_status status;
dma_cookie_t cookie;
tx = dmaengine_prep_dma_memcpy(dmm->wa_dma_chan, dst, src, 4, 0);
if (!tx) {
dev_err(dmm->dev, "Failed to prepare DMA memcpy\n");
return -EIO;
}
cookie = tx->tx_submit(tx);
if (dma_submit_error(cookie)) {
dev_err(dmm->dev, "Failed to do DMA tx_submit\n");
return -EIO;
}
status = dma_sync_wait(dmm->wa_dma_chan, cookie);
if (status != DMA_COMPLETE)
dev_err(dmm->dev, "i878 wa DMA copy failure\n");
dmaengine_terminate_all(dmm->wa_dma_chan);
return 0;
}
static u32 dmm_read_wa(struct dmm *dmm, u32 reg)
{
dma_addr_t src, dst;
int r;
src = dmm->phys_base + reg;
dst = dmm->wa_dma_handle;
r = dmm_dma_copy(dmm, src, dst);
if (r) {
dev_err(dmm->dev, "sDMA read transfer timeout\n");
return readl(dmm->base + reg);
}
/*
* As per i878 workaround, the DMA is used to access the DMM registers.
* Make sure that the readl is not moved by the compiler or the CPU
* earlier than the DMA finished writing the value to memory.
*/
rmb();
return readl(dmm->wa_dma_data);
}
static void dmm_write_wa(struct dmm *dmm, u32 val, u32 reg)
{
dma_addr_t src, dst;
int r;
writel(val, dmm->wa_dma_data);
/*
* As per i878 workaround, the DMA is used to access the DMM registers.
* Make sure that the writel is not moved by the compiler or the CPU, so
* the data will be in place before we start the DMA to do the actual
* register write.
*/
wmb();
src = dmm->wa_dma_handle;
dst = dmm->phys_base + reg;
r = dmm_dma_copy(dmm, src, dst);
if (r) {
dev_err(dmm->dev, "sDMA write transfer timeout\n");
writel(val, dmm->base + reg);
}
}
static u32 dmm_read(struct dmm *dmm, u32 reg)
{
if (dmm->dmm_workaround) {
u32 v;
unsigned long flags;
spin_lock_irqsave(&dmm->wa_lock, flags);
v = dmm_read_wa(dmm, reg);
spin_unlock_irqrestore(&dmm->wa_lock, flags);
return v;
} else {
return readl(dmm->base + reg);
}
}
static void dmm_write(struct dmm *dmm, u32 val, u32 reg)
{
if (dmm->dmm_workaround) {
unsigned long flags;
spin_lock_irqsave(&dmm->wa_lock, flags);
dmm_write_wa(dmm, val, reg);
spin_unlock_irqrestore(&dmm->wa_lock, flags);
} else {
writel(val, dmm->base + reg);
}
}
static int dmm_workaround_init(struct dmm *dmm)
{
dma_cap_mask_t mask;
spin_lock_init(&dmm->wa_lock);
dmm->wa_dma_data = dma_alloc_coherent(dmm->dev, sizeof(u32),
&dmm->wa_dma_handle, GFP_KERNEL);
if (!dmm->wa_dma_data)
return -ENOMEM;
dma_cap_zero(mask);
dma_cap_set(DMA_MEMCPY, mask);
dmm->wa_dma_chan = dma_request_channel(mask, NULL, NULL);
if (!dmm->wa_dma_chan) {
dma_free_coherent(dmm->dev, 4, dmm->wa_dma_data, dmm->wa_dma_handle);
return -ENODEV;
}
return 0;
}
static void dmm_workaround_uninit(struct dmm *dmm)
{
dma_release_channel(dmm->wa_dma_chan);
dma_free_coherent(dmm->dev, 4, dmm->wa_dma_data, dmm->wa_dma_handle);
}
/* simple allocator to grab next 16 byte aligned memory from txn */
static void *alloc_dma(struct dmm_txn *txn, size_t sz, dma_addr_t *pa)
{
void *ptr;
struct refill_engine *engine = txn->engine_handle;
/* dmm programming requires 16 byte aligned addresses */
txn->current_pa = round_up(txn->current_pa, 16);
txn->current_va = (void *)round_up((long)txn->current_va, 16);
ptr = txn->current_va;
*pa = txn->current_pa;
txn->current_pa += sz;
txn->current_va += sz;
BUG_ON((txn->current_va - engine->refill_va) > REFILL_BUFFER_SIZE);
return ptr;
}
/* check status and spin until wait_mask comes true */
static int wait_status(struct refill_engine *engine, u32 wait_mask)
{
struct dmm *dmm = engine->dmm;
u32 r = 0, err, i;
i = DMM_FIXED_RETRY_COUNT;
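/*
* The loop below polls PAT_STATUS roughly once per microsecond, so the
* effective timeout is on the order of DMM_FIXED_RETRY_COUNT microseconds.
*/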
while (true) {
r = dmm_read(dmm, reg[PAT_STATUS][engine->id]);
err = r & DMM_PATSTATUS_ERR;
if (err) {
dev_err(dmm->dev,
"%s: error (engine%d). PAT_STATUS: 0x%08x\n",
__func__, engine->id, r);
return -EFAULT;
}
if ((r & wait_mask) == wait_mask)
break;
if (--i == 0) {
dev_err(dmm->dev,
"%s: timeout (engine%d). PAT_STATUS: 0x%08x\n",
__func__, engine->id, r);
return -ETIMEDOUT;
}
udelay(1);
}
return 0;
}
static void release_engine(struct refill_engine *engine)
{
unsigned long flags;
spin_lock_irqsave(&list_lock, flags);
list_add(&engine->idle_node, &omap_dmm->idle_head);
spin_unlock_irqrestore(&list_lock, flags);
atomic_inc(&omap_dmm->engine_counter);
wake_up_interruptible(&omap_dmm->engine_queue);
}
static irqreturn_t omap_dmm_irq_handler(int irq, void *arg)
{
struct dmm *dmm = arg;
u32 status = dmm_read(dmm, DMM_PAT_IRQSTATUS);
int i;
/* ack IRQ */
dmm_write(dmm, status, DMM_PAT_IRQSTATUS);
for (i = 0; i < dmm->num_engines; i++) {
if (status & DMM_IRQSTAT_ERR_MASK)
dev_err(dmm->dev,
"irq error(engine%d): IRQSTAT 0x%02x\n",
i, status & 0xff);
if (status & DMM_IRQSTAT_LST) {
if (dmm->engines[i].async)
release_engine(&dmm->engines[i]);
complete(&dmm->engines[i].compl);
}
status >>= 8;
}
return IRQ_HANDLED;
}
/*
* Get a handle for a DMM transaction
*/
static struct dmm_txn *dmm_txn_init(struct dmm *dmm, struct tcm *tcm)
{
struct dmm_txn *txn = NULL;
struct refill_engine *engine = NULL;
int ret;
unsigned long flags;
/* wait until an engine is available */
ret = wait_event_interruptible(omap_dmm->engine_queue,
atomic_add_unless(&omap_dmm->engine_counter, -1, 0));
if (ret)
return ERR_PTR(ret);
/* grab an idle engine */
spin_lock_irqsave(&list_lock, flags);
if (!list_empty(&dmm->idle_head)) {
engine = list_entry(dmm->idle_head.next, struct refill_engine,
idle_node);
list_del(&engine->idle_node);
}
spin_unlock_irqrestore(&list_lock, flags);
BUG_ON(!engine);
txn = &engine->txn;
engine->tcm = tcm;
txn->engine_handle = engine;
txn->last_pat = NULL;
txn->current_va = engine->refill_va;
txn->current_pa = engine->refill_pa;
return txn;
}
/*
* Add region to DMM transaction. If pages or pages[i] is NULL, then the
* corresponding slot is cleared (ie. dummy_pa is programmed)
*/
static void dmm_txn_append(struct dmm_txn *txn, struct pat_area *area,
struct page **pages, u32 npages, u32 roll)
{
dma_addr_t pat_pa = 0, data_pa = 0;
u32 *data;
struct pat *pat;
struct refill_engine *engine = txn->engine_handle;
int columns = (1 + area->x1 - area->x0);
int rows = (1 + area->y1 - area->y0);
int i = columns*rows;
pat = alloc_dma(txn, sizeof(*pat), &pat_pa);
if (txn->last_pat)
txn->last_pat->next_pa = (u32)pat_pa;
pat->area = *area;
/* adjust Y coordinates based off of container parameters */
pat->area.y0 += engine->tcm->y_offset;
pat->area.y1 += engine->tcm->y_offset;
pat->ctrl = (struct pat_ctrl){
.start = 1,
.lut_id = engine->tcm->lut_id,
};
data = alloc_dma(txn, 4*i, &data_pa);
/* FIXME: what if data_pa is more than 32-bit ? */
pat->data_pa = data_pa;
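/*
* 'roll' rotates the page array: slot i of the area is programmed with
* page (i + roll) modulo npages, and any slot without a backing page
* falls back to the dummy page.
*/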
while (i--) {
int n = i + roll;
if (n >= npages)
n -= npages;
data[i] = (pages && pages[n]) ?
page_to_phys(pages[n]) : engine->dmm->dummy_pa;
}
txn->last_pat = pat;
return;
}
/*
* Commit the DMM transaction.
*/
static int dmm_txn_commit(struct dmm_txn *txn, bool wait)
{
int ret = 0;
struct refill_engine *engine = txn->engine_handle;
struct dmm *dmm = engine->dmm;
if (!txn->last_pat) {
dev_err(engine->dmm->dev, "need at least one txn\n");
ret = -EINVAL;
goto cleanup;
}
txn->last_pat->next_pa = 0;
/* ensure that the written descriptors are visible to DMM */
wmb();
/*
* NOTE: the wmb() above should be enough, but there seems to be a bug
* in OMAP's memory barrier implementation, which in some rare cases may
* cause the writes not to be observable after wmb().
*/
/* read back to ensure the data is in RAM */
readl(&txn->last_pat->next_pa);
/* write to PAT_DESCR to clear out any pending transaction */
dmm_write(dmm, 0x0, reg[PAT_DESCR][engine->id]);
/* wait for engine ready: */
ret = wait_status(engine, DMM_PATSTATUS_READY);
if (ret) {
ret = -EFAULT;
goto cleanup;
}
/* mark whether it is async to denote list management in IRQ handler */
engine->async = wait ? false : true;
reinit_completion(&engine->compl);
/* verify that the irq handler sees the 'async' and completion value */
smp_mb();
/* kick reload */
dmm_write(dmm, engine->refill_pa, reg[PAT_DESCR][engine->id]);
if (wait) {
if (!wait_for_completion_timeout(&engine->compl,
msecs_to_jiffies(100))) {
dev_err(dmm->dev, "timed out waiting for done\n");
ret = -ETIMEDOUT;
goto cleanup;
}
/* Check the engine status before continue */
ret = wait_status(engine, DMM_PATSTATUS_READY |
DMM_PATSTATUS_VALID | DMM_PATSTATUS_DONE);
}
cleanup:
/* only place engine back on list if we are done with it */
if (ret || wait)
release_engine(engine);
return ret;
}
/*
* DMM programming
*/
static int fill(struct tcm_area *area, struct page **pages,
u32 npages, u32 roll, bool wait)
{
int ret = 0;
struct tcm_area slice, area_s;
struct dmm_txn *txn;
/*
* FIXME
*
* Asynchronous fill does not work reliably, as the driver does not
* handle errors in the async code paths. The fill operation may
* silently fail, leading to leaking DMM engines, which may eventually
* lead to deadlock if we run out of DMM engines.
*
* For now, always set 'wait' so that we only use sync fills. Async
* fills should be fixed, or alternatively we could decide to only
* support sync fills and so the whole async code path could be removed.
*/
wait = true;
txn = dmm_txn_init(omap_dmm, area->tcm);
if (IS_ERR_OR_NULL(txn))
return -ENOMEM;
tcm_for_each_slice(slice, *area, area_s) {
struct pat_area p_area = {
.x0 = slice.p0.x, .y0 = slice.p0.y,
.x1 = slice.p1.x, .y1 = slice.p1.y,
};
dmm_txn_append(txn, &p_area, pages, npages, roll);
roll += tcm_sizeof(slice);
}
ret = dmm_txn_commit(txn, wait);
return ret;
}
/*
* Pin/unpin
*/
/* note: slots for which pages[i] == NULL are filled w/ dummy page
*/
int tiler_pin(struct tiler_block *block, struct page **pages,
u32 npages, u32 roll, bool wait)
{
int ret;
ret = fill(&block->area, pages, npages, roll, wait);
if (ret)
tiler_unpin(block);
return ret;
}
int tiler_unpin(struct tiler_block *block)
{
return fill(&block->area, NULL, 0, 0, false);
}
/*
* Reserve/release
*/
struct tiler_block *tiler_reserve_2d(enum tiler_fmt fmt, u16 w,
u16 h, u16 align)
{
struct tiler_block *block;
u32 min_align = 128;
int ret;
unsigned long flags;
u32 slot_bytes;
block = kzalloc(sizeof(*block), GFP_KERNEL);
if (!block)
return ERR_PTR(-ENOMEM);
BUG_ON(!validfmt(fmt));
/* convert width/height to slots */
w = DIV_ROUND_UP(w, geom[fmt].slot_w);
h = DIV_ROUND_UP(h, geom[fmt].slot_h);
/* convert alignment to slots */
slot_bytes = geom[fmt].slot_w * geom[fmt].cpp;
min_align = max(min_align, slot_bytes);
align = (align > min_align) ? ALIGN(align, min_align) : min_align;
align /= slot_bytes;
block->fmt = fmt;
ret = tcm_reserve_2d(containers[fmt], w, h, align, -1, slot_bytes,
&block->area);
if (ret) {
kfree(block);
return ERR_PTR(-ENOMEM);
}
/* add to allocation list */
spin_lock_irqsave(&list_lock, flags);
list_add(&block->alloc_node, &omap_dmm->alloc_head);
spin_unlock_irqrestore(&list_lock, flags);
return block;
}
struct tiler_block *tiler_reserve_1d(size_t size)
{
struct tiler_block *block = kzalloc(sizeof(*block), GFP_KERNEL);
int num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
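/* round the requested byte size up to whole pages */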
unsigned long flags;
if (!block)
return ERR_PTR(-ENOMEM);
block->fmt = TILFMT_PAGE;
if (tcm_reserve_1d(containers[TILFMT_PAGE], num_pages,
&block->area)) {
kfree(block);
return ERR_PTR(-ENOMEM);
}
spin_lock_irqsave(&list_lock, flags);
list_add(&block->alloc_node, &omap_dmm->alloc_head);
spin_unlock_irqrestore(&list_lock, flags);
return block;
}
/* note: if you have pin'd pages, you should have already unpin'd first! */
int tiler_release(struct tiler_block *block)
{
int ret = tcm_free(&block->area);
unsigned long flags;
if (block->area.tcm)
dev_err(omap_dmm->dev, "failed to release block\n");
spin_lock_irqsave(&list_lock, flags);
list_del(&block->alloc_node);
spin_unlock_irqrestore(&list_lock, flags);
kfree(block);
return ret;
}
/*
* Utils
*/
/* calculate the tiler space address of a pixel in a view orientation...
* below description copied from the display subsystem section of TRM:
*
* When the TILER is addressed, the bits:
* [28:27] = 0x0 for 8-bit tiled
* 0x1 for 16-bit tiled
* 0x2 for 32-bit tiled
* 0x3 for page mode
* [31:29] = 0x0 for 0-degree view
* 0x1 for 180-degree view + mirroring
* 0x2 for 0-degree view + mirroring
* 0x3 for 180-degree view
* 0x4 for 270-degree view + mirroring
* 0x5 for 270-degree view
* 0x6 for 90-degree view
* 0x7 for 90-degree view + mirroring
* Otherwise the bits indicate the corresponding bit address used to
* access the SDRAM.
*/
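/*
* Illustrative example, assuming TIL_ADDR() encodes the layout above: a
* 16-bit tiled, 0-degree view has bits [28:27] = 0x1 and [31:29] = 0x0,
* so 0x08000000 is ORed into the computed x/y offset.
*/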
static u32 tiler_get_address(enum tiler_fmt fmt, u32 orient, u32 x, u32 y)
{
u32 x_bits, y_bits, tmp, x_mask, y_mask, alignment;
x_bits = CONT_WIDTH_BITS - geom[fmt].x_shft;
y_bits = CONT_HEIGHT_BITS - geom[fmt].y_shft;
alignment = geom[fmt].x_shft + geom[fmt].y_shft;
/* validate coordinate */
x_mask = MASK(x_bits);
y_mask = MASK(y_bits);
if (x < 0 || x > x_mask || y < 0 || y > y_mask) {
DBG("invalid coords: %u < 0 || %u > %u || %u < 0 || %u > %u",
x, x, x_mask, y, y, y_mask);
return 0;
}
/* account for mirroring */
if (orient & MASK_X_INVERT)
x ^= x_mask;
if (orient & MASK_Y_INVERT)
y ^= y_mask;
/* get coordinate address */
if (orient & MASK_XY_FLIP)
tmp = ((x << y_bits) + y);
else
tmp = ((y << x_bits) + x);
return TIL_ADDR((tmp << alignment), orient, fmt);
}
dma_addr_t tiler_ssptr(struct tiler_block *block)
{
BUG_ON(!validfmt(block->fmt));
return TILVIEW_8BIT + tiler_get_address(block->fmt, 0,
block->area.p0.x * geom[block->fmt].slot_w,
block->area.p0.y * geom[block->fmt].slot_h);
}
dma_addr_t tiler_tsptr(struct tiler_block *block, u32 orient,
u32 x, u32 y)
{
struct tcm_pt *p = &block->area.p0;
BUG_ON(!validfmt(block->fmt));
return tiler_get_address(block->fmt, orient,
(p->x * geom[block->fmt].slot_w) + x,
(p->y * geom[block->fmt].slot_h) + y);
}
void tiler_align(enum tiler_fmt fmt, u16 *w, u16 *h)
{
BUG_ON(!validfmt(fmt));
*w = round_up(*w, geom[fmt].slot_w);
*h = round_up(*h, geom[fmt].slot_h);
}
u32 tiler_stride(enum tiler_fmt fmt, u32 orient)
{
BUG_ON(!validfmt(fmt));
if (orient & MASK_XY_FLIP)
return 1 << (CONT_HEIGHT_BITS + geom[fmt].x_shft);
else
return 1 << (CONT_WIDTH_BITS + geom[fmt].y_shft);
}
size_t tiler_size(enum tiler_fmt fmt, u16 w, u16 h)
{
tiler_align(fmt, &w, &h);
return geom[fmt].cpp * w * h;
}
size_t tiler_vsize(enum tiler_fmt fmt, u16 w, u16 h)
{
BUG_ON(!validfmt(fmt));
return round_up(geom[fmt].cpp * w, PAGE_SIZE) * h;
}
u32 tiler_get_cpu_cache_flags(void)
{
return omap_dmm->plat_data->cpu_cache_flags;
}
bool dmm_is_available(void)
{
return omap_dmm ? true : false;
}
static void omap_dmm_remove(struct platform_device *dev)
{
struct tiler_block *block, *_block;
int i;
unsigned long flags;
if (omap_dmm) {
/* Disable all enabled interrupts */
dmm_write(omap_dmm, 0x7e7e7e7e, DMM_PAT_IRQENABLE_CLR);
free_irq(omap_dmm->irq, omap_dmm);
/* free all area regions */
spin_lock_irqsave(&list_lock, flags);
list_for_each_entry_safe(block, _block, &omap_dmm->alloc_head,
alloc_node) {
list_del(&block->alloc_node);
kfree(block);
}
spin_unlock_irqrestore(&list_lock, flags);
for (i = 0; i < omap_dmm->num_lut; i++)
if (omap_dmm->tcm && omap_dmm->tcm[i])
omap_dmm->tcm[i]->deinit(omap_dmm->tcm[i]);
kfree(omap_dmm->tcm);
kfree(omap_dmm->engines);
if (omap_dmm->refill_va)
dma_free_wc(omap_dmm->dev,
REFILL_BUFFER_SIZE * omap_dmm->num_engines,
omap_dmm->refill_va, omap_dmm->refill_pa);
if (omap_dmm->dummy_page)
__free_page(omap_dmm->dummy_page);
if (omap_dmm->dmm_workaround)
dmm_workaround_uninit(omap_dmm);
iounmap(omap_dmm->base);
kfree(omap_dmm);
omap_dmm = NULL;
}
}
static int omap_dmm_probe(struct platform_device *dev)
{
int ret = -EFAULT, i;
struct tcm_area area = {0};
u32 hwinfo, pat_geom;
struct resource *mem;
omap_dmm = kzalloc(sizeof(*omap_dmm), GFP_KERNEL);
if (!omap_dmm)
goto fail;
/* initialize lists */
INIT_LIST_HEAD(&omap_dmm->alloc_head);
INIT_LIST_HEAD(&omap_dmm->idle_head);
init_waitqueue_head(&omap_dmm->engine_queue);
if (dev->dev.of_node) {
const struct of_device_id *match;
match = of_match_node(dmm_of_match, dev->dev.of_node);
if (!match) {
dev_err(&dev->dev, "failed to find matching device node\n");
ret = -ENODEV;
goto fail;
}
omap_dmm->plat_data = match->data;
}
/* lookup hwmod data - base address and irq */
mem = platform_get_resource(dev, IORESOURCE_MEM, 0);
if (!mem) {
dev_err(&dev->dev, "failed to get base address resource\n");
goto fail;
}
omap_dmm->phys_base = mem->start;
omap_dmm->base = ioremap(mem->start, SZ_2K);
if (!omap_dmm->base) {
dev_err(&dev->dev, "failed to get dmm base address\n");
goto fail;
}
omap_dmm->irq = platform_get_irq(dev, 0);
if (omap_dmm->irq < 0)
goto fail;
omap_dmm->dev = &dev->dev;
if (of_machine_is_compatible("ti,dra7")) {
/*
* DRA7 Errata i878 says that MPU should not be used to access
* RAM and DMM at the same time. As it's not possible to prevent
* MPU accessing RAM, we need to access DMM via a proxy.
*/
if (!dmm_workaround_init(omap_dmm)) {
omap_dmm->dmm_workaround = true;
dev_info(&dev->dev,
"workaround for errata i878 in use\n");
} else {
dev_warn(&dev->dev,
"failed to initialize work-around for i878\n");
}
}
hwinfo = dmm_read(omap_dmm, DMM_PAT_HWINFO);
omap_dmm->num_engines = (hwinfo >> 24) & 0x1F;
omap_dmm->num_lut = (hwinfo >> 16) & 0x1F;
omap_dmm->container_width = 256;
omap_dmm->container_height = 128;
atomic_set(&omap_dmm->engine_counter, omap_dmm->num_engines);
/* read out actual LUT width and height */
pat_geom = dmm_read(omap_dmm, DMM_PAT_GEOMETRY);
omap_dmm->lut_width = ((pat_geom >> 16) & 0xF) << 5;
omap_dmm->lut_height = ((pat_geom >> 24) & 0xF) << 5;
/* increment LUT by one if on OMAP5 */
/* LUT has twice the height, and is split into a separate container */
if (omap_dmm->lut_height != omap_dmm->container_height)
omap_dmm->num_lut++;
/* initialize DMM registers */
dmm_write(omap_dmm, 0x88888888, DMM_PAT_VIEW__0);
dmm_write(omap_dmm, 0x88888888, DMM_PAT_VIEW__1);
dmm_write(omap_dmm, 0x80808080, DMM_PAT_VIEW_MAP__0);
dmm_write(omap_dmm, 0x80000000, DMM_PAT_VIEW_MAP_BASE);
dmm_write(omap_dmm, 0x88888888, DMM_TILER_OR__0);
dmm_write(omap_dmm, 0x88888888, DMM_TILER_OR__1);
omap_dmm->dummy_page = alloc_page(GFP_KERNEL | __GFP_DMA32);
if (!omap_dmm->dummy_page) {
dev_err(&dev->dev, "could not allocate dummy page\n");
ret = -ENOMEM;
goto fail;
}
/* set dma mask for device */
ret = dma_set_coherent_mask(&dev->dev, DMA_BIT_MASK(32));
if (ret)
goto fail;
omap_dmm->dummy_pa = page_to_phys(omap_dmm->dummy_page);
/* alloc refill memory */
omap_dmm->refill_va = dma_alloc_wc(&dev->dev,
REFILL_BUFFER_SIZE * omap_dmm->num_engines,
&omap_dmm->refill_pa, GFP_KERNEL);
if (!omap_dmm->refill_va) {
dev_err(&dev->dev, "could not allocate refill memory\n");
ret = -ENOMEM;
goto fail;
}
/* alloc engines */
omap_dmm->engines = kcalloc(omap_dmm->num_engines,
sizeof(*omap_dmm->engines), GFP_KERNEL);
if (!omap_dmm->engines) {
ret = -ENOMEM;
goto fail;
}
for (i = 0; i < omap_dmm->num_engines; i++) {
omap_dmm->engines[i].id = i;
omap_dmm->engines[i].dmm = omap_dmm;
omap_dmm->engines[i].refill_va = omap_dmm->refill_va +
(REFILL_BUFFER_SIZE * i);
omap_dmm->engines[i].refill_pa = omap_dmm->refill_pa +
(REFILL_BUFFER_SIZE * i);
init_completion(&omap_dmm->engines[i].compl);
list_add(&omap_dmm->engines[i].idle_node, &omap_dmm->idle_head);
}
omap_dmm->tcm = kcalloc(omap_dmm->num_lut, sizeof(*omap_dmm->tcm),
GFP_KERNEL);
if (!omap_dmm->tcm) {
ret = -ENOMEM;
goto fail;
}
/* init containers */
/* Each LUT is associated with a TCM (container manager). The lut_id
identifies the correct LUT for programming during refill operations. */
for (i = 0; i < omap_dmm->num_lut; i++) {
omap_dmm->tcm[i] = sita_init(omap_dmm->container_width,
omap_dmm->container_height);
if (!omap_dmm->tcm[i]) {
dev_err(&dev->dev, "failed to allocate container\n");
ret = -ENOMEM;
goto fail;
}
omap_dmm->tcm[i]->lut_id = i;
}
/* assign access mode containers to applicable tcm container */
/* OMAP 4 has 1 container for all 4 views */
/* OMAP 5 has 2 containers, 1 for 2D and 1 for 1D */
containers[TILFMT_8BIT] = omap_dmm->tcm[0];
containers[TILFMT_16BIT] = omap_dmm->tcm[0];
containers[TILFMT_32BIT] = omap_dmm->tcm[0];
if (omap_dmm->container_height != omap_dmm->lut_height) {
/* The second LUT is used for PAGE mode. Programming must use a
y offset that is added to all y coordinates. The LUT id is still
0, because it is the same LUT, just the upper 128 lines. */
containers[TILFMT_PAGE] = omap_dmm->tcm[1];
omap_dmm->tcm[1]->y_offset = OMAP5_LUT_OFFSET;
omap_dmm->tcm[1]->lut_id = 0;
} else {
containers[TILFMT_PAGE] = omap_dmm->tcm[0];
}
area = (struct tcm_area) {
.tcm = NULL,
.p1.x = omap_dmm->container_width - 1,
.p1.y = omap_dmm->container_height - 1,
};
ret = request_irq(omap_dmm->irq, omap_dmm_irq_handler, IRQF_SHARED,
"omap_dmm_irq_handler", omap_dmm);
if (ret) {
dev_err(&dev->dev, "couldn't register IRQ %d, error %d\n",
omap_dmm->irq, ret);
omap_dmm->irq = -1;
goto fail;
}
/* Enable all interrupts for each refill engine except
* ERR_LUT_MISS<n> (which is just advisory, and we don't care
* about because we want to be able to refill live scanout
* buffers for accelerated pan/scroll) and FILL_DSC<n> which
* we just generally don't care about.
*/
dmm_write(omap_dmm, 0x7e7e7e7e, DMM_PAT_IRQENABLE_SET);
/* initialize all LUTs to dummy page entries */
for (i = 0; i < omap_dmm->num_lut; i++) {
area.tcm = omap_dmm->tcm[i];
if (fill(&area, NULL, 0, 0, true))
dev_err(omap_dmm->dev, "refill failed");
}
dev_info(omap_dmm->dev, "initialized all PAT entries\n");
return 0;
fail:
omap_dmm_remove(dev);
return ret;
}
/*
* debugfs support
*/
#ifdef CONFIG_DEBUG_FS
static const char *alphabet = "abcdefghijklmnopqrstuvwxyz"
"ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789";
static const char *special = ".,:;'\"`~!^-+";
static void fill_map(char **map, int xdiv, int ydiv, struct tcm_area *a,
char c, bool ovw)
{
int x, y;
for (y = a->p0.y / ydiv; y <= a->p1.y / ydiv; y++)
for (x = a->p0.x / xdiv; x <= a->p1.x / xdiv; x++)
if (map[y][x] == ' ' || ovw)
map[y][x] = c;
}
static void fill_map_pt(char **map, int xdiv, int ydiv, struct tcm_pt *p,
char c)
{
map[p->y / ydiv][p->x / xdiv] = c;
}
static char read_map_pt(char **map, int xdiv, int ydiv, struct tcm_pt *p)
{
return map[p->y / ydiv][p->x / xdiv];
}
static int map_width(int xdiv, int x0, int x1)
{
return (x1 / xdiv) - (x0 / xdiv) + 1;
}
static void text_map(char **map, int xdiv, char *nice, int yd, int x0, int x1)
{
char *p = map[yd] + (x0 / xdiv);
int w = (map_width(xdiv, x0, x1) - strlen(nice)) / 2;
if (w >= 0) {
p += w;
while (*nice)
*p++ = *nice++;
}
}
static void map_1d_info(char **map, int xdiv, int ydiv, char *nice,
struct tcm_area *a)
{
sprintf(nice, "%dK", tcm_sizeof(*a) * 4);
if (a->p0.y + 1 < a->p1.y) {
text_map(map, xdiv, nice, (a->p0.y + a->p1.y) / 2 / ydiv, 0,
256 - 1);
} else if (a->p0.y < a->p1.y) {
if (strlen(nice) < map_width(xdiv, a->p0.x, 256 - 1))
text_map(map, xdiv, nice, a->p0.y / ydiv,
a->p0.x + xdiv, 256 - 1);
else if (strlen(nice) < map_width(xdiv, 0, a->p1.x))
text_map(map, xdiv, nice, a->p1.y / ydiv,
0, a->p1.y - xdiv);
} else if (strlen(nice) + 1 < map_width(xdiv, a->p0.x, a->p1.x)) {
text_map(map, xdiv, nice, a->p0.y / ydiv, a->p0.x, a->p1.x);
}
}
static void map_2d_info(char **map, int xdiv, int ydiv, char *nice,
struct tcm_area *a)
{
sprintf(nice, "(%d*%d)", tcm_awidth(*a), tcm_aheight(*a));
if (strlen(nice) + 1 < map_width(xdiv, a->p0.x, a->p1.x))
text_map(map, xdiv, nice, (a->p0.y + a->p1.y) / 2 / ydiv,
a->p0.x, a->p1.x);
}
int tiler_map_show(struct seq_file *s, void *arg)
{
int xdiv = 2, ydiv = 1;
char **map = NULL, *global_map;
struct tiler_block *block;
struct tcm_area a, p;
int i;
const char *m2d = alphabet;
const char *a2d = special;
const char *m2dp = m2d, *a2dp = a2d;
char nice[128];
int h_adj;
int w_adj;
unsigned long flags;
int lut_idx;
if (!omap_dmm) {
/* early return if dmm/tiler device is not initialized */
return 0;
}
h_adj = omap_dmm->container_height / ydiv;
w_adj = omap_dmm->container_width / xdiv;
map = kmalloc_array(h_adj, sizeof(*map), GFP_KERNEL);
global_map = kmalloc_array(w_adj + 1, h_adj, GFP_KERNEL);
if (!map || !global_map)
goto error;
for (lut_idx = 0; lut_idx < omap_dmm->num_lut; lut_idx++) {
memset(map, 0, h_adj * sizeof(*map));
memset(global_map, ' ', (w_adj + 1) * h_adj);
for (i = 0; i < omap_dmm->container_height; i++) {
map[i] = global_map + i * (w_adj + 1);
map[i][w_adj] = 0;
}
spin_lock_irqsave(&list_lock, flags);
list_for_each_entry(block, &omap_dmm->alloc_head, alloc_node) {
if (block->area.tcm == omap_dmm->tcm[lut_idx]) {
if (block->fmt != TILFMT_PAGE) {
fill_map(map, xdiv, ydiv, &block->area,
*m2dp, true);
if (!*++a2dp)
a2dp = a2d;
if (!*++m2dp)
m2dp = m2d;
map_2d_info(map, xdiv, ydiv, nice,
&block->area);
} else {
bool start = read_map_pt(map, xdiv,
ydiv, &block->area.p0) == ' ';
bool end = read_map_pt(map, xdiv, ydiv,
&block->area.p1) == ' ';
tcm_for_each_slice(a, block->area, p)
fill_map(map, xdiv, ydiv, &a,
'=', true);
fill_map_pt(map, xdiv, ydiv,
&block->area.p0,
start ? '<' : 'X');
fill_map_pt(map, xdiv, ydiv,
&block->area.p1,
end ? '>' : 'X');
map_1d_info(map, xdiv, ydiv, nice,
&block->area);
}
}
}
spin_unlock_irqrestore(&list_lock, flags);
if (s) {
seq_printf(s, "CONTAINER %d DUMP BEGIN\n", lut_idx);
for (i = 0; i < 128; i++)
seq_printf(s, "%03d:%s\n", i, map[i]);
seq_printf(s, "CONTAINER %d DUMP END\n", lut_idx);
} else {
dev_dbg(omap_dmm->dev, "CONTAINER %d DUMP BEGIN\n",
lut_idx);
for (i = 0; i < 128; i++)
dev_dbg(omap_dmm->dev, "%03d:%s\n", i, map[i]);
dev_dbg(omap_dmm->dev, "CONTAINER %d DUMP END\n",
lut_idx);
}
}
error:
kfree(map);
kfree(global_map);
return 0;
}
#endif
#ifdef CONFIG_PM_SLEEP
static int omap_dmm_resume(struct device *dev)
{
struct tcm_area area;
int i;
if (!omap_dmm)
return -ENODEV;
area = (struct tcm_area) {
.tcm = NULL,
.p1.x = omap_dmm->container_width - 1,
.p1.y = omap_dmm->container_height - 1,
};
/* initialize all LUTs to dummy page entries */
for (i = 0; i < omap_dmm->num_lut; i++) {
area.tcm = omap_dmm->tcm[i];
if (fill(&area, NULL, 0, 0, true))
dev_err(dev, "refill failed");
}
return 0;
}
#endif
static SIMPLE_DEV_PM_OPS(omap_dmm_pm_ops, NULL, omap_dmm_resume);
#if defined(CONFIG_OF)
static const struct dmm_platform_data dmm_omap4_platform_data = {
.cpu_cache_flags = OMAP_BO_WC,
};
static const struct dmm_platform_data dmm_omap5_platform_data = {
.cpu_cache_flags = OMAP_BO_UNCACHED,
};
static const struct of_device_id dmm_of_match[] = {
{
.compatible = "ti,omap4-dmm",
.data = &dmm_omap4_platform_data,
},
{
.compatible = "ti,omap5-dmm",
.data = &dmm_omap5_platform_data,
},
{},
};
#endif
struct platform_driver omap_dmm_driver = {
.probe = omap_dmm_probe,
.remove_new = omap_dmm_remove,
.driver = {
.owner = THIS_MODULE,
.name = DMM_DRIVER_NAME,
.of_match_table = of_match_ptr(dmm_of_match),
.pm = &omap_dmm_pm_ops,
},
};
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Andy Gross <[email protected]>");
MODULE_DESCRIPTION("OMAP DMM/Tiler Driver");
| linux-master | drivers/gpu/drm/omapdrm/omap_dmm_tiler.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* HDMI CEC
*
* Based on the CEC code from hdmi_ti_4xxx_ip.c from Android.
*
* Copyright (C) 2010-2011 Texas Instruments Incorporated - https://www.ti.com/
* Authors: Yong Zhi
* Mythri pk <[email protected]>
*
* Heavily modified to use the linux CEC framework:
*
* Copyright 2016-2017 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
*/
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include "dss.h"
#include "hdmi.h"
#include "hdmi4_core.h"
#include "hdmi4_cec.h"
/* HDMI CEC */
#define HDMI_CEC_DEV_ID 0x900
#define HDMI_CEC_SPEC 0x904
/* Not really a debug register, more a low-level control register */
#define HDMI_CEC_DBG_3 0x91C
#define HDMI_CEC_TX_INIT 0x920
#define HDMI_CEC_TX_DEST 0x924
#define HDMI_CEC_SETUP 0x938
#define HDMI_CEC_TX_COMMAND 0x93C
#define HDMI_CEC_TX_OPERAND 0x940
#define HDMI_CEC_TRANSMIT_DATA 0x97C
#define HDMI_CEC_CA_7_0 0x988
#define HDMI_CEC_CA_15_8 0x98C
#define HDMI_CEC_INT_STATUS_0 0x998
#define HDMI_CEC_INT_STATUS_1 0x99C
#define HDMI_CEC_INT_ENABLE_0 0x990
#define HDMI_CEC_INT_ENABLE_1 0x994
#define HDMI_CEC_RX_CONTROL 0x9B0
#define HDMI_CEC_RX_COUNT 0x9B4
#define HDMI_CEC_RX_CMD_HEADER 0x9B8
#define HDMI_CEC_RX_COMMAND 0x9BC
#define HDMI_CEC_RX_OPERAND 0x9C0
#define HDMI_CEC_TX_FIFO_INT_MASK 0x64
#define HDMI_CEC_RETRANSMIT_CNT_INT_MASK 0x2
#define HDMI_CORE_CEC_RETRY 200
static void hdmi_cec_received_msg(struct hdmi_core_data *core)
{
u32 cnt = hdmi_read_reg(core->base, HDMI_CEC_RX_COUNT) & 0xff;
/* While there are CEC frames in the FIFO */
while (cnt & 0x70) {
/* and the frame doesn't have an error */
if (!(cnt & 0x80)) {
struct cec_msg msg = {};
unsigned int i;
/* then read the message */
msg.len = cnt & 0xf;
if (msg.len > CEC_MAX_MSG_SIZE - 2)
msg.len = CEC_MAX_MSG_SIZE - 2;
msg.msg[0] = hdmi_read_reg(core->base,
HDMI_CEC_RX_CMD_HEADER);
msg.msg[1] = hdmi_read_reg(core->base,
HDMI_CEC_RX_COMMAND);
for (i = 0; i < msg.len; i++) {
unsigned int reg = HDMI_CEC_RX_OPERAND + i * 4;
msg.msg[2 + i] =
hdmi_read_reg(core->base, reg);
}
msg.len += 2;
cec_received_msg(core->adap, &msg);
}
/* Clear the current frame from the FIFO */
hdmi_write_reg(core->base, HDMI_CEC_RX_CONTROL, 1);
/* Wait until the current frame is cleared */
while (hdmi_read_reg(core->base, HDMI_CEC_RX_CONTROL) & 1)
udelay(1);
/*
* Re-read the count register and loop to see if there are
* more messages in the FIFO.
*/
cnt = hdmi_read_reg(core->base, HDMI_CEC_RX_COUNT) & 0xff;
}
}
void hdmi4_cec_irq(struct hdmi_core_data *core)
{
u32 stat0 = hdmi_read_reg(core->base, HDMI_CEC_INT_STATUS_0);
u32 stat1 = hdmi_read_reg(core->base, HDMI_CEC_INT_STATUS_1);
hdmi_write_reg(core->base, HDMI_CEC_INT_STATUS_0, stat0);
hdmi_write_reg(core->base, HDMI_CEC_INT_STATUS_1, stat1);
if (stat0 & 0x20) {
cec_transmit_done(core->adap, CEC_TX_STATUS_OK,
0, 0, 0, 0);
REG_FLD_MOD(core->base, HDMI_CEC_DBG_3, 0x1, 7, 7);
} else if (stat1 & 0x02) {
u32 dbg3 = hdmi_read_reg(core->base, HDMI_CEC_DBG_3);
cec_transmit_done(core->adap,
CEC_TX_STATUS_NACK |
CEC_TX_STATUS_MAX_RETRIES,
0, (dbg3 >> 4) & 7, 0, 0);
REG_FLD_MOD(core->base, HDMI_CEC_DBG_3, 0x1, 7, 7);
}
if (stat0 & 0x02)
hdmi_cec_received_msg(core);
}
static bool hdmi_cec_clear_tx_fifo(struct cec_adapter *adap)
{
struct hdmi_core_data *core = cec_get_drvdata(adap);
int retry = HDMI_CORE_CEC_RETRY;
int temp;
REG_FLD_MOD(core->base, HDMI_CEC_DBG_3, 0x1, 7, 7);
while (retry) {
temp = hdmi_read_reg(core->base, HDMI_CEC_DBG_3);
if (FLD_GET(temp, 7, 7) == 0)
break;
retry--;
}
return retry != 0;
}
static bool hdmi_cec_clear_rx_fifo(struct cec_adapter *adap)
{
struct hdmi_core_data *core = cec_get_drvdata(adap);
int retry = HDMI_CORE_CEC_RETRY;
int temp;
hdmi_write_reg(core->base, HDMI_CEC_RX_CONTROL, 0x3);
retry = HDMI_CORE_CEC_RETRY;
while (retry) {
temp = hdmi_read_reg(core->base, HDMI_CEC_RX_CONTROL);
if (FLD_GET(temp, 1, 0) == 0)
break;
retry--;
}
return retry != 0;
}
static int hdmi_cec_adap_enable(struct cec_adapter *adap, bool enable)
{
struct hdmi_core_data *core = cec_get_drvdata(adap);
int temp, err;
if (!enable) {
hdmi_write_reg(core->base, HDMI_CEC_INT_ENABLE_0, 0);
hdmi_write_reg(core->base, HDMI_CEC_INT_ENABLE_1, 0);
REG_FLD_MOD(core->base, HDMI_CORE_SYS_INTR_UNMASK4, 0, 3, 3);
hdmi_wp_clear_irqenable(core->wp, HDMI_IRQ_CORE);
hdmi_wp_set_irqstatus(core->wp, HDMI_IRQ_CORE);
REG_FLD_MOD(core->wp->base, HDMI_WP_CLK, 0, 5, 0);
hdmi4_core_disable(core);
return 0;
}
err = hdmi4_core_enable(core);
if (err)
return err;
/*
* Initialize CEC clock divider: CEC needs 2MHz clock hence
* set the divider to 24 to get 48/24=2MHz clock
*/
REG_FLD_MOD(core->wp->base, HDMI_WP_CLK, 0x18, 5, 0);
/* Clear TX FIFO */
if (!hdmi_cec_clear_tx_fifo(adap)) {
pr_err("cec-%s: could not clear TX FIFO\n", adap->name);
err = -EIO;
goto err_disable_clk;
}
/* Clear RX FIFO */
if (!hdmi_cec_clear_rx_fifo(adap)) {
pr_err("cec-%s: could not clear RX FIFO\n", adap->name);
err = -EIO;
goto err_disable_clk;
}
/* Clear CEC interrupts */
hdmi_write_reg(core->base, HDMI_CEC_INT_STATUS_1,
hdmi_read_reg(core->base, HDMI_CEC_INT_STATUS_1));
hdmi_write_reg(core->base, HDMI_CEC_INT_STATUS_0,
hdmi_read_reg(core->base, HDMI_CEC_INT_STATUS_0));
/* Enable HDMI core interrupts */
hdmi_wp_set_irqenable(core->wp, HDMI_IRQ_CORE);
/* Unmask CEC interrupt */
REG_FLD_MOD(core->base, HDMI_CORE_SYS_INTR_UNMASK4, 0x1, 3, 3);
/*
* Enable CEC interrupts:
* Transmit Buffer Full/Empty Change event
* Receiver FIFO Not Empty event
*/
hdmi_write_reg(core->base, HDMI_CEC_INT_ENABLE_0, 0x22);
/*
* Enable CEC interrupts:
* Frame Retransmit Count Exceeded event
*/
hdmi_write_reg(core->base, HDMI_CEC_INT_ENABLE_1, 0x02);
/* cec calibration enable (self clearing) */
hdmi_write_reg(core->base, HDMI_CEC_SETUP, 0x03);
msleep(20);
hdmi_write_reg(core->base, HDMI_CEC_SETUP, 0x04);
temp = hdmi_read_reg(core->base, HDMI_CEC_SETUP);
if (FLD_GET(temp, 4, 4) != 0) {
temp = FLD_MOD(temp, 0, 4, 4);
hdmi_write_reg(core->base, HDMI_CEC_SETUP, temp);
/*
* If we enabled CEC in the middle of a CEC message on the bus,
* we could have a start bit irregularity and/or a short
* pulse event. Clear them now.
*/
temp = hdmi_read_reg(core->base, HDMI_CEC_INT_STATUS_1);
temp = FLD_MOD(0x0, 0x5, 2, 0);
hdmi_write_reg(core->base, HDMI_CEC_INT_STATUS_1, temp);
}
return 0;
err_disable_clk:
REG_FLD_MOD(core->wp->base, HDMI_WP_CLK, 0, 5, 0);
hdmi4_core_disable(core);
return err;
}
static int hdmi_cec_adap_log_addr(struct cec_adapter *adap, u8 log_addr)
{
struct hdmi_core_data *core = cec_get_drvdata(adap);
u32 v;
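/*
* The two CA registers form a 16-bit ack mask with one bit per CEC
* logical address: CA_7_0 covers addresses 0-7 and CA_15_8 covers 8-15.
* For example, claiming logical address 4 sets bit 4 of HDMI_CEC_CA_7_0.
*/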
if (log_addr == CEC_LOG_ADDR_INVALID) {
hdmi_write_reg(core->base, HDMI_CEC_CA_7_0, 0);
hdmi_write_reg(core->base, HDMI_CEC_CA_15_8, 0);
return 0;
}
if (log_addr <= 7) {
v = hdmi_read_reg(core->base, HDMI_CEC_CA_7_0);
v |= 1 << log_addr;
hdmi_write_reg(core->base, HDMI_CEC_CA_7_0, v);
} else {
v = hdmi_read_reg(core->base, HDMI_CEC_CA_15_8);
v |= 1 << (log_addr - 8);
hdmi_write_reg(core->base, HDMI_CEC_CA_15_8, v);
}
return 0;
}
static int hdmi_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
u32 signal_free_time, struct cec_msg *msg)
{
struct hdmi_core_data *core = cec_get_drvdata(adap);
int temp;
u32 i;
/* Clear TX FIFO */
if (!hdmi_cec_clear_tx_fifo(adap)) {
pr_err("cec-%s: could not clear TX FIFO for transmit\n",
adap->name);
return -EIO;
}
/* Clear TX interrupts */
hdmi_write_reg(core->base, HDMI_CEC_INT_STATUS_0,
HDMI_CEC_TX_FIFO_INT_MASK);
hdmi_write_reg(core->base, HDMI_CEC_INT_STATUS_1,
HDMI_CEC_RETRANSMIT_CNT_INT_MASK);
/* Set the retry count */
REG_FLD_MOD(core->base, HDMI_CEC_DBG_3, attempts - 1, 6, 4);
/* Set the initiator addresses */
hdmi_write_reg(core->base, HDMI_CEC_TX_INIT, cec_msg_initiator(msg));
/* Set destination id */
temp = cec_msg_destination(msg);
if (msg->len == 1)
temp |= 0x80;
hdmi_write_reg(core->base, HDMI_CEC_TX_DEST, temp);
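/*
* A single-byte message is a CEC poll: bit 7 of TX_DEST (set above)
* appears to request a header-only transmission, so there is nothing
* further to program.
*/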
if (msg->len == 1)
return 0;
/* Setup command and arguments for the command */
hdmi_write_reg(core->base, HDMI_CEC_TX_COMMAND, msg->msg[1]);
for (i = 0; i < msg->len - 2; i++)
hdmi_write_reg(core->base, HDMI_CEC_TX_OPERAND + i * 4,
msg->msg[2 + i]);
/* Operand count */
hdmi_write_reg(core->base, HDMI_CEC_TRANSMIT_DATA,
(msg->len - 2) | 0x10);
return 0;
}
static const struct cec_adap_ops hdmi_cec_adap_ops = {
.adap_enable = hdmi_cec_adap_enable,
.adap_log_addr = hdmi_cec_adap_log_addr,
.adap_transmit = hdmi_cec_adap_transmit,
};
void hdmi4_cec_set_phys_addr(struct hdmi_core_data *core, u16 pa)
{
cec_s_phys_addr(core->adap, pa, false);
}
int hdmi4_cec_init(struct platform_device *pdev, struct hdmi_core_data *core,
struct hdmi_wp_data *wp)
{
const u32 caps = CEC_CAP_TRANSMIT | CEC_CAP_LOG_ADDRS |
CEC_CAP_PASSTHROUGH | CEC_CAP_RC;
int ret;
core->adap = cec_allocate_adapter(&hdmi_cec_adap_ops, core,
"omap4", caps, CEC_MAX_LOG_ADDRS);
ret = PTR_ERR_OR_ZERO(core->adap);
if (ret < 0)
return ret;
core->wp = wp;
/* Disable clock initially, hdmi_cec_adap_enable() manages it */
REG_FLD_MOD(core->wp->base, HDMI_WP_CLK, 0, 5, 0);
ret = cec_register_adapter(core->adap, &pdev->dev);
if (ret < 0) {
cec_delete_adapter(core->adap);
return ret;
}
return 0;
}
void hdmi4_cec_uninit(struct hdmi_core_data *core)
{
cec_unregister_adapter(core->adap);
}
| linux-master | drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* OMAP5 HDMI CORE IP driver library
*
* Copyright (C) 2014 Texas Instruments Incorporated - https://www.ti.com/
* Authors:
* Yong Zhi
* Mythri pk
* Archit Taneja <[email protected]>
* Tomi Valkeinen <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/seq_file.h>
#include <drm/drm_edid.h>
#include <sound/asound.h>
#include <sound/asoundef.h>
#include "hdmi5_core.h"
void hdmi5_core_ddc_init(struct hdmi_core_data *core)
{
void __iomem *base = core->base;
const unsigned long long iclk = 266000000; /* DSS L3 ICLK */
const unsigned int ss_scl_high = 4700; /* ns */
const unsigned int ss_scl_low = 5500; /* ns */
const unsigned int fs_scl_high = 600; /* ns */
const unsigned int fs_scl_low = 1300; /* ns */
const unsigned int sda_hold = 1000; /* ns */
const unsigned int sfr_div = 10;
unsigned long long sfr;
unsigned int v;
sfr = iclk / sfr_div; /* SFR_DIV */
sfr /= 1000; /* SFR clock in kHz */
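/*
* Worked example (illustrative only): with the assumed 266 MHz ICLK and
* SFR_DIV of 10, sfr is 26600 kHz, so the standard-mode SCL high count
* below becomes DIV_ROUND_UP(4700 * 26600, 1000000) = 126 SFR cycles,
* i.e. roughly 4.7 us.
*/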
/* Reset */
REG_FLD_MOD(base, HDMI_CORE_I2CM_SOFTRSTZ, 0, 0, 0);
if (hdmi_wait_for_bit_change(base, HDMI_CORE_I2CM_SOFTRSTZ,
0, 0, 1) != 1)
DSSERR("HDMI I2CM reset failed\n");
/* Standard (0) or Fast (1) Mode */
REG_FLD_MOD(base, HDMI_CORE_I2CM_DIV, 0, 3, 3);
/* Standard Mode SCL High counter */
v = DIV_ROUND_UP_ULL(ss_scl_high * sfr, 1000000);
REG_FLD_MOD(base, HDMI_CORE_I2CM_SS_SCL_HCNT_1_ADDR,
(v >> 8) & 0xff, 7, 0);
REG_FLD_MOD(base, HDMI_CORE_I2CM_SS_SCL_HCNT_0_ADDR,
v & 0xff, 7, 0);
/* Standard Mode SCL Low counter */
v = DIV_ROUND_UP_ULL(ss_scl_low * sfr, 1000000);
REG_FLD_MOD(base, HDMI_CORE_I2CM_SS_SCL_LCNT_1_ADDR,
(v >> 8) & 0xff, 7, 0);
REG_FLD_MOD(base, HDMI_CORE_I2CM_SS_SCL_LCNT_0_ADDR,
v & 0xff, 7, 0);
/* Fast Mode SCL High Counter */
v = DIV_ROUND_UP_ULL(fs_scl_high * sfr, 1000000);
REG_FLD_MOD(base, HDMI_CORE_I2CM_FS_SCL_HCNT_1_ADDR,
(v >> 8) & 0xff, 7, 0);
REG_FLD_MOD(base, HDMI_CORE_I2CM_FS_SCL_HCNT_0_ADDR,
v & 0xff, 7, 0);
/* Fast Mode SCL Low Counter */
v = DIV_ROUND_UP_ULL(fs_scl_low * sfr, 1000000);
REG_FLD_MOD(base, HDMI_CORE_I2CM_FS_SCL_LCNT_1_ADDR,
(v >> 8) & 0xff, 7, 0);
REG_FLD_MOD(base, HDMI_CORE_I2CM_FS_SCL_LCNT_0_ADDR,
v & 0xff, 7, 0);
/* SDA Hold Time */
v = DIV_ROUND_UP_ULL(sda_hold * sfr, 1000000);
REG_FLD_MOD(base, HDMI_CORE_I2CM_SDA_HOLD_ADDR, v & 0xff, 7, 0);
REG_FLD_MOD(base, HDMI_CORE_I2CM_SLAVE, 0x50, 6, 0);
REG_FLD_MOD(base, HDMI_CORE_I2CM_SEGADDR, 0x30, 6, 0);
/* NACK_POL to high */
REG_FLD_MOD(base, HDMI_CORE_I2CM_CTLINT, 0x1, 7, 7);
/* NACK_MASK to unmasked */
REG_FLD_MOD(base, HDMI_CORE_I2CM_CTLINT, 0x0, 6, 6);
/* ARBITRATION_POL to high */
REG_FLD_MOD(base, HDMI_CORE_I2CM_CTLINT, 0x1, 3, 3);
/* ARBITRATION_MASK to unmasked */
REG_FLD_MOD(base, HDMI_CORE_I2CM_CTLINT, 0x0, 2, 2);
/* DONE_POL to high */
REG_FLD_MOD(base, HDMI_CORE_I2CM_INT, 0x1, 3, 3);
/* DONE_MASK to unmasked */
REG_FLD_MOD(base, HDMI_CORE_I2CM_INT, 0x0, 2, 2);
}
void hdmi5_core_ddc_uninit(struct hdmi_core_data *core)
{
void __iomem *base = core->base;
/* Mask I2C interrupts */
REG_FLD_MOD(base, HDMI_CORE_I2CM_CTLINT, 0x1, 6, 6);
REG_FLD_MOD(base, HDMI_CORE_I2CM_CTLINT, 0x1, 2, 2);
REG_FLD_MOD(base, HDMI_CORE_I2CM_INT, 0x1, 2, 2);
}
int hdmi5_core_ddc_read(void *data, u8 *buf, unsigned int block, size_t len)
{
struct hdmi_core_data *core = data;
void __iomem *base = core->base;
u8 cur_addr;
const int retries = 1000;
u8 seg_ptr = block / 2;
u8 edidbase = ((block % 2) * EDID_LENGTH);
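/*
* Each 256-byte E-DDC segment holds two 128-byte EDID blocks, so e.g.
* block 3 is read from segment pointer 1 at offset EDID_LENGTH (128)
* within that segment.
*/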
REG_FLD_MOD(base, HDMI_CORE_I2CM_SEGPTR, seg_ptr, 7, 0);
/*
* TODO: We use polling here, although we probably should use proper
* interrupts.
*/
for (cur_addr = 0; cur_addr < len; ++cur_addr) {
int i;
/* clear ERROR and DONE */
REG_FLD_MOD(base, HDMI_CORE_IH_I2CM_STAT0, 0x3, 1, 0);
REG_FLD_MOD(base, HDMI_CORE_I2CM_ADDRESS,
edidbase + cur_addr, 7, 0);
if (seg_ptr)
REG_FLD_MOD(base, HDMI_CORE_I2CM_OPERATION, 1, 1, 1);
else
REG_FLD_MOD(base, HDMI_CORE_I2CM_OPERATION, 1, 0, 0);
for (i = 0; i < retries; ++i) {
u32 stat;
stat = REG_GET(base, HDMI_CORE_IH_I2CM_STAT0, 1, 0);
/* I2CM_ERROR */
if (stat & 1) {
DSSERR("HDMI I2C Master Error\n");
return -EIO;
}
/* I2CM_DONE */
if (stat & (1 << 1))
break;
usleep_range(250, 1000);
}
if (i == retries) {
DSSERR("HDMI I2C timeout reading EDID\n");
return -EIO;
}
buf[cur_addr] = REG_GET(base, HDMI_CORE_I2CM_DATAI, 7, 0);
}
return 0;
}
void hdmi5_core_dump(struct hdmi_core_data *core, struct seq_file *s)
{
#define DUMPCORE(r) seq_printf(s, "%-35s %08x\n", #r,\
hdmi_read_reg(core->base, r))
DUMPCORE(HDMI_CORE_FC_INVIDCONF);
DUMPCORE(HDMI_CORE_FC_INHACTIV0);
DUMPCORE(HDMI_CORE_FC_INHACTIV1);
DUMPCORE(HDMI_CORE_FC_INHBLANK0);
DUMPCORE(HDMI_CORE_FC_INHBLANK1);
DUMPCORE(HDMI_CORE_FC_INVACTIV0);
DUMPCORE(HDMI_CORE_FC_INVACTIV1);
DUMPCORE(HDMI_CORE_FC_INVBLANK);
DUMPCORE(HDMI_CORE_FC_HSYNCINDELAY0);
DUMPCORE(HDMI_CORE_FC_HSYNCINDELAY1);
DUMPCORE(HDMI_CORE_FC_HSYNCINWIDTH0);
DUMPCORE(HDMI_CORE_FC_HSYNCINWIDTH1);
DUMPCORE(HDMI_CORE_FC_VSYNCINDELAY);
DUMPCORE(HDMI_CORE_FC_VSYNCINWIDTH);
DUMPCORE(HDMI_CORE_FC_CTRLDUR);
DUMPCORE(HDMI_CORE_FC_EXCTRLDUR);
DUMPCORE(HDMI_CORE_FC_EXCTRLSPAC);
DUMPCORE(HDMI_CORE_FC_CH0PREAM);
DUMPCORE(HDMI_CORE_FC_CH1PREAM);
DUMPCORE(HDMI_CORE_FC_CH2PREAM);
DUMPCORE(HDMI_CORE_FC_AVICONF0);
DUMPCORE(HDMI_CORE_FC_AVICONF1);
DUMPCORE(HDMI_CORE_FC_AVICONF2);
DUMPCORE(HDMI_CORE_FC_AVIVID);
DUMPCORE(HDMI_CORE_FC_PRCONF);
DUMPCORE(HDMI_CORE_MC_CLKDIS);
DUMPCORE(HDMI_CORE_MC_SWRSTZREQ);
DUMPCORE(HDMI_CORE_MC_FLOWCTRL);
DUMPCORE(HDMI_CORE_MC_PHYRSTZ);
DUMPCORE(HDMI_CORE_MC_LOCKONCLOCK);
DUMPCORE(HDMI_CORE_I2CM_SLAVE);
DUMPCORE(HDMI_CORE_I2CM_ADDRESS);
DUMPCORE(HDMI_CORE_I2CM_DATAO);
DUMPCORE(HDMI_CORE_I2CM_DATAI);
DUMPCORE(HDMI_CORE_I2CM_OPERATION);
DUMPCORE(HDMI_CORE_I2CM_INT);
DUMPCORE(HDMI_CORE_I2CM_CTLINT);
DUMPCORE(HDMI_CORE_I2CM_DIV);
DUMPCORE(HDMI_CORE_I2CM_SEGADDR);
DUMPCORE(HDMI_CORE_I2CM_SOFTRSTZ);
DUMPCORE(HDMI_CORE_I2CM_SEGPTR);
DUMPCORE(HDMI_CORE_I2CM_SS_SCL_HCNT_1_ADDR);
DUMPCORE(HDMI_CORE_I2CM_SS_SCL_HCNT_0_ADDR);
DUMPCORE(HDMI_CORE_I2CM_SS_SCL_LCNT_1_ADDR);
DUMPCORE(HDMI_CORE_I2CM_SS_SCL_LCNT_0_ADDR);
DUMPCORE(HDMI_CORE_I2CM_FS_SCL_HCNT_1_ADDR);
DUMPCORE(HDMI_CORE_I2CM_FS_SCL_HCNT_0_ADDR);
DUMPCORE(HDMI_CORE_I2CM_FS_SCL_LCNT_1_ADDR);
DUMPCORE(HDMI_CORE_I2CM_FS_SCL_LCNT_0_ADDR);
DUMPCORE(HDMI_CORE_I2CM_SDA_HOLD_ADDR);
}
static void hdmi_core_init(struct hdmi_core_vid_config *video_cfg,
const struct hdmi_config *cfg)
{
DSSDBG("hdmi_core_init\n");
video_cfg->v_fc_config.vm = cfg->vm;
/* video core */
video_cfg->data_enable_pol = 1; /* It is always 1 */
video_cfg->hblank = cfg->vm.hfront_porch +
cfg->vm.hback_porch + cfg->vm.hsync_len;
video_cfg->vblank_osc = 0;
video_cfg->vblank = cfg->vm.vsync_len + cfg->vm.vfront_porch +
cfg->vm.vback_porch;
video_cfg->v_fc_config.hdmi_dvi_mode = cfg->hdmi_dvi_mode;
if (cfg->vm.flags & DISPLAY_FLAGS_INTERLACED) {
/* set vblank_osc if vblank is fractional */
if (video_cfg->vblank % 2 != 0)
video_cfg->vblank_osc = 1;
video_cfg->v_fc_config.vm.vactive /= 2;
video_cfg->vblank /= 2;
video_cfg->v_fc_config.vm.vfront_porch /= 2;
video_cfg->v_fc_config.vm.vsync_len /= 2;
video_cfg->v_fc_config.vm.vback_porch /= 2;
}
if (cfg->vm.flags & DISPLAY_FLAGS_DOUBLECLK) {
video_cfg->v_fc_config.vm.hactive *= 2;
video_cfg->hblank *= 2;
video_cfg->v_fc_config.vm.hfront_porch *= 2;
video_cfg->v_fc_config.vm.hsync_len *= 2;
video_cfg->v_fc_config.vm.hback_porch *= 2;
}
}
/* DSS_HDMI_CORE_VIDEO_CONFIG */
static void hdmi_core_video_config(struct hdmi_core_data *core,
const struct hdmi_core_vid_config *cfg)
{
void __iomem *base = core->base;
const struct videomode *vm = &cfg->v_fc_config.vm;
unsigned char r = 0;
bool vsync_pol, hsync_pol;
vsync_pol = !!(vm->flags & DISPLAY_FLAGS_VSYNC_HIGH);
hsync_pol = !!(vm->flags & DISPLAY_FLAGS_HSYNC_HIGH);
/* Set hsync, vsync and data-enable polarity */
r = hdmi_read_reg(base, HDMI_CORE_FC_INVIDCONF);
r = FLD_MOD(r, vsync_pol, 6, 6);
r = FLD_MOD(r, hsync_pol, 5, 5);
r = FLD_MOD(r, cfg->data_enable_pol, 4, 4);
r = FLD_MOD(r, cfg->vblank_osc, 1, 1);
r = FLD_MOD(r, !!(vm->flags & DISPLAY_FLAGS_INTERLACED), 0, 0);
hdmi_write_reg(base, HDMI_CORE_FC_INVIDCONF, r);
/* set x resolution */
REG_FLD_MOD(base, HDMI_CORE_FC_INHACTIV1, vm->hactive >> 8, 4, 0);
REG_FLD_MOD(base, HDMI_CORE_FC_INHACTIV0, vm->hactive & 0xFF, 7, 0);
/* set y resolution */
REG_FLD_MOD(base, HDMI_CORE_FC_INVACTIV1, vm->vactive >> 8, 4, 0);
REG_FLD_MOD(base, HDMI_CORE_FC_INVACTIV0, vm->vactive & 0xFF, 7, 0);
/* set horizontal blanking pixels */
REG_FLD_MOD(base, HDMI_CORE_FC_INHBLANK1, cfg->hblank >> 8, 4, 0);
REG_FLD_MOD(base, HDMI_CORE_FC_INHBLANK0, cfg->hblank & 0xFF, 7, 0);
/* set vertical blanking pixels */
REG_FLD_MOD(base, HDMI_CORE_FC_INVBLANK, cfg->vblank, 7, 0);
/* set horizontal sync offset */
REG_FLD_MOD(base, HDMI_CORE_FC_HSYNCINDELAY1, vm->hfront_porch >> 8,
4, 0);
REG_FLD_MOD(base, HDMI_CORE_FC_HSYNCINDELAY0, vm->hfront_porch & 0xFF,
7, 0);
/* set vertical sync offset */
REG_FLD_MOD(base, HDMI_CORE_FC_VSYNCINDELAY, vm->vfront_porch, 7, 0);
/* set horizontal sync pulse width */
REG_FLD_MOD(base, HDMI_CORE_FC_HSYNCINWIDTH1, (vm->hsync_len >> 8),
1, 0);
REG_FLD_MOD(base, HDMI_CORE_FC_HSYNCINWIDTH0, vm->hsync_len & 0xFF,
7, 0);
/* set vertical sync pulse width */
REG_FLD_MOD(base, HDMI_CORE_FC_VSYNCINWIDTH, vm->vsync_len, 5, 0);
/* select DVI mode */
REG_FLD_MOD(base, HDMI_CORE_FC_INVIDCONF,
cfg->v_fc_config.hdmi_dvi_mode, 3, 3);
if (vm->flags & DISPLAY_FLAGS_DOUBLECLK)
REG_FLD_MOD(base, HDMI_CORE_FC_PRCONF, 2, 7, 4);
else
REG_FLD_MOD(base, HDMI_CORE_FC_PRCONF, 1, 7, 4);
}
static void hdmi_core_config_video_packetizer(struct hdmi_core_data *core)
{
void __iomem *base = core->base;
int clr_depth = 0; /* 24 bit color depth */
/* COLOR_DEPTH */
REG_FLD_MOD(base, HDMI_CORE_VP_PR_CD, clr_depth, 7, 4);
/* BYPASS_EN */
REG_FLD_MOD(base, HDMI_CORE_VP_CONF, clr_depth ? 0 : 1, 6, 6);
/* PP_EN */
REG_FLD_MOD(base, HDMI_CORE_VP_CONF, clr_depth ? 1 : 0, 5, 5);
/* YCC422_EN */
REG_FLD_MOD(base, HDMI_CORE_VP_CONF, 0, 3, 3);
/* PP_STUFFING */
REG_FLD_MOD(base, HDMI_CORE_VP_STUFF, clr_depth ? 1 : 0, 1, 1);
/* YCC422_STUFFING */
REG_FLD_MOD(base, HDMI_CORE_VP_STUFF, 1, 2, 2);
/* OUTPUT_SELECTOR */
REG_FLD_MOD(base, HDMI_CORE_VP_CONF, clr_depth ? 0 : 2, 1, 0);
}
static void hdmi_core_config_video_sampler(struct hdmi_core_data *core)
{
int video_mapping = 1; /* for 24 bit color depth */
/* VIDEO_MAPPING */
REG_FLD_MOD(core->base, HDMI_CORE_TX_INVID0, video_mapping, 4, 0);
}
static void hdmi_core_write_avi_infoframe(struct hdmi_core_data *core,
struct hdmi_avi_infoframe *frame)
{
void __iomem *base = core->base;
u8 data[HDMI_INFOFRAME_SIZE(AVI)];
u8 *ptr;
unsigned int y, a, b, s;
unsigned int c, m, r;
unsigned int itc, ec, q, sc;
unsigned int vic;
unsigned int yq, cn, pr;
hdmi_avi_infoframe_pack(frame, data, sizeof(data));
print_hex_dump_debug("AVI: ", DUMP_PREFIX_NONE, 16, 1, data,
HDMI_INFOFRAME_SIZE(AVI), false);
ptr = data + HDMI_INFOFRAME_HEADER_SIZE;
y = (ptr[0] >> 5) & 0x3;
a = (ptr[0] >> 4) & 0x1;
b = (ptr[0] >> 2) & 0x3;
s = (ptr[0] >> 0) & 0x3;
c = (ptr[1] >> 6) & 0x3;
m = (ptr[1] >> 4) & 0x3;
r = (ptr[1] >> 0) & 0xf;
itc = (ptr[2] >> 7) & 0x1;
ec = (ptr[2] >> 4) & 0x7;
q = (ptr[2] >> 2) & 0x3;
sc = (ptr[2] >> 0) & 0x3;
vic = ptr[3];
yq = (ptr[4] >> 6) & 0x3;
cn = (ptr[4] >> 4) & 0x3;
pr = (ptr[4] >> 0) & 0xf;
hdmi_write_reg(base, HDMI_CORE_FC_AVICONF0,
(a << 6) | (s << 4) | (b << 2) | (y << 0));
hdmi_write_reg(base, HDMI_CORE_FC_AVICONF1,
(c << 6) | (m << 4) | (r << 0));
hdmi_write_reg(base, HDMI_CORE_FC_AVICONF2,
(itc << 7) | (ec << 4) | (q << 2) | (sc << 0));
hdmi_write_reg(base, HDMI_CORE_FC_AVIVID, vic);
hdmi_write_reg(base, HDMI_CORE_FC_AVICONF3,
(yq << 2) | (cn << 0));
REG_FLD_MOD(base, HDMI_CORE_FC_PRCONF, pr, 3, 0);
}
static void hdmi_core_write_csc(struct hdmi_core_data *core,
const struct csc_table *csc_coeff)
{
void __iomem *base = core->base;
REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_A1_MSB, csc_coeff->a1 >> 8, 6, 0);
REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_A1_LSB, csc_coeff->a1, 7, 0);
REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_A2_MSB, csc_coeff->a2 >> 8, 6, 0);
REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_A2_LSB, csc_coeff->a2, 7, 0);
REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_A3_MSB, csc_coeff->a3 >> 8, 6, 0);
REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_A3_LSB, csc_coeff->a3, 7, 0);
REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_A4_MSB, csc_coeff->a4 >> 8, 6, 0);
REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_A4_LSB, csc_coeff->a4, 7, 0);
REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_B1_MSB, csc_coeff->b1 >> 8, 6, 0);
REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_B1_LSB, csc_coeff->b1, 7, 0);
REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_B2_MSB, csc_coeff->b2 >> 8, 6, 0);
REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_B2_LSB, csc_coeff->b2, 7, 0);
REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_B3_MSB, csc_coeff->b3 >> 8, 6, 0);
REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_B3_LSB, csc_coeff->b3, 7, 0);
REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_B4_MSB, csc_coeff->b4 >> 8, 6, 0);
REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_B4_LSB, csc_coeff->b4, 7, 0);
REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_C1_MSB, csc_coeff->c1 >> 8, 6, 0);
REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_C1_LSB, csc_coeff->c1, 7, 0);
REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_C2_MSB, csc_coeff->c2 >> 8, 6, 0);
REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_C2_LSB, csc_coeff->c2, 7, 0);
REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_C3_MSB, csc_coeff->c3 >> 8, 6, 0);
REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_C3_LSB, csc_coeff->c3, 7, 0);
REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_C4_MSB, csc_coeff->c4 >> 8, 6, 0);
REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_C4_LSB, csc_coeff->c4, 7, 0);
/* enable CSC */
REG_FLD_MOD(base, HDMI_CORE_MC_FLOWCTRL, 0x1, 0, 0);
}
static void hdmi_core_configure_range(struct hdmi_core_data *core,
enum hdmi_quantization_range range)
{
static const struct csc_table csc_limited_range = {
7036, 0, 0, 32, 0, 7036, 0, 32, 0, 0, 7036, 32
};
static const struct csc_table csc_full_range = {
8192, 0, 0, 0, 0, 8192, 0, 0, 0, 0, 8192, 0
};
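/*
 * Rough reading of the tables above (editorial note, not from the TRM):
 * 8192 is unity gain in the CSC's fixed-point format, so the limited-range
 * table scales by 7036/8192 ~= 219/255 and adds an offset corresponding to
 * the +16 black level of limited-range RGB.
 */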
const struct csc_table *csc_coeff;
/* CSC_COLORDEPTH = 24 bits */
REG_FLD_MOD(core->base, HDMI_CORE_CSC_SCALE, 0, 7, 4);
switch (range) {
case HDMI_QUANTIZATION_RANGE_FULL:
csc_coeff = &csc_full_range;
break;
case HDMI_QUANTIZATION_RANGE_DEFAULT:
case HDMI_QUANTIZATION_RANGE_LIMITED:
default:
csc_coeff = &csc_limited_range;
break;
}
hdmi_core_write_csc(core, csc_coeff);
}
static void hdmi_core_enable_video_path(struct hdmi_core_data *core)
{
void __iomem *base = core->base;
DSSDBG("hdmi_core_enable_video_path\n");
REG_FLD_MOD(base, HDMI_CORE_FC_CTRLDUR, 0x0C, 7, 0);
REG_FLD_MOD(base, HDMI_CORE_FC_EXCTRLDUR, 0x20, 7, 0);
REG_FLD_MOD(base, HDMI_CORE_FC_EXCTRLSPAC, 0x01, 7, 0);
REG_FLD_MOD(base, HDMI_CORE_FC_CH0PREAM, 0x0B, 7, 0);
REG_FLD_MOD(base, HDMI_CORE_FC_CH1PREAM, 0x16, 5, 0);
REG_FLD_MOD(base, HDMI_CORE_FC_CH2PREAM, 0x21, 5, 0);
REG_FLD_MOD(base, HDMI_CORE_MC_CLKDIS, 0x00, 0, 0);
REG_FLD_MOD(base, HDMI_CORE_MC_CLKDIS, 0x00, 1, 1);
}
static void hdmi_core_mask_interrupts(struct hdmi_core_data *core)
{
void __iomem *base = core->base;
/* Master IRQ mask */
REG_FLD_MOD(base, HDMI_CORE_IH_MUTE, 0x3, 1, 0);
/* Mask all the interrupts in HDMI core */
REG_FLD_MOD(base, HDMI_CORE_VP_MASK, 0xff, 7, 0);
REG_FLD_MOD(base, HDMI_CORE_FC_MASK0, 0xe7, 7, 0);
REG_FLD_MOD(base, HDMI_CORE_FC_MASK1, 0xfb, 7, 0);
REG_FLD_MOD(base, HDMI_CORE_FC_MASK2, 0x3, 1, 0);
REG_FLD_MOD(base, HDMI_CORE_AUD_INT, 0x3, 3, 2);
REG_FLD_MOD(base, HDMI_CORE_AUD_GP_MASK, 0x3, 1, 0);
REG_FLD_MOD(base, HDMI_CORE_CEC_MASK, 0x7f, 6, 0);
REG_FLD_MOD(base, HDMI_CORE_I2CM_CTLINT, 0x1, 6, 6);
REG_FLD_MOD(base, HDMI_CORE_I2CM_CTLINT, 0x1, 2, 2);
REG_FLD_MOD(base, HDMI_CORE_I2CM_INT, 0x1, 2, 2);
REG_FLD_MOD(base, HDMI_CORE_PHY_MASK0, 0xf3, 7, 0);
REG_FLD_MOD(base, HDMI_CORE_IH_PHY_STAT0, 0xff, 7, 0);
/* Clear all the current interrupt bits */
REG_FLD_MOD(base, HDMI_CORE_IH_VP_STAT0, 0xff, 7, 0);
REG_FLD_MOD(base, HDMI_CORE_IH_FC_STAT0, 0xe7, 7, 0);
REG_FLD_MOD(base, HDMI_CORE_IH_FC_STAT1, 0xfb, 7, 0);
REG_FLD_MOD(base, HDMI_CORE_IH_FC_STAT2, 0x3, 1, 0);
REG_FLD_MOD(base, HDMI_CORE_IH_AS_STAT0, 0x7, 2, 0);
REG_FLD_MOD(base, HDMI_CORE_IH_CEC_STAT0, 0x7f, 6, 0);
REG_FLD_MOD(base, HDMI_CORE_IH_I2CM_STAT0, 0x3, 1, 0);
REG_FLD_MOD(base, HDMI_CORE_IH_PHY_STAT0, 0xff, 7, 0);
}
static void hdmi_core_enable_interrupts(struct hdmi_core_data *core)
{
/* Unmute interrupts */
REG_FLD_MOD(core->base, HDMI_CORE_IH_MUTE, 0x0, 1, 0);
}
int hdmi5_core_handle_irqs(struct hdmi_core_data *core)
{
void __iomem *base = core->base;
REG_FLD_MOD(base, HDMI_CORE_IH_FC_STAT0, 0xff, 7, 0);
REG_FLD_MOD(base, HDMI_CORE_IH_FC_STAT1, 0xff, 7, 0);
REG_FLD_MOD(base, HDMI_CORE_IH_FC_STAT2, 0xff, 7, 0);
REG_FLD_MOD(base, HDMI_CORE_IH_AS_STAT0, 0xff, 7, 0);
REG_FLD_MOD(base, HDMI_CORE_IH_PHY_STAT0, 0xff, 7, 0);
REG_FLD_MOD(base, HDMI_CORE_IH_I2CM_STAT0, 0xff, 7, 0);
REG_FLD_MOD(base, HDMI_CORE_IH_CEC_STAT0, 0xff, 7, 0);
REG_FLD_MOD(base, HDMI_CORE_IH_VP_STAT0, 0xff, 7, 0);
REG_FLD_MOD(base, HDMI_CORE_IH_I2CMPHY_STAT0, 0xff, 7, 0);
return 0;
}
void hdmi5_configure(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
struct hdmi_config *cfg)
{
struct videomode vm;
struct hdmi_video_format video_format;
struct hdmi_core_vid_config v_core_cfg;
enum hdmi_quantization_range range;
hdmi_core_mask_interrupts(core);
if (cfg->hdmi_dvi_mode == HDMI_HDMI) {
char vic = cfg->infoframe.video_code;
/* All CEA modes other than VIC 1 use limited quantization range. */
range = vic > 1 ? HDMI_QUANTIZATION_RANGE_LIMITED :
HDMI_QUANTIZATION_RANGE_FULL;
} else {
range = HDMI_QUANTIZATION_RANGE_FULL;
}
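/*
 * (VIC 1 is 640x480p, a VESA-derived format that CEA-861 treats as
 *  full range - an editorial note, not something the code checks beyond
 *  the comparison above.)
 */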
hdmi_core_init(&v_core_cfg, cfg);
hdmi_wp_init_vid_fmt_timings(&video_format, &vm, cfg);
hdmi_wp_video_config_timing(wp, &vm);
/* video config */
video_format.packing_mode = HDMI_PACK_24b_RGB_YUV444_YUV422;
hdmi_wp_video_config_format(wp, &video_format);
hdmi_wp_video_config_interface(wp, &vm);
hdmi_core_configure_range(core, range);
cfg->infoframe.quantization_range = range;
/*
* configure core video part, set software reset in the core
*/
v_core_cfg.packet_mode = HDMI_PACKETMODE24BITPERPIXEL;
hdmi_core_video_config(core, &v_core_cfg);
hdmi_core_config_video_packetizer(core);
hdmi_core_config_video_sampler(core);
if (cfg->hdmi_dvi_mode == HDMI_HDMI)
hdmi_core_write_avi_infoframe(core, &cfg->infoframe);
hdmi_core_enable_video_path(core);
hdmi_core_enable_interrupts(core);
}
static void hdmi5_core_audio_config(struct hdmi_core_data *core,
struct hdmi_core_audio_config *cfg)
{
void __iomem *base = core->base;
u8 val;
/* Mute audio before configuring */
REG_FLD_MOD(base, HDMI_CORE_FC_AUDSCONF, 0xf, 7, 4);
/* Set the N parameter */
REG_FLD_MOD(base, HDMI_CORE_AUD_N1, cfg->n, 7, 0);
REG_FLD_MOD(base, HDMI_CORE_AUD_N2, cfg->n >> 8, 7, 0);
REG_FLD_MOD(base, HDMI_CORE_AUD_N3, cfg->n >> 16, 3, 0);
/*
* CTS manual mode. Automatic mode is not supported when using audio
* parallel interface.
*/
REG_FLD_MOD(base, HDMI_CORE_AUD_CTS3, 1, 4, 4);
REG_FLD_MOD(base, HDMI_CORE_AUD_CTS1, cfg->cts, 7, 0);
REG_FLD_MOD(base, HDMI_CORE_AUD_CTS2, cfg->cts >> 8, 7, 0);
REG_FLD_MOD(base, HDMI_CORE_AUD_CTS3, cfg->cts >> 16, 3, 0);
/* Layout of Audio Sample Packets: 2-channel or multichannels */
if (cfg->layout == HDMI_AUDIO_LAYOUT_2CH)
REG_FLD_MOD(base, HDMI_CORE_FC_AUDSCONF, 0, 0, 0);
else
REG_FLD_MOD(base, HDMI_CORE_FC_AUDSCONF, 1, 0, 0);
/* Configure IEC-60958 Validity bits */
/* Channel 0 is valid */
REG_FLD_MOD(base, HDMI_CORE_FC_AUDSV, 0, 0, 0);
REG_FLD_MOD(base, HDMI_CORE_FC_AUDSV, 0, 4, 4);
if (cfg->layout == HDMI_AUDIO_LAYOUT_2CH)
val = 1;
else
val = 0;
/* Channels 1, 2 setting */
REG_FLD_MOD(base, HDMI_CORE_FC_AUDSV, val, 1, 1);
REG_FLD_MOD(base, HDMI_CORE_FC_AUDSV, val, 5, 5);
REG_FLD_MOD(base, HDMI_CORE_FC_AUDSV, val, 2, 2);
REG_FLD_MOD(base, HDMI_CORE_FC_AUDSV, val, 6, 6);
/* Channel 3 setting */
if (cfg->layout == HDMI_AUDIO_LAYOUT_6CH)
val = 1;
REG_FLD_MOD(base, HDMI_CORE_FC_AUDSV, val, 3, 3);
REG_FLD_MOD(base, HDMI_CORE_FC_AUDSV, val, 7, 7);
/* Configure IEC-60958 User bits */
/* TODO: should be set by user. */
REG_FLD_MOD(base, HDMI_CORE_FC_AUDSU, 0, 7, 0);
/* Configure IEC-60958 Channel Status word */
/* CGMSA */
val = cfg->iec60958_cfg->status[5] & IEC958_AES5_CON_CGMSA;
REG_FLD_MOD(base, HDMI_CORE_FC_AUDSCHNLS(0), val, 5, 4);
/* Copyright */
val = (cfg->iec60958_cfg->status[0] &
IEC958_AES0_CON_NOT_COPYRIGHT) >> 2;
REG_FLD_MOD(base, HDMI_CORE_FC_AUDSCHNLS(0), val, 0, 0);
/* Category */
hdmi_write_reg(base, HDMI_CORE_FC_AUDSCHNLS(1),
cfg->iec60958_cfg->status[1]);
/* PCM audio mode */
val = (cfg->iec60958_cfg->status[0] & IEC958_AES0_CON_MODE) >> 6;
REG_FLD_MOD(base, HDMI_CORE_FC_AUDSCHNLS(2), val, 6, 4);
/* Source number */
val = cfg->iec60958_cfg->status[2] & IEC958_AES2_CON_SOURCE;
REG_FLD_MOD(base, HDMI_CORE_FC_AUDSCHNLS(2), val, 3, 0);
/* Channel number right 0 */
REG_FLD_MOD(base, HDMI_CORE_FC_AUDSCHNLS(3), 2, 3, 0);
/* Channel number right 1 */
REG_FLD_MOD(base, HDMI_CORE_FC_AUDSCHNLS(3), 4, 7, 4);
/* Channel number right 2 */
REG_FLD_MOD(base, HDMI_CORE_FC_AUDSCHNLS(4), 6, 3, 0);
/* Channel number right 3 */
REG_FLD_MOD(base, HDMI_CORE_FC_AUDSCHNLS(4), 8, 7, 4);
/* Channel number left 0 */
REG_FLD_MOD(base, HDMI_CORE_FC_AUDSCHNLS(5), 1, 3, 0);
/* Channel number left 1 */
REG_FLD_MOD(base, HDMI_CORE_FC_AUDSCHNLS(5), 3, 7, 4);
/* Channel number left 2 */
REG_FLD_MOD(base, HDMI_CORE_FC_AUDSCHNLS(6), 5, 3, 0);
/* Channel number left 3 */
REG_FLD_MOD(base, HDMI_CORE_FC_AUDSCHNLS(6), 7, 7, 4);
/* Clock accuracy and sample rate */
hdmi_write_reg(base, HDMI_CORE_FC_AUDSCHNLS(7),
cfg->iec60958_cfg->status[3]);
/* Original sample rate and word length */
hdmi_write_reg(base, HDMI_CORE_FC_AUDSCHNLS(8),
cfg->iec60958_cfg->status[4]);
/* Enable FIFO empty and full interrupts */
REG_FLD_MOD(base, HDMI_CORE_AUD_INT, 3, 3, 2);
/* Configure GPA */
/* select HBR/SPDIF interfaces */
if (cfg->layout == HDMI_AUDIO_LAYOUT_2CH) {
/* select HBR/SPDIF interfaces */
REG_FLD_MOD(base, HDMI_CORE_AUD_CONF0, 0, 5, 5);
/* enable two channels in GPA */
REG_FLD_MOD(base, HDMI_CORE_AUD_GP_CONF1, 3, 7, 0);
} else if (cfg->layout == HDMI_AUDIO_LAYOUT_6CH) {
/* select HBR/SPDIF interfaces */
REG_FLD_MOD(base, HDMI_CORE_AUD_CONF0, 0, 5, 5);
/* enable six channels in GPA */
REG_FLD_MOD(base, HDMI_CORE_AUD_GP_CONF1, 0x3F, 7, 0);
} else {
/* select HBR/SPDIF interfaces */
REG_FLD_MOD(base, HDMI_CORE_AUD_CONF0, 0, 5, 5);
/* enable eight channels in GPA */
REG_FLD_MOD(base, HDMI_CORE_AUD_GP_CONF1, 0xFF, 7, 0);
}
/* disable HBR */
REG_FLD_MOD(base, HDMI_CORE_AUD_GP_CONF2, 0, 0, 0);
/* enable PCUV */
REG_FLD_MOD(base, HDMI_CORE_AUD_GP_CONF2, 1, 1, 1);
/* enable GPA FIFO full and empty mask */
REG_FLD_MOD(base, HDMI_CORE_AUD_GP_MASK, 3, 1, 0);
/* set polarity of GPA FIFO empty interrupts */
REG_FLD_MOD(base, HDMI_CORE_AUD_GP_POL, 1, 0, 0);
/* unmute audio */
REG_FLD_MOD(base, HDMI_CORE_FC_AUDSCONF, 0, 7, 4);
}
static void hdmi5_core_audio_infoframe_cfg(struct hdmi_core_data *core,
struct snd_cea_861_aud_if *info_aud)
{
void __iomem *base = core->base;
/* channel count and coding type fields in AUDICONF0 are swapped */
hdmi_write_reg(base, HDMI_CORE_FC_AUDICONF0,
(info_aud->db1_ct_cc & CEA861_AUDIO_INFOFRAME_DB1CC) << 4 |
(info_aud->db1_ct_cc & CEA861_AUDIO_INFOFRAME_DB1CT) >> 4);
hdmi_write_reg(base, HDMI_CORE_FC_AUDICONF1, info_aud->db2_sf_ss);
hdmi_write_reg(base, HDMI_CORE_FC_AUDICONF2, info_aud->db4_ca);
hdmi_write_reg(base, HDMI_CORE_FC_AUDICONF3,
(info_aud->db5_dminh_lsv & CEA861_AUDIO_INFOFRAME_DB5_DM_INH) >> 3 |
(info_aud->db5_dminh_lsv & CEA861_AUDIO_INFOFRAME_DB5_LSV));
}
int hdmi5_audio_config(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
struct omap_dss_audio *audio, u32 pclk)
{
struct hdmi_audio_format audio_format;
struct hdmi_audio_dma audio_dma;
struct hdmi_core_audio_config core_cfg;
int n, cts, channel_count;
unsigned int fs_nr;
bool word_length_16b = false;
if (!audio || !audio->iec || !audio->cea || !core)
return -EINVAL;
core_cfg.iec60958_cfg = audio->iec;
if (!(audio->iec->status[4] & IEC958_AES4_CON_MAX_WORDLEN_24) &&
(audio->iec->status[4] & IEC958_AES4_CON_WORDLEN_20_16))
word_length_16b = true;
/* only 16-bit word length supported atm */
if (!word_length_16b)
return -EINVAL;
switch (audio->iec->status[3] & IEC958_AES3_CON_FS) {
case IEC958_AES3_CON_FS_32000:
fs_nr = 32000;
break;
case IEC958_AES3_CON_FS_44100:
fs_nr = 44100;
break;
case IEC958_AES3_CON_FS_48000:
fs_nr = 48000;
break;
case IEC958_AES3_CON_FS_88200:
fs_nr = 88200;
break;
case IEC958_AES3_CON_FS_96000:
fs_nr = 96000;
break;
case IEC958_AES3_CON_FS_176400:
fs_nr = 176400;
break;
case IEC958_AES3_CON_FS_192000:
fs_nr = 192000;
break;
default:
return -EINVAL;
}
hdmi_compute_acr(pclk, fs_nr, &n, &cts);
core_cfg.n = n;
core_cfg.cts = cts;
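/*
 * For reference, HDMI audio clock regeneration follows
 * 128 * fs = f_TMDS * N / CTS; e.g. 48 kHz audio on a 148.5 MHz TMDS
 * clock uses the recommended N = 6144, CTS = 148500. (Illustrative
 * values from the HDMI spec, not something hard-coded here.)
 */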
/* Audio channels settings */
channel_count = (audio->cea->db1_ct_cc & CEA861_AUDIO_INFOFRAME_DB1CC)
+ 1;
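/* The CEA-861 CC field encodes channel count minus one, e.g. CC = 1 -> 2ch. */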
if (channel_count == 2)
core_cfg.layout = HDMI_AUDIO_LAYOUT_2CH;
else if (channel_count == 6)
core_cfg.layout = HDMI_AUDIO_LAYOUT_6CH;
else
core_cfg.layout = HDMI_AUDIO_LAYOUT_8CH;
/* DMA settings */
if (word_length_16b)
audio_dma.transfer_size = 0x10;
else
audio_dma.transfer_size = 0x20;
audio_dma.block_size = 0xC0;
audio_dma.mode = HDMI_AUDIO_TRANSF_DMA;
audio_dma.fifo_threshold = 0x20; /* in number of samples */
/* audio FIFO format settings for 16-bit samples */
audio_format.samples_per_word = HDMI_AUDIO_ONEWORD_TWOSAMPLES;
audio_format.sample_size = HDMI_AUDIO_SAMPLE_16BITS;
audio_format.justification = HDMI_AUDIO_JUSTIFY_LEFT;
audio_format.sample_order = HDMI_AUDIO_SAMPLE_LEFT_FIRST;
/* only LPCM atm */
audio_format.type = HDMI_AUDIO_TYPE_LPCM;
/* only allowed option */
audio_format.sample_order = HDMI_AUDIO_SAMPLE_LEFT_FIRST;
/* disable start/stop signals of IEC 60958 blocks */
audio_format.en_sig_blk_strt_end = HDMI_AUDIO_BLOCK_SIG_STARTEND_ON;
/* configure DMA and audio FIFO format */
hdmi_wp_audio_config_dma(wp, &audio_dma);
hdmi_wp_audio_config_format(wp, &audio_format);
/* configure the core */
hdmi5_core_audio_config(core, &core_cfg);
/* configure CEA 861 audio infoframe */
hdmi5_core_audio_infoframe_cfg(core, audio->cea);
return 0;
}
int hdmi5_core_init(struct platform_device *pdev, struct hdmi_core_data *core)
{
core->base = devm_platform_ioremap_resource_byname(pdev, "core");
if (IS_ERR(core->base))
return PTR_ERR(core->base);
return 0;
}
| linux-master | drivers/gpu/drm/omapdrm/dss/hdmi5_core.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* HDMI driver for OMAP5
*
* Copyright (C) 2014 Texas Instruments Incorporated - https://www.ti.com/
*
* Authors:
* Yong Zhi
* Mythri pk
* Archit Taneja <[email protected]>
* Tomi Valkeinen <[email protected]>
*/
#define DSS_SUBSYS_NAME "HDMI"
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/clk.h>
#include <linux/regulator/consumer.h>
#include <linux/component.h>
#include <linux/of.h>
#include <linux/of_graph.h>
#include <sound/omap-hdmi-audio.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_state_helper.h>
#include <drm/drm_edid.h>
#include "omapdss.h"
#include "hdmi5_core.h"
#include "dss.h"
static int hdmi_runtime_get(struct omap_hdmi *hdmi)
{
int r;
DSSDBG("hdmi_runtime_get\n");
r = pm_runtime_get_sync(&hdmi->pdev->dev);
if (WARN_ON(r < 0)) {
pm_runtime_put_noidle(&hdmi->pdev->dev);
return r;
}
return 0;
}
static void hdmi_runtime_put(struct omap_hdmi *hdmi)
{
int r;
DSSDBG("hdmi_runtime_put\n");
r = pm_runtime_put_sync(&hdmi->pdev->dev);
WARN_ON(r < 0 && r != -ENOSYS);
}
static irqreturn_t hdmi_irq_handler(int irq, void *data)
{
struct omap_hdmi *hdmi = data;
struct hdmi_wp_data *wp = &hdmi->wp;
u32 irqstatus;
irqstatus = hdmi_wp_get_irqstatus(wp);
hdmi_wp_set_irqstatus(wp, irqstatus);
if ((irqstatus & HDMI_IRQ_LINK_CONNECT) &&
irqstatus & HDMI_IRQ_LINK_DISCONNECT) {
u32 v;
/*
* If we get both connect and disconnect interrupts at the same
* time, turn off the PHY, clear interrupts, and restart, which
* raises connect interrupt if a cable is connected, or nothing
* if cable is not connected.
*/
hdmi_wp_set_phy_pwr(wp, HDMI_PHYPWRCMD_OFF);
/*
* We always get bogus CONNECT & DISCONNECT interrupts when
* setting the PHY to LDOON. To ignore those, we force the RXDET
* line to 0 until the PHY power state has been changed.
*/
v = hdmi_read_reg(hdmi->phy.base, HDMI_TXPHY_PAD_CFG_CTRL);
v = FLD_MOD(v, 1, 15, 15); /* FORCE_RXDET_HIGH */
v = FLD_MOD(v, 0, 14, 7); /* RXDET_LINE */
hdmi_write_reg(hdmi->phy.base, HDMI_TXPHY_PAD_CFG_CTRL, v);
hdmi_wp_set_irqstatus(wp, HDMI_IRQ_LINK_CONNECT |
HDMI_IRQ_LINK_DISCONNECT);
hdmi_wp_set_phy_pwr(wp, HDMI_PHYPWRCMD_LDOON);
REG_FLD_MOD(hdmi->phy.base, HDMI_TXPHY_PAD_CFG_CTRL, 0, 15, 15);
} else if (irqstatus & HDMI_IRQ_LINK_CONNECT) {
hdmi_wp_set_phy_pwr(wp, HDMI_PHYPWRCMD_TXON);
} else if (irqstatus & HDMI_IRQ_LINK_DISCONNECT) {
hdmi_wp_set_phy_pwr(wp, HDMI_PHYPWRCMD_LDOON);
}
return IRQ_HANDLED;
}
static int hdmi_power_on_core(struct omap_hdmi *hdmi)
{
int r;
r = regulator_enable(hdmi->vdda_reg);
if (r)
return r;
r = hdmi_runtime_get(hdmi);
if (r)
goto err_runtime_get;
/* Make selection of HDMI in DSS */
dss_select_hdmi_venc_clk_source(hdmi->dss, DSS_HDMI_M_PCLK);
hdmi->core_enabled = true;
return 0;
err_runtime_get:
regulator_disable(hdmi->vdda_reg);
return r;
}
static void hdmi_power_off_core(struct omap_hdmi *hdmi)
{
hdmi->core_enabled = false;
hdmi_runtime_put(hdmi);
regulator_disable(hdmi->vdda_reg);
}
static int hdmi_power_on_full(struct omap_hdmi *hdmi)
{
int r;
const struct videomode *vm;
struct dss_pll_clock_info hdmi_cinfo = { 0 };
unsigned int pc;
r = hdmi_power_on_core(hdmi);
if (r)
return r;
vm = &hdmi->cfg.vm;
DSSDBG("hdmi_power_on hactive= %d vactive = %d\n", vm->hactive,
vm->vactive);
pc = vm->pixelclock;
if (vm->flags & DISPLAY_FLAGS_DOUBLECLK)
pc *= 2;
/* DSS_HDMI_TCLK is bitclk / 10 */
pc *= 10;
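/*
 * e.g. a 148.5 MHz pixel clock (1080p60) makes the PLL target 1.485 GHz,
 * i.e. the TMDS bit rate at 8 bits per channel. (Illustrative figure,
 * not a value used by the code.)
 */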
dss_pll_calc_b(&hdmi->pll.pll, clk_get_rate(hdmi->pll.pll.clkin),
pc, &hdmi_cinfo);
/* disable and clear irqs */
hdmi_wp_clear_irqenable(&hdmi->wp, 0xffffffff);
hdmi_wp_set_irqstatus(&hdmi->wp,
hdmi_wp_get_irqstatus(&hdmi->wp));
r = dss_pll_enable(&hdmi->pll.pll);
if (r) {
DSSERR("Failed to enable PLL\n");
goto err_pll_enable;
}
r = dss_pll_set_config(&hdmi->pll.pll, &hdmi_cinfo);
if (r) {
DSSERR("Failed to configure PLL\n");
goto err_pll_cfg;
}
r = hdmi_phy_configure(&hdmi->phy, hdmi_cinfo.clkdco,
hdmi_cinfo.clkout[0]);
if (r) {
DSSDBG("Failed to start PHY\n");
goto err_phy_cfg;
}
r = hdmi_wp_set_phy_pwr(&hdmi->wp, HDMI_PHYPWRCMD_LDOON);
if (r)
goto err_phy_pwr;
hdmi5_configure(&hdmi->core, &hdmi->wp, &hdmi->cfg);
r = dss_mgr_enable(&hdmi->output);
if (r)
goto err_mgr_enable;
r = hdmi_wp_video_start(&hdmi->wp);
if (r)
goto err_vid_enable;
hdmi_wp_set_irqenable(&hdmi->wp,
HDMI_IRQ_LINK_CONNECT | HDMI_IRQ_LINK_DISCONNECT);
return 0;
err_vid_enable:
dss_mgr_disable(&hdmi->output);
err_mgr_enable:
hdmi_wp_set_phy_pwr(&hdmi->wp, HDMI_PHYPWRCMD_OFF);
err_phy_pwr:
err_phy_cfg:
err_pll_cfg:
dss_pll_disable(&hdmi->pll.pll);
err_pll_enable:
hdmi_power_off_core(hdmi);
return -EIO;
}
static void hdmi_power_off_full(struct omap_hdmi *hdmi)
{
hdmi_wp_clear_irqenable(&hdmi->wp, 0xffffffff);
hdmi_wp_video_stop(&hdmi->wp);
dss_mgr_disable(&hdmi->output);
hdmi_wp_set_phy_pwr(&hdmi->wp, HDMI_PHYPWRCMD_OFF);
dss_pll_disable(&hdmi->pll.pll);
hdmi_power_off_core(hdmi);
}
static int hdmi_dump_regs(struct seq_file *s, void *p)
{
struct omap_hdmi *hdmi = s->private;
mutex_lock(&hdmi->lock);
if (hdmi_runtime_get(hdmi)) {
mutex_unlock(&hdmi->lock);
return 0;
}
hdmi_wp_dump(&hdmi->wp, s);
hdmi_pll_dump(&hdmi->pll, s);
hdmi_phy_dump(&hdmi->phy, s);
hdmi5_core_dump(&hdmi->core, s);
hdmi_runtime_put(hdmi);
mutex_unlock(&hdmi->lock);
return 0;
}
static void hdmi_start_audio_stream(struct omap_hdmi *hd)
{
REG_FLD_MOD(hd->wp.base, HDMI_WP_SYSCONFIG, 1, 3, 2);
hdmi_wp_audio_enable(&hd->wp, true);
hdmi_wp_audio_core_req_enable(&hd->wp, true);
}
static void hdmi_stop_audio_stream(struct omap_hdmi *hd)
{
hdmi_wp_audio_core_req_enable(&hd->wp, false);
hdmi_wp_audio_enable(&hd->wp, false);
REG_FLD_MOD(hd->wp.base, HDMI_WP_SYSCONFIG, hd->wp_idlemode, 3, 2);
}
static int hdmi_core_enable(struct omap_hdmi *hdmi)
{
int r = 0;
DSSDBG("ENTER omapdss_hdmi_core_enable\n");
mutex_lock(&hdmi->lock);
r = hdmi_power_on_core(hdmi);
if (r) {
DSSERR("failed to power on device\n");
goto err0;
}
mutex_unlock(&hdmi->lock);
return 0;
err0:
mutex_unlock(&hdmi->lock);
return r;
}
static void hdmi_core_disable(struct omap_hdmi *hdmi)
{
DSSDBG("Enter omapdss_hdmi_core_disable\n");
mutex_lock(&hdmi->lock);
hdmi_power_off_core(hdmi);
mutex_unlock(&hdmi->lock);
}
/* -----------------------------------------------------------------------------
* DRM Bridge Operations
*/
static int hdmi5_bridge_attach(struct drm_bridge *bridge,
enum drm_bridge_attach_flags flags)
{
struct omap_hdmi *hdmi = drm_bridge_to_hdmi(bridge);
if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
return -EINVAL;
return drm_bridge_attach(bridge->encoder, hdmi->output.next_bridge,
bridge, flags);
}
static void hdmi5_bridge_mode_set(struct drm_bridge *bridge,
const struct drm_display_mode *mode,
const struct drm_display_mode *adjusted_mode)
{
struct omap_hdmi *hdmi = drm_bridge_to_hdmi(bridge);
mutex_lock(&hdmi->lock);
drm_display_mode_to_videomode(adjusted_mode, &hdmi->cfg.vm);
dispc_set_tv_pclk(hdmi->dss->dispc, adjusted_mode->clock * 1000);
mutex_unlock(&hdmi->lock);
}
static void hdmi5_bridge_enable(struct drm_bridge *bridge,
struct drm_bridge_state *bridge_state)
{
struct omap_hdmi *hdmi = drm_bridge_to_hdmi(bridge);
struct drm_atomic_state *state = bridge_state->base.state;
struct drm_connector_state *conn_state;
struct drm_connector *connector;
struct drm_crtc_state *crtc_state;
unsigned long flags;
int ret;
/*
* None of these should fail, as the bridge can't be enabled without a
* valid CRTC to connector path with fully populated new states.
*/
connector = drm_atomic_get_new_connector_for_encoder(state,
bridge->encoder);
if (WARN_ON(!connector))
return;
conn_state = drm_atomic_get_new_connector_state(state, connector);
if (WARN_ON(!conn_state))
return;
crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc);
if (WARN_ON(!crtc_state))
return;
hdmi->cfg.hdmi_dvi_mode = connector->display_info.is_hdmi
? HDMI_HDMI : HDMI_DVI;
if (connector->display_info.is_hdmi) {
const struct drm_display_mode *mode;
struct hdmi_avi_infoframe avi;
mode = &crtc_state->adjusted_mode;
ret = drm_hdmi_avi_infoframe_from_display_mode(&avi, connector,
mode);
if (ret == 0)
hdmi->cfg.infoframe = avi;
}
mutex_lock(&hdmi->lock);
ret = hdmi_power_on_full(hdmi);
if (ret) {
DSSERR("failed to power on device\n");
goto done;
}
if (hdmi->audio_configured) {
ret = hdmi5_audio_config(&hdmi->core, &hdmi->wp,
&hdmi->audio_config,
hdmi->cfg.vm.pixelclock);
if (ret) {
DSSERR("Error restoring audio configuration: %d", ret);
hdmi->audio_abort_cb(&hdmi->pdev->dev);
hdmi->audio_configured = false;
}
}
spin_lock_irqsave(&hdmi->audio_playing_lock, flags);
if (hdmi->audio_configured && hdmi->audio_playing)
hdmi_start_audio_stream(hdmi);
hdmi->display_enabled = true;
spin_unlock_irqrestore(&hdmi->audio_playing_lock, flags);
done:
mutex_unlock(&hdmi->lock);
}
static void hdmi5_bridge_disable(struct drm_bridge *bridge,
struct drm_bridge_state *bridge_state)
{
struct omap_hdmi *hdmi = drm_bridge_to_hdmi(bridge);
unsigned long flags;
mutex_lock(&hdmi->lock);
spin_lock_irqsave(&hdmi->audio_playing_lock, flags);
hdmi_stop_audio_stream(hdmi);
hdmi->display_enabled = false;
spin_unlock_irqrestore(&hdmi->audio_playing_lock, flags);
hdmi_power_off_full(hdmi);
mutex_unlock(&hdmi->lock);
}
static struct edid *hdmi5_bridge_get_edid(struct drm_bridge *bridge,
struct drm_connector *connector)
{
struct omap_hdmi *hdmi = drm_bridge_to_hdmi(bridge);
struct edid *edid;
bool need_enable;
int idlemode;
int r;
need_enable = hdmi->core_enabled == false;
if (need_enable) {
r = hdmi_core_enable(hdmi);
if (r)
return NULL;
}
mutex_lock(&hdmi->lock);
r = hdmi_runtime_get(hdmi);
BUG_ON(r);
idlemode = REG_GET(hdmi->wp.base, HDMI_WP_SYSCONFIG, 3, 2);
/* No-idle mode */
REG_FLD_MOD(hdmi->wp.base, HDMI_WP_SYSCONFIG, 1, 3, 2);
hdmi5_core_ddc_init(&hdmi->core);
edid = drm_do_get_edid(connector, hdmi5_core_ddc_read, &hdmi->core);
hdmi5_core_ddc_uninit(&hdmi->core);
REG_FLD_MOD(hdmi->wp.base, HDMI_WP_SYSCONFIG, idlemode, 3, 2);
hdmi_runtime_put(hdmi);
mutex_unlock(&hdmi->lock);
if (need_enable)
hdmi_core_disable(hdmi);
return (struct edid *)edid;
}
static const struct drm_bridge_funcs hdmi5_bridge_funcs = {
.attach = hdmi5_bridge_attach,
.mode_set = hdmi5_bridge_mode_set,
.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
.atomic_reset = drm_atomic_helper_bridge_reset,
.atomic_enable = hdmi5_bridge_enable,
.atomic_disable = hdmi5_bridge_disable,
.get_edid = hdmi5_bridge_get_edid,
};
static void hdmi5_bridge_init(struct omap_hdmi *hdmi)
{
hdmi->bridge.funcs = &hdmi5_bridge_funcs;
hdmi->bridge.of_node = hdmi->pdev->dev.of_node;
hdmi->bridge.ops = DRM_BRIDGE_OP_EDID;
hdmi->bridge.type = DRM_MODE_CONNECTOR_HDMIA;
drm_bridge_add(&hdmi->bridge);
}
static void hdmi5_bridge_cleanup(struct omap_hdmi *hdmi)
{
drm_bridge_remove(&hdmi->bridge);
}
/* -----------------------------------------------------------------------------
* Audio Callbacks
*/
static int hdmi_audio_startup(struct device *dev,
void (*abort_cb)(struct device *dev))
{
struct omap_hdmi *hd = dev_get_drvdata(dev);
mutex_lock(&hd->lock);
WARN_ON(hd->audio_abort_cb != NULL);
hd->audio_abort_cb = abort_cb;
mutex_unlock(&hd->lock);
return 0;
}
static int hdmi_audio_shutdown(struct device *dev)
{
struct omap_hdmi *hd = dev_get_drvdata(dev);
mutex_lock(&hd->lock);
hd->audio_abort_cb = NULL;
hd->audio_configured = false;
hd->audio_playing = false;
mutex_unlock(&hd->lock);
return 0;
}
static int hdmi_audio_start(struct device *dev)
{
struct omap_hdmi *hd = dev_get_drvdata(dev);
unsigned long flags;
spin_lock_irqsave(&hd->audio_playing_lock, flags);
if (hd->display_enabled) {
if (!hdmi_mode_has_audio(&hd->cfg))
DSSERR("%s: Video mode does not support audio\n",
__func__);
hdmi_start_audio_stream(hd);
}
hd->audio_playing = true;
spin_unlock_irqrestore(&hd->audio_playing_lock, flags);
return 0;
}
static void hdmi_audio_stop(struct device *dev)
{
struct omap_hdmi *hd = dev_get_drvdata(dev);
unsigned long flags;
if (!hdmi_mode_has_audio(&hd->cfg))
DSSERR("%s: Video mode does not support audio\n", __func__);
spin_lock_irqsave(&hd->audio_playing_lock, flags);
if (hd->display_enabled)
hdmi_stop_audio_stream(hd);
hd->audio_playing = false;
spin_unlock_irqrestore(&hd->audio_playing_lock, flags);
}
static int hdmi_audio_config(struct device *dev,
struct omap_dss_audio *dss_audio)
{
struct omap_hdmi *hd = dev_get_drvdata(dev);
int ret = 0;
mutex_lock(&hd->lock);
if (hd->display_enabled) {
ret = hdmi5_audio_config(&hd->core, &hd->wp, dss_audio,
hd->cfg.vm.pixelclock);
if (ret)
goto out;
}
hd->audio_configured = true;
hd->audio_config = *dss_audio;
out:
mutex_unlock(&hd->lock);
return ret;
}
static const struct omap_hdmi_audio_ops hdmi_audio_ops = {
.audio_startup = hdmi_audio_startup,
.audio_shutdown = hdmi_audio_shutdown,
.audio_start = hdmi_audio_start,
.audio_stop = hdmi_audio_stop,
.audio_config = hdmi_audio_config,
};
static int hdmi_audio_register(struct omap_hdmi *hdmi)
{
struct omap_hdmi_audio_pdata pdata = {
.dev = &hdmi->pdev->dev,
.version = 5,
.audio_dma_addr = hdmi_wp_get_audio_dma_addr(&hdmi->wp),
.ops = &hdmi_audio_ops,
};
hdmi->audio_pdev = platform_device_register_data(
&hdmi->pdev->dev, "omap-hdmi-audio", PLATFORM_DEVID_AUTO,
&pdata, sizeof(pdata));
if (IS_ERR(hdmi->audio_pdev))
return PTR_ERR(hdmi->audio_pdev);
hdmi_runtime_get(hdmi);
hdmi->wp_idlemode =
REG_GET(hdmi->wp.base, HDMI_WP_SYSCONFIG, 3, 2);
hdmi_runtime_put(hdmi);
return 0;
}
/* -----------------------------------------------------------------------------
* Component Bind & Unbind
*/
static int hdmi5_bind(struct device *dev, struct device *master, void *data)
{
struct dss_device *dss = dss_get_device(master);
struct omap_hdmi *hdmi = dev_get_drvdata(dev);
int r;
hdmi->dss = dss;
r = hdmi_pll_init(dss, hdmi->pdev, &hdmi->pll, &hdmi->wp);
if (r)
return r;
r = hdmi_audio_register(hdmi);
if (r) {
DSSERR("Registering HDMI audio failed %d\n", r);
goto err_pll_uninit;
}
hdmi->debugfs = dss_debugfs_create_file(dss, "hdmi", hdmi_dump_regs,
hdmi);
return 0;
err_pll_uninit:
hdmi_pll_uninit(&hdmi->pll);
return r;
}
static void hdmi5_unbind(struct device *dev, struct device *master, void *data)
{
struct omap_hdmi *hdmi = dev_get_drvdata(dev);
dss_debugfs_remove_file(hdmi->debugfs);
if (hdmi->audio_pdev)
platform_device_unregister(hdmi->audio_pdev);
hdmi_pll_uninit(&hdmi->pll);
}
static const struct component_ops hdmi5_component_ops = {
.bind = hdmi5_bind,
.unbind = hdmi5_unbind,
};
/* -----------------------------------------------------------------------------
* Probe & Remove, Suspend & Resume
*/
static int hdmi5_init_output(struct omap_hdmi *hdmi)
{
struct omap_dss_device *out = &hdmi->output;
int r;
hdmi5_bridge_init(hdmi);
out->dev = &hdmi->pdev->dev;
out->id = OMAP_DSS_OUTPUT_HDMI;
out->type = OMAP_DISPLAY_TYPE_HDMI;
out->name = "hdmi.0";
out->dispc_channel = OMAP_DSS_CHANNEL_DIGIT;
out->of_port = 0;
r = omapdss_device_init_output(out, &hdmi->bridge);
if (r < 0) {
hdmi5_bridge_cleanup(hdmi);
return r;
}
omapdss_device_register(out);
return 0;
}
static void hdmi5_uninit_output(struct omap_hdmi *hdmi)
{
struct omap_dss_device *out = &hdmi->output;
omapdss_device_unregister(out);
omapdss_device_cleanup_output(out);
hdmi5_bridge_cleanup(hdmi);
}
static int hdmi5_probe_of(struct omap_hdmi *hdmi)
{
struct platform_device *pdev = hdmi->pdev;
struct device_node *node = pdev->dev.of_node;
struct device_node *ep;
int r;
ep = of_graph_get_endpoint_by_regs(node, 0, 0);
if (!ep)
return 0;
r = hdmi_parse_lanes_of(pdev, ep, &hdmi->phy);
of_node_put(ep);
return r;
}
static int hdmi5_probe(struct platform_device *pdev)
{
struct omap_hdmi *hdmi;
int irq;
int r;
hdmi = kzalloc(sizeof(*hdmi), GFP_KERNEL);
if (!hdmi)
return -ENOMEM;
hdmi->pdev = pdev;
dev_set_drvdata(&pdev->dev, hdmi);
mutex_init(&hdmi->lock);
spin_lock_init(&hdmi->audio_playing_lock);
r = hdmi5_probe_of(hdmi);
if (r)
goto err_free;
r = hdmi_wp_init(pdev, &hdmi->wp, 5);
if (r)
goto err_free;
r = hdmi_phy_init(pdev, &hdmi->phy, 5);
if (r)
goto err_free;
r = hdmi5_core_init(pdev, &hdmi->core);
if (r)
goto err_free;
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
DSSERR("platform_get_irq failed\n");
r = -ENODEV;
goto err_free;
}
r = devm_request_threaded_irq(&pdev->dev, irq,
NULL, hdmi_irq_handler,
IRQF_ONESHOT, "OMAP HDMI", hdmi);
if (r) {
DSSERR("HDMI IRQ request failed\n");
goto err_free;
}
hdmi->vdda_reg = devm_regulator_get(&pdev->dev, "vdda");
if (IS_ERR(hdmi->vdda_reg)) {
r = PTR_ERR(hdmi->vdda_reg);
if (r != -EPROBE_DEFER)
DSSERR("can't get VDDA regulator\n");
goto err_free;
}
pm_runtime_enable(&pdev->dev);
r = hdmi5_init_output(hdmi);
if (r)
goto err_pm_disable;
r = component_add(&pdev->dev, &hdmi5_component_ops);
if (r)
goto err_uninit_output;
return 0;
err_uninit_output:
hdmi5_uninit_output(hdmi);
err_pm_disable:
pm_runtime_disable(&pdev->dev);
err_free:
kfree(hdmi);
return r;
}
static void hdmi5_remove(struct platform_device *pdev)
{
struct omap_hdmi *hdmi = platform_get_drvdata(pdev);
component_del(&pdev->dev, &hdmi5_component_ops);
hdmi5_uninit_output(hdmi);
pm_runtime_disable(&pdev->dev);
kfree(hdmi);
}
static const struct of_device_id hdmi_of_match[] = {
{ .compatible = "ti,omap5-hdmi", },
{ .compatible = "ti,dra7-hdmi", },
{},
};
struct platform_driver omapdss_hdmi5hw_driver = {
.probe = hdmi5_probe,
.remove_new = hdmi5_remove,
.driver = {
.name = "omapdss_hdmi5",
.of_match_table = hdmi_of_match,
.suppress_bind_attrs = true,
},
};
| linux-master | drivers/gpu/drm/omapdrm/dss/hdmi5.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* HDMI PLL
*
* Copyright (C) 2013 Texas Instruments Incorporated - https://www.ti.com/
*/
#define DSS_SUBSYS_NAME "HDMIPLL"
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/seq_file.h>
#include <linux/pm_runtime.h>
#include "omapdss.h"
#include "dss.h"
#include "hdmi.h"
void hdmi_pll_dump(struct hdmi_pll_data *pll, struct seq_file *s)
{
#define DUMPPLL(r) seq_printf(s, "%-35s %08x\n", #r,\
hdmi_read_reg(pll->base, r))
DUMPPLL(PLLCTRL_PLL_CONTROL);
DUMPPLL(PLLCTRL_PLL_STATUS);
DUMPPLL(PLLCTRL_PLL_GO);
DUMPPLL(PLLCTRL_CFG1);
DUMPPLL(PLLCTRL_CFG2);
DUMPPLL(PLLCTRL_CFG3);
DUMPPLL(PLLCTRL_SSC_CFG1);
DUMPPLL(PLLCTRL_SSC_CFG2);
DUMPPLL(PLLCTRL_CFG4);
}
static int hdmi_pll_enable(struct dss_pll *dsspll)
{
struct hdmi_pll_data *pll = container_of(dsspll, struct hdmi_pll_data, pll);
struct hdmi_wp_data *wp = pll->wp;
int r;
r = pm_runtime_get_sync(&pll->pdev->dev);
WARN_ON(r < 0);
dss_ctrl_pll_enable(dsspll, true);
r = hdmi_wp_set_pll_pwr(wp, HDMI_PLLPWRCMD_BOTHON_ALLCLKS);
if (r)
return r;
return 0;
}
static void hdmi_pll_disable(struct dss_pll *dsspll)
{
struct hdmi_pll_data *pll = container_of(dsspll, struct hdmi_pll_data, pll);
struct hdmi_wp_data *wp = pll->wp;
int r;
hdmi_wp_set_pll_pwr(wp, HDMI_PLLPWRCMD_ALLOFF);
dss_ctrl_pll_enable(dsspll, false);
r = pm_runtime_put_sync(&pll->pdev->dev);
WARN_ON(r < 0 && r != -ENOSYS);
}
static const struct dss_pll_ops hdmi_pll_ops = {
.enable = hdmi_pll_enable,
.disable = hdmi_pll_disable,
.set_config = dss_pll_write_config_type_b,
};
static const struct dss_pll_hw dss_omap4_hdmi_pll_hw = {
.type = DSS_PLL_TYPE_B,
.n_max = 255,
.m_min = 20,
.m_max = 4095,
.mX_max = 127,
.fint_min = 500000,
.fint_max = 2500000,
.clkdco_min = 500000000,
.clkdco_low = 1000000000,
.clkdco_max = 2000000000,
.n_msb = 8,
.n_lsb = 1,
.m_msb = 20,
.m_lsb = 9,
.mX_msb[0] = 24,
.mX_lsb[0] = 18,
.has_selfreqdco = true,
};
static const struct dss_pll_hw dss_omap5_hdmi_pll_hw = {
.type = DSS_PLL_TYPE_B,
.n_max = 255,
.m_min = 20,
.m_max = 2045,
.mX_max = 127,
.fint_min = 620000,
.fint_max = 2500000,
.clkdco_min = 750000000,
.clkdco_low = 1500000000,
.clkdco_max = 2500000000UL,
.n_msb = 8,
.n_lsb = 1,
.m_msb = 20,
.m_lsb = 9,
.mX_msb[0] = 24,
.mX_lsb[0] = 18,
.has_selfreqdco = true,
.has_refsel = true,
};
static int hdmi_init_pll_data(struct dss_device *dss,
struct platform_device *pdev,
struct hdmi_pll_data *hpll)
{
struct dss_pll *pll = &hpll->pll;
struct clk *clk;
int r;
clk = devm_clk_get(&pdev->dev, "sys_clk");
if (IS_ERR(clk)) {
DSSERR("can't get sys_clk\n");
return PTR_ERR(clk);
}
pll->name = "hdmi";
pll->id = DSS_PLL_HDMI;
pll->base = hpll->base;
pll->clkin = clk;
if (hpll->wp->version == 4)
pll->hw = &dss_omap4_hdmi_pll_hw;
else
pll->hw = &dss_omap5_hdmi_pll_hw;
pll->ops = &hdmi_pll_ops;
r = dss_pll_register(dss, pll);
if (r)
return r;
return 0;
}
int hdmi_pll_init(struct dss_device *dss, struct platform_device *pdev,
struct hdmi_pll_data *pll, struct hdmi_wp_data *wp)
{
int r;
pll->pdev = pdev;
pll->wp = wp;
pll->base = devm_platform_ioremap_resource_byname(pdev, "pll");
if (IS_ERR(pll->base))
return PTR_ERR(pll->base);
r = hdmi_init_pll_data(dss, pdev, pll);
if (r) {
DSSERR("failed to init HDMI PLL\n");
return r;
}
return 0;
}
void hdmi_pll_uninit(struct hdmi_pll_data *hpll)
{
struct dss_pll *pll = &hpll->pll;
dss_pll_unregister(pll);
}
| linux-master | drivers/gpu/drm/omapdrm/dss/hdmi_pll.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2009 Nokia Corporation
* Author: Tomi Valkeinen <[email protected]>
*
* Some code and ideas taken from drivers/video/omap/ driver
* by Imre Deak.
*/
#define DSS_SUBSYS_NAME "DPI"
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/string.h>
#include <linux/sys_soc.h>
#include <drm/drm_bridge.h>
#include "dss.h"
#include "omapdss.h"
struct dpi_data {
struct platform_device *pdev;
enum dss_model dss_model;
struct dss_device *dss;
unsigned int id;
struct regulator *vdds_dsi_reg;
enum dss_clk_source clk_src;
struct dss_pll *pll;
struct dss_lcd_mgr_config mgr_config;
unsigned long pixelclock;
int data_lines;
struct omap_dss_device output;
struct drm_bridge bridge;
};
#define drm_bridge_to_dpi(bridge) container_of(bridge, struct dpi_data, bridge)
/* -----------------------------------------------------------------------------
* Clock Handling and PLL
*/
static enum dss_clk_source dpi_get_clk_src_dra7xx(struct dpi_data *dpi,
enum omap_channel channel)
{
/*
* Possible clock sources:
* LCD1: FCK/PLL1_1/HDMI_PLL
* LCD2: FCK/PLL1_3/HDMI_PLL (DRA74x: PLL2_3)
* LCD3: FCK/PLL1_3/HDMI_PLL (DRA74x: PLL2_1)
*/
switch (channel) {
case OMAP_DSS_CHANNEL_LCD:
{
if (dss_pll_find_by_src(dpi->dss, DSS_CLK_SRC_PLL1_1))
return DSS_CLK_SRC_PLL1_1;
break;
}
case OMAP_DSS_CHANNEL_LCD2:
{
if (dss_pll_find_by_src(dpi->dss, DSS_CLK_SRC_PLL1_3))
return DSS_CLK_SRC_PLL1_3;
if (dss_pll_find_by_src(dpi->dss, DSS_CLK_SRC_PLL2_3))
return DSS_CLK_SRC_PLL2_3;
break;
}
case OMAP_DSS_CHANNEL_LCD3:
{
if (dss_pll_find_by_src(dpi->dss, DSS_CLK_SRC_PLL2_1))
return DSS_CLK_SRC_PLL2_1;
if (dss_pll_find_by_src(dpi->dss, DSS_CLK_SRC_PLL1_3))
return DSS_CLK_SRC_PLL1_3;
break;
}
default:
break;
}
return DSS_CLK_SRC_FCK;
}
static enum dss_clk_source dpi_get_clk_src(struct dpi_data *dpi)
{
enum omap_channel channel = dpi->output.dispc_channel;
/*
* XXX we can't currently use DSI PLL for DPI with OMAP3, as the DSI PLL
* would also be used for DISPC fclk. Meaning, when the DPI output is
* disabled, DISPC clock will be disabled, and TV out will stop.
*/
switch (dpi->dss_model) {
case DSS_MODEL_OMAP2:
case DSS_MODEL_OMAP3:
return DSS_CLK_SRC_FCK;
case DSS_MODEL_OMAP4:
switch (channel) {
case OMAP_DSS_CHANNEL_LCD:
return DSS_CLK_SRC_PLL1_1;
case OMAP_DSS_CHANNEL_LCD2:
return DSS_CLK_SRC_PLL2_1;
default:
return DSS_CLK_SRC_FCK;
}
case DSS_MODEL_OMAP5:
switch (channel) {
case OMAP_DSS_CHANNEL_LCD:
return DSS_CLK_SRC_PLL1_1;
case OMAP_DSS_CHANNEL_LCD3:
return DSS_CLK_SRC_PLL2_1;
case OMAP_DSS_CHANNEL_LCD2:
default:
return DSS_CLK_SRC_FCK;
}
case DSS_MODEL_DRA7:
return dpi_get_clk_src_dra7xx(dpi, channel);
default:
return DSS_CLK_SRC_FCK;
}
}
struct dpi_clk_calc_ctx {
struct dpi_data *dpi;
unsigned int clkout_idx;
/* inputs */
unsigned long pck_min, pck_max;
/* outputs */
struct dss_pll_clock_info pll_cinfo;
unsigned long fck;
struct dispc_clock_info dispc_cinfo;
};
static bool dpi_calc_dispc_cb(int lckd, int pckd, unsigned long lck,
unsigned long pck, void *data)
{
struct dpi_clk_calc_ctx *ctx = data;
/*
* Odd dividers give us uneven duty cycle, causing problem when level
* shifted. So skip all odd dividers when the pixel clock is on the
* higher side.
*/
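/*
 * (e.g. a divide-by-3 gives roughly a 33/67 % high/low ratio on the
 *  pixel clock - an illustrative reading of the note above, not a
 *  documented limit.)
 */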
if (ctx->pck_min >= 100000000) {
if (lckd > 1 && lckd % 2 != 0)
return false;
if (pckd > 1 && pckd % 2 != 0)
return false;
}
ctx->dispc_cinfo.lck_div = lckd;
ctx->dispc_cinfo.pck_div = pckd;
ctx->dispc_cinfo.lck = lck;
ctx->dispc_cinfo.pck = pck;
return true;
}
static bool dpi_calc_hsdiv_cb(int m_dispc, unsigned long dispc,
void *data)
{
struct dpi_clk_calc_ctx *ctx = data;
ctx->pll_cinfo.mX[ctx->clkout_idx] = m_dispc;
ctx->pll_cinfo.clkout[ctx->clkout_idx] = dispc;
return dispc_div_calc(ctx->dpi->dss->dispc, dispc,
ctx->pck_min, ctx->pck_max,
dpi_calc_dispc_cb, ctx);
}
static bool dpi_calc_pll_cb(int n, int m, unsigned long fint,
unsigned long clkdco,
void *data)
{
struct dpi_clk_calc_ctx *ctx = data;
ctx->pll_cinfo.n = n;
ctx->pll_cinfo.m = m;
ctx->pll_cinfo.fint = fint;
ctx->pll_cinfo.clkdco = clkdco;
return dss_pll_hsdiv_calc_a(ctx->dpi->pll, clkdco,
ctx->pck_min, dss_get_max_fck_rate(ctx->dpi->dss),
dpi_calc_hsdiv_cb, ctx);
}
static bool dpi_calc_dss_cb(unsigned long fck, void *data)
{
struct dpi_clk_calc_ctx *ctx = data;
ctx->fck = fck;
return dispc_div_calc(ctx->dpi->dss->dispc, fck,
ctx->pck_min, ctx->pck_max,
dpi_calc_dispc_cb, ctx);
}
static bool dpi_pll_clk_calc(struct dpi_data *dpi, unsigned long pck,
struct dpi_clk_calc_ctx *ctx)
{
unsigned long clkin;
memset(ctx, 0, sizeof(*ctx));
ctx->dpi = dpi;
ctx->clkout_idx = dss_pll_get_clkout_idx_for_src(dpi->clk_src);
clkin = clk_get_rate(dpi->pll->clkin);
if (dpi->pll->hw->type == DSS_PLL_TYPE_A) {
unsigned long pll_min, pll_max;
ctx->pck_min = pck - 1000;
ctx->pck_max = pck + 1000;
pll_min = 0;
pll_max = 0;
return dss_pll_calc_a(ctx->dpi->pll, clkin,
pll_min, pll_max,
dpi_calc_pll_cb, ctx);
} else { /* DSS_PLL_TYPE_B */
dss_pll_calc_b(dpi->pll, clkin, pck, &ctx->pll_cinfo);
ctx->dispc_cinfo.lck_div = 1;
ctx->dispc_cinfo.pck_div = 1;
ctx->dispc_cinfo.lck = ctx->pll_cinfo.clkout[0];
ctx->dispc_cinfo.pck = ctx->dispc_cinfo.lck;
return true;
}
}
static bool dpi_dss_clk_calc(struct dpi_data *dpi, unsigned long pck,
struct dpi_clk_calc_ctx *ctx)
{
int i;
/*
* DSS fck gives us very few possibilities, so finding a good pixel
* clock may not be possible. We try multiple times to find the clock,
* each time widening the pixel clock range we look for, up to
* +/- ~15MHz.
*/
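/*
 * The window below grows as 1000 * i^3 Hz: +/- 1 kHz at i = 1,
 * +/- 1 MHz at i = 10 and about +/- 13.8 MHz at i = 24, which is
 * where the "~15MHz" figure above comes from.
 */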
for (i = 0; i < 25; ++i) {
bool ok;
memset(ctx, 0, sizeof(*ctx));
ctx->dpi = dpi;
if (pck > 1000 * i * i * i)
ctx->pck_min = max(pck - 1000 * i * i * i, 0lu);
else
ctx->pck_min = 0;
ctx->pck_max = pck + 1000 * i * i * i;
ok = dss_div_calc(dpi->dss, pck, ctx->pck_min,
dpi_calc_dss_cb, ctx);
if (ok)
return ok;
}
return false;
}
static int dpi_set_pll_clk(struct dpi_data *dpi, unsigned long pck_req)
{
struct dpi_clk_calc_ctx ctx;
int r;
bool ok;
ok = dpi_pll_clk_calc(dpi, pck_req, &ctx);
if (!ok)
return -EINVAL;
r = dss_pll_set_config(dpi->pll, &ctx.pll_cinfo);
if (r)
return r;
dss_select_lcd_clk_source(dpi->dss, dpi->output.dispc_channel,
dpi->clk_src);
dpi->mgr_config.clock_info = ctx.dispc_cinfo;
return 0;
}
static int dpi_set_dispc_clk(struct dpi_data *dpi, unsigned long pck_req)
{
struct dpi_clk_calc_ctx ctx;
int r;
bool ok;
ok = dpi_dss_clk_calc(dpi, pck_req, &ctx);
if (!ok)
return -EINVAL;
r = dss_set_fck_rate(dpi->dss, ctx.fck);
if (r)
return r;
dpi->mgr_config.clock_info = ctx.dispc_cinfo;
return 0;
}
static int dpi_set_mode(struct dpi_data *dpi)
{
int r;
if (dpi->pll)
r = dpi_set_pll_clk(dpi, dpi->pixelclock);
else
r = dpi_set_dispc_clk(dpi, dpi->pixelclock);
return r;
}
static void dpi_config_lcd_manager(struct dpi_data *dpi)
{
dpi->mgr_config.io_pad_mode = DSS_IO_PAD_MODE_BYPASS;
dpi->mgr_config.stallmode = false;
dpi->mgr_config.fifohandcheck = false;
dpi->mgr_config.video_port_width = dpi->data_lines;
dpi->mgr_config.lcden_sig_polarity = 0;
dss_mgr_set_lcd_config(&dpi->output, &dpi->mgr_config);
}
static int dpi_clock_update(struct dpi_data *dpi, unsigned long *clock)
{
int lck_div, pck_div;
unsigned long fck;
struct dpi_clk_calc_ctx ctx;
if (dpi->pll) {
if (!dpi_pll_clk_calc(dpi, *clock, &ctx))
return -EINVAL;
fck = ctx.pll_cinfo.clkout[ctx.clkout_idx];
} else {
if (!dpi_dss_clk_calc(dpi, *clock, &ctx))
return -EINVAL;
fck = ctx.fck;
}
lck_div = ctx.dispc_cinfo.lck_div;
pck_div = ctx.dispc_cinfo.pck_div;
*clock = fck / lck_div / pck_div;
return 0;
}
static int dpi_verify_pll(struct dss_pll *pll)
{
int r;
/* do initial setup with the PLL to see if it is operational */
r = dss_pll_enable(pll);
if (r)
return r;
dss_pll_disable(pll);
return 0;
}
static void dpi_init_pll(struct dpi_data *dpi)
{
struct dss_pll *pll;
if (dpi->pll)
return;
dpi->clk_src = dpi_get_clk_src(dpi);
pll = dss_pll_find_by_src(dpi->dss, dpi->clk_src);
if (!pll)
return;
if (dpi_verify_pll(pll)) {
DSSWARN("PLL not operational\n");
return;
}
dpi->pll = pll;
}
/* -----------------------------------------------------------------------------
* DRM Bridge Operations
*/
static int dpi_bridge_attach(struct drm_bridge *bridge,
enum drm_bridge_attach_flags flags)
{
struct dpi_data *dpi = drm_bridge_to_dpi(bridge);
if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
return -EINVAL;
dpi_init_pll(dpi);
return drm_bridge_attach(bridge->encoder, dpi->output.next_bridge,
bridge, flags);
}
static enum drm_mode_status
dpi_bridge_mode_valid(struct drm_bridge *bridge,
const struct drm_display_info *info,
const struct drm_display_mode *mode)
{
struct dpi_data *dpi = drm_bridge_to_dpi(bridge);
unsigned long clock = mode->clock * 1000;
int ret;
if (mode->hdisplay % 8 != 0)
return MODE_BAD_WIDTH;
if (mode->clock == 0)
return MODE_NOCLOCK;
ret = dpi_clock_update(dpi, &clock);
if (ret < 0)
return MODE_CLOCK_RANGE;
return MODE_OK;
}
static bool dpi_bridge_mode_fixup(struct drm_bridge *bridge,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct dpi_data *dpi = drm_bridge_to_dpi(bridge);
unsigned long clock = mode->clock * 1000;
int ret;
ret = dpi_clock_update(dpi, &clock);
if (ret < 0)
return false;
adjusted_mode->clock = clock / 1000;
return true;
}
static void dpi_bridge_mode_set(struct drm_bridge *bridge,
const struct drm_display_mode *mode,
const struct drm_display_mode *adjusted_mode)
{
struct dpi_data *dpi = drm_bridge_to_dpi(bridge);
dpi->pixelclock = adjusted_mode->clock * 1000;
}
static void dpi_bridge_enable(struct drm_bridge *bridge)
{
struct dpi_data *dpi = drm_bridge_to_dpi(bridge);
int r;
if (dpi->vdds_dsi_reg) {
r = regulator_enable(dpi->vdds_dsi_reg);
if (r)
return;
}
r = dispc_runtime_get(dpi->dss->dispc);
if (r)
goto err_get_dispc;
r = dss_dpi_select_source(dpi->dss, dpi->id, dpi->output.dispc_channel);
if (r)
goto err_src_sel;
if (dpi->pll) {
r = dss_pll_enable(dpi->pll);
if (r)
goto err_pll_init;
}
r = dpi_set_mode(dpi);
if (r)
goto err_set_mode;
dpi_config_lcd_manager(dpi);
mdelay(2);
r = dss_mgr_enable(&dpi->output);
if (r)
goto err_mgr_enable;
return;
err_mgr_enable:
err_set_mode:
if (dpi->pll)
dss_pll_disable(dpi->pll);
err_pll_init:
err_src_sel:
dispc_runtime_put(dpi->dss->dispc);
err_get_dispc:
if (dpi->vdds_dsi_reg)
regulator_disable(dpi->vdds_dsi_reg);
}
static void dpi_bridge_disable(struct drm_bridge *bridge)
{
struct dpi_data *dpi = drm_bridge_to_dpi(bridge);
dss_mgr_disable(&dpi->output);
if (dpi->pll) {
dss_select_lcd_clk_source(dpi->dss, dpi->output.dispc_channel,
DSS_CLK_SRC_FCK);
dss_pll_disable(dpi->pll);
}
dispc_runtime_put(dpi->dss->dispc);
if (dpi->vdds_dsi_reg)
regulator_disable(dpi->vdds_dsi_reg);
}
static const struct drm_bridge_funcs dpi_bridge_funcs = {
.attach = dpi_bridge_attach,
.mode_valid = dpi_bridge_mode_valid,
.mode_fixup = dpi_bridge_mode_fixup,
.mode_set = dpi_bridge_mode_set,
.enable = dpi_bridge_enable,
.disable = dpi_bridge_disable,
};
static void dpi_bridge_init(struct dpi_data *dpi)
{
dpi->bridge.funcs = &dpi_bridge_funcs;
dpi->bridge.of_node = dpi->pdev->dev.of_node;
dpi->bridge.type = DRM_MODE_CONNECTOR_DPI;
drm_bridge_add(&dpi->bridge);
}
static void dpi_bridge_cleanup(struct dpi_data *dpi)
{
drm_bridge_remove(&dpi->bridge);
}
/* -----------------------------------------------------------------------------
* Initialisation and Cleanup
*/
/*
* Return a hardcoded channel for the DPI output. This should work for
* current use cases, but this can be later expanded to either resolve
* the channel in some more dynamic manner, or get the channel as a user
* parameter.
*/
static enum omap_channel dpi_get_channel(struct dpi_data *dpi)
{
switch (dpi->dss_model) {
case DSS_MODEL_OMAP2:
case DSS_MODEL_OMAP3:
return OMAP_DSS_CHANNEL_LCD;
case DSS_MODEL_DRA7:
switch (dpi->id) {
case 2:
return OMAP_DSS_CHANNEL_LCD3;
case 1:
return OMAP_DSS_CHANNEL_LCD2;
case 0:
default:
return OMAP_DSS_CHANNEL_LCD;
}
case DSS_MODEL_OMAP4:
return OMAP_DSS_CHANNEL_LCD2;
case DSS_MODEL_OMAP5:
return OMAP_DSS_CHANNEL_LCD3;
default:
DSSWARN("unsupported DSS version\n");
return OMAP_DSS_CHANNEL_LCD;
}
}
static int dpi_init_output_port(struct dpi_data *dpi, struct device_node *port)
{
struct omap_dss_device *out = &dpi->output;
u32 port_num = 0;
int r;
dpi_bridge_init(dpi);
of_property_read_u32(port, "reg", &port_num);
dpi->id = port_num <= 2 ? port_num : 0;
switch (port_num) {
case 2:
out->name = "dpi.2";
break;
case 1:
out->name = "dpi.1";
break;
case 0:
default:
out->name = "dpi.0";
break;
}
out->dev = &dpi->pdev->dev;
out->id = OMAP_DSS_OUTPUT_DPI;
out->type = OMAP_DISPLAY_TYPE_DPI;
out->dispc_channel = dpi_get_channel(dpi);
out->of_port = port_num;
r = omapdss_device_init_output(out, &dpi->bridge);
if (r < 0) {
dpi_bridge_cleanup(dpi);
return r;
}
omapdss_device_register(out);
return 0;
}
static void dpi_uninit_output_port(struct device_node *port)
{
struct dpi_data *dpi = port->data;
struct omap_dss_device *out = &dpi->output;
omapdss_device_unregister(out);
omapdss_device_cleanup_output(out);
dpi_bridge_cleanup(dpi);
}
/* -----------------------------------------------------------------------------
* Initialisation and Cleanup
*/
static const struct soc_device_attribute dpi_soc_devices[] = {
{ .machine = "OMAP3[456]*" },
{ .machine = "[AD]M37*" },
{ /* sentinel */ }
};
static int dpi_init_regulator(struct dpi_data *dpi)
{
struct regulator *vdds_dsi;
/*
* The DPI uses the DSI VDDS on OMAP34xx, OMAP35xx, OMAP36xx, AM37xx and
* DM37xx only.
*/
if (!soc_device_match(dpi_soc_devices))
return 0;
vdds_dsi = devm_regulator_get(&dpi->pdev->dev, "vdds_dsi");
if (IS_ERR(vdds_dsi)) {
if (PTR_ERR(vdds_dsi) != -EPROBE_DEFER)
DSSERR("can't get VDDS_DSI regulator\n");
return PTR_ERR(vdds_dsi);
}
dpi->vdds_dsi_reg = vdds_dsi;
return 0;
}
int dpi_init_port(struct dss_device *dss, struct platform_device *pdev,
struct device_node *port, enum dss_model dss_model)
{
struct dpi_data *dpi;
struct device_node *ep;
u32 datalines;
int r;
dpi = devm_kzalloc(&pdev->dev, sizeof(*dpi), GFP_KERNEL);
if (!dpi)
return -ENOMEM;
ep = of_get_next_child(port, NULL);
if (!ep)
return 0;
r = of_property_read_u32(ep, "data-lines", &datalines);
of_node_put(ep);
if (r) {
DSSERR("failed to parse datalines\n");
return r;
}
dpi->data_lines = datalines;
dpi->pdev = pdev;
dpi->dss_model = dss_model;
dpi->dss = dss;
port->data = dpi;
r = dpi_init_regulator(dpi);
if (r)
return r;
return dpi_init_output_port(dpi, port);
}
void dpi_uninit_port(struct device_node *port)
{
struct dpi_data *dpi = port->data;
if (!dpi)
return;
dpi_uninit_output_port(port);
}
| linux-master | drivers/gpu/drm/omapdrm/dss/dpi.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* HDMI wrapper
*
* Copyright (C) 2013 Texas Instruments Incorporated - https://www.ti.com/
*/
#define DSS_SUBSYS_NAME "HDMIWP"
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/seq_file.h>
#include "omapdss.h"
#include "dss.h"
#include "hdmi.h"
void hdmi_wp_dump(struct hdmi_wp_data *wp, struct seq_file *s)
{
#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, hdmi_read_reg(wp->base, r))
DUMPREG(HDMI_WP_REVISION);
DUMPREG(HDMI_WP_SYSCONFIG);
DUMPREG(HDMI_WP_IRQSTATUS_RAW);
DUMPREG(HDMI_WP_IRQSTATUS);
DUMPREG(HDMI_WP_IRQENABLE_SET);
DUMPREG(HDMI_WP_IRQENABLE_CLR);
DUMPREG(HDMI_WP_IRQWAKEEN);
DUMPREG(HDMI_WP_PWR_CTRL);
DUMPREG(HDMI_WP_DEBOUNCE);
DUMPREG(HDMI_WP_VIDEO_CFG);
DUMPREG(HDMI_WP_VIDEO_SIZE);
DUMPREG(HDMI_WP_VIDEO_TIMING_H);
DUMPREG(HDMI_WP_VIDEO_TIMING_V);
DUMPREG(HDMI_WP_CLK);
DUMPREG(HDMI_WP_AUDIO_CFG);
DUMPREG(HDMI_WP_AUDIO_CFG2);
DUMPREG(HDMI_WP_AUDIO_CTRL);
DUMPREG(HDMI_WP_AUDIO_DATA);
}
u32 hdmi_wp_get_irqstatus(struct hdmi_wp_data *wp)
{
return hdmi_read_reg(wp->base, HDMI_WP_IRQSTATUS);
}
void hdmi_wp_set_irqstatus(struct hdmi_wp_data *wp, u32 irqstatus)
{
hdmi_write_reg(wp->base, HDMI_WP_IRQSTATUS, irqstatus);
/* flush posted write */
hdmi_read_reg(wp->base, HDMI_WP_IRQSTATUS);
}
void hdmi_wp_set_irqenable(struct hdmi_wp_data *wp, u32 mask)
{
hdmi_write_reg(wp->base, HDMI_WP_IRQENABLE_SET, mask);
}
void hdmi_wp_clear_irqenable(struct hdmi_wp_data *wp, u32 mask)
{
hdmi_write_reg(wp->base, HDMI_WP_IRQENABLE_CLR, mask);
}
/* PHY_PWR_CMD */
int hdmi_wp_set_phy_pwr(struct hdmi_wp_data *wp, enum hdmi_phy_pwr val)
{
/* Return if already the state */
if (REG_GET(wp->base, HDMI_WP_PWR_CTRL, 5, 4) == val)
return 0;
/* Command for power control of HDMI PHY */
REG_FLD_MOD(wp->base, HDMI_WP_PWR_CTRL, val, 7, 6);
/* Status of the power control of HDMI PHY */
if (hdmi_wait_for_bit_change(wp->base, HDMI_WP_PWR_CTRL, 5, 4, val)
!= val) {
DSSERR("Failed to set PHY power mode to %d\n", val);
return -ETIMEDOUT;
}
return 0;
}
/* PLL_PWR_CMD */
int hdmi_wp_set_pll_pwr(struct hdmi_wp_data *wp, enum hdmi_pll_pwr val)
{
/* Command for power control of HDMI PLL */
REG_FLD_MOD(wp->base, HDMI_WP_PWR_CTRL, val, 3, 2);
	/* Wait until PLL_PWR_STATUS reflects the requested state */
if (hdmi_wait_for_bit_change(wp->base, HDMI_WP_PWR_CTRL, 1, 0, val)
!= val) {
DSSERR("Failed to set PLL_PWR_STATUS\n");
return -ETIMEDOUT;
}
return 0;
}
int hdmi_wp_video_start(struct hdmi_wp_data *wp)
{
REG_FLD_MOD(wp->base, HDMI_WP_VIDEO_CFG, true, 31, 31);
return 0;
}
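/*
 * Disable the video output and wait for the FRAMEDONE interrupt by polling
 * the raw IRQ status for up to one second (50 x 20 ms), so that the last
 * frame can complete before the wrapper is shut down.
 */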
void hdmi_wp_video_stop(struct hdmi_wp_data *wp)
{
int i;
hdmi_write_reg(wp->base, HDMI_WP_IRQSTATUS, HDMI_IRQ_VIDEO_FRAME_DONE);
REG_FLD_MOD(wp->base, HDMI_WP_VIDEO_CFG, false, 31, 31);
for (i = 0; i < 50; ++i) {
u32 v;
msleep(20);
v = hdmi_read_reg(wp->base, HDMI_WP_IRQSTATUS_RAW);
if (v & HDMI_IRQ_VIDEO_FRAME_DONE)
return;
}
DSSERR("no HDMI FRAMEDONE when disabling output\n");
}
void hdmi_wp_video_config_format(struct hdmi_wp_data *wp,
const struct hdmi_video_format *video_fmt)
{
u32 l = 0;
REG_FLD_MOD(wp->base, HDMI_WP_VIDEO_CFG, video_fmt->packing_mode,
10, 8);
l |= FLD_VAL(video_fmt->y_res, 31, 16);
l |= FLD_VAL(video_fmt->x_res, 15, 0);
hdmi_write_reg(wp->base, HDMI_WP_VIDEO_SIZE, l);
}
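/*
 * Program the wrapper video interface: the sync polarities towards DISPC are
 * kept active high, the core-side sync inversion bits follow the video mode
 * flags, and the interlace and 24-bit timing-master modes are configured.
 */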
void hdmi_wp_video_config_interface(struct hdmi_wp_data *wp,
const struct videomode *vm)
{
u32 r;
bool vsync_inv, hsync_inv;
DSSDBG("Enter hdmi_wp_video_config_interface\n");
vsync_inv = !!(vm->flags & DISPLAY_FLAGS_VSYNC_LOW);
hsync_inv = !!(vm->flags & DISPLAY_FLAGS_HSYNC_LOW);
r = hdmi_read_reg(wp->base, HDMI_WP_VIDEO_CFG);
r = FLD_MOD(r, 1, 7, 7); /* VSYNC_POL to dispc active high */
r = FLD_MOD(r, 1, 6, 6); /* HSYNC_POL to dispc active high */
r = FLD_MOD(r, vsync_inv, 5, 5); /* CORE_VSYNC_INV */
r = FLD_MOD(r, hsync_inv, 4, 4); /* CORE_HSYNC_INV */
r = FLD_MOD(r, !!(vm->flags & DISPLAY_FLAGS_INTERLACED), 3, 3);
r = FLD_MOD(r, 1, 1, 0); /* HDMI_TIMING_MASTER_24BIT */
hdmi_write_reg(wp->base, HDMI_WP_VIDEO_CFG, r);
}
void hdmi_wp_video_config_timing(struct hdmi_wp_data *wp,
const struct videomode *vm)
{
u32 timing_h = 0;
u32 timing_v = 0;
unsigned int hsync_len_offset = 1;
DSSDBG("Enter hdmi_wp_video_config_timing\n");
/*
* On OMAP4 and OMAP5 ES1 the HSW field is programmed as is. On OMAP5
* ES2+ (including DRA7/AM5 SoCs) HSW field is programmed to hsync_len-1.
* However, we don't support OMAP5 ES1 at all, so we can just check for
* OMAP4 here.
*/
if (wp->version == 4)
hsync_len_offset = 0;
timing_h |= FLD_VAL(vm->hback_porch, 31, 20);
timing_h |= FLD_VAL(vm->hfront_porch, 19, 8);
timing_h |= FLD_VAL(vm->hsync_len - hsync_len_offset, 7, 0);
hdmi_write_reg(wp->base, HDMI_WP_VIDEO_TIMING_H, timing_h);
timing_v |= FLD_VAL(vm->vback_porch, 31, 20);
timing_v |= FLD_VAL(vm->vfront_porch, 19, 8);
timing_v |= FLD_VAL(vm->vsync_len, 7, 0);
hdmi_write_reg(wp->base, HDMI_WP_VIDEO_TIMING_V, timing_v);
}
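/*
 * Derive the wrapper video format and timings from the HDMI configuration.
 * Vertical parameters are halved for interlaced modes, and horizontal
 * parameters are doubled for double-clocked (pixel-repeated) modes.
 */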
void hdmi_wp_init_vid_fmt_timings(struct hdmi_video_format *video_fmt,
struct videomode *vm, const struct hdmi_config *param)
{
DSSDBG("Enter hdmi_wp_video_init_format\n");
video_fmt->packing_mode = HDMI_PACK_10b_RGB_YUV444;
video_fmt->y_res = param->vm.vactive;
video_fmt->x_res = param->vm.hactive;
vm->hback_porch = param->vm.hback_porch;
vm->hfront_porch = param->vm.hfront_porch;
vm->hsync_len = param->vm.hsync_len;
vm->vback_porch = param->vm.vback_porch;
vm->vfront_porch = param->vm.vfront_porch;
vm->vsync_len = param->vm.vsync_len;
vm->flags = param->vm.flags;
if (param->vm.flags & DISPLAY_FLAGS_INTERLACED) {
video_fmt->y_res /= 2;
vm->vback_porch /= 2;
vm->vfront_porch /= 2;
vm->vsync_len /= 2;
}
if (param->vm.flags & DISPLAY_FLAGS_DOUBLECLK) {
video_fmt->x_res *= 2;
vm->hfront_porch *= 2;
vm->hsync_len *= 2;
vm->hback_porch *= 2;
}
}
void hdmi_wp_audio_config_format(struct hdmi_wp_data *wp,
struct hdmi_audio_format *aud_fmt)
{
u32 r;
DSSDBG("Enter hdmi_wp_audio_config_format\n");
r = hdmi_read_reg(wp->base, HDMI_WP_AUDIO_CFG);
if (wp->version == 4) {
r = FLD_MOD(r, aud_fmt->stereo_channels, 26, 24);
r = FLD_MOD(r, aud_fmt->active_chnnls_msk, 23, 16);
}
r = FLD_MOD(r, aud_fmt->en_sig_blk_strt_end, 5, 5);
r = FLD_MOD(r, aud_fmt->type, 4, 4);
r = FLD_MOD(r, aud_fmt->justification, 3, 3);
r = FLD_MOD(r, aud_fmt->sample_order, 2, 2);
r = FLD_MOD(r, aud_fmt->samples_per_word, 1, 1);
r = FLD_MOD(r, aud_fmt->sample_size, 0, 0);
hdmi_write_reg(wp->base, HDMI_WP_AUDIO_CFG, r);
}
void hdmi_wp_audio_config_dma(struct hdmi_wp_data *wp,
struct hdmi_audio_dma *aud_dma)
{
u32 r;
DSSDBG("Enter hdmi_wp_audio_config_dma\n");
r = hdmi_read_reg(wp->base, HDMI_WP_AUDIO_CFG2);
r = FLD_MOD(r, aud_dma->transfer_size, 15, 8);
r = FLD_MOD(r, aud_dma->block_size, 7, 0);
hdmi_write_reg(wp->base, HDMI_WP_AUDIO_CFG2, r);
r = hdmi_read_reg(wp->base, HDMI_WP_AUDIO_CTRL);
r = FLD_MOD(r, aud_dma->mode, 9, 9);
r = FLD_MOD(r, aud_dma->fifo_threshold, 8, 0);
hdmi_write_reg(wp->base, HDMI_WP_AUDIO_CTRL, r);
}
int hdmi_wp_audio_enable(struct hdmi_wp_data *wp, bool enable)
{
REG_FLD_MOD(wp->base, HDMI_WP_AUDIO_CTRL, enable, 31, 31);
return 0;
}
int hdmi_wp_audio_core_req_enable(struct hdmi_wp_data *wp, bool enable)
{
REG_FLD_MOD(wp->base, HDMI_WP_AUDIO_CTRL, enable, 30, 30);
return 0;
}
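/*
 * Map the "wp" memory resource, record its physical base (used to compute
 * the audio DMA address) and store the wrapper IP version.
 */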
int hdmi_wp_init(struct platform_device *pdev, struct hdmi_wp_data *wp,
unsigned int version)
{
struct resource *res;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "wp");
wp->base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(wp->base))
return PTR_ERR(wp->base);
wp->phys_base = res->start;
wp->version = version;
return 0;
}
phys_addr_t hdmi_wp_get_audio_dma_addr(struct hdmi_wp_data *wp)
{
return wp->phys_base + HDMI_WP_AUDIO_DATA;
}
| linux-master | drivers/gpu/drm/omapdrm/dss/hdmi_wp.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* OMAP Display Subsystem Base
*
* Copyright (C) 2015-2017 Texas Instruments Incorporated - https://www.ti.com/
*/
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_graph.h>
#include <linux/platform_device.h>
#include "dss.h"
#include "omapdss.h"
struct dispc_device *dispc_get_dispc(struct dss_device *dss)
{
return dss->dispc;
}
/* -----------------------------------------------------------------------------
* OMAP DSS Devices Handling
*/
static LIST_HEAD(omapdss_devices_list);
static DEFINE_MUTEX(omapdss_devices_lock);
void omapdss_device_register(struct omap_dss_device *dssdev)
{
mutex_lock(&omapdss_devices_lock);
list_add_tail(&dssdev->list, &omapdss_devices_list);
mutex_unlock(&omapdss_devices_lock);
}
void omapdss_device_unregister(struct omap_dss_device *dssdev)
{
mutex_lock(&omapdss_devices_lock);
list_del(&dssdev->list);
mutex_unlock(&omapdss_devices_lock);
}
static bool omapdss_device_is_registered(struct device_node *node)
{
struct omap_dss_device *dssdev;
bool found = false;
mutex_lock(&omapdss_devices_lock);
list_for_each_entry(dssdev, &omapdss_devices_list, list) {
if (dssdev->dev->of_node == node) {
found = true;
break;
}
}
mutex_unlock(&omapdss_devices_lock);
return found;
}
struct omap_dss_device *omapdss_device_get(struct omap_dss_device *dssdev)
{
if (get_device(dssdev->dev) == NULL)
return NULL;
return dssdev;
}
void omapdss_device_put(struct omap_dss_device *dssdev)
{
put_device(dssdev->dev);
}
struct omap_dss_device *omapdss_find_device_by_node(struct device_node *node)
{
struct omap_dss_device *dssdev;
list_for_each_entry(dssdev, &omapdss_devices_list, list) {
if (dssdev->dev->of_node == node)
return omapdss_device_get(dssdev);
}
return NULL;
}
/*
* Search for the next output device starting at @from. Release the reference to
* the @from device, and acquire a reference to the returned device if found.
*/
struct omap_dss_device *omapdss_device_next_output(struct omap_dss_device *from)
{
struct omap_dss_device *dssdev;
struct list_head *list;
mutex_lock(&omapdss_devices_lock);
if (list_empty(&omapdss_devices_list)) {
dssdev = NULL;
goto done;
}
	/*
	 * Start from the @from entry if given, or from the head of
	 * omapdss_devices_list otherwise.
	 */
list = from ? &from->list : &omapdss_devices_list;
list_for_each_entry(dssdev, list, list) {
/*
* Stop if we reach the omapdss_devices_list, that's the end of
* the list.
*/
if (&dssdev->list == &omapdss_devices_list) {
dssdev = NULL;
goto done;
}
if (dssdev->id && dssdev->bridge)
goto done;
}
dssdev = NULL;
done:
if (from)
omapdss_device_put(from);
if (dssdev)
omapdss_device_get(dssdev);
mutex_unlock(&omapdss_devices_lock);
return dssdev;
}
static bool omapdss_device_is_connected(struct omap_dss_device *dssdev)
{
return dssdev->dss;
}
int omapdss_device_connect(struct dss_device *dss,
struct omap_dss_device *src,
struct omap_dss_device *dst)
{
dev_dbg(&dss->pdev->dev, "connect(%s, %s)\n",
src ? dev_name(src->dev) : "NULL",
dst ? dev_name(dst->dev) : "NULL");
if (!dst) {
/*
* The destination is NULL when the source is connected to a
* bridge instead of a DSS device. Stop here, we will attach
* the bridge later when we will have a DRM encoder.
*/
return src && src->bridge ? 0 : -EINVAL;
}
if (omapdss_device_is_connected(dst))
return -EBUSY;
dst->dss = dss;
return 0;
}
void omapdss_device_disconnect(struct omap_dss_device *src,
struct omap_dss_device *dst)
{
struct dss_device *dss = src ? src->dss : dst->dss;
dev_dbg(&dss->pdev->dev, "disconnect(%s, %s)\n",
src ? dev_name(src->dev) : "NULL",
dst ? dev_name(dst->dev) : "NULL");
if (!dst) {
WARN_ON(!src->bridge);
return;
}
if (!dst->id && !omapdss_device_is_connected(dst)) {
WARN_ON(1);
return;
}
dst->dss = NULL;
}
/* -----------------------------------------------------------------------------
* Components Handling
*/
static struct list_head omapdss_comp_list;
struct omapdss_comp_node {
struct list_head list;
struct device_node *node;
bool dss_core_component;
const char *compat;
};
static bool omapdss_list_contains(const struct device_node *node)
{
struct omapdss_comp_node *comp;
list_for_each_entry(comp, &omapdss_comp_list, list) {
if (comp->node == node)
return true;
}
return false;
}
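/*
 * Record the given node in the components list and then walk its OF graph
 * endpoints, recursively adding every available remote port parent that has
 * not been seen yet.
 */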
static void omapdss_walk_device(struct device *dev, struct device_node *node,
bool dss_core)
{
struct omapdss_comp_node *comp;
struct device_node *n;
const char *compat;
int ret;
ret = of_property_read_string(node, "compatible", &compat);
if (ret < 0)
return;
comp = devm_kzalloc(dev, sizeof(*comp), GFP_KERNEL);
if (comp) {
comp->node = node;
comp->dss_core_component = dss_core;
comp->compat = compat;
list_add(&comp->list, &omapdss_comp_list);
}
	/*
	 * of_graph_get_remote_port_parent() prints an error if there is no
	 * port/ports node. To avoid that, first check that such a node exists.
	 */
n = of_get_child_by_name(node, "ports");
if (!n)
n = of_get_child_by_name(node, "port");
if (!n)
return;
of_node_put(n);
n = NULL;
while ((n = of_graph_get_next_endpoint(node, n)) != NULL) {
struct device_node *pn = of_graph_get_remote_port_parent(n);
if (!pn)
continue;
if (!of_device_is_available(pn) || omapdss_list_contains(pn)) {
of_node_put(pn);
continue;
}
omapdss_walk_device(dev, pn, false);
}
}
void omapdss_gather_components(struct device *dev)
{
struct device_node *child;
INIT_LIST_HEAD(&omapdss_comp_list);
omapdss_walk_device(dev, dev->of_node, true);
for_each_available_child_of_node(dev->of_node, child)
omapdss_walk_device(dev, child, true);
}
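/*
 * A component counts as loaded if it is a DSS core component, if it is not an
 * "omapdss," prefixed device, or if a dss device has been registered for its
 * node.
 */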
static bool omapdss_component_is_loaded(struct omapdss_comp_node *comp)
{
if (comp->dss_core_component)
return true;
if (!strstarts(comp->compat, "omapdss,"))
return true;
if (omapdss_device_is_registered(comp->node))
return true;
return false;
}
bool omapdss_stack_is_ready(void)
{
struct omapdss_comp_node *comp;
list_for_each_entry(comp, &omapdss_comp_list, list) {
if (!omapdss_component_is_loaded(comp))
return false;
}
return true;
}
| linux-master | drivers/gpu/drm/omapdrm/dss/base.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2009 Nokia Corporation
* Author: Tomi Valkeinen <[email protected]>
*
* Some code and ideas taken from drivers/video/omap/ driver
* by Imre Deak.
*/
#define DSS_SUBSYS_NAME "DSS"
#include <linux/debugfs.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/seq_file.h>
#include <linux/clk.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/gfp.h>
#include <linux/sizes.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_graph.h>
#include <linux/regulator/consumer.h>
#include <linux/suspend.h>
#include <linux/component.h>
#include <linux/sys_soc.h>
#include "omapdss.h"
#include "dss.h"
struct dss_reg {
u16 idx;
};
#define DSS_REG(idx) ((const struct dss_reg) { idx })
#define DSS_REVISION DSS_REG(0x0000)
#define DSS_SYSCONFIG DSS_REG(0x0010)
#define DSS_SYSSTATUS DSS_REG(0x0014)
#define DSS_CONTROL DSS_REG(0x0040)
#define DSS_SDI_CONTROL DSS_REG(0x0044)
#define DSS_PLL_CONTROL DSS_REG(0x0048)
#define DSS_SDI_STATUS DSS_REG(0x005C)
#define REG_GET(dss, idx, start, end) \
FLD_GET(dss_read_reg(dss, idx), start, end)
#define REG_FLD_MOD(dss, idx, val, start, end) \
dss_write_reg(dss, idx, \
FLD_MOD(dss_read_reg(dss, idx), val, start, end))
struct dss_ops {
int (*dpi_select_source)(struct dss_device *dss, int port,
enum omap_channel channel);
int (*select_lcd_source)(struct dss_device *dss,
enum omap_channel channel,
enum dss_clk_source clk_src);
};
struct dss_features {
enum dss_model model;
u8 fck_div_max;
unsigned int fck_freq_max;
u8 dss_fck_multiplier;
const char *parent_clk_name;
const enum omap_display_type *ports;
int num_ports;
const enum omap_dss_output_id *outputs;
const struct dss_ops *ops;
struct dss_reg_field dispc_clk_switch;
bool has_lcd_clk_src;
};
static const char * const dss_generic_clk_source_names[] = {
[DSS_CLK_SRC_FCK] = "FCK",
[DSS_CLK_SRC_PLL1_1] = "PLL1:1",
[DSS_CLK_SRC_PLL1_2] = "PLL1:2",
[DSS_CLK_SRC_PLL1_3] = "PLL1:3",
[DSS_CLK_SRC_PLL2_1] = "PLL2:1",
[DSS_CLK_SRC_PLL2_2] = "PLL2:2",
[DSS_CLK_SRC_PLL2_3] = "PLL2:3",
[DSS_CLK_SRC_HDMI_PLL] = "HDMI PLL",
};
static inline void dss_write_reg(struct dss_device *dss,
const struct dss_reg idx, u32 val)
{
__raw_writel(val, dss->base + idx.idx);
}
static inline u32 dss_read_reg(struct dss_device *dss, const struct dss_reg idx)
{
return __raw_readl(dss->base + idx.idx);
}
#define SR(dss, reg) \
dss->ctx[(DSS_##reg).idx / sizeof(u32)] = dss_read_reg(dss, DSS_##reg)
#define RR(dss, reg) \
dss_write_reg(dss, DSS_##reg, dss->ctx[(DSS_##reg).idx / sizeof(u32)])
static void dss_save_context(struct dss_device *dss)
{
DSSDBG("dss_save_context\n");
SR(dss, CONTROL);
if (dss->feat->outputs[OMAP_DSS_CHANNEL_LCD] & OMAP_DSS_OUTPUT_SDI) {
SR(dss, SDI_CONTROL);
SR(dss, PLL_CONTROL);
}
dss->ctx_valid = true;
DSSDBG("context saved\n");
}
static void dss_restore_context(struct dss_device *dss)
{
DSSDBG("dss_restore_context\n");
if (!dss->ctx_valid)
return;
RR(dss, CONTROL);
if (dss->feat->outputs[OMAP_DSS_CHANNEL_LCD] & OMAP_DSS_OUTPUT_SDI) {
RR(dss, SDI_CONTROL);
RR(dss, PLL_CONTROL);
}
DSSDBG("context restored\n");
}
#undef SR
#undef RR
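/*
 * Gate or ungate a DSS PLL through the syscon PLL control register. The
 * register bit is the inverse of @enable (bit 0: VIDEO1, bit 1: VIDEO2,
 * bit 2: HDMI); this is a no-op when no syscon regmap is available.
 */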
void dss_ctrl_pll_enable(struct dss_pll *pll, bool enable)
{
unsigned int shift;
unsigned int val;
if (!pll->dss->syscon_pll_ctrl)
return;
val = !enable;
switch (pll->id) {
case DSS_PLL_VIDEO1:
shift = 0;
break;
case DSS_PLL_VIDEO2:
shift = 1;
break;
case DSS_PLL_HDMI:
shift = 2;
break;
default:
DSSERR("illegal DSS PLL ID %d\n", pll->id);
return;
}
regmap_update_bits(pll->dss->syscon_pll_ctrl,
pll->dss->syscon_pll_ctrl_offset,
1 << shift, val << shift);
}
static int dss_ctrl_pll_set_control_mux(struct dss_device *dss,
enum dss_clk_source clk_src,
enum omap_channel channel)
{
unsigned int shift, val;
if (!dss->syscon_pll_ctrl)
return -EINVAL;
switch (channel) {
case OMAP_DSS_CHANNEL_LCD:
shift = 3;
switch (clk_src) {
case DSS_CLK_SRC_PLL1_1:
val = 0; break;
case DSS_CLK_SRC_HDMI_PLL:
val = 1; break;
default:
DSSERR("error in PLL mux config for LCD\n");
return -EINVAL;
}
break;
case OMAP_DSS_CHANNEL_LCD2:
shift = 5;
switch (clk_src) {
case DSS_CLK_SRC_PLL1_3:
val = 0; break;
case DSS_CLK_SRC_PLL2_3:
val = 1; break;
case DSS_CLK_SRC_HDMI_PLL:
val = 2; break;
default:
DSSERR("error in PLL mux config for LCD2\n");
return -EINVAL;
}
break;
case OMAP_DSS_CHANNEL_LCD3:
shift = 7;
switch (clk_src) {
case DSS_CLK_SRC_PLL2_1:
val = 0; break;
case DSS_CLK_SRC_PLL1_3:
val = 1; break;
case DSS_CLK_SRC_HDMI_PLL:
val = 2; break;
default:
DSSERR("error in PLL mux config for LCD3\n");
return -EINVAL;
}
break;
default:
DSSERR("error in PLL mux config\n");
return -EINVAL;
}
regmap_update_bits(dss->syscon_pll_ctrl, dss->syscon_pll_ctrl_offset,
0x3 << shift, val << shift);
return 0;
}
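/*
 * Program the SDI interface for @datapairs data pairs and set the fixed
 * SDI PLL divider and multiplier values.
 */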
void dss_sdi_init(struct dss_device *dss, int datapairs)
{
u32 l;
BUG_ON(datapairs > 3 || datapairs < 1);
l = dss_read_reg(dss, DSS_SDI_CONTROL);
l = FLD_MOD(l, 0xf, 19, 15); /* SDI_PDIV */
l = FLD_MOD(l, datapairs-1, 3, 2); /* SDI_PRSEL */
l = FLD_MOD(l, 2, 1, 0); /* SDI_BWSEL */
dss_write_reg(dss, DSS_SDI_CONTROL, l);
l = dss_read_reg(dss, DSS_PLL_CONTROL);
l = FLD_MOD(l, 0x7, 25, 22); /* SDI_PLL_FREQSEL */
l = FLD_MOD(l, 0xb, 16, 11); /* SDI_PLL_REGN */
l = FLD_MOD(l, 0xb4, 10, 1); /* SDI_PLL_REGM */
dss_write_reg(dss, DSS_PLL_CONTROL, l);
}
int dss_sdi_enable(struct dss_device *dss)
{
unsigned long timeout;
dispc_pck_free_enable(dss->dispc, 1);
/* Reset SDI PLL */
REG_FLD_MOD(dss, DSS_PLL_CONTROL, 1, 18, 18); /* SDI_PLL_SYSRESET */
udelay(1); /* wait 2x PCLK */
/* Lock SDI PLL */
REG_FLD_MOD(dss, DSS_PLL_CONTROL, 1, 28, 28); /* SDI_PLL_GOBIT */
/* Waiting for PLL lock request to complete */
timeout = jiffies + msecs_to_jiffies(500);
while (dss_read_reg(dss, DSS_SDI_STATUS) & (1 << 6)) {
if (time_after_eq(jiffies, timeout)) {
DSSERR("PLL lock request timed out\n");
goto err1;
}
}
/* Clearing PLL_GO bit */
REG_FLD_MOD(dss, DSS_PLL_CONTROL, 0, 28, 28);
/* Waiting for PLL to lock */
timeout = jiffies + msecs_to_jiffies(500);
while (!(dss_read_reg(dss, DSS_SDI_STATUS) & (1 << 5))) {
if (time_after_eq(jiffies, timeout)) {
DSSERR("PLL lock timed out\n");
goto err1;
}
}
dispc_lcd_enable_signal(dss->dispc, 1);
/* Waiting for SDI reset to complete */
timeout = jiffies + msecs_to_jiffies(500);
while (!(dss_read_reg(dss, DSS_SDI_STATUS) & (1 << 2))) {
if (time_after_eq(jiffies, timeout)) {
DSSERR("SDI reset timed out\n");
goto err2;
}
}
return 0;
err2:
dispc_lcd_enable_signal(dss->dispc, 0);
err1:
/* Reset SDI PLL */
REG_FLD_MOD(dss, DSS_PLL_CONTROL, 0, 18, 18); /* SDI_PLL_SYSRESET */
dispc_pck_free_enable(dss->dispc, 0);
return -ETIMEDOUT;
}
void dss_sdi_disable(struct dss_device *dss)
{
dispc_lcd_enable_signal(dss->dispc, 0);
dispc_pck_free_enable(dss->dispc, 0);
/* Reset SDI PLL */
REG_FLD_MOD(dss, DSS_PLL_CONTROL, 0, 18, 18); /* SDI_PLL_SYSRESET */
}
const char *dss_get_clk_source_name(enum dss_clk_source clk_src)
{
return dss_generic_clk_source_names[clk_src];
}
static void dss_dump_clocks(struct dss_device *dss, struct seq_file *s)
{
const char *fclk_name;
unsigned long fclk_rate;
if (dss_runtime_get(dss))
return;
seq_printf(s, "- DSS -\n");
fclk_name = dss_get_clk_source_name(DSS_CLK_SRC_FCK);
fclk_rate = clk_get_rate(dss->dss_clk);
seq_printf(s, "%s = %lu\n",
fclk_name,
fclk_rate);
dss_runtime_put(dss);
}
static int dss_dump_regs(struct seq_file *s, void *p)
{
struct dss_device *dss = s->private;
#define DUMPREG(dss, r) seq_printf(s, "%-35s %08x\n", #r, dss_read_reg(dss, r))
if (dss_runtime_get(dss))
return 0;
DUMPREG(dss, DSS_REVISION);
DUMPREG(dss, DSS_SYSCONFIG);
DUMPREG(dss, DSS_SYSSTATUS);
DUMPREG(dss, DSS_CONTROL);
if (dss->feat->outputs[OMAP_DSS_CHANNEL_LCD] & OMAP_DSS_OUTPUT_SDI) {
DUMPREG(dss, DSS_SDI_CONTROL);
DUMPREG(dss, DSS_PLL_CONTROL);
DUMPREG(dss, DSS_SDI_STATUS);
}
dss_runtime_put(dss);
#undef DUMPREG
return 0;
}
static int dss_debug_dump_clocks(struct seq_file *s, void *p)
{
struct dss_device *dss = s->private;
dss_dump_clocks(dss, s);
dispc_dump_clocks(dss->dispc, s);
return 0;
}
static int dss_get_channel_index(enum omap_channel channel)
{
switch (channel) {
case OMAP_DSS_CHANNEL_LCD:
return 0;
case OMAP_DSS_CHANNEL_LCD2:
return 1;
case OMAP_DSS_CHANNEL_LCD3:
return 2;
default:
WARN_ON(1);
return 0;
}
}
static void dss_select_dispc_clk_source(struct dss_device *dss,
enum dss_clk_source clk_src)
{
int b;
/*
* We always use PRCM clock as the DISPC func clock, except on DSS3,
* where we don't have separate DISPC and LCD clock sources.
*/
if (WARN_ON(dss->feat->has_lcd_clk_src && clk_src != DSS_CLK_SRC_FCK))
return;
switch (clk_src) {
case DSS_CLK_SRC_FCK:
b = 0;
break;
case DSS_CLK_SRC_PLL1_1:
b = 1;
break;
case DSS_CLK_SRC_PLL2_1:
b = 2;
break;
default:
BUG();
return;
}
REG_FLD_MOD(dss, DSS_CONTROL, b, /* DISPC_CLK_SWITCH */
dss->feat->dispc_clk_switch.start,
dss->feat->dispc_clk_switch.end);
dss->dispc_clk_source = clk_src;
}
void dss_select_dsi_clk_source(struct dss_device *dss, int dsi_module,
enum dss_clk_source clk_src)
{
int b, pos;
switch (clk_src) {
case DSS_CLK_SRC_FCK:
b = 0;
break;
case DSS_CLK_SRC_PLL1_2:
BUG_ON(dsi_module != 0);
b = 1;
break;
case DSS_CLK_SRC_PLL2_2:
BUG_ON(dsi_module != 1);
b = 1;
break;
default:
BUG();
return;
}
pos = dsi_module == 0 ? 1 : 10;
REG_FLD_MOD(dss, DSS_CONTROL, b, pos, pos); /* DSIx_CLK_SWITCH */
dss->dsi_clk_source[dsi_module] = clk_src;
}
static int dss_lcd_clk_mux_dra7(struct dss_device *dss,
enum omap_channel channel,
enum dss_clk_source clk_src)
{
const u8 ctrl_bits[] = {
[OMAP_DSS_CHANNEL_LCD] = 0,
[OMAP_DSS_CHANNEL_LCD2] = 12,
[OMAP_DSS_CHANNEL_LCD3] = 19,
};
u8 ctrl_bit = ctrl_bits[channel];
int r;
if (clk_src == DSS_CLK_SRC_FCK) {
/* LCDx_CLK_SWITCH */
REG_FLD_MOD(dss, DSS_CONTROL, 0, ctrl_bit, ctrl_bit);
return -EINVAL;
}
r = dss_ctrl_pll_set_control_mux(dss, clk_src, channel);
if (r)
return r;
REG_FLD_MOD(dss, DSS_CONTROL, 1, ctrl_bit, ctrl_bit);
return 0;
}
static int dss_lcd_clk_mux_omap5(struct dss_device *dss,
enum omap_channel channel,
enum dss_clk_source clk_src)
{
const u8 ctrl_bits[] = {
[OMAP_DSS_CHANNEL_LCD] = 0,
[OMAP_DSS_CHANNEL_LCD2] = 12,
[OMAP_DSS_CHANNEL_LCD3] = 19,
};
const enum dss_clk_source allowed_plls[] = {
[OMAP_DSS_CHANNEL_LCD] = DSS_CLK_SRC_PLL1_1,
[OMAP_DSS_CHANNEL_LCD2] = DSS_CLK_SRC_FCK,
[OMAP_DSS_CHANNEL_LCD3] = DSS_CLK_SRC_PLL2_1,
};
u8 ctrl_bit = ctrl_bits[channel];
if (clk_src == DSS_CLK_SRC_FCK) {
/* LCDx_CLK_SWITCH */
REG_FLD_MOD(dss, DSS_CONTROL, 0, ctrl_bit, ctrl_bit);
return -EINVAL;
}
if (WARN_ON(allowed_plls[channel] != clk_src))
return -EINVAL;
REG_FLD_MOD(dss, DSS_CONTROL, 1, ctrl_bit, ctrl_bit);
return 0;
}
static int dss_lcd_clk_mux_omap4(struct dss_device *dss,
enum omap_channel channel,
enum dss_clk_source clk_src)
{
const u8 ctrl_bits[] = {
[OMAP_DSS_CHANNEL_LCD] = 0,
[OMAP_DSS_CHANNEL_LCD2] = 12,
};
const enum dss_clk_source allowed_plls[] = {
[OMAP_DSS_CHANNEL_LCD] = DSS_CLK_SRC_PLL1_1,
[OMAP_DSS_CHANNEL_LCD2] = DSS_CLK_SRC_PLL2_1,
};
u8 ctrl_bit = ctrl_bits[channel];
if (clk_src == DSS_CLK_SRC_FCK) {
/* LCDx_CLK_SWITCH */
REG_FLD_MOD(dss, DSS_CONTROL, 0, ctrl_bit, ctrl_bit);
return 0;
}
if (WARN_ON(allowed_plls[channel] != clk_src))
return -EINVAL;
REG_FLD_MOD(dss, DSS_CONTROL, 1, ctrl_bit, ctrl_bit);
return 0;
}
void dss_select_lcd_clk_source(struct dss_device *dss,
enum omap_channel channel,
enum dss_clk_source clk_src)
{
int idx = dss_get_channel_index(channel);
int r;
if (!dss->feat->has_lcd_clk_src) {
dss_select_dispc_clk_source(dss, clk_src);
dss->lcd_clk_source[idx] = clk_src;
return;
}
r = dss->feat->ops->select_lcd_source(dss, channel, clk_src);
if (r)
return;
dss->lcd_clk_source[idx] = clk_src;
}
enum dss_clk_source dss_get_dispc_clk_source(struct dss_device *dss)
{
return dss->dispc_clk_source;
}
enum dss_clk_source dss_get_dsi_clk_source(struct dss_device *dss,
int dsi_module)
{
return dss->dsi_clk_source[dsi_module];
}
enum dss_clk_source dss_get_lcd_clk_source(struct dss_device *dss,
enum omap_channel channel)
{
if (dss->feat->has_lcd_clk_src) {
int idx = dss_get_channel_index(channel);
return dss->lcd_clk_source[idx];
} else {
/* LCD_CLK source is the same as DISPC_FCLK source for
* OMAP2 and OMAP3 */
return dss->dispc_clk_source;
}
}
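/*
 * Walk the candidate DSS fclk rates for the requested pixel clock and call
 * @func for each one until it accepts a rate. Without a separate parent clock
 * only a single rounded rate is tried; otherwise every usable parent-clock
 * divider is tried, starting from the lowest resulting fclk.
 */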
bool dss_div_calc(struct dss_device *dss, unsigned long pck,
unsigned long fck_min, dss_div_calc_func func, void *data)
{
int fckd, fckd_start, fckd_stop;
unsigned long fck;
unsigned long fck_hw_max;
unsigned long fckd_hw_max;
unsigned long prate;
unsigned int m;
fck_hw_max = dss->feat->fck_freq_max;
if (dss->parent_clk == NULL) {
unsigned int pckd;
pckd = fck_hw_max / pck;
fck = pck * pckd;
fck = clk_round_rate(dss->dss_clk, fck);
return func(fck, data);
}
fckd_hw_max = dss->feat->fck_div_max;
m = dss->feat->dss_fck_multiplier;
prate = clk_get_rate(dss->parent_clk);
fck_min = fck_min ? fck_min : 1;
fckd_start = min(prate * m / fck_min, fckd_hw_max);
fckd_stop = max(DIV_ROUND_UP(prate * m, fck_hw_max), 1ul);
for (fckd = fckd_start; fckd >= fckd_stop; --fckd) {
fck = DIV_ROUND_UP(prate, fckd) * m;
if (func(fck, data))
return true;
}
return false;
}
int dss_set_fck_rate(struct dss_device *dss, unsigned long rate)
{
int r;
DSSDBG("set fck to %lu\n", rate);
r = clk_set_rate(dss->dss_clk, rate);
if (r)
return r;
dss->dss_clk_rate = clk_get_rate(dss->dss_clk);
WARN_ONCE(dss->dss_clk_rate != rate, "clk rate mismatch: %lu != %lu",
dss->dss_clk_rate, rate);
return 0;
}
unsigned long dss_get_dispc_clk_rate(struct dss_device *dss)
{
return dss->dss_clk_rate;
}
unsigned long dss_get_max_fck_rate(struct dss_device *dss)
{
return dss->feat->fck_freq_max;
}
static int dss_setup_default_clock(struct dss_device *dss)
{
unsigned long max_dss_fck, prate;
unsigned long fck;
unsigned int fck_div;
int r;
max_dss_fck = dss->feat->fck_freq_max;
if (dss->parent_clk == NULL) {
fck = clk_round_rate(dss->dss_clk, max_dss_fck);
} else {
prate = clk_get_rate(dss->parent_clk);
fck_div = DIV_ROUND_UP(prate * dss->feat->dss_fck_multiplier,
max_dss_fck);
fck = DIV_ROUND_UP(prate, fck_div)
* dss->feat->dss_fck_multiplier;
}
r = dss_set_fck_rate(dss, fck);
if (r)
return r;
return 0;
}
void dss_set_venc_output(struct dss_device *dss, enum omap_dss_venc_type type)
{
int l = 0;
if (type == OMAP_DSS_VENC_TYPE_COMPOSITE)
l = 0;
else if (type == OMAP_DSS_VENC_TYPE_SVIDEO)
l = 1;
else
BUG();
/* venc out selection. 0 = comp, 1 = svideo */
REG_FLD_MOD(dss, DSS_CONTROL, l, 6, 6);
}
void dss_set_dac_pwrdn_bgz(struct dss_device *dss, bool enable)
{
/* DAC Power-Down Control */
REG_FLD_MOD(dss, DSS_CONTROL, enable, 5, 5);
}
void dss_select_hdmi_venc_clk_source(struct dss_device *dss,
enum dss_hdmi_venc_clk_source_select src)
{
enum omap_dss_output_id outputs;
outputs = dss->feat->outputs[OMAP_DSS_CHANNEL_DIGIT];
/* Complain about invalid selections */
WARN_ON((src == DSS_VENC_TV_CLK) && !(outputs & OMAP_DSS_OUTPUT_VENC));
WARN_ON((src == DSS_HDMI_M_PCLK) && !(outputs & OMAP_DSS_OUTPUT_HDMI));
/* Select only if we have options */
if ((outputs & OMAP_DSS_OUTPUT_VENC) &&
(outputs & OMAP_DSS_OUTPUT_HDMI))
/* VENC_HDMI_SWITCH */
REG_FLD_MOD(dss, DSS_CONTROL, src, 15, 15);
}
static int dss_dpi_select_source_omap2_omap3(struct dss_device *dss, int port,
enum omap_channel channel)
{
if (channel != OMAP_DSS_CHANNEL_LCD)
return -EINVAL;
return 0;
}
static int dss_dpi_select_source_omap4(struct dss_device *dss, int port,
enum omap_channel channel)
{
int val;
switch (channel) {
case OMAP_DSS_CHANNEL_LCD2:
val = 0;
break;
case OMAP_DSS_CHANNEL_DIGIT:
val = 1;
break;
default:
return -EINVAL;
}
REG_FLD_MOD(dss, DSS_CONTROL, val, 17, 17);
return 0;
}
static int dss_dpi_select_source_omap5(struct dss_device *dss, int port,
enum omap_channel channel)
{
int val;
switch (channel) {
case OMAP_DSS_CHANNEL_LCD:
val = 1;
break;
case OMAP_DSS_CHANNEL_LCD2:
val = 2;
break;
case OMAP_DSS_CHANNEL_LCD3:
val = 3;
break;
case OMAP_DSS_CHANNEL_DIGIT:
val = 0;
break;
default:
return -EINVAL;
}
REG_FLD_MOD(dss, DSS_CONTROL, val, 17, 16);
return 0;
}
static int dss_dpi_select_source_dra7xx(struct dss_device *dss, int port,
enum omap_channel channel)
{
switch (port) {
case 0:
return dss_dpi_select_source_omap5(dss, port, channel);
case 1:
if (channel != OMAP_DSS_CHANNEL_LCD2)
return -EINVAL;
break;
case 2:
if (channel != OMAP_DSS_CHANNEL_LCD3)
return -EINVAL;
break;
default:
return -EINVAL;
}
return 0;
}
int dss_dpi_select_source(struct dss_device *dss, int port,
enum omap_channel channel)
{
return dss->feat->ops->dpi_select_source(dss, port, channel);
}
static int dss_get_clocks(struct dss_device *dss)
{
struct clk *clk;
clk = devm_clk_get(&dss->pdev->dev, "fck");
if (IS_ERR(clk)) {
DSSERR("can't get clock fck\n");
return PTR_ERR(clk);
}
dss->dss_clk = clk;
if (dss->feat->parent_clk_name) {
clk = clk_get(NULL, dss->feat->parent_clk_name);
if (IS_ERR(clk)) {
DSSERR("Failed to get %s\n",
dss->feat->parent_clk_name);
return PTR_ERR(clk);
}
} else {
clk = NULL;
}
dss->parent_clk = clk;
return 0;
}
static void dss_put_clocks(struct dss_device *dss)
{
if (dss->parent_clk)
clk_put(dss->parent_clk);
}
int dss_runtime_get(struct dss_device *dss)
{
int r;
DSSDBG("dss_runtime_get\n");
r = pm_runtime_get_sync(&dss->pdev->dev);
if (WARN_ON(r < 0)) {
pm_runtime_put_noidle(&dss->pdev->dev);
return r;
}
return 0;
}
void dss_runtime_put(struct dss_device *dss)
{
int r;
DSSDBG("dss_runtime_put\n");
r = pm_runtime_put_sync(&dss->pdev->dev);
WARN_ON(r < 0 && r != -ENOSYS && r != -EBUSY);
}
struct dss_device *dss_get_device(struct device *dev)
{
return dev_get_drvdata(dev);
}
/* DEBUGFS */
#if defined(CONFIG_OMAP2_DSS_DEBUGFS)
static int dss_initialize_debugfs(struct dss_device *dss)
{
struct dentry *dir;
dir = debugfs_create_dir("omapdss", NULL);
if (IS_ERR(dir))
return PTR_ERR(dir);
dss->debugfs.root = dir;
return 0;
}
static void dss_uninitialize_debugfs(struct dss_device *dss)
{
debugfs_remove_recursive(dss->debugfs.root);
}
struct dss_debugfs_entry {
struct dentry *dentry;
int (*show_fn)(struct seq_file *s, void *data);
void *data;
};
static int dss_debug_open(struct inode *inode, struct file *file)
{
struct dss_debugfs_entry *entry = inode->i_private;
return single_open(file, entry->show_fn, entry->data);
}
static const struct file_operations dss_debug_fops = {
.open = dss_debug_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
struct dss_debugfs_entry *
dss_debugfs_create_file(struct dss_device *dss, const char *name,
int (*show_fn)(struct seq_file *s, void *data),
void *data)
{
struct dss_debugfs_entry *entry;
entry = kzalloc(sizeof(*entry), GFP_KERNEL);
if (!entry)
return ERR_PTR(-ENOMEM);
entry->show_fn = show_fn;
entry->data = data;
entry->dentry = debugfs_create_file(name, 0444, dss->debugfs.root,
entry, &dss_debug_fops);
return entry;
}
void dss_debugfs_remove_file(struct dss_debugfs_entry *entry)
{
if (IS_ERR_OR_NULL(entry))
return;
debugfs_remove(entry->dentry);
kfree(entry);
}
#else /* CONFIG_OMAP2_DSS_DEBUGFS */
static inline int dss_initialize_debugfs(struct dss_device *dss)
{
return 0;
}
static inline void dss_uninitialize_debugfs(struct dss_device *dss)
{
}
#endif /* CONFIG_OMAP2_DSS_DEBUGFS */
static const struct dss_ops dss_ops_omap2_omap3 = {
.dpi_select_source = &dss_dpi_select_source_omap2_omap3,
};
static const struct dss_ops dss_ops_omap4 = {
.dpi_select_source = &dss_dpi_select_source_omap4,
.select_lcd_source = &dss_lcd_clk_mux_omap4,
};
static const struct dss_ops dss_ops_omap5 = {
.dpi_select_source = &dss_dpi_select_source_omap5,
.select_lcd_source = &dss_lcd_clk_mux_omap5,
};
static const struct dss_ops dss_ops_dra7 = {
.dpi_select_source = &dss_dpi_select_source_dra7xx,
.select_lcd_source = &dss_lcd_clk_mux_dra7,
};
static const enum omap_display_type omap2plus_ports[] = {
OMAP_DISPLAY_TYPE_DPI,
};
static const enum omap_display_type omap34xx_ports[] = {
OMAP_DISPLAY_TYPE_DPI,
OMAP_DISPLAY_TYPE_SDI,
};
static const enum omap_display_type dra7xx_ports[] = {
OMAP_DISPLAY_TYPE_DPI,
OMAP_DISPLAY_TYPE_DPI,
OMAP_DISPLAY_TYPE_DPI,
};
static const enum omap_dss_output_id omap2_dss_supported_outputs[] = {
/* OMAP_DSS_CHANNEL_LCD */
OMAP_DSS_OUTPUT_DPI | OMAP_DSS_OUTPUT_DBI,
/* OMAP_DSS_CHANNEL_DIGIT */
OMAP_DSS_OUTPUT_VENC,
};
static const enum omap_dss_output_id omap3430_dss_supported_outputs[] = {
/* OMAP_DSS_CHANNEL_LCD */
OMAP_DSS_OUTPUT_DPI | OMAP_DSS_OUTPUT_DBI |
OMAP_DSS_OUTPUT_SDI | OMAP_DSS_OUTPUT_DSI1,
/* OMAP_DSS_CHANNEL_DIGIT */
OMAP_DSS_OUTPUT_VENC,
};
static const enum omap_dss_output_id omap3630_dss_supported_outputs[] = {
/* OMAP_DSS_CHANNEL_LCD */
OMAP_DSS_OUTPUT_DPI | OMAP_DSS_OUTPUT_DBI |
OMAP_DSS_OUTPUT_DSI1,
/* OMAP_DSS_CHANNEL_DIGIT */
OMAP_DSS_OUTPUT_VENC,
};
static const enum omap_dss_output_id am43xx_dss_supported_outputs[] = {
/* OMAP_DSS_CHANNEL_LCD */
OMAP_DSS_OUTPUT_DPI | OMAP_DSS_OUTPUT_DBI,
};
static const enum omap_dss_output_id omap4_dss_supported_outputs[] = {
/* OMAP_DSS_CHANNEL_LCD */
OMAP_DSS_OUTPUT_DBI | OMAP_DSS_OUTPUT_DSI1,
/* OMAP_DSS_CHANNEL_DIGIT */
OMAP_DSS_OUTPUT_VENC | OMAP_DSS_OUTPUT_HDMI,
/* OMAP_DSS_CHANNEL_LCD2 */
OMAP_DSS_OUTPUT_DPI | OMAP_DSS_OUTPUT_DBI |
OMAP_DSS_OUTPUT_DSI2,
};
static const enum omap_dss_output_id omap5_dss_supported_outputs[] = {
/* OMAP_DSS_CHANNEL_LCD */
OMAP_DSS_OUTPUT_DPI | OMAP_DSS_OUTPUT_DBI |
OMAP_DSS_OUTPUT_DSI1 | OMAP_DSS_OUTPUT_DSI2,
/* OMAP_DSS_CHANNEL_DIGIT */
OMAP_DSS_OUTPUT_HDMI,
/* OMAP_DSS_CHANNEL_LCD2 */
OMAP_DSS_OUTPUT_DPI | OMAP_DSS_OUTPUT_DBI |
OMAP_DSS_OUTPUT_DSI1,
/* OMAP_DSS_CHANNEL_LCD3 */
OMAP_DSS_OUTPUT_DPI | OMAP_DSS_OUTPUT_DBI |
OMAP_DSS_OUTPUT_DSI2,
};
static const struct dss_features omap24xx_dss_feats = {
.model = DSS_MODEL_OMAP2,
/*
* fck div max is really 16, but the divider range has gaps. The range
* from 1 to 6 has no gaps, so let's use that as a max.
*/
.fck_div_max = 6,
.fck_freq_max = 133000000,
.dss_fck_multiplier = 2,
.parent_clk_name = "core_ck",
.ports = omap2plus_ports,
.num_ports = ARRAY_SIZE(omap2plus_ports),
.outputs = omap2_dss_supported_outputs,
.ops = &dss_ops_omap2_omap3,
.dispc_clk_switch = { 0, 0 },
.has_lcd_clk_src = false,
};
static const struct dss_features omap34xx_dss_feats = {
.model = DSS_MODEL_OMAP3,
.fck_div_max = 16,
.fck_freq_max = 173000000,
.dss_fck_multiplier = 2,
.parent_clk_name = "dpll4_ck",
.ports = omap34xx_ports,
.outputs = omap3430_dss_supported_outputs,
.num_ports = ARRAY_SIZE(omap34xx_ports),
.ops = &dss_ops_omap2_omap3,
.dispc_clk_switch = { 0, 0 },
.has_lcd_clk_src = false,
};
static const struct dss_features omap3630_dss_feats = {
.model = DSS_MODEL_OMAP3,
.fck_div_max = 31,
.fck_freq_max = 173000000,
.dss_fck_multiplier = 1,
.parent_clk_name = "dpll4_ck",
.ports = omap2plus_ports,
.num_ports = ARRAY_SIZE(omap2plus_ports),
.outputs = omap3630_dss_supported_outputs,
.ops = &dss_ops_omap2_omap3,
.dispc_clk_switch = { 0, 0 },
.has_lcd_clk_src = false,
};
static const struct dss_features omap44xx_dss_feats = {
.model = DSS_MODEL_OMAP4,
.fck_div_max = 32,
.fck_freq_max = 186000000,
.dss_fck_multiplier = 1,
.parent_clk_name = "dpll_per_x2_ck",
.ports = omap2plus_ports,
.num_ports = ARRAY_SIZE(omap2plus_ports),
.outputs = omap4_dss_supported_outputs,
.ops = &dss_ops_omap4,
.dispc_clk_switch = { 9, 8 },
.has_lcd_clk_src = true,
};
static const struct dss_features omap54xx_dss_feats = {
.model = DSS_MODEL_OMAP5,
.fck_div_max = 64,
.fck_freq_max = 209250000,
.dss_fck_multiplier = 1,
.parent_clk_name = "dpll_per_x2_ck",
.ports = omap2plus_ports,
.num_ports = ARRAY_SIZE(omap2plus_ports),
.outputs = omap5_dss_supported_outputs,
.ops = &dss_ops_omap5,
.dispc_clk_switch = { 9, 7 },
.has_lcd_clk_src = true,
};
static const struct dss_features am43xx_dss_feats = {
.model = DSS_MODEL_OMAP3,
.fck_div_max = 0,
.fck_freq_max = 200000000,
.dss_fck_multiplier = 0,
.parent_clk_name = NULL,
.ports = omap2plus_ports,
.num_ports = ARRAY_SIZE(omap2plus_ports),
.outputs = am43xx_dss_supported_outputs,
.ops = &dss_ops_omap2_omap3,
.dispc_clk_switch = { 0, 0 },
.has_lcd_clk_src = true,
};
static const struct dss_features dra7xx_dss_feats = {
.model = DSS_MODEL_DRA7,
.fck_div_max = 64,
.fck_freq_max = 209250000,
.dss_fck_multiplier = 1,
.parent_clk_name = "dpll_per_x2_ck",
.ports = dra7xx_ports,
.num_ports = ARRAY_SIZE(dra7xx_ports),
.outputs = omap5_dss_supported_outputs,
.ops = &dss_ops_dra7,
.dispc_clk_switch = { 9, 7 },
.has_lcd_clk_src = true,
};
static void __dss_uninit_ports(struct dss_device *dss, unsigned int num_ports)
{
struct platform_device *pdev = dss->pdev;
struct device_node *parent = pdev->dev.of_node;
struct device_node *port;
unsigned int i;
for (i = 0; i < num_ports; i++) {
port = of_graph_get_port_by_id(parent, i);
if (!port)
continue;
switch (dss->feat->ports[i]) {
case OMAP_DISPLAY_TYPE_DPI:
dpi_uninit_port(port);
break;
case OMAP_DISPLAY_TYPE_SDI:
sdi_uninit_port(port);
break;
default:
break;
}
of_node_put(port);
}
}
static int dss_init_ports(struct dss_device *dss)
{
struct platform_device *pdev = dss->pdev;
struct device_node *parent = pdev->dev.of_node;
struct device_node *port;
unsigned int i;
int r;
for (i = 0; i < dss->feat->num_ports; i++) {
port = of_graph_get_port_by_id(parent, i);
if (!port)
continue;
switch (dss->feat->ports[i]) {
case OMAP_DISPLAY_TYPE_DPI:
r = dpi_init_port(dss, pdev, port, dss->feat->model);
if (r)
goto error;
break;
case OMAP_DISPLAY_TYPE_SDI:
r = sdi_init_port(dss, pdev, port);
if (r)
goto error;
break;
default:
break;
}
of_node_put(port);
}
return 0;
error:
of_node_put(port);
__dss_uninit_ports(dss, i);
return r;
}
static void dss_uninit_ports(struct dss_device *dss)
{
__dss_uninit_ports(dss, dss->feat->num_ports);
}
static int dss_video_pll_probe(struct dss_device *dss)
{
struct platform_device *pdev = dss->pdev;
struct device_node *np = pdev->dev.of_node;
struct regulator *pll_regulator;
int r;
if (!np)
return 0;
if (of_property_read_bool(np, "syscon-pll-ctrl")) {
dss->syscon_pll_ctrl = syscon_regmap_lookup_by_phandle(np,
"syscon-pll-ctrl");
if (IS_ERR(dss->syscon_pll_ctrl)) {
dev_err(&pdev->dev,
"failed to get syscon-pll-ctrl regmap\n");
return PTR_ERR(dss->syscon_pll_ctrl);
}
if (of_property_read_u32_index(np, "syscon-pll-ctrl", 1,
&dss->syscon_pll_ctrl_offset)) {
dev_err(&pdev->dev,
"failed to get syscon-pll-ctrl offset\n");
return -EINVAL;
}
}
pll_regulator = devm_regulator_get(&pdev->dev, "vdda_video");
if (IS_ERR(pll_regulator)) {
r = PTR_ERR(pll_regulator);
switch (r) {
case -ENOENT:
pll_regulator = NULL;
break;
case -EPROBE_DEFER:
return -EPROBE_DEFER;
default:
DSSERR("can't get DPLL VDDA regulator\n");
return r;
}
}
if (of_property_match_string(np, "reg-names", "pll1") >= 0) {
dss->video1_pll = dss_video_pll_init(dss, pdev, 0,
pll_regulator);
if (IS_ERR(dss->video1_pll))
return PTR_ERR(dss->video1_pll);
}
if (of_property_match_string(np, "reg-names", "pll2") >= 0) {
dss->video2_pll = dss_video_pll_init(dss, pdev, 1,
pll_regulator);
if (IS_ERR(dss->video2_pll)) {
dss_video_pll_uninit(dss->video1_pll);
return PTR_ERR(dss->video2_pll);
}
}
return 0;
}
/* DSS HW IP initialisation */
static const struct of_device_id dss_of_match[] = {
{ .compatible = "ti,omap2-dss", .data = &omap24xx_dss_feats },
{ .compatible = "ti,omap3-dss", .data = &omap3630_dss_feats },
{ .compatible = "ti,omap4-dss", .data = &omap44xx_dss_feats },
{ .compatible = "ti,omap5-dss", .data = &omap54xx_dss_feats },
{ .compatible = "ti,dra7-dss", .data = &dra7xx_dss_feats },
{},
};
MODULE_DEVICE_TABLE(of, dss_of_match);
static const struct soc_device_attribute dss_soc_devices[] = {
{ .machine = "OMAP3430/3530", .data = &omap34xx_dss_feats },
{ .machine = "AM35??", .data = &omap34xx_dss_feats },
{ .family = "AM43xx", .data = &am43xx_dss_feats },
{ /* sentinel */ }
};
static int dss_bind(struct device *dev)
{
struct dss_device *dss = dev_get_drvdata(dev);
struct platform_device *drm_pdev;
struct dss_pdata pdata;
int r;
r = component_bind_all(dev, NULL);
if (r)
return r;
pm_set_vt_switch(0);
pdata.dss = dss;
drm_pdev = platform_device_register_data(NULL, "omapdrm", 0,
&pdata, sizeof(pdata));
if (IS_ERR(drm_pdev)) {
component_unbind_all(dev, NULL);
return PTR_ERR(drm_pdev);
}
dss->drm_pdev = drm_pdev;
return 0;
}
static void dss_unbind(struct device *dev)
{
struct dss_device *dss = dev_get_drvdata(dev);
platform_device_unregister(dss->drm_pdev);
component_unbind_all(dev, NULL);
}
static const struct component_master_ops dss_component_ops = {
.bind = dss_bind,
.unbind = dss_unbind,
};
struct dss_component_match_data {
struct device *dev;
struct component_match **match;
};
static int dss_add_child_component(struct device *dev, void *data)
{
struct dss_component_match_data *cmatch = data;
struct component_match **match = cmatch->match;
/*
* HACK
* We don't have a working driver for rfbi, so skip it here always.
* Otherwise dss will never get probed successfully, as it will wait
* for rfbi to get probed.
*/
if (strstr(dev_name(dev), "rfbi"))
return 0;
/*
* Handle possible interconnect target modules defined within the DSS.
* The DSS components can be children of an interconnect target module
* after the device tree has been updated for the module data.
* See also omapdss_boot_init() for compatible fixup.
*/
if (strstr(dev_name(dev), "target-module"))
return device_for_each_child(dev, cmatch,
dss_add_child_component);
component_match_add(cmatch->dev, match, component_compare_dev, dev);
return 0;
}
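/*
 * One-time hardware setup: select the DSS functional clock as the DISPC clock
 * source, configure the VENC clock bits when VENC support is enabled,
 * initialise the cached clock-source state and report the DSS revision.
 */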
static int dss_probe_hardware(struct dss_device *dss)
{
u32 rev;
int r;
r = dss_runtime_get(dss);
if (r)
return r;
dss->dss_clk_rate = clk_get_rate(dss->dss_clk);
/* Select DPLL */
REG_FLD_MOD(dss, DSS_CONTROL, 0, 0, 0);
dss_select_dispc_clk_source(dss, DSS_CLK_SRC_FCK);
#ifdef CONFIG_OMAP2_DSS_VENC
REG_FLD_MOD(dss, DSS_CONTROL, 1, 4, 4); /* venc dac demen */
REG_FLD_MOD(dss, DSS_CONTROL, 1, 3, 3); /* venc clock 4x enable */
REG_FLD_MOD(dss, DSS_CONTROL, 0, 2, 2); /* venc clock mode = normal */
#endif
dss->dsi_clk_source[0] = DSS_CLK_SRC_FCK;
dss->dsi_clk_source[1] = DSS_CLK_SRC_FCK;
dss->dispc_clk_source = DSS_CLK_SRC_FCK;
dss->lcd_clk_source[0] = DSS_CLK_SRC_FCK;
dss->lcd_clk_source[1] = DSS_CLK_SRC_FCK;
rev = dss_read_reg(dss, DSS_REVISION);
pr_info("OMAP DSS rev %d.%d\n", FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0));
dss_runtime_put(dss);
return 0;
}
static int dss_probe(struct platform_device *pdev)
{
const struct soc_device_attribute *soc;
struct dss_component_match_data cmatch;
struct component_match *match = NULL;
struct dss_device *dss;
int r;
dss = kzalloc(sizeof(*dss), GFP_KERNEL);
if (!dss)
return -ENOMEM;
dss->pdev = pdev;
platform_set_drvdata(pdev, dss);
r = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
if (r) {
dev_err(&pdev->dev, "Failed to set the DMA mask\n");
goto err_free_dss;
}
	/*
	 * The various OMAP3-based SoCs can't be told apart using the compatible
	 * string, so use SoC device matching.
	 */
soc = soc_device_match(dss_soc_devices);
if (soc)
dss->feat = soc->data;
else
dss->feat = of_match_device(dss_of_match, &pdev->dev)->data;
/* Map I/O registers, get and setup clocks. */
dss->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(dss->base)) {
r = PTR_ERR(dss->base);
goto err_free_dss;
}
r = dss_get_clocks(dss);
if (r)
goto err_free_dss;
r = dss_setup_default_clock(dss);
if (r)
goto err_put_clocks;
/* Setup the video PLLs and the DPI and SDI ports. */
r = dss_video_pll_probe(dss);
if (r)
goto err_put_clocks;
r = dss_init_ports(dss);
if (r)
goto err_uninit_plls;
/* Enable runtime PM and probe the hardware. */
pm_runtime_enable(&pdev->dev);
r = dss_probe_hardware(dss);
if (r)
goto err_pm_runtime_disable;
/* Initialize debugfs. */
r = dss_initialize_debugfs(dss);
if (r)
goto err_pm_runtime_disable;
dss->debugfs.clk = dss_debugfs_create_file(dss, "clk",
dss_debug_dump_clocks, dss);
dss->debugfs.dss = dss_debugfs_create_file(dss, "dss", dss_dump_regs,
dss);
/* Add all the child devices as components. */
r = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
if (r)
goto err_uninit_debugfs;
omapdss_gather_components(&pdev->dev);
cmatch.dev = &pdev->dev;
cmatch.match = &match;
device_for_each_child(&pdev->dev, &cmatch, dss_add_child_component);
r = component_master_add_with_match(&pdev->dev, &dss_component_ops, match);
if (r)
goto err_of_depopulate;
return 0;
err_of_depopulate:
of_platform_depopulate(&pdev->dev);
err_uninit_debugfs:
dss_debugfs_remove_file(dss->debugfs.clk);
dss_debugfs_remove_file(dss->debugfs.dss);
dss_uninitialize_debugfs(dss);
err_pm_runtime_disable:
pm_runtime_disable(&pdev->dev);
dss_uninit_ports(dss);
err_uninit_plls:
if (dss->video1_pll)
dss_video_pll_uninit(dss->video1_pll);
if (dss->video2_pll)
dss_video_pll_uninit(dss->video2_pll);
err_put_clocks:
dss_put_clocks(dss);
err_free_dss:
kfree(dss);
return r;
}
static void dss_remove(struct platform_device *pdev)
{
struct dss_device *dss = platform_get_drvdata(pdev);
of_platform_depopulate(&pdev->dev);
component_master_del(&pdev->dev, &dss_component_ops);
dss_debugfs_remove_file(dss->debugfs.clk);
dss_debugfs_remove_file(dss->debugfs.dss);
dss_uninitialize_debugfs(dss);
pm_runtime_disable(&pdev->dev);
dss_uninit_ports(dss);
if (dss->video1_pll)
dss_video_pll_uninit(dss->video1_pll);
if (dss->video2_pll)
dss_video_pll_uninit(dss->video2_pll);
dss_put_clocks(dss);
kfree(dss);
}
static void dss_shutdown(struct platform_device *pdev)
{
DSSDBG("shutdown\n");
}
static __maybe_unused int dss_runtime_suspend(struct device *dev)
{
struct dss_device *dss = dev_get_drvdata(dev);
dss_save_context(dss);
dss_set_min_bus_tput(dev, 0);
pinctrl_pm_select_sleep_state(dev);
return 0;
}
static __maybe_unused int dss_runtime_resume(struct device *dev)
{
struct dss_device *dss = dev_get_drvdata(dev);
int r;
pinctrl_pm_select_default_state(dev);
/*
* Set an arbitrarily high tput request to ensure OPP100.
* What we should really do is to make a request to stay in OPP100,
* without any tput requirements, but that is not currently possible
* via the PM layer.
*/
r = dss_set_min_bus_tput(dev, 1000000000);
if (r)
return r;
dss_restore_context(dss);
return 0;
}
static const struct dev_pm_ops dss_pm_ops = {
SET_RUNTIME_PM_OPS(dss_runtime_suspend, dss_runtime_resume, NULL)
SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
};
struct platform_driver omap_dsshw_driver = {
.probe = dss_probe,
.remove_new = dss_remove,
.shutdown = dss_shutdown,
.driver = {
.name = "omapdss_dss",
.pm = &dss_pm_ops,
.of_match_table = dss_of_match,
.suppress_bind_attrs = true,
},
};
/* INIT */
static struct platform_driver * const omap_dss_drivers[] = {
&omap_dsshw_driver,
&omap_dispchw_driver,
#ifdef CONFIG_OMAP2_DSS_DSI
&omap_dsihw_driver,
#endif
#ifdef CONFIG_OMAP2_DSS_VENC
&omap_venchw_driver,
#endif
#ifdef CONFIG_OMAP4_DSS_HDMI
&omapdss_hdmi4hw_driver,
#endif
#ifdef CONFIG_OMAP5_DSS_HDMI
&omapdss_hdmi5hw_driver,
#endif
};
int __init omap_dss_init(void)
{
return platform_register_drivers(omap_dss_drivers,
ARRAY_SIZE(omap_dss_drivers));
}
void omap_dss_exit(void)
{
platform_unregister_drivers(omap_dss_drivers,
ARRAY_SIZE(omap_dss_drivers));
}
| linux-master | drivers/gpu/drm/omapdrm/dss/dss.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* HDMI PHY
*
* Copyright (C) 2013 Texas Instruments Incorporated - https://www.ti.com/
*/
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include "omapdss.h"
#include "dss.h"
#include "hdmi.h"
void hdmi_phy_dump(struct hdmi_phy_data *phy, struct seq_file *s)
{
#define DUMPPHY(r) seq_printf(s, "%-35s %08x\n", #r,\
hdmi_read_reg(phy->base, r))
DUMPPHY(HDMI_TXPHY_TX_CTRL);
DUMPPHY(HDMI_TXPHY_DIGITAL_CTRL);
DUMPPHY(HDMI_TXPHY_POWER_CTRL);
DUMPPHY(HDMI_TXPHY_PAD_CFG_CTRL);
if (phy->features->bist_ctrl)
DUMPPHY(HDMI_TXPHY_BIST_CONTROL);
}
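/*
 * Parse the 8-entry lane configuration as four (positive, negative) pin
 * pairs. Each pair must use the two pins of a single differential lane; the
 * lane polarity is inverted when the positive pin is the odd-numbered pin of
 * its pair.
 */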
int hdmi_phy_parse_lanes(struct hdmi_phy_data *phy, const u32 *lanes)
{
int i;
for (i = 0; i < 8; i += 2) {
u8 lane, pol;
int dx, dy;
dx = lanes[i];
dy = lanes[i + 1];
if (dx < 0 || dx >= 8)
return -EINVAL;
if (dy < 0 || dy >= 8)
return -EINVAL;
if (dx & 1) {
if (dy != dx - 1)
return -EINVAL;
pol = 1;
} else {
if (dy != dx + 1)
return -EINVAL;
pol = 0;
}
lane = dx / 2;
phy->lane_function[lane] = i / 2;
phy->lane_polarity[lane] = pol;
}
return 0;
}
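/*
 * Look up the parsed lane functions in the list of supported pad
 * configurations and program the matching index, together with the per-lane
 * polarity bits, into HDMI_TXPHY_PAD_CFG_CTRL.
 */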
static void hdmi_phy_configure_lanes(struct hdmi_phy_data *phy)
{
static const u16 pad_cfg_list[] = {
0x0123,
0x0132,
0x0312,
0x0321,
0x0231,
0x0213,
0x1023,
0x1032,
0x3012,
0x3021,
0x2031,
0x2013,
0x1203,
0x1302,
0x3102,
0x3201,
0x2301,
0x2103,
0x1230,
0x1320,
0x3120,
0x3210,
0x2310,
0x2130,
};
u16 lane_cfg = 0;
int i;
unsigned int lane_cfg_val;
u16 pol_val = 0;
for (i = 0; i < 4; ++i)
lane_cfg |= phy->lane_function[i] << ((3 - i) * 4);
pol_val |= phy->lane_polarity[0] << 0;
pol_val |= phy->lane_polarity[1] << 3;
pol_val |= phy->lane_polarity[2] << 2;
pol_val |= phy->lane_polarity[3] << 1;
for (i = 0; i < ARRAY_SIZE(pad_cfg_list); ++i)
if (pad_cfg_list[i] == lane_cfg)
break;
if (WARN_ON(i == ARRAY_SIZE(pad_cfg_list)))
i = 0;
lane_cfg_val = i;
REG_FLD_MOD(phy->base, HDMI_TXPHY_PAD_CFG_CTRL, lane_cfg_val, 26, 22);
REG_FLD_MOD(phy->base, HDMI_TXPHY_PAD_CFG_CTRL, pol_val, 30, 27);
}
int hdmi_phy_configure(struct hdmi_phy_data *phy, unsigned long hfbitclk,
unsigned long lfbitclk)
{
u8 freqout;
	/*
	 * Read address 0 in order to make sure the SCP reset has completed.
	 * This is a dummy access performed only to ensure the reset is done.
	 */
hdmi_read_reg(phy->base, HDMI_TXPHY_TX_CTRL);
/*
* In OMAP5+, the HFBITCLK must be divided by 2 before issuing the
* HDMI_PHYPWRCMD_LDOON command.
*/
if (phy->features->bist_ctrl)
REG_FLD_MOD(phy->base, HDMI_TXPHY_BIST_CONTROL, 1, 11, 11);
/*
* If the hfbitclk != lfbitclk, it means the lfbitclk was configured
* to be used for TMDS.
*/
if (hfbitclk != lfbitclk)
freqout = 0;
else if (hfbitclk / 10 < phy->features->max_phy)
freqout = 1;
else
freqout = 2;
	/*
	 * Write to PHY address 0 to configure the clock: use HFBITCLK to
	 * program the HDMI_TXPHY_TX_CONTROL_FREQOUT field.
	 */
REG_FLD_MOD(phy->base, HDMI_TXPHY_TX_CTRL, freqout, 31, 30);
/* Write to phy address 1 to start HDMI line (TXVALID and TMDSCLKEN) */
hdmi_write_reg(phy->base, HDMI_TXPHY_DIGITAL_CTRL, 0xF0000000);
/* Setup max LDO voltage */
if (phy->features->ldo_voltage)
REG_FLD_MOD(phy->base, HDMI_TXPHY_POWER_CTRL, 0xB, 3, 0);
hdmi_phy_configure_lanes(phy);
return 0;
}
static const struct hdmi_phy_features omap44xx_phy_feats = {
.bist_ctrl = false,
.ldo_voltage = true,
.max_phy = 185675000,
};
static const struct hdmi_phy_features omap54xx_phy_feats = {
.bist_ctrl = true,
.ldo_voltage = false,
.max_phy = 186000000,
};
int hdmi_phy_init(struct platform_device *pdev, struct hdmi_phy_data *phy,
unsigned int version)
{
if (version == 4)
phy->features = &omap44xx_phy_feats;
else
phy->features = &omap54xx_phy_feats;
phy->base = devm_platform_ioremap_resource_byname(pdev, "phy");
if (IS_ERR(phy->base))
return PTR_ERR(phy->base);
return 0;
}
| linux-master | drivers/gpu/drm/omapdrm/dss/hdmi_phy.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2014 Texas Instruments Incorporated - https://www.ti.com/
*/
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include "omapdss.h"
#include "dss.h"
struct dss_video_pll {
struct dss_pll pll;
struct device *dev;
void __iomem *clkctrl_base;
};
#define REG_MOD(reg, val, start, end) \
writel_relaxed(FLD_MOD(readl_relaxed(reg), val, start, end), reg)
static void dss_dpll_enable_scp_clk(struct dss_video_pll *vpll)
{
REG_MOD(vpll->clkctrl_base, 1, 14, 14); /* CIO_CLK_ICG */
}
static void dss_dpll_disable_scp_clk(struct dss_video_pll *vpll)
{
REG_MOD(vpll->clkctrl_base, 0, 14, 14); /* CIO_CLK_ICG */
}
static void dss_dpll_power_enable(struct dss_video_pll *vpll)
{
REG_MOD(vpll->clkctrl_base, 2, 31, 30); /* PLL_POWER_ON_ALL */
	/*
	 * DRA7x PLL CTRL's PLL_PWR_STATUS seems to always return 0,
	 * so we have to use a fixed delay here.
	 */
msleep(1);
}
static void dss_dpll_power_disable(struct dss_video_pll *vpll)
{
REG_MOD(vpll->clkctrl_base, 0, 31, 30); /* PLL_POWER_OFF */
}
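/*
 * Video PLL power-up sequence: take a runtime PM reference on the DSS, ungate
 * the PLL and its SCP interface clock, wait for the PLL reset to complete and
 * finally power on the PLL outputs.
 */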
static int dss_video_pll_enable(struct dss_pll *pll)
{
struct dss_video_pll *vpll = container_of(pll, struct dss_video_pll, pll);
int r;
r = dss_runtime_get(pll->dss);
if (r)
return r;
dss_ctrl_pll_enable(pll, true);
dss_dpll_enable_scp_clk(vpll);
r = dss_pll_wait_reset_done(pll);
if (r)
goto err_reset;
dss_dpll_power_enable(vpll);
return 0;
err_reset:
dss_dpll_disable_scp_clk(vpll);
dss_ctrl_pll_enable(pll, false);
dss_runtime_put(pll->dss);
return r;
}
static void dss_video_pll_disable(struct dss_pll *pll)
{
struct dss_video_pll *vpll = container_of(pll, struct dss_video_pll, pll);
dss_dpll_power_disable(vpll);
dss_dpll_disable_scp_clk(vpll);
dss_ctrl_pll_enable(pll, false);
dss_runtime_put(pll->dss);
}
static const struct dss_pll_ops dss_pll_ops = {
.enable = dss_video_pll_enable,
.disable = dss_video_pll_disable,
.set_config = dss_pll_write_config_type_a,
};
static const struct dss_pll_hw dss_dra7_video_pll_hw = {
.type = DSS_PLL_TYPE_A,
.n_max = (1 << 8) - 1,
.m_max = (1 << 12) - 1,
.mX_max = (1 << 5) - 1,
.fint_min = 500000,
.fint_max = 2500000,
.clkdco_max = 1800000000,
.n_msb = 8,
.n_lsb = 1,
.m_msb = 20,
.m_lsb = 9,
.mX_msb[0] = 25,
.mX_lsb[0] = 21,
.mX_msb[1] = 30,
.mX_lsb[1] = 26,
.mX_msb[2] = 4,
.mX_lsb[2] = 0,
.mX_msb[3] = 9,
.mX_lsb[3] = 5,
.has_refsel = true,
.errata_i886 = true,
.errata_i932 = true,
};
struct dss_pll *dss_video_pll_init(struct dss_device *dss,
struct platform_device *pdev, int id,
struct regulator *regulator)
{
const char * const reg_name[] = { "pll1", "pll2" };
const char * const clkctrl_name[] = { "pll1_clkctrl", "pll2_clkctrl" };
const char * const clkin_name[] = { "video1_clk", "video2_clk" };
struct dss_video_pll *vpll;
void __iomem *pll_base, *clkctrl_base;
struct clk *clk;
struct dss_pll *pll;
int r;
/* PLL CONTROL */
pll_base = devm_platform_ioremap_resource_byname(pdev, reg_name[id]);
if (IS_ERR(pll_base))
return ERR_CAST(pll_base);
/* CLOCK CONTROL */
clkctrl_base = devm_platform_ioremap_resource_byname(pdev, clkctrl_name[id]);
if (IS_ERR(clkctrl_base))
return ERR_CAST(clkctrl_base);
/* CLKIN */
clk = devm_clk_get(&pdev->dev, clkin_name[id]);
if (IS_ERR(clk)) {
DSSERR("can't get video pll clkin\n");
return ERR_CAST(clk);
}
vpll = devm_kzalloc(&pdev->dev, sizeof(*vpll), GFP_KERNEL);
if (!vpll)
return ERR_PTR(-ENOMEM);
vpll->dev = &pdev->dev;
vpll->clkctrl_base = clkctrl_base;
pll = &vpll->pll;
pll->name = id == 0 ? "video0" : "video1";
pll->id = id == 0 ? DSS_PLL_VIDEO1 : DSS_PLL_VIDEO2;
pll->clkin = clk;
pll->regulator = regulator;
pll->base = pll_base;
pll->hw = &dss_dra7_video_pll_hw;
pll->ops = &dss_pll_ops;
r = dss_pll_register(dss, pll);
if (r)
return ERR_PTR(r);
return pll;
}
void dss_video_pll_uninit(struct dss_pll *pll)
{
dss_pll_unregister(pll);
}
| linux-master | drivers/gpu/drm/omapdrm/dss/video-pll.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2009 Nokia Corporation
* Author: Tomi Valkeinen <[email protected]>
*
* Some code and ideas taken from drivers/video/omap/ driver
* by Imre Deak.
*/
#define DSS_SUBSYS_NAME "DISPC"
#include <linux/kernel.h>
#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/jiffies.h>
#include <linux/seq_file.h>
#include <linux/delay.h>
#include <linux/workqueue.h>
#include <linux/hardirq.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/sizes.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/component.h>
#include <linux/sys_soc.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_blend.h>
#include "omapdss.h"
#include "dss.h"
#include "dispc.h"
struct dispc_device;
/* DISPC */
#define DISPC_SZ_REGS SZ_4K
enum omap_burst_size {
BURST_SIZE_X2 = 0,
BURST_SIZE_X4 = 1,
BURST_SIZE_X8 = 2,
};
#define REG_GET(dispc, idx, start, end) \
FLD_GET(dispc_read_reg(dispc, idx), start, end)
#define REG_FLD_MOD(dispc, idx, val, start, end) \
dispc_write_reg(dispc, idx, \
FLD_MOD(dispc_read_reg(dispc, idx), val, start, end))
/* DISPC hardware feature ids */
enum dispc_feature_id {
FEAT_LCDENABLEPOL,
FEAT_LCDENABLESIGNAL,
FEAT_PCKFREEENABLE,
FEAT_FUNCGATED,
FEAT_MGR_LCD2,
FEAT_MGR_LCD3,
FEAT_LINEBUFFERSPLIT,
FEAT_ROWREPEATENABLE,
FEAT_RESIZECONF,
/* Independent core clk divider */
FEAT_CORE_CLK_DIV,
FEAT_HANDLE_UV_SEPARATE,
FEAT_ATTR2,
FEAT_CPR,
FEAT_PRELOAD,
FEAT_FIR_COEF_V,
FEAT_ALPHA_FIXED_ZORDER,
FEAT_ALPHA_FREE_ZORDER,
FEAT_FIFO_MERGE,
/* An unknown HW bug causing the normal FIFO thresholds not to work */
FEAT_OMAP3_DSI_FIFO_BUG,
FEAT_BURST_2D,
FEAT_MFLAG,
};
struct dispc_features {
u8 sw_start;
u8 fp_start;
u8 bp_start;
u16 sw_max;
u16 vp_max;
u16 hp_max;
u8 mgr_width_start;
u8 mgr_height_start;
u16 mgr_width_max;
u16 mgr_height_max;
u16 ovl_width_max;
u16 ovl_height_max;
unsigned long max_lcd_pclk;
unsigned long max_tv_pclk;
unsigned int max_downscale;
unsigned int max_line_width;
unsigned int min_pcd;
int (*calc_scaling)(struct dispc_device *dispc,
unsigned long pclk, unsigned long lclk,
const struct videomode *vm,
u16 width, u16 height, u16 out_width, u16 out_height,
u32 fourcc, bool *five_taps,
int *x_predecim, int *y_predecim, int *decim_x, int *decim_y,
u16 pos_x, unsigned long *core_clk, bool mem_to_mem);
unsigned long (*calc_core_clk) (unsigned long pclk,
u16 width, u16 height, u16 out_width, u16 out_height,
bool mem_to_mem);
u8 num_fifos;
const enum dispc_feature_id *features;
unsigned int num_features;
const struct dss_reg_field *reg_fields;
const unsigned int num_reg_fields;
const enum omap_overlay_caps *overlay_caps;
const u32 **supported_color_modes;
const u32 *supported_scaler_color_modes;
unsigned int num_mgrs;
unsigned int num_ovls;
unsigned int buffer_size_unit;
unsigned int burst_size_unit;
/* swap GFX & WB fifos */
bool gfx_fifo_workaround:1;
/* no DISPC_IRQ_FRAMEDONETV on this SoC */
bool no_framedone_tv:1;
/* revert to the OMAP4 mechanism of DISPC Smart Standby operation */
bool mstandby_workaround:1;
bool set_max_preload:1;
/* PIXEL_INC is not added to the last pixel of a line */
bool last_pixel_inc_missing:1;
/* POL_FREQ has ALIGN bit */
bool supports_sync_align:1;
bool has_writeback:1;
bool supports_double_pixel:1;
/*
* Field order for VENC is different from HDMI. We should handle this in
* some intelligent manner, but as the SoCs have either HDMI or VENC,
* never both, we can just use this flag for now.
*/
bool reverse_ilace_field_order:1;
bool has_gamma_table:1;
bool has_gamma_i734_bug:1;
};
#define DISPC_MAX_NR_FIFOS 5
#define DISPC_MAX_CHANNEL_GAMMA 4
struct dispc_device {
struct platform_device *pdev;
void __iomem *base;
struct dss_device *dss;
struct dss_debugfs_entry *debugfs;
int irq;
irq_handler_t user_handler;
void *user_data;
unsigned long core_clk_rate;
unsigned long tv_pclk_rate;
u32 fifo_size[DISPC_MAX_NR_FIFOS];
/* maps which plane is using a fifo. fifo-id -> plane-id */
int fifo_assignment[DISPC_MAX_NR_FIFOS];
bool ctx_valid;
u32 ctx[DISPC_SZ_REGS / sizeof(u32)];
u32 *gamma_table[DISPC_MAX_CHANNEL_GAMMA];
const struct dispc_features *feat;
bool is_enabled;
struct regmap *syscon_pol;
u32 syscon_pol_offset;
};
enum omap_color_component {
/* used for all color formats for OMAP3 and earlier
* and for RGB and Y color component on OMAP4
*/
DISPC_COLOR_COMPONENT_RGB_Y = 1 << 0,
/* used for UV component for
* DRM_FORMAT_YUYV, DRM_FORMAT_UYVY, DRM_FORMAT_NV12
* color formats on OMAP4
*/
DISPC_COLOR_COMPONENT_UV = 1 << 1,
};
enum mgr_reg_fields {
DISPC_MGR_FLD_ENABLE,
DISPC_MGR_FLD_STNTFT,
DISPC_MGR_FLD_GO,
DISPC_MGR_FLD_TFTDATALINES,
DISPC_MGR_FLD_STALLMODE,
DISPC_MGR_FLD_TCKENABLE,
DISPC_MGR_FLD_TCKSELECTION,
DISPC_MGR_FLD_CPR,
DISPC_MGR_FLD_FIFOHANDCHECK,
/* used to maintain a count of the above fields */
DISPC_MGR_FLD_NUM,
};
/* DISPC register field id */
enum dispc_feat_reg_field {
FEAT_REG_FIRHINC,
FEAT_REG_FIRVINC,
FEAT_REG_FIFOHIGHTHRESHOLD,
FEAT_REG_FIFOLOWTHRESHOLD,
FEAT_REG_FIFOSIZE,
FEAT_REG_HORIZONTALACCU,
FEAT_REG_VERTICALACCU,
};
struct dispc_reg_field {
u16 reg;
u8 high;
u8 low;
};
struct dispc_gamma_desc {
u32 len;
u32 bits;
u16 reg;
bool has_index;
};
static const struct {
const char *name;
u32 vsync_irq;
u32 framedone_irq;
u32 sync_lost_irq;
struct dispc_gamma_desc gamma;
struct dispc_reg_field reg_desc[DISPC_MGR_FLD_NUM];
} mgr_desc[] = {
[OMAP_DSS_CHANNEL_LCD] = {
.name = "LCD",
.vsync_irq = DISPC_IRQ_VSYNC,
.framedone_irq = DISPC_IRQ_FRAMEDONE,
.sync_lost_irq = DISPC_IRQ_SYNC_LOST,
.gamma = {
.len = 256,
.bits = 8,
.reg = DISPC_GAMMA_TABLE0,
.has_index = true,
},
.reg_desc = {
[DISPC_MGR_FLD_ENABLE] = { DISPC_CONTROL, 0, 0 },
[DISPC_MGR_FLD_STNTFT] = { DISPC_CONTROL, 3, 3 },
[DISPC_MGR_FLD_GO] = { DISPC_CONTROL, 5, 5 },
[DISPC_MGR_FLD_TFTDATALINES] = { DISPC_CONTROL, 9, 8 },
[DISPC_MGR_FLD_STALLMODE] = { DISPC_CONTROL, 11, 11 },
[DISPC_MGR_FLD_TCKENABLE] = { DISPC_CONFIG, 10, 10 },
[DISPC_MGR_FLD_TCKSELECTION] = { DISPC_CONFIG, 11, 11 },
[DISPC_MGR_FLD_CPR] = { DISPC_CONFIG, 15, 15 },
[DISPC_MGR_FLD_FIFOHANDCHECK] = { DISPC_CONFIG, 16, 16 },
},
},
[OMAP_DSS_CHANNEL_DIGIT] = {
.name = "DIGIT",
.vsync_irq = DISPC_IRQ_EVSYNC_ODD | DISPC_IRQ_EVSYNC_EVEN,
.framedone_irq = DISPC_IRQ_FRAMEDONETV,
.sync_lost_irq = DISPC_IRQ_SYNC_LOST_DIGIT,
.gamma = {
.len = 1024,
.bits = 10,
.reg = DISPC_GAMMA_TABLE2,
.has_index = false,
},
.reg_desc = {
[DISPC_MGR_FLD_ENABLE] = { DISPC_CONTROL, 1, 1 },
[DISPC_MGR_FLD_STNTFT] = { },
[DISPC_MGR_FLD_GO] = { DISPC_CONTROL, 6, 6 },
[DISPC_MGR_FLD_TFTDATALINES] = { },
[DISPC_MGR_FLD_STALLMODE] = { },
[DISPC_MGR_FLD_TCKENABLE] = { DISPC_CONFIG, 12, 12 },
[DISPC_MGR_FLD_TCKSELECTION] = { DISPC_CONFIG, 13, 13 },
[DISPC_MGR_FLD_CPR] = { },
[DISPC_MGR_FLD_FIFOHANDCHECK] = { DISPC_CONFIG, 16, 16 },
},
},
[OMAP_DSS_CHANNEL_LCD2] = {
.name = "LCD2",
.vsync_irq = DISPC_IRQ_VSYNC2,
.framedone_irq = DISPC_IRQ_FRAMEDONE2,
.sync_lost_irq = DISPC_IRQ_SYNC_LOST2,
.gamma = {
.len = 256,
.bits = 8,
.reg = DISPC_GAMMA_TABLE1,
.has_index = true,
},
.reg_desc = {
[DISPC_MGR_FLD_ENABLE] = { DISPC_CONTROL2, 0, 0 },
[DISPC_MGR_FLD_STNTFT] = { DISPC_CONTROL2, 3, 3 },
[DISPC_MGR_FLD_GO] = { DISPC_CONTROL2, 5, 5 },
[DISPC_MGR_FLD_TFTDATALINES] = { DISPC_CONTROL2, 9, 8 },
[DISPC_MGR_FLD_STALLMODE] = { DISPC_CONTROL2, 11, 11 },
[DISPC_MGR_FLD_TCKENABLE] = { DISPC_CONFIG2, 10, 10 },
[DISPC_MGR_FLD_TCKSELECTION] = { DISPC_CONFIG2, 11, 11 },
[DISPC_MGR_FLD_CPR] = { DISPC_CONFIG2, 15, 15 },
[DISPC_MGR_FLD_FIFOHANDCHECK] = { DISPC_CONFIG2, 16, 16 },
},
},
[OMAP_DSS_CHANNEL_LCD3] = {
.name = "LCD3",
.vsync_irq = DISPC_IRQ_VSYNC3,
.framedone_irq = DISPC_IRQ_FRAMEDONE3,
.sync_lost_irq = DISPC_IRQ_SYNC_LOST3,
.gamma = {
.len = 256,
.bits = 8,
.reg = DISPC_GAMMA_TABLE3,
.has_index = true,
},
.reg_desc = {
[DISPC_MGR_FLD_ENABLE] = { DISPC_CONTROL3, 0, 0 },
[DISPC_MGR_FLD_STNTFT] = { DISPC_CONTROL3, 3, 3 },
[DISPC_MGR_FLD_GO] = { DISPC_CONTROL3, 5, 5 },
[DISPC_MGR_FLD_TFTDATALINES] = { DISPC_CONTROL3, 9, 8 },
[DISPC_MGR_FLD_STALLMODE] = { DISPC_CONTROL3, 11, 11 },
[DISPC_MGR_FLD_TCKENABLE] = { DISPC_CONFIG3, 10, 10 },
[DISPC_MGR_FLD_TCKSELECTION] = { DISPC_CONFIG3, 11, 11 },
[DISPC_MGR_FLD_CPR] = { DISPC_CONFIG3, 15, 15 },
[DISPC_MGR_FLD_FIFOHANDCHECK] = { DISPC_CONFIG3, 16, 16 },
},
},
};
static unsigned long dispc_fclk_rate(struct dispc_device *dispc);
static unsigned long dispc_core_clk_rate(struct dispc_device *dispc);
static unsigned long dispc_mgr_lclk_rate(struct dispc_device *dispc,
enum omap_channel channel);
static unsigned long dispc_mgr_pclk_rate(struct dispc_device *dispc,
enum omap_channel channel);
static unsigned long dispc_plane_pclk_rate(struct dispc_device *dispc,
enum omap_plane_id plane);
static unsigned long dispc_plane_lclk_rate(struct dispc_device *dispc,
enum omap_plane_id plane);
static inline void dispc_write_reg(struct dispc_device *dispc, u16 idx, u32 val)
{
__raw_writel(val, dispc->base + idx);
}
static inline u32 dispc_read_reg(struct dispc_device *dispc, u16 idx)
{
return __raw_readl(dispc->base + idx);
}
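/* Access the per-manager register fields described by mgr_desc[].reg_desc[] */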
static u32 mgr_fld_read(struct dispc_device *dispc, enum omap_channel channel,
enum mgr_reg_fields regfld)
{
const struct dispc_reg_field *rfld = &mgr_desc[channel].reg_desc[regfld];
return REG_GET(dispc, rfld->reg, rfld->high, rfld->low);
}
static void mgr_fld_write(struct dispc_device *dispc, enum omap_channel channel,
enum mgr_reg_fields regfld, int val)
{
const struct dispc_reg_field *rfld = &mgr_desc[channel].reg_desc[regfld];
REG_FLD_MOD(dispc, rfld->reg, val, rfld->high, rfld->low);
}
int dispc_get_num_ovls(struct dispc_device *dispc)
{
return dispc->feat->num_ovls;
}
int dispc_get_num_mgrs(struct dispc_device *dispc)
{
return dispc->feat->num_mgrs;
}
static void dispc_get_reg_field(struct dispc_device *dispc,
enum dispc_feat_reg_field id,
u8 *start, u8 *end)
{
BUG_ON(id >= dispc->feat->num_reg_fields);
*start = dispc->feat->reg_fields[id].start;
*end = dispc->feat->reg_fields[id].end;
}
static bool dispc_has_feature(struct dispc_device *dispc,
enum dispc_feature_id id)
{
unsigned int i;
for (i = 0; i < dispc->feat->num_features; i++) {
if (dispc->feat->features[i] == id)
return true;
}
return false;
}
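/*
* Save (SR) / restore (RR) a single DISPC register to/from the
* dispc->ctx[] shadow copy used by dispc_save_context() and
* dispc_restore_context().
*/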
#define SR(dispc, reg) \
dispc->ctx[DISPC_##reg / sizeof(u32)] = dispc_read_reg(dispc, DISPC_##reg)
#define RR(dispc, reg) \
dispc_write_reg(dispc, DISPC_##reg, dispc->ctx[DISPC_##reg / sizeof(u32)])
static void dispc_save_context(struct dispc_device *dispc)
{
int i, j;
DSSDBG("dispc_save_context\n");
SR(dispc, IRQENABLE);
SR(dispc, CONTROL);
SR(dispc, CONFIG);
SR(dispc, LINE_NUMBER);
if (dispc_has_feature(dispc, FEAT_ALPHA_FIXED_ZORDER) ||
dispc_has_feature(dispc, FEAT_ALPHA_FREE_ZORDER))
SR(dispc, GLOBAL_ALPHA);
if (dispc_has_feature(dispc, FEAT_MGR_LCD2)) {
SR(dispc, CONTROL2);
SR(dispc, CONFIG2);
}
if (dispc_has_feature(dispc, FEAT_MGR_LCD3)) {
SR(dispc, CONTROL3);
SR(dispc, CONFIG3);
}
for (i = 0; i < dispc_get_num_mgrs(dispc); i++) {
SR(dispc, DEFAULT_COLOR(i));
SR(dispc, TRANS_COLOR(i));
SR(dispc, SIZE_MGR(i));
if (i == OMAP_DSS_CHANNEL_DIGIT)
continue;
SR(dispc, TIMING_H(i));
SR(dispc, TIMING_V(i));
SR(dispc, POL_FREQ(i));
SR(dispc, DIVISORo(i));
SR(dispc, DATA_CYCLE1(i));
SR(dispc, DATA_CYCLE2(i));
SR(dispc, DATA_CYCLE3(i));
if (dispc_has_feature(dispc, FEAT_CPR)) {
SR(dispc, CPR_COEF_R(i));
SR(dispc, CPR_COEF_G(i));
SR(dispc, CPR_COEF_B(i));
}
}
for (i = 0; i < dispc_get_num_ovls(dispc); i++) {
SR(dispc, OVL_BA0(i));
SR(dispc, OVL_BA1(i));
SR(dispc, OVL_POSITION(i));
SR(dispc, OVL_SIZE(i));
SR(dispc, OVL_ATTRIBUTES(i));
SR(dispc, OVL_FIFO_THRESHOLD(i));
SR(dispc, OVL_ROW_INC(i));
SR(dispc, OVL_PIXEL_INC(i));
if (dispc_has_feature(dispc, FEAT_PRELOAD))
SR(dispc, OVL_PRELOAD(i));
if (i == OMAP_DSS_GFX) {
SR(dispc, OVL_WINDOW_SKIP(i));
SR(dispc, OVL_TABLE_BA(i));
continue;
}
SR(dispc, OVL_FIR(i));
SR(dispc, OVL_PICTURE_SIZE(i));
SR(dispc, OVL_ACCU0(i));
SR(dispc, OVL_ACCU1(i));
for (j = 0; j < 8; j++)
SR(dispc, OVL_FIR_COEF_H(i, j));
for (j = 0; j < 8; j++)
SR(dispc, OVL_FIR_COEF_HV(i, j));
for (j = 0; j < 5; j++)
SR(dispc, OVL_CONV_COEF(i, j));
if (dispc_has_feature(dispc, FEAT_FIR_COEF_V)) {
for (j = 0; j < 8; j++)
SR(dispc, OVL_FIR_COEF_V(i, j));
}
if (dispc_has_feature(dispc, FEAT_HANDLE_UV_SEPARATE)) {
SR(dispc, OVL_BA0_UV(i));
SR(dispc, OVL_BA1_UV(i));
SR(dispc, OVL_FIR2(i));
SR(dispc, OVL_ACCU2_0(i));
SR(dispc, OVL_ACCU2_1(i));
for (j = 0; j < 8; j++)
SR(dispc, OVL_FIR_COEF_H2(i, j));
for (j = 0; j < 8; j++)
SR(dispc, OVL_FIR_COEF_HV2(i, j));
for (j = 0; j < 8; j++)
SR(dispc, OVL_FIR_COEF_V2(i, j));
}
if (dispc_has_feature(dispc, FEAT_ATTR2))
SR(dispc, OVL_ATTRIBUTES2(i));
}
if (dispc_has_feature(dispc, FEAT_CORE_CLK_DIV))
SR(dispc, DIVISOR);
dispc->ctx_valid = true;
DSSDBG("context saved\n");
}
static void dispc_restore_context(struct dispc_device *dispc)
{
int i, j;
DSSDBG("dispc_restore_context\n");
if (!dispc->ctx_valid)
return;
/*RR(dispc, IRQENABLE);*/
/*RR(dispc, CONTROL);*/
RR(dispc, CONFIG);
RR(dispc, LINE_NUMBER);
if (dispc_has_feature(dispc, FEAT_ALPHA_FIXED_ZORDER) ||
dispc_has_feature(dispc, FEAT_ALPHA_FREE_ZORDER))
RR(dispc, GLOBAL_ALPHA);
if (dispc_has_feature(dispc, FEAT_MGR_LCD2))
RR(dispc, CONFIG2);
if (dispc_has_feature(dispc, FEAT_MGR_LCD3))
RR(dispc, CONFIG3);
for (i = 0; i < dispc_get_num_mgrs(dispc); i++) {
RR(dispc, DEFAULT_COLOR(i));
RR(dispc, TRANS_COLOR(i));
RR(dispc, SIZE_MGR(i));
if (i == OMAP_DSS_CHANNEL_DIGIT)
continue;
RR(dispc, TIMING_H(i));
RR(dispc, TIMING_V(i));
RR(dispc, POL_FREQ(i));
RR(dispc, DIVISORo(i));
RR(dispc, DATA_CYCLE1(i));
RR(dispc, DATA_CYCLE2(i));
RR(dispc, DATA_CYCLE3(i));
if (dispc_has_feature(dispc, FEAT_CPR)) {
RR(dispc, CPR_COEF_R(i));
RR(dispc, CPR_COEF_G(i));
RR(dispc, CPR_COEF_B(i));
}
}
for (i = 0; i < dispc_get_num_ovls(dispc); i++) {
RR(dispc, OVL_BA0(i));
RR(dispc, OVL_BA1(i));
RR(dispc, OVL_POSITION(i));
RR(dispc, OVL_SIZE(i));
RR(dispc, OVL_ATTRIBUTES(i));
RR(dispc, OVL_FIFO_THRESHOLD(i));
RR(dispc, OVL_ROW_INC(i));
RR(dispc, OVL_PIXEL_INC(i));
if (dispc_has_feature(dispc, FEAT_PRELOAD))
RR(dispc, OVL_PRELOAD(i));
if (i == OMAP_DSS_GFX) {
RR(dispc, OVL_WINDOW_SKIP(i));
RR(dispc, OVL_TABLE_BA(i));
continue;
}
RR(dispc, OVL_FIR(i));
RR(dispc, OVL_PICTURE_SIZE(i));
RR(dispc, OVL_ACCU0(i));
RR(dispc, OVL_ACCU1(i));
for (j = 0; j < 8; j++)
RR(dispc, OVL_FIR_COEF_H(i, j));
for (j = 0; j < 8; j++)
RR(dispc, OVL_FIR_COEF_HV(i, j));
for (j = 0; j < 5; j++)
RR(dispc, OVL_CONV_COEF(i, j));
if (dispc_has_feature(dispc, FEAT_FIR_COEF_V)) {
for (j = 0; j < 8; j++)
RR(dispc, OVL_FIR_COEF_V(i, j));
}
if (dispc_has_feature(dispc, FEAT_HANDLE_UV_SEPARATE)) {
RR(dispc, OVL_BA0_UV(i));
RR(dispc, OVL_BA1_UV(i));
RR(dispc, OVL_FIR2(i));
RR(dispc, OVL_ACCU2_0(i));
RR(dispc, OVL_ACCU2_1(i));
for (j = 0; j < 8; j++)
RR(dispc, OVL_FIR_COEF_H2(i, j));
for (j = 0; j < 8; j++)
RR(dispc, OVL_FIR_COEF_HV2(i, j));
for (j = 0; j < 8; j++)
RR(dispc, OVL_FIR_COEF_V2(i, j));
}
if (dispc_has_feature(dispc, FEAT_ATTR2))
RR(dispc, OVL_ATTRIBUTES2(i));
}
if (dispc_has_feature(dispc, FEAT_CORE_CLK_DIV))
RR(dispc, DIVISOR);
/* enable last, because LCD & DIGIT enable are here */
RR(dispc, CONTROL);
if (dispc_has_feature(dispc, FEAT_MGR_LCD2))
RR(dispc, CONTROL2);
if (dispc_has_feature(dispc, FEAT_MGR_LCD3))
RR(dispc, CONTROL3);
/* clear spurious SYNC_LOST_DIGIT interrupts */
dispc_clear_irqstatus(dispc, DISPC_IRQ_SYNC_LOST_DIGIT);
/*
* enable last so IRQs won't trigger before
* the context is fully restored
*/
RR(dispc, IRQENABLE);
DSSDBG("context restored\n");
}
#undef SR
#undef RR
int dispc_runtime_get(struct dispc_device *dispc)
{
int r;
DSSDBG("dispc_runtime_get\n");
r = pm_runtime_get_sync(&dispc->pdev->dev);
if (WARN_ON(r < 0)) {
pm_runtime_put_noidle(&dispc->pdev->dev);
return r;
}
return 0;
}
void dispc_runtime_put(struct dispc_device *dispc)
{
int r;
DSSDBG("dispc_runtime_put\n");
r = pm_runtime_put_sync(&dispc->pdev->dev);
WARN_ON(r < 0 && r != -ENOSYS);
}
u32 dispc_mgr_get_vsync_irq(struct dispc_device *dispc,
enum omap_channel channel)
{
return mgr_desc[channel].vsync_irq;
}
u32 dispc_mgr_get_framedone_irq(struct dispc_device *dispc,
enum omap_channel channel)
{
if (channel == OMAP_DSS_CHANNEL_DIGIT && dispc->feat->no_framedone_tv)
return 0;
return mgr_desc[channel].framedone_irq;
}
u32 dispc_mgr_get_sync_lost_irq(struct dispc_device *dispc,
enum omap_channel channel)
{
return mgr_desc[channel].sync_lost_irq;
}
u32 dispc_wb_get_framedone_irq(struct dispc_device *dispc)
{
return DISPC_IRQ_FRAMEDONEWB;
}
void dispc_mgr_enable(struct dispc_device *dispc,
enum omap_channel channel, bool enable)
{
mgr_fld_write(dispc, channel, DISPC_MGR_FLD_ENABLE, enable);
/* flush posted write */
mgr_fld_read(dispc, channel, DISPC_MGR_FLD_ENABLE);
}
static bool dispc_mgr_is_enabled(struct dispc_device *dispc,
enum omap_channel channel)
{
return !!mgr_fld_read(dispc, channel, DISPC_MGR_FLD_ENABLE);
}
bool dispc_mgr_go_busy(struct dispc_device *dispc,
enum omap_channel channel)
{
return mgr_fld_read(dispc, channel, DISPC_MGR_FLD_GO) == 1;
}
void dispc_mgr_go(struct dispc_device *dispc, enum omap_channel channel)
{
WARN_ON(!dispc_mgr_is_enabled(dispc, channel));
WARN_ON(dispc_mgr_go_busy(dispc, channel));
DSSDBG("GO %s\n", mgr_desc[channel].name);
mgr_fld_write(dispc, channel, DISPC_MGR_FLD_GO, 1);
}
bool dispc_wb_go_busy(struct dispc_device *dispc)
{
return REG_GET(dispc, DISPC_CONTROL2, 6, 6) == 1;
}
void dispc_wb_go(struct dispc_device *dispc)
{
enum omap_plane_id plane = OMAP_DSS_WB;
bool enable, go;
enable = REG_GET(dispc, DISPC_OVL_ATTRIBUTES(plane), 0, 0) == 1;
if (!enable)
return;
go = REG_GET(dispc, DISPC_CONTROL2, 6, 6) == 1;
if (go) {
DSSERR("GO bit not down for WB\n");
return;
}
REG_FLD_MOD(dispc, DISPC_CONTROL2, 1, 6, 6);
}
static void dispc_ovl_write_firh_reg(struct dispc_device *dispc,
enum omap_plane_id plane, int reg,
u32 value)
{
dispc_write_reg(dispc, DISPC_OVL_FIR_COEF_H(plane, reg), value);
}
static void dispc_ovl_write_firhv_reg(struct dispc_device *dispc,
enum omap_plane_id plane, int reg,
u32 value)
{
dispc_write_reg(dispc, DISPC_OVL_FIR_COEF_HV(plane, reg), value);
}
static void dispc_ovl_write_firv_reg(struct dispc_device *dispc,
enum omap_plane_id plane, int reg,
u32 value)
{
dispc_write_reg(dispc, DISPC_OVL_FIR_COEF_V(plane, reg), value);
}
static void dispc_ovl_write_firh2_reg(struct dispc_device *dispc,
enum omap_plane_id plane, int reg,
u32 value)
{
BUG_ON(plane == OMAP_DSS_GFX);
dispc_write_reg(dispc, DISPC_OVL_FIR_COEF_H2(plane, reg), value);
}
static void dispc_ovl_write_firhv2_reg(struct dispc_device *dispc,
enum omap_plane_id plane, int reg,
u32 value)
{
BUG_ON(plane == OMAP_DSS_GFX);
dispc_write_reg(dispc, DISPC_OVL_FIR_COEF_HV2(plane, reg), value);
}
static void dispc_ovl_write_firv2_reg(struct dispc_device *dispc,
enum omap_plane_id plane, int reg,
u32 value)
{
BUG_ON(plane == OMAP_DSS_GFX);
dispc_write_reg(dispc, DISPC_OVL_FIR_COEF_V2(plane, reg), value);
}
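/*
* Program the 8-phase FIR scaling coefficients for either the RGB/Y or
* the UV color component of a plane.
*/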
static void dispc_ovl_set_scale_coef(struct dispc_device *dispc,
enum omap_plane_id plane, int fir_hinc,
int fir_vinc, int five_taps,
enum omap_color_component color_comp)
{
const struct dispc_coef *h_coef, *v_coef;
int i;
h_coef = dispc_ovl_get_scale_coef(fir_hinc, true);
v_coef = dispc_ovl_get_scale_coef(fir_vinc, five_taps);
if (!h_coef || !v_coef) {
dev_err(&dispc->pdev->dev, "%s: failed to find scale coefs\n",
__func__);
return;
}
for (i = 0; i < 8; i++) {
u32 h, hv;
h = FLD_VAL(h_coef[i].hc0_vc00, 7, 0)
| FLD_VAL(h_coef[i].hc1_vc0, 15, 8)
| FLD_VAL(h_coef[i].hc2_vc1, 23, 16)
| FLD_VAL(h_coef[i].hc3_vc2, 31, 24);
hv = FLD_VAL(h_coef[i].hc4_vc22, 7, 0)
| FLD_VAL(v_coef[i].hc1_vc0, 15, 8)
| FLD_VAL(v_coef[i].hc2_vc1, 23, 16)
| FLD_VAL(v_coef[i].hc3_vc2, 31, 24);
if (color_comp == DISPC_COLOR_COMPONENT_RGB_Y) {
dispc_ovl_write_firh_reg(dispc, plane, i, h);
dispc_ovl_write_firhv_reg(dispc, plane, i, hv);
} else {
dispc_ovl_write_firh2_reg(dispc, plane, i, h);
dispc_ovl_write_firhv2_reg(dispc, plane, i, hv);
}
}
if (five_taps) {
for (i = 0; i < 8; i++) {
u32 v;
v = FLD_VAL(v_coef[i].hc0_vc00, 7, 0)
| FLD_VAL(v_coef[i].hc4_vc22, 15, 8);
if (color_comp == DISPC_COLOR_COMPONENT_RGB_Y)
dispc_ovl_write_firv_reg(dispc, plane, i, v);
else
dispc_ovl_write_firv2_reg(dispc, plane, i, v);
}
}
}
struct csc_coef_yuv2rgb {
int ry, rcb, rcr, gy, gcb, gcr, by, bcb, bcr;
bool full_range;
};
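/*
* Write the YUV->RGB color conversion coefficients. CVAL() packs two
* signed 11-bit coefficients into one DISPC_OVL_CONV_COEF register.
*/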
static void dispc_ovl_write_color_conv_coef(struct dispc_device *dispc,
enum omap_plane_id plane,
const struct csc_coef_yuv2rgb *ct)
{
#define CVAL(x, y) (FLD_VAL(x, 26, 16) | FLD_VAL(y, 10, 0))
dispc_write_reg(dispc, DISPC_OVL_CONV_COEF(plane, 0), CVAL(ct->rcr, ct->ry));
dispc_write_reg(dispc, DISPC_OVL_CONV_COEF(plane, 1), CVAL(ct->gy, ct->rcb));
dispc_write_reg(dispc, DISPC_OVL_CONV_COEF(plane, 2), CVAL(ct->gcb, ct->gcr));
dispc_write_reg(dispc, DISPC_OVL_CONV_COEF(plane, 3), CVAL(ct->bcr, ct->by));
dispc_write_reg(dispc, DISPC_OVL_CONV_COEF(plane, 4), CVAL(0, ct->bcb));
REG_FLD_MOD(dispc, DISPC_OVL_ATTRIBUTES(plane), ct->full_range, 11, 11);
#undef CVAL
}
/* YUV -> RGB, ITU-R BT.601, full range */
static const struct csc_coef_yuv2rgb coefs_yuv2rgb_bt601_full = {
256, 0, 358, /* ry, rcb, rcr |1.000 0.000 1.402|*/
256, -88, -182, /* gy, gcb, gcr |1.000 -0.344 -0.714|*/
256, 452, 0, /* by, bcb, bcr |1.000 1.772 0.000|*/
true, /* full range */
};
/* YUV -> RGB, ITU-R BT.601, limited range */
static const struct csc_coef_yuv2rgb coefs_yuv2rgb_bt601_lim = {
298, 0, 409, /* ry, rcb, rcr |1.164 0.000 1.596|*/
298, -100, -208, /* gy, gcb, gcr |1.164 -0.392 -0.813|*/
298, 516, 0, /* by, bcb, bcr |1.164 2.017 0.000|*/
false, /* limited range */
};
/* YUV -> RGB, ITU-R BT.709, full range */
static const struct csc_coef_yuv2rgb coefs_yuv2rgb_bt709_full = {
256, 0, 402, /* ry, rcb, rcr |1.000 0.000 1.570|*/
256, -48, -120, /* gy, gcb, gcr |1.000 -0.187 -0.467|*/
256, 475, 0, /* by, bcb, bcr |1.000 1.856 0.000|*/
true, /* full range */
};
/* YUV -> RGB, ITU-R BT.709, limited range */
static const struct csc_coef_yuv2rgb coefs_yuv2rgb_bt709_lim = {
298, 0, 459, /* ry, rcb, rcr |1.164 0.000 1.793|*/
298, -55, -136, /* gy, gcb, gcr |1.164 -0.213 -0.533|*/
298, 541, 0, /* by, bcb, bcr |1.164 2.112 0.000|*/
false, /* limited range */
};
static void dispc_ovl_set_csc(struct dispc_device *dispc,
enum omap_plane_id plane,
enum drm_color_encoding color_encoding,
enum drm_color_range color_range)
{
const struct csc_coef_yuv2rgb *csc;
switch (color_encoding) {
default:
case DRM_COLOR_YCBCR_BT601:
if (color_range == DRM_COLOR_YCBCR_FULL_RANGE)
csc = &coefs_yuv2rgb_bt601_full;
else
csc = &coefs_yuv2rgb_bt601_lim;
break;
case DRM_COLOR_YCBCR_BT709:
if (color_range == DRM_COLOR_YCBCR_FULL_RANGE)
csc = &coefs_yuv2rgb_bt709_full;
else
csc = &coefs_yuv2rgb_bt709_lim;
break;
}
dispc_ovl_write_color_conv_coef(dispc, plane, csc);
}
static void dispc_ovl_set_ba0(struct dispc_device *dispc,
enum omap_plane_id plane, u32 paddr)
{
dispc_write_reg(dispc, DISPC_OVL_BA0(plane), paddr);
}
static void dispc_ovl_set_ba1(struct dispc_device *dispc,
enum omap_plane_id plane, u32 paddr)
{
dispc_write_reg(dispc, DISPC_OVL_BA1(plane), paddr);
}
static void dispc_ovl_set_ba0_uv(struct dispc_device *dispc,
enum omap_plane_id plane, u32 paddr)
{
dispc_write_reg(dispc, DISPC_OVL_BA0_UV(plane), paddr);
}
static void dispc_ovl_set_ba1_uv(struct dispc_device *dispc,
enum omap_plane_id plane, u32 paddr)
{
dispc_write_reg(dispc, DISPC_OVL_BA1_UV(plane), paddr);
}
static void dispc_ovl_set_pos(struct dispc_device *dispc,
enum omap_plane_id plane,
enum omap_overlay_caps caps, int x, int y)
{
u32 val;
if ((caps & OMAP_DSS_OVL_CAP_POS) == 0)
return;
val = FLD_VAL(y, 26, 16) | FLD_VAL(x, 10, 0);
dispc_write_reg(dispc, DISPC_OVL_POSITION(plane), val);
}
static void dispc_ovl_set_input_size(struct dispc_device *dispc,
enum omap_plane_id plane, int width,
int height)
{
u32 val = FLD_VAL(height - 1, 26, 16) | FLD_VAL(width - 1, 10, 0);
if (plane == OMAP_DSS_GFX || plane == OMAP_DSS_WB)
dispc_write_reg(dispc, DISPC_OVL_SIZE(plane), val);
else
dispc_write_reg(dispc, DISPC_OVL_PICTURE_SIZE(plane), val);
}
static void dispc_ovl_set_output_size(struct dispc_device *dispc,
enum omap_plane_id plane, int width,
int height)
{
u32 val;
BUG_ON(plane == OMAP_DSS_GFX);
val = FLD_VAL(height - 1, 26, 16) | FLD_VAL(width - 1, 10, 0);
if (plane == OMAP_DSS_WB)
dispc_write_reg(dispc, DISPC_OVL_PICTURE_SIZE(plane), val);
else
dispc_write_reg(dispc, DISPC_OVL_SIZE(plane), val);
}
static void dispc_ovl_set_zorder(struct dispc_device *dispc,
enum omap_plane_id plane,
enum omap_overlay_caps caps, u8 zorder)
{
if ((caps & OMAP_DSS_OVL_CAP_ZORDER) == 0)
return;
REG_FLD_MOD(dispc, DISPC_OVL_ATTRIBUTES(plane), zorder, 27, 26);
}
static void dispc_ovl_enable_zorder_planes(struct dispc_device *dispc)
{
int i;
if (!dispc_has_feature(dispc, FEAT_ALPHA_FREE_ZORDER))
return;
for (i = 0; i < dispc_get_num_ovls(dispc); i++)
REG_FLD_MOD(dispc, DISPC_OVL_ATTRIBUTES(i), 1, 25, 25);
}
static void dispc_ovl_set_pre_mult_alpha(struct dispc_device *dispc,
enum omap_plane_id plane,
enum omap_overlay_caps caps,
bool enable)
{
if ((caps & OMAP_DSS_OVL_CAP_PRE_MULT_ALPHA) == 0)
return;
REG_FLD_MOD(dispc, DISPC_OVL_ATTRIBUTES(plane), enable ? 1 : 0, 28, 28);
}
static void dispc_ovl_setup_global_alpha(struct dispc_device *dispc,
enum omap_plane_id plane,
enum omap_overlay_caps caps,
u8 global_alpha)
{
static const unsigned int shifts[] = { 0, 8, 16, 24, };
int shift;
if ((caps & OMAP_DSS_OVL_CAP_GLOBAL_ALPHA) == 0)
return;
shift = shifts[plane];
REG_FLD_MOD(dispc, DISPC_GLOBAL_ALPHA, global_alpha, shift + 7, shift);
}
static void dispc_ovl_set_pix_inc(struct dispc_device *dispc,
enum omap_plane_id plane, s32 inc)
{
dispc_write_reg(dispc, DISPC_OVL_PIXEL_INC(plane), inc);
}
static void dispc_ovl_set_row_inc(struct dispc_device *dispc,
enum omap_plane_id plane, s32 inc)
{
dispc_write_reg(dispc, DISPC_OVL_ROW_INC(plane), inc);
}
static void dispc_ovl_set_color_mode(struct dispc_device *dispc,
enum omap_plane_id plane, u32 fourcc)
{
u32 m = 0;
if (plane != OMAP_DSS_GFX) {
switch (fourcc) {
case DRM_FORMAT_NV12:
m = 0x0; break;
case DRM_FORMAT_XRGB4444:
m = 0x1; break;
case DRM_FORMAT_RGBA4444:
m = 0x2; break;
case DRM_FORMAT_RGBX4444:
m = 0x4; break;
case DRM_FORMAT_ARGB4444:
m = 0x5; break;
case DRM_FORMAT_RGB565:
m = 0x6; break;
case DRM_FORMAT_ARGB1555:
m = 0x7; break;
case DRM_FORMAT_XRGB8888:
m = 0x8; break;
case DRM_FORMAT_RGB888:
m = 0x9; break;
case DRM_FORMAT_YUYV:
m = 0xa; break;
case DRM_FORMAT_UYVY:
m = 0xb; break;
case DRM_FORMAT_ARGB8888:
m = 0xc; break;
case DRM_FORMAT_RGBA8888:
m = 0xd; break;
case DRM_FORMAT_RGBX8888:
m = 0xe; break;
case DRM_FORMAT_XRGB1555:
m = 0xf; break;
default:
BUG(); return;
}
} else {
switch (fourcc) {
case DRM_FORMAT_RGBX4444:
m = 0x4; break;
case DRM_FORMAT_ARGB4444:
m = 0x5; break;
case DRM_FORMAT_RGB565:
m = 0x6; break;
case DRM_FORMAT_ARGB1555:
m = 0x7; break;
case DRM_FORMAT_XRGB8888:
m = 0x8; break;
case DRM_FORMAT_RGB888:
m = 0x9; break;
case DRM_FORMAT_XRGB4444:
m = 0xa; break;
case DRM_FORMAT_RGBA4444:
m = 0xb; break;
case DRM_FORMAT_ARGB8888:
m = 0xc; break;
case DRM_FORMAT_RGBA8888:
m = 0xd; break;
case DRM_FORMAT_RGBX8888:
m = 0xe; break;
case DRM_FORMAT_XRGB1555:
m = 0xf; break;
default:
BUG(); return;
}
}
REG_FLD_MOD(dispc, DISPC_OVL_ATTRIBUTES(plane), m, 4, 1);
}
static void dispc_ovl_configure_burst_type(struct dispc_device *dispc,
enum omap_plane_id plane,
enum omap_dss_rotation_type rotation)
{
if (dispc_has_feature(dispc, FEAT_BURST_2D) == 0)
return;
if (rotation == OMAP_DSS_ROT_TILER)
REG_FLD_MOD(dispc, DISPC_OVL_ATTRIBUTES(plane), 1, 29, 29);
else
REG_FLD_MOD(dispc, DISPC_OVL_ATTRIBUTES(plane), 0, 29, 29);
}
static void dispc_ovl_set_channel_out(struct dispc_device *dispc,
enum omap_plane_id plane,
enum omap_channel channel)
{
int shift;
u32 val;
int chan = 0, chan2 = 0;
switch (plane) {
case OMAP_DSS_GFX:
shift = 8;
break;
case OMAP_DSS_VIDEO1:
case OMAP_DSS_VIDEO2:
case OMAP_DSS_VIDEO3:
shift = 16;
break;
default:
BUG();
return;
}
val = dispc_read_reg(dispc, DISPC_OVL_ATTRIBUTES(plane));
if (dispc_has_feature(dispc, FEAT_MGR_LCD2)) {
switch (channel) {
case OMAP_DSS_CHANNEL_LCD:
chan = 0;
chan2 = 0;
break;
case OMAP_DSS_CHANNEL_DIGIT:
chan = 1;
chan2 = 0;
break;
case OMAP_DSS_CHANNEL_LCD2:
chan = 0;
chan2 = 1;
break;
case OMAP_DSS_CHANNEL_LCD3:
if (dispc_has_feature(dispc, FEAT_MGR_LCD3)) {
chan = 0;
chan2 = 2;
} else {
BUG();
return;
}
break;
case OMAP_DSS_CHANNEL_WB:
chan = 0;
chan2 = 3;
break;
default:
BUG();
return;
}
val = FLD_MOD(val, chan, shift, shift);
val = FLD_MOD(val, chan2, 31, 30);
} else {
val = FLD_MOD(val, channel, shift, shift);
}
dispc_write_reg(dispc, DISPC_OVL_ATTRIBUTES(plane), val);
}
static enum omap_channel dispc_ovl_get_channel_out(struct dispc_device *dispc,
enum omap_plane_id plane)
{
int shift;
u32 val;
switch (plane) {
case OMAP_DSS_GFX:
shift = 8;
break;
case OMAP_DSS_VIDEO1:
case OMAP_DSS_VIDEO2:
case OMAP_DSS_VIDEO3:
shift = 16;
break;
default:
BUG();
return 0;
}
val = dispc_read_reg(dispc, DISPC_OVL_ATTRIBUTES(plane));
if (FLD_GET(val, shift, shift) == 1)
return OMAP_DSS_CHANNEL_DIGIT;
if (!dispc_has_feature(dispc, FEAT_MGR_LCD2))
return OMAP_DSS_CHANNEL_LCD;
switch (FLD_GET(val, 31, 30)) {
case 0:
default:
return OMAP_DSS_CHANNEL_LCD;
case 1:
return OMAP_DSS_CHANNEL_LCD2;
case 2:
return OMAP_DSS_CHANNEL_LCD3;
case 3:
return OMAP_DSS_CHANNEL_WB;
}
}
static void dispc_ovl_set_burst_size(struct dispc_device *dispc,
enum omap_plane_id plane,
enum omap_burst_size burst_size)
{
static const unsigned int shifts[] = { 6, 14, 14, 14, 14, };
int shift;
shift = shifts[plane];
REG_FLD_MOD(dispc, DISPC_OVL_ATTRIBUTES(plane), burst_size,
shift + 1, shift);
}
static void dispc_configure_burst_sizes(struct dispc_device *dispc)
{
int i;
const int burst_size = BURST_SIZE_X8;
/* Configure burst size always to maximum size */
for (i = 0; i < dispc_get_num_ovls(dispc); ++i)
dispc_ovl_set_burst_size(dispc, i, burst_size);
if (dispc->feat->has_writeback)
dispc_ovl_set_burst_size(dispc, OMAP_DSS_WB, burst_size);
}
static u32 dispc_ovl_get_burst_size(struct dispc_device *dispc,
enum omap_plane_id plane)
{
/* burst multiplier is always x8 (see dispc_configure_burst_sizes()) */
return dispc->feat->burst_size_unit * 8;
}
bool dispc_ovl_color_mode_supported(struct dispc_device *dispc,
enum omap_plane_id plane, u32 fourcc)
{
const u32 *modes;
unsigned int i;
modes = dispc->feat->supported_color_modes[plane];
for (i = 0; modes[i]; ++i) {
if (modes[i] == fourcc)
return true;
}
return false;
}
const u32 *dispc_ovl_get_color_modes(struct dispc_device *dispc,
enum omap_plane_id plane)
{
return dispc->feat->supported_color_modes[plane];
}
static void dispc_mgr_enable_cpr(struct dispc_device *dispc,
enum omap_channel channel, bool enable)
{
if (channel == OMAP_DSS_CHANNEL_DIGIT)
return;
mgr_fld_write(dispc, channel, DISPC_MGR_FLD_CPR, enable);
}
static void dispc_mgr_set_cpr_coef(struct dispc_device *dispc,
enum omap_channel channel,
const struct omap_dss_cpr_coefs *coefs)
{
u32 coef_r, coef_g, coef_b;
if (!dss_mgr_is_lcd(channel))
return;
coef_r = FLD_VAL(coefs->rr, 31, 22) | FLD_VAL(coefs->rg, 20, 11) |
FLD_VAL(coefs->rb, 9, 0);
coef_g = FLD_VAL(coefs->gr, 31, 22) | FLD_VAL(coefs->gg, 20, 11) |
FLD_VAL(coefs->gb, 9, 0);
coef_b = FLD_VAL(coefs->br, 31, 22) | FLD_VAL(coefs->bg, 20, 11) |
FLD_VAL(coefs->bb, 9, 0);
dispc_write_reg(dispc, DISPC_CPR_COEF_R(channel), coef_r);
dispc_write_reg(dispc, DISPC_CPR_COEF_G(channel), coef_g);
dispc_write_reg(dispc, DISPC_CPR_COEF_B(channel), coef_b);
}
static void dispc_ovl_set_vid_color_conv(struct dispc_device *dispc,
enum omap_plane_id plane, bool enable)
{
u32 val;
BUG_ON(plane == OMAP_DSS_GFX);
val = dispc_read_reg(dispc, DISPC_OVL_ATTRIBUTES(plane));
val = FLD_MOD(val, enable, 9, 9);
dispc_write_reg(dispc, DISPC_OVL_ATTRIBUTES(plane), val);
}
static void dispc_ovl_enable_replication(struct dispc_device *dispc,
enum omap_plane_id plane,
enum omap_overlay_caps caps,
bool enable)
{
static const unsigned int shifts[] = { 5, 10, 10, 10 };
int shift;
if ((caps & OMAP_DSS_OVL_CAP_REPLICATION) == 0)
return;
shift = shifts[plane];
REG_FLD_MOD(dispc, DISPC_OVL_ATTRIBUTES(plane), enable, shift, shift);
}
static void dispc_mgr_set_size(struct dispc_device *dispc,
enum omap_channel channel, u16 width, u16 height)
{
u32 val;
val = FLD_VAL(height - 1, dispc->feat->mgr_height_start, 16) |
FLD_VAL(width - 1, dispc->feat->mgr_width_start, 0);
dispc_write_reg(dispc, DISPC_SIZE_MGR(channel), val);
}
static void dispc_init_fifos(struct dispc_device *dispc)
{
u32 size;
int fifo;
u8 start, end;
u32 unit;
int i;
unit = dispc->feat->buffer_size_unit;
dispc_get_reg_field(dispc, FEAT_REG_FIFOSIZE, &start, &end);
for (fifo = 0; fifo < dispc->feat->num_fifos; ++fifo) {
size = REG_GET(dispc, DISPC_OVL_FIFO_SIZE_STATUS(fifo),
start, end);
size *= unit;
dispc->fifo_size[fifo] = size;
/*
* By default fifos are mapped directly to overlays, fifo 0 to
* ovl 0, fifo 1 to ovl 1, etc.
*/
dispc->fifo_assignment[fifo] = fifo;
}
/*
* The GFX fifo on OMAP4 is smaller than the other fifos. The small fifo
* causes problems with certain use cases, like using the tiler in 2D
* mode. The below hack swaps the fifos of GFX and WB planes, thus
* giving the GFX plane a larger fifo. WB should still work fine with the
* smaller fifo.
*/
if (dispc->feat->gfx_fifo_workaround) {
u32 v;
v = dispc_read_reg(dispc, DISPC_GLOBAL_BUFFER);
v = FLD_MOD(v, 4, 2, 0); /* GFX BUF top to WB */
v = FLD_MOD(v, 4, 5, 3); /* GFX BUF bottom to WB */
v = FLD_MOD(v, 0, 26, 24); /* WB BUF top to GFX */
v = FLD_MOD(v, 0, 29, 27); /* WB BUF bottom to GFX */
dispc_write_reg(dispc, DISPC_GLOBAL_BUFFER, v);
dispc->fifo_assignment[OMAP_DSS_GFX] = OMAP_DSS_WB;
dispc->fifo_assignment[OMAP_DSS_WB] = OMAP_DSS_GFX;
}
/*
* Setup default fifo thresholds.
*/
for (i = 0; i < dispc_get_num_ovls(dispc); ++i) {
u32 low, high;
const bool use_fifomerge = false;
const bool manual_update = false;
dispc_ovl_compute_fifo_thresholds(dispc, i, &low, &high,
use_fifomerge, manual_update);
dispc_ovl_set_fifo_threshold(dispc, i, low, high);
}
if (dispc->feat->has_writeback) {
u32 low, high;
const bool use_fifomerge = false;
const bool manual_update = false;
dispc_ovl_compute_fifo_thresholds(dispc, OMAP_DSS_WB,
&low, &high, use_fifomerge,
manual_update);
dispc_ovl_set_fifo_threshold(dispc, OMAP_DSS_WB, low, high);
}
}
static u32 dispc_ovl_get_fifo_size(struct dispc_device *dispc,
enum omap_plane_id plane)
{
int fifo;
u32 size = 0;
for (fifo = 0; fifo < dispc->feat->num_fifos; ++fifo) {
if (dispc->fifo_assignment[fifo] == plane)
size += dispc->fifo_size[fifo];
}
return size;
}
void dispc_ovl_set_fifo_threshold(struct dispc_device *dispc,
enum omap_plane_id plane,
u32 low, u32 high)
{
u8 hi_start, hi_end, lo_start, lo_end;
u32 unit;
unit = dispc->feat->buffer_size_unit;
WARN_ON(low % unit != 0);
WARN_ON(high % unit != 0);
low /= unit;
high /= unit;
dispc_get_reg_field(dispc, FEAT_REG_FIFOHIGHTHRESHOLD,
&hi_start, &hi_end);
dispc_get_reg_field(dispc, FEAT_REG_FIFOLOWTHRESHOLD,
&lo_start, &lo_end);
DSSDBG("fifo(%d) threshold (bytes), old %u/%u, new %u/%u\n",
plane,
REG_GET(dispc, DISPC_OVL_FIFO_THRESHOLD(plane),
lo_start, lo_end) * unit,
REG_GET(dispc, DISPC_OVL_FIFO_THRESHOLD(plane),
hi_start, hi_end) * unit,
low * unit, high * unit);
dispc_write_reg(dispc, DISPC_OVL_FIFO_THRESHOLD(plane),
FLD_VAL(high, hi_start, hi_end) |
FLD_VAL(low, lo_start, lo_end));
/*
* Configure the preload to the pipeline's high threshold. If the high
* threshold is too large for the preload field, set the preload to the
* maximum value that the preload register can hold.
*/
if (dispc_has_feature(dispc, FEAT_PRELOAD) &&
dispc->feat->set_max_preload && plane != OMAP_DSS_WB)
dispc_write_reg(dispc, DISPC_OVL_PRELOAD(plane),
min(high, 0xfffu));
}
void dispc_enable_fifomerge(struct dispc_device *dispc, bool enable)
{
if (!dispc_has_feature(dispc, FEAT_FIFO_MERGE)) {
WARN_ON(enable);
return;
}
DSSDBG("FIFO merge %s\n", enable ? "enabled" : "disabled");
REG_FLD_MOD(dispc, DISPC_CONFIG, enable ? 1 : 0, 14, 14);
}
void dispc_ovl_compute_fifo_thresholds(struct dispc_device *dispc,
enum omap_plane_id plane,
u32 *fifo_low, u32 *fifo_high,
bool use_fifomerge, bool manual_update)
{
/*
* All sizes are in bytes. Both the buffer and burst are made of
* buffer_units, and the fifo thresholds must be buffer_unit aligned.
*/
unsigned int buf_unit = dispc->feat->buffer_size_unit;
unsigned int ovl_fifo_size, total_fifo_size, burst_size;
int i;
burst_size = dispc_ovl_get_burst_size(dispc, plane);
ovl_fifo_size = dispc_ovl_get_fifo_size(dispc, plane);
if (use_fifomerge) {
total_fifo_size = 0;
for (i = 0; i < dispc_get_num_ovls(dispc); ++i)
total_fifo_size += dispc_ovl_get_fifo_size(dispc, i);
} else {
total_fifo_size = ovl_fifo_size;
}
/*
* We use the same low threshold for both fifomerge and non-fifomerge
* cases, but for fifomerge we calculate the high threshold using the
* combined fifo size
*/
if (manual_update && dispc_has_feature(dispc, FEAT_OMAP3_DSI_FIFO_BUG)) {
*fifo_low = ovl_fifo_size - burst_size * 2;
*fifo_high = total_fifo_size - burst_size;
} else if (plane == OMAP_DSS_WB) {
/*
* The optimal configuration for writeback is to push data out to
* the interconnect as soon as writeback has enough pixels in the
* FIFO to form a burst.
*/
*fifo_low = 0;
*fifo_high = burst_size;
} else {
*fifo_low = ovl_fifo_size - burst_size;
*fifo_high = total_fifo_size - buf_unit;
}
}
static void dispc_ovl_set_mflag(struct dispc_device *dispc,
enum omap_plane_id plane, bool enable)
{
int bit;
if (plane == OMAP_DSS_GFX)
bit = 14;
else
bit = 23;
REG_FLD_MOD(dispc, DISPC_OVL_ATTRIBUTES(plane), enable, bit, bit);
}
static void dispc_ovl_set_mflag_threshold(struct dispc_device *dispc,
enum omap_plane_id plane,
int low, int high)
{
dispc_write_reg(dispc, DISPC_OVL_MFLAG_THRESHOLD(plane),
FLD_VAL(high, 31, 16) | FLD_VAL(low, 15, 0));
}
static void dispc_init_mflag(struct dispc_device *dispc)
{
int i;
/*
* HACK: NV12 color format and MFLAG seem to have problems working
* together: using two displays, and having an NV12 overlay on one of
* the displays will cause underflows/synclosts when MFLAG_CTRL=2.
* Changing MFLAG thresholds and PRELOAD to certain values seems to
* remove the errors, but there doesn't seem to be any clear logic on
* which values work and which do not.
*
* As a work-around, force MFLAG to be always on.
*/
dispc_write_reg(dispc, DISPC_GLOBAL_MFLAG_ATTRIBUTE,
(1 << 0) | /* MFLAG_CTRL = force always on */
(0 << 2)); /* MFLAG_START = disable */
for (i = 0; i < dispc_get_num_ovls(dispc); ++i) {
u32 size = dispc_ovl_get_fifo_size(dispc, i);
u32 unit = dispc->feat->buffer_size_unit;
u32 low, high;
dispc_ovl_set_mflag(dispc, i, true);
/*
* The simulation team suggests the thresholds below:
* HT = fifosize * 5 / 8;
* LT = fifosize * 4 / 8;
*/
low = size * 4 / 8 / unit;
high = size * 5 / 8 / unit;
dispc_ovl_set_mflag_threshold(dispc, i, low, high);
}
if (dispc->feat->has_writeback) {
u32 size = dispc_ovl_get_fifo_size(dispc, OMAP_DSS_WB);
u32 unit = dispc->feat->buffer_size_unit;
u32 low, high;
dispc_ovl_set_mflag(dispc, OMAP_DSS_WB, true);
/*
* The simulation team suggests the thresholds below:
* HT = fifosize * 5 / 8;
* LT = fifosize * 4 / 8;
*/
low = size * 4 / 8 / unit;
high = size * 5 / 8 / unit;
dispc_ovl_set_mflag_threshold(dispc, OMAP_DSS_WB, low, high);
}
}
static void dispc_ovl_set_fir(struct dispc_device *dispc,
enum omap_plane_id plane,
int hinc, int vinc,
enum omap_color_component color_comp)
{
u32 val;
if (color_comp == DISPC_COLOR_COMPONENT_RGB_Y) {
u8 hinc_start, hinc_end, vinc_start, vinc_end;
dispc_get_reg_field(dispc, FEAT_REG_FIRHINC,
&hinc_start, &hinc_end);
dispc_get_reg_field(dispc, FEAT_REG_FIRVINC,
&vinc_start, &vinc_end);
val = FLD_VAL(vinc, vinc_start, vinc_end) |
FLD_VAL(hinc, hinc_start, hinc_end);
dispc_write_reg(dispc, DISPC_OVL_FIR(plane), val);
} else {
val = FLD_VAL(vinc, 28, 16) | FLD_VAL(hinc, 12, 0);
dispc_write_reg(dispc, DISPC_OVL_FIR2(plane), val);
}
}
static void dispc_ovl_set_vid_accu0(struct dispc_device *dispc,
enum omap_plane_id plane, int haccu,
int vaccu)
{
u32 val;
u8 hor_start, hor_end, vert_start, vert_end;
dispc_get_reg_field(dispc, FEAT_REG_HORIZONTALACCU,
&hor_start, &hor_end);
dispc_get_reg_field(dispc, FEAT_REG_VERTICALACCU,
&vert_start, &vert_end);
val = FLD_VAL(vaccu, vert_start, vert_end) |
FLD_VAL(haccu, hor_start, hor_end);
dispc_write_reg(dispc, DISPC_OVL_ACCU0(plane), val);
}
static void dispc_ovl_set_vid_accu1(struct dispc_device *dispc,
enum omap_plane_id plane, int haccu,
int vaccu)
{
u32 val;
u8 hor_start, hor_end, vert_start, vert_end;
dispc_get_reg_field(dispc, FEAT_REG_HORIZONTALACCU,
&hor_start, &hor_end);
dispc_get_reg_field(dispc, FEAT_REG_VERTICALACCU,
&vert_start, &vert_end);
val = FLD_VAL(vaccu, vert_start, vert_end) |
FLD_VAL(haccu, hor_start, hor_end);
dispc_write_reg(dispc, DISPC_OVL_ACCU1(plane), val);
}
static void dispc_ovl_set_vid_accu2_0(struct dispc_device *dispc,
enum omap_plane_id plane, int haccu,
int vaccu)
{
u32 val;
val = FLD_VAL(vaccu, 26, 16) | FLD_VAL(haccu, 10, 0);
dispc_write_reg(dispc, DISPC_OVL_ACCU2_0(plane), val);
}
static void dispc_ovl_set_vid_accu2_1(struct dispc_device *dispc,
enum omap_plane_id plane, int haccu,
int vaccu)
{
u32 val;
val = FLD_VAL(vaccu, 26, 16) | FLD_VAL(haccu, 10, 0);
dispc_write_reg(dispc, DISPC_OVL_ACCU2_1(plane), val);
}
static void dispc_ovl_set_scale_param(struct dispc_device *dispc,
enum omap_plane_id plane,
u16 orig_width, u16 orig_height,
u16 out_width, u16 out_height,
bool five_taps, u8 rotation,
enum omap_color_component color_comp)
{
int fir_hinc, fir_vinc;
fir_hinc = 1024 * orig_width / out_width;
fir_vinc = 1024 * orig_height / out_height;
dispc_ovl_set_scale_coef(dispc, plane, fir_hinc, fir_vinc, five_taps,
color_comp);
dispc_ovl_set_fir(dispc, plane, fir_hinc, fir_vinc, color_comp);
}
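/*
* Set the initial chroma accumulator values (ACCU2_0/ACCU2_1) so that the
* resampled UV plane stays aligned with the Y plane for the given rotation
* and interlacing mode.
*/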
static void dispc_ovl_set_accu_uv(struct dispc_device *dispc,
enum omap_plane_id plane,
u16 orig_width, u16 orig_height,
u16 out_width, u16 out_height,
bool ilace, u32 fourcc, u8 rotation)
{
int h_accu2_0, h_accu2_1;
int v_accu2_0, v_accu2_1;
int chroma_hinc, chroma_vinc;
int idx;
struct accu {
s8 h0_m, h0_n;
s8 h1_m, h1_n;
s8 v0_m, v0_n;
s8 v1_m, v1_n;
};
const struct accu *accu_table;
const struct accu *accu_val;
static const struct accu accu_nv12[4] = {
{ 0, 1, 0, 1 , -1, 2, 0, 1 },
{ 1, 2, -3, 4 , 0, 1, 0, 1 },
{ -1, 1, 0, 1 , -1, 2, 0, 1 },
{ -1, 2, -1, 2 , -1, 1, 0, 1 },
};
static const struct accu accu_nv12_ilace[4] = {
{ 0, 1, 0, 1 , -3, 4, -1, 4 },
{ -1, 4, -3, 4 , 0, 1, 0, 1 },
{ -1, 1, 0, 1 , -1, 4, -3, 4 },
{ -3, 4, -3, 4 , -1, 1, 0, 1 },
};
static const struct accu accu_yuv[4] = {
{ 0, 1, 0, 1, 0, 1, 0, 1 },
{ 0, 1, 0, 1, 0, 1, 0, 1 },
{ -1, 1, 0, 1, 0, 1, 0, 1 },
{ 0, 1, 0, 1, -1, 1, 0, 1 },
};
/* Note: DSS HW rotates clockwise, DRM_MODE_ROTATE_* counter-clockwise */
switch (rotation & DRM_MODE_ROTATE_MASK) {
default:
case DRM_MODE_ROTATE_0:
idx = 0;
break;
case DRM_MODE_ROTATE_90:
idx = 3;
break;
case DRM_MODE_ROTATE_180:
idx = 2;
break;
case DRM_MODE_ROTATE_270:
idx = 1;
break;
}
switch (fourcc) {
case DRM_FORMAT_NV12:
if (ilace)
accu_table = accu_nv12_ilace;
else
accu_table = accu_nv12;
break;
case DRM_FORMAT_YUYV:
case DRM_FORMAT_UYVY:
accu_table = accu_yuv;
break;
default:
BUG();
return;
}
accu_val = &accu_table[idx];
chroma_hinc = 1024 * orig_width / out_width;
chroma_vinc = 1024 * orig_height / out_height;
h_accu2_0 = (accu_val->h0_m * chroma_hinc / accu_val->h0_n) % 1024;
h_accu2_1 = (accu_val->h1_m * chroma_hinc / accu_val->h1_n) % 1024;
v_accu2_0 = (accu_val->v0_m * chroma_vinc / accu_val->v0_n) % 1024;
v_accu2_1 = (accu_val->v1_m * chroma_vinc / accu_val->v1_n) % 1024;
dispc_ovl_set_vid_accu2_0(dispc, plane, h_accu2_0, v_accu2_0);
dispc_ovl_set_vid_accu2_1(dispc, plane, h_accu2_1, v_accu2_1);
}
static void dispc_ovl_set_scaling_common(struct dispc_device *dispc,
enum omap_plane_id plane,
u16 orig_width, u16 orig_height,
u16 out_width, u16 out_height,
bool ilace, bool five_taps,
bool fieldmode, u32 fourcc,
u8 rotation)
{
int accu0 = 0;
int accu1 = 0;
u32 l;
dispc_ovl_set_scale_param(dispc, plane, orig_width, orig_height,
out_width, out_height, five_taps,
rotation, DISPC_COLOR_COMPONENT_RGB_Y);
l = dispc_read_reg(dispc, DISPC_OVL_ATTRIBUTES(plane));
/* RESIZEENABLE and VERTICALTAPS */
l &= ~((0x3 << 5) | (0x1 << 21));
l |= (orig_width != out_width) ? (1 << 5) : 0;
l |= (orig_height != out_height) ? (1 << 6) : 0;
l |= five_taps ? (1 << 21) : 0;
/* VRESIZECONF and HRESIZECONF */
if (dispc_has_feature(dispc, FEAT_RESIZECONF)) {
l &= ~(0x3 << 7);
l |= (orig_width <= out_width) ? 0 : (1 << 7);
l |= (orig_height <= out_height) ? 0 : (1 << 8);
}
/* LINEBUFFERSPLIT */
if (dispc_has_feature(dispc, FEAT_LINEBUFFERSPLIT)) {
l &= ~(0x1 << 22);
l |= five_taps ? (1 << 22) : 0;
}
dispc_write_reg(dispc, DISPC_OVL_ATTRIBUTES(plane), l);
/*
* field 0 = even field = bottom field
* field 1 = odd field = top field
*/
if (ilace && !fieldmode) {
accu1 = 0;
accu0 = ((1024 * orig_height / out_height) / 2) & 0x3ff;
if (accu0 >= 1024/2) {
accu1 = 1024/2;
accu0 -= accu1;
}
}
dispc_ovl_set_vid_accu0(dispc, plane, 0, accu0);
dispc_ovl_set_vid_accu1(dispc, plane, 0, accu1);
}
static void dispc_ovl_set_scaling_uv(struct dispc_device *dispc,
enum omap_plane_id plane,
u16 orig_width, u16 orig_height,
u16 out_width, u16 out_height,
bool ilace, bool five_taps,
bool fieldmode, u32 fourcc,
u8 rotation)
{
int scale_x = out_width != orig_width;
int scale_y = out_height != orig_height;
bool chroma_upscale = plane != OMAP_DSS_WB;
const struct drm_format_info *info;
info = drm_format_info(fourcc);
if (!dispc_has_feature(dispc, FEAT_HANDLE_UV_SEPARATE))
return;
if (!info->is_yuv) {
/* reset chroma resampling for RGB formats */
if (plane != OMAP_DSS_WB)
REG_FLD_MOD(dispc, DISPC_OVL_ATTRIBUTES2(plane),
0, 8, 8);
return;
}
dispc_ovl_set_accu_uv(dispc, plane, orig_width, orig_height, out_width,
out_height, ilace, fourcc, rotation);
switch (fourcc) {
case DRM_FORMAT_NV12:
if (chroma_upscale) {
/* UV is subsampled by 2 horizontally and vertically */
orig_height >>= 1;
orig_width >>= 1;
} else {
/* UV is downsampled by 2 horizontally and vertically */
orig_height <<= 1;
orig_width <<= 1;
}
break;
case DRM_FORMAT_YUYV:
case DRM_FORMAT_UYVY:
/* For YUV422 with 90/270 rotation, we don't upsample chroma */
if (!drm_rotation_90_or_270(rotation)) {
if (chroma_upscale)
/* UV is subsampled by 2 horizontally */
orig_width >>= 1;
else
/* UV is downsampled by 2 horizontally */
orig_width <<= 1;
}
/* must use FIR for YUV422 if rotated */
if ((rotation & DRM_MODE_ROTATE_MASK) != DRM_MODE_ROTATE_0)
scale_x = scale_y = true;
break;
default:
BUG();
return;
}
if (out_width != orig_width)
scale_x = true;
if (out_height != orig_height)
scale_y = true;
dispc_ovl_set_scale_param(dispc, plane, orig_width, orig_height,
out_width, out_height, five_taps,
rotation, DISPC_COLOR_COMPONENT_UV);
if (plane != OMAP_DSS_WB)
REG_FLD_MOD(dispc, DISPC_OVL_ATTRIBUTES2(plane),
(scale_x || scale_y) ? 1 : 0, 8, 8);
/* set H scaling */
REG_FLD_MOD(dispc, DISPC_OVL_ATTRIBUTES(plane), scale_x ? 1 : 0, 5, 5);
/* set V scaling */
REG_FLD_MOD(dispc, DISPC_OVL_ATTRIBUTES(plane), scale_y ? 1 : 0, 6, 6);
}
static void dispc_ovl_set_scaling(struct dispc_device *dispc,
enum omap_plane_id plane,
u16 orig_width, u16 orig_height,
u16 out_width, u16 out_height,
bool ilace, bool five_taps,
bool fieldmode, u32 fourcc,
u8 rotation)
{
BUG_ON(plane == OMAP_DSS_GFX);
dispc_ovl_set_scaling_common(dispc, plane, orig_width, orig_height,
out_width, out_height, ilace, five_taps,
fieldmode, fourcc, rotation);
dispc_ovl_set_scaling_uv(dispc, plane, orig_width, orig_height,
out_width, out_height, ilace, five_taps,
fieldmode, fourcc, rotation);
}
static void dispc_ovl_set_rotation_attrs(struct dispc_device *dispc,
enum omap_plane_id plane, u8 rotation,
enum omap_dss_rotation_type rotation_type,
u32 fourcc)
{
bool row_repeat = false;
int vidrot = 0;
/* Note: DSS HW rotates clockwise, DRM_MODE_ROTATE_* counter-clockwise */
if (fourcc == DRM_FORMAT_YUYV || fourcc == DRM_FORMAT_UYVY) {
if (rotation & DRM_MODE_REFLECT_X) {
switch (rotation & DRM_MODE_ROTATE_MASK) {
case DRM_MODE_ROTATE_0:
vidrot = 2;
break;
case DRM_MODE_ROTATE_90:
vidrot = 1;
break;
case DRM_MODE_ROTATE_180:
vidrot = 0;
break;
case DRM_MODE_ROTATE_270:
vidrot = 3;
break;
}
} else {
switch (rotation & DRM_MODE_ROTATE_MASK) {
case DRM_MODE_ROTATE_0:
vidrot = 0;
break;
case DRM_MODE_ROTATE_90:
vidrot = 3;
break;
case DRM_MODE_ROTATE_180:
vidrot = 2;
break;
case DRM_MODE_ROTATE_270:
vidrot = 1;
break;
}
}
if (drm_rotation_90_or_270(rotation))
row_repeat = true;
else
row_repeat = false;
}
/*
* OMAP4/5 Errata i631:
* NV12 in 1D mode must use ROTATION=1. Otherwise DSS will fetch extra
* rows beyond the framebuffer, which may cause OCP error.
*/
if (fourcc == DRM_FORMAT_NV12 && rotation_type != OMAP_DSS_ROT_TILER)
vidrot = 1;
REG_FLD_MOD(dispc, DISPC_OVL_ATTRIBUTES(plane), vidrot, 13, 12);
if (dispc_has_feature(dispc, FEAT_ROWREPEATENABLE))
REG_FLD_MOD(dispc, DISPC_OVL_ATTRIBUTES(plane),
row_repeat ? 1 : 0, 18, 18);
if (dispc_ovl_color_mode_supported(dispc, plane, DRM_FORMAT_NV12)) {
bool doublestride =
fourcc == DRM_FORMAT_NV12 &&
rotation_type == OMAP_DSS_ROT_TILER &&
!drm_rotation_90_or_270(rotation);
/* DOUBLESTRIDE */
REG_FLD_MOD(dispc, DISPC_OVL_ATTRIBUTES(plane),
doublestride, 22, 22);
}
}
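/* Storage bits per pixel of the given fourcc (8 for NV12, i.e. the Y plane) */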
static int color_mode_to_bpp(u32 fourcc)
{
switch (fourcc) {
case DRM_FORMAT_NV12:
return 8;
case DRM_FORMAT_RGBX4444:
case DRM_FORMAT_RGB565:
case DRM_FORMAT_ARGB4444:
case DRM_FORMAT_YUYV:
case DRM_FORMAT_UYVY:
case DRM_FORMAT_RGBA4444:
case DRM_FORMAT_XRGB4444:
case DRM_FORMAT_ARGB1555:
case DRM_FORMAT_XRGB1555:
return 16;
case DRM_FORMAT_RGB888:
return 24;
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_ARGB8888:
case DRM_FORMAT_RGBA8888:
case DRM_FORMAT_RGBX8888:
return 32;
default:
BUG();
return 0;
}
}
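/*
* Encode a step of 'pixels' pixels of 'ps' bytes each in the biased
* format used by the PIXEL_INC/ROW_INC registers, where 1 means a
* contiguous step to the next pixel.
*/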
static s32 pixinc(int pixels, u8 ps)
{
if (pixels == 1)
return 1;
else if (pixels > 1)
return 1 + (pixels - 1) * ps;
else if (pixels < 0)
return 1 - (-pixels + 1) * ps;
BUG();
}
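/*
* Compute the base address offsets and the row/pixel increments for a
* plane fetch, taking field mode (interlacing) and x/y predecimation
* into account.
*/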
static void calc_offset(u16 screen_width, u16 width,
u32 fourcc, bool fieldmode, unsigned int field_offset,
unsigned int *offset0, unsigned int *offset1,
s32 *row_inc, s32 *pix_inc, int x_predecim, int y_predecim,
enum omap_dss_rotation_type rotation_type, u8 rotation)
{
u8 ps;
ps = color_mode_to_bpp(fourcc) / 8;
DSSDBG("scrw %d, width %d\n", screen_width, width);
if (rotation_type == OMAP_DSS_ROT_TILER &&
(fourcc == DRM_FORMAT_UYVY || fourcc == DRM_FORMAT_YUYV) &&
drm_rotation_90_or_270(rotation)) {
/*
* HACK: ROW_INC needs to be calculated with TILER units.
* We are given a 'screen_width' such that multiplying it by the
* YUV422 pixel size gives the correct TILER container width.
* However, 'width' is in pixels and multiplying it by the YUV422
* pixel size gives an incorrect result. We thus multiply it here
* by 2 to match the 32-bit TILER unit size.
*/
width *= 2;
}
/*
* field 0 = even field = bottom field
* field 1 = odd field = top field
*/
*offset0 = field_offset * screen_width * ps;
*offset1 = 0;
*row_inc = pixinc(1 + (y_predecim * screen_width - width * x_predecim) +
(fieldmode ? screen_width : 0), ps);
if (fourcc == DRM_FORMAT_YUYV || fourcc == DRM_FORMAT_UYVY)
*pix_inc = pixinc(x_predecim, 2 * ps);
else
*pix_inc = pixinc(x_predecim, ps);
}
/*
* This function is used to avoid synclosts in OMAP3, because of some
* undocumented horizontal position and timing related limitations.
*/
static int check_horiz_timing_omap3(unsigned long pclk, unsigned long lclk,
const struct videomode *vm, u16 pos_x,
u16 width, u16 height, u16 out_width, u16 out_height,
bool five_taps)
{
const int ds = DIV_ROUND_UP(height, out_height);
unsigned long nonactive;
static const u8 limits[3] = { 8, 10, 20 };
u64 val, blank;
int i;
nonactive = vm->hactive + vm->hfront_porch + vm->hsync_len +
vm->hback_porch - out_width;
i = 0;
if (out_height < height)
i++;
if (out_width < width)
i++;
blank = div_u64((u64)(vm->hback_porch + vm->hsync_len + vm->hfront_porch) *
lclk, pclk);
DSSDBG("blanking period + ppl = %llu (limit = %u)\n", blank, limits[i]);
if (blank <= limits[i])
return -EINVAL;
/* FIXME add checks for 3-tap filter once the limitations are known */
if (!five_taps)
return 0;
/*
* Pixel data should be prepared before visible display point starts.
* So, at least DS-2 lines must have already been fetched by DISPC
* during nonactive - pos_x period.
*/
val = div_u64((u64)(nonactive - pos_x) * lclk, pclk);
DSSDBG("(nonactive - pos_x) * pcd = %llu max(0, DS - 2) * width = %d\n",
val, max(0, ds - 2) * width);
if (val < max(0, ds - 2) * width)
return -EINVAL;
/*
* All lines need to be refilled during the nonactive period, as only
* one line can be loaded during the active period. So, at least
* DS - 1 lines should be loaded during the nonactive period.
*/
val = div_u64((u64)nonactive * lclk, pclk);
DSSDBG("nonactive * pcd = %llu, max(0, DS - 1) * width = %d\n",
val, max(0, ds - 1) * width);
if (val < max(0, ds - 1) * width)
return -EINVAL;
return 0;
}
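/*
* Estimate the DISPC core clock rate required for downscaling with the
* five-tap vertical filter.
*/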
static unsigned long calc_core_clk_five_taps(unsigned long pclk,
const struct videomode *vm, u16 width,
u16 height, u16 out_width, u16 out_height,
u32 fourcc)
{
u32 core_clk = 0;
u64 tmp;
if (height <= out_height && width <= out_width)
return (unsigned long) pclk;
if (height > out_height) {
unsigned int ppl = vm->hactive;
tmp = (u64)pclk * height * out_width;
do_div(tmp, 2 * out_height * ppl);
core_clk = tmp;
if (height > 2 * out_height) {
if (ppl == out_width)
return 0;
tmp = (u64)pclk * (height - 2 * out_height) * out_width;
do_div(tmp, 2 * out_height * (ppl - out_width));
core_clk = max_t(u32, core_clk, tmp);
}
}
if (width > out_width) {
tmp = (u64)pclk * width;
do_div(tmp, out_width);
core_clk = max_t(u32, core_clk, tmp);
if (fourcc == DRM_FORMAT_XRGB8888)
core_clk <<= 1;
}
return core_clk;
}
static unsigned long calc_core_clk_24xx(unsigned long pclk, u16 width,
u16 height, u16 out_width, u16 out_height, bool mem_to_mem)
{
if (height > out_height && width > out_width)
return pclk * 4;
else
return pclk * 2;
}
static unsigned long calc_core_clk_34xx(unsigned long pclk, u16 width,
u16 height, u16 out_width, u16 out_height, bool mem_to_mem)
{
unsigned int hf, vf;
/*
* FIXME how to determine the 'A' factor
* for the no-downscaling case?
*/
if (width > 3 * out_width)
hf = 4;
else if (width > 2 * out_width)
hf = 3;
else if (width > out_width)
hf = 2;
else
hf = 1;
if (height > out_height)
vf = 2;
else
vf = 1;
return pclk * vf * hf;
}
static unsigned long calc_core_clk_44xx(unsigned long pclk, u16 width,
u16 height, u16 out_width, u16 out_height, bool mem_to_mem)
{
/*
* If the overlay/writeback is in mem to mem mode, there are no
* downscaling limitations with respect to pixel clock, return 1 as
* required core clock to represent that we have sufficient enough
* core clock to do maximum downscaling
*/
if (mem_to_mem)
return 1;
if (width > out_width)
return DIV_ROUND_UP(pclk, out_width) * width;
else
return pclk;
}
static int dispc_ovl_calc_scaling_24xx(struct dispc_device *dispc,
unsigned long pclk, unsigned long lclk,
const struct videomode *vm,
u16 width, u16 height,
u16 out_width, u16 out_height,
u32 fourcc, bool *five_taps,
int *x_predecim, int *y_predecim,
int *decim_x, int *decim_y,
u16 pos_x, unsigned long *core_clk,
bool mem_to_mem)
{
int error;
u16 in_width, in_height;
int min_factor = min(*decim_x, *decim_y);
const int maxsinglelinewidth = dispc->feat->max_line_width;
*five_taps = false;
do {
in_height = height / *decim_y;
in_width = width / *decim_x;
*core_clk = dispc->feat->calc_core_clk(pclk, in_width,
in_height, out_width, out_height, mem_to_mem);
error = (in_width > maxsinglelinewidth || !*core_clk ||
*core_clk > dispc_core_clk_rate(dispc));
if (error) {
if (*decim_x == *decim_y) {
*decim_x = min_factor;
++*decim_y;
} else {
swap(*decim_x, *decim_y);
if (*decim_x < *decim_y)
++*decim_x;
}
}
} while (*decim_x <= *x_predecim && *decim_y <= *y_predecim && error);
if (error) {
DSSERR("failed to find scaling settings\n");
return -EINVAL;
}
if (in_width > maxsinglelinewidth) {
DSSERR("Cannot scale max input width exceeded\n");
return -EINVAL;
}
return 0;
}
static int dispc_ovl_calc_scaling_34xx(struct dispc_device *dispc,
unsigned long pclk, unsigned long lclk,
const struct videomode *vm,
u16 width, u16 height,
u16 out_width, u16 out_height,
u32 fourcc, bool *five_taps,
int *x_predecim, int *y_predecim,
int *decim_x, int *decim_y,
u16 pos_x, unsigned long *core_clk,
bool mem_to_mem)
{
int error;
u16 in_width, in_height;
const int maxsinglelinewidth = dispc->feat->max_line_width;
do {
in_height = height / *decim_y;
in_width = width / *decim_x;
*five_taps = in_height > out_height;
if (in_width > maxsinglelinewidth)
if (in_height > out_height &&
in_height < out_height * 2)
*five_taps = false;
again:
if (*five_taps)
*core_clk = calc_core_clk_five_taps(pclk, vm,
in_width, in_height, out_width,
out_height, fourcc);
else
*core_clk = dispc->feat->calc_core_clk(pclk, in_width,
in_height, out_width, out_height,
mem_to_mem);
error = check_horiz_timing_omap3(pclk, lclk, vm,
pos_x, in_width, in_height, out_width,
out_height, *five_taps);
if (error && *five_taps) {
*five_taps = false;
goto again;
}
error = (error || in_width > maxsinglelinewidth * 2 ||
(in_width > maxsinglelinewidth && *five_taps) ||
!*core_clk || *core_clk > dispc_core_clk_rate(dispc));
if (!error) {
/* verify that we're inside the limits of scaler */
if (in_width / 4 > out_width)
error = 1;
if (*five_taps) {
if (in_height / 4 > out_height)
error = 1;
} else {
if (in_height / 2 > out_height)
error = 1;
}
}
if (error)
++*decim_y;
} while (*decim_x <= *x_predecim && *decim_y <= *y_predecim && error);
if (error) {
DSSERR("failed to find scaling settings\n");
return -EINVAL;
}
if (check_horiz_timing_omap3(pclk, lclk, vm, pos_x, in_width,
in_height, out_width, out_height, *five_taps)) {
DSSERR("horizontal timing too tight\n");
return -EINVAL;
}
if (in_width > (maxsinglelinewidth * 2)) {
DSSERR("Cannot setup scaling\n");
DSSERR("width exceeds maximum width possible\n");
return -EINVAL;
}
if (in_width > maxsinglelinewidth && *five_taps) {
DSSERR("cannot setup scaling with five taps\n");
return -EINVAL;
}
return 0;
}
static int dispc_ovl_calc_scaling_44xx(struct dispc_device *dispc,
unsigned long pclk, unsigned long lclk,
const struct videomode *vm,
u16 width, u16 height,
u16 out_width, u16 out_height,
u32 fourcc, bool *five_taps,
int *x_predecim, int *y_predecim,
int *decim_x, int *decim_y,
u16 pos_x, unsigned long *core_clk,
bool mem_to_mem)
{
u16 in_width, in_width_max;
int decim_x_min = *decim_x;
u16 in_height = height / *decim_y;
const int maxsinglelinewidth = dispc->feat->max_line_width;
const int maxdownscale = dispc->feat->max_downscale;
if (mem_to_mem) {
in_width_max = out_width * maxdownscale;
} else {
in_width_max = dispc_core_clk_rate(dispc)
/ DIV_ROUND_UP(pclk, out_width);
}
*decim_x = DIV_ROUND_UP(width, in_width_max);
*decim_x = max(*decim_x, decim_x_min);
if (*decim_x > *x_predecim)
return -EINVAL;
do {
in_width = width / *decim_x;
} while (*decim_x <= *x_predecim &&
in_width > maxsinglelinewidth && ++*decim_x);
if (in_width > maxsinglelinewidth) {
DSSERR("Cannot scale width exceeds max line width\n");
return -EINVAL;
}
if (*decim_x > 4 && fourcc != DRM_FORMAT_NV12) {
/*
* Let's disable all scaling that requires horizontal
* decimation with higher factor than 4, until we have
* better estimates of what we can and can not
* do. However, the NV12 color format appears to work OK
* with all decimation factors.
*
* When decimating horizontally by more than 4, the DSS
* is not able to fetch the data in burst mode. When
* this happens it is hard to tell whether there is enough
* bandwidth. Despite what theory says, this appears to
* be true also for 16-bit color formats.
*/
DSSERR("Not enough bandwidth, too much downscaling (x-decimation factor %d > 4)\n", *decim_x);
return -EINVAL;
}
*core_clk = dispc->feat->calc_core_clk(pclk, in_width, in_height,
out_width, out_height, mem_to_mem);
return 0;
}
enum omap_overlay_caps dispc_ovl_get_caps(struct dispc_device *dispc, enum omap_plane_id plane)
{
return dispc->feat->overlay_caps[plane];
}
#define DIV_FRAC(dividend, divisor) \
((dividend) * 100 / (divisor) - ((dividend) / (divisor) * 100))
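/*
* DIV_FRAC() yields the two fractional decimal digits of a division, e.g.
* DIV_FRAC(3, 2) == 50 because 3 / 2 = 1.50. It is only used to pretty-print
* the scaling factors in the debug message below.
*/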
static int dispc_ovl_calc_scaling(struct dispc_device *dispc,
enum omap_plane_id plane,
unsigned long pclk, unsigned long lclk,
enum omap_overlay_caps caps,
const struct videomode *vm,
u16 width, u16 height,
u16 out_width, u16 out_height,
u32 fourcc, bool *five_taps,
int *x_predecim, int *y_predecim, u16 pos_x,
enum omap_dss_rotation_type rotation_type,
bool mem_to_mem)
{
int maxhdownscale = dispc->feat->max_downscale;
int maxvdownscale = dispc->feat->max_downscale;
const int max_decim_limit = 16;
unsigned long core_clk = 0;
int decim_x, decim_y, ret;
if (width == out_width && height == out_height)
return 0;
if (dispc->feat->supported_scaler_color_modes) {
const u32 *modes = dispc->feat->supported_scaler_color_modes;
unsigned int i;
for (i = 0; modes[i]; ++i) {
if (modes[i] == fourcc)
break;
}
if (modes[i] == 0)
return -EINVAL;
}
if (plane == OMAP_DSS_WB) {
switch (fourcc) {
case DRM_FORMAT_NV12:
maxhdownscale = maxvdownscale = 2;
break;
case DRM_FORMAT_YUYV:
case DRM_FORMAT_UYVY:
maxhdownscale = 2;
maxvdownscale = 4;
break;
default:
break;
}
}
if (!mem_to_mem && (pclk == 0 || vm->pixelclock == 0)) {
DSSERR("cannot calculate scaling settings: pclk is zero\n");
return -EINVAL;
}
if ((caps & OMAP_DSS_OVL_CAP_SCALE) == 0)
return -EINVAL;
if (mem_to_mem) {
*x_predecim = *y_predecim = 1;
} else {
*x_predecim = max_decim_limit;
*y_predecim = (rotation_type == OMAP_DSS_ROT_TILER &&
dispc_has_feature(dispc, FEAT_BURST_2D)) ?
2 : max_decim_limit;
}
decim_x = DIV_ROUND_UP(DIV_ROUND_UP(width, out_width), maxhdownscale);
decim_y = DIV_ROUND_UP(DIV_ROUND_UP(height, out_height), maxvdownscale);
if (decim_x > *x_predecim || out_width > width * 8)
return -EINVAL;
if (decim_y > *y_predecim || out_height > height * 8)
return -EINVAL;
ret = dispc->feat->calc_scaling(dispc, pclk, lclk, vm, width, height,
out_width, out_height, fourcc,
five_taps, x_predecim, y_predecim,
&decim_x, &decim_y, pos_x, &core_clk,
mem_to_mem);
if (ret)
return ret;
DSSDBG("%dx%d -> %dx%d (%d.%02d x %d.%02d), decim %dx%d %dx%d (%d.%02d x %d.%02d), taps %d, req clk %lu, cur clk %lu\n",
width, height,
out_width, out_height,
out_width / width, DIV_FRAC(out_width, width),
out_height / height, DIV_FRAC(out_height, height),
decim_x, decim_y,
width / decim_x, height / decim_y,
out_width / (width / decim_x), DIV_FRAC(out_width, width / decim_x),
out_height / (height / decim_y), DIV_FRAC(out_height, height / decim_y),
*five_taps ? 5 : 3,
core_clk, dispc_core_clk_rate(dispc));
if (!core_clk || core_clk > dispc_core_clk_rate(dispc)) {
DSSERR("failed to set up scaling, "
"required core clk rate = %lu Hz, "
"current core clk rate = %lu Hz\n",
core_clk, dispc_core_clk_rate(dispc));
return -EINVAL;
}
*x_predecim = decim_x;
*y_predecim = decim_y;
return 0;
}
void dispc_ovl_get_max_size(struct dispc_device *dispc, u16 *width, u16 *height)
{
*width = dispc->feat->ovl_width_max;
*height = dispc->feat->ovl_height_max;
}
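/*
* Shared programming path for the regular overlays and the writeback
* pipeline: validate the scaling request, derive the DMA offsets and
* row/pixel increments for the requested rotation and (de)interlacing,
* then write the base addresses, sizes, scaling parameters and per-plane
* attribute bits.
*/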
static int dispc_ovl_setup_common(struct dispc_device *dispc,
enum omap_plane_id plane,
enum omap_overlay_caps caps,
u32 paddr, u32 p_uv_addr,
u16 screen_width, int pos_x, int pos_y,
u16 width, u16 height,
u16 out_width, u16 out_height,
u32 fourcc, u8 rotation, u8 zorder,
u8 pre_mult_alpha, u8 global_alpha,
enum omap_dss_rotation_type rotation_type,
bool replication, const struct videomode *vm,
bool mem_to_mem,
enum drm_color_encoding color_encoding,
enum drm_color_range color_range)
{
bool five_taps = true;
bool fieldmode = false;
int r, cconv = 0;
unsigned int offset0, offset1;
s32 row_inc;
s32 pix_inc;
u16 frame_width;
unsigned int field_offset = 0;
u16 in_height = height;
u16 in_width = width;
int x_predecim = 1, y_predecim = 1;
bool ilace = !!(vm->flags & DISPLAY_FLAGS_INTERLACED);
unsigned long pclk = dispc_plane_pclk_rate(dispc, plane);
unsigned long lclk = dispc_plane_lclk_rate(dispc, plane);
const struct drm_format_info *info;
info = drm_format_info(fourcc);
/* when setting up WB, dispc_plane_pclk_rate() returns 0 */
if (plane == OMAP_DSS_WB)
pclk = vm->pixelclock;
if (paddr == 0 && rotation_type != OMAP_DSS_ROT_TILER)
return -EINVAL;
if (info->is_yuv && (in_width & 1)) {
DSSERR("input width %d is not even for YUV format\n", in_width);
return -EINVAL;
}
out_width = out_width == 0 ? width : out_width;
out_height = out_height == 0 ? height : out_height;
if (plane != OMAP_DSS_WB) {
if (ilace && height == out_height)
fieldmode = true;
if (ilace) {
if (fieldmode)
in_height /= 2;
pos_y /= 2;
out_height /= 2;
DSSDBG("adjusting for ilace: height %d, pos_y %d, out_height %d\n",
in_height, pos_y, out_height);
}
}
if (!dispc_ovl_color_mode_supported(dispc, plane, fourcc))
return -EINVAL;
r = dispc_ovl_calc_scaling(dispc, plane, pclk, lclk, caps, vm, in_width,
in_height, out_width, out_height, fourcc,
&five_taps, &x_predecim, &y_predecim, pos_x,
rotation_type, mem_to_mem);
if (r)
return r;
in_width = in_width / x_predecim;
in_height = in_height / y_predecim;
if (x_predecim > 1 || y_predecim > 1)
DSSDBG("predecimation %d x %x, new input size %d x %d\n",
x_predecim, y_predecim, in_width, in_height);
if (info->is_yuv && (in_width & 1)) {
DSSDBG("predecimated input width is not even for YUV format\n");
DSSDBG("adjusting input width %d -> %d\n",
in_width, in_width & ~1);
in_width &= ~1;
}
if (info->is_yuv)
cconv = 1;
if (ilace && !fieldmode) {
/*
* When downscaling, the bottom field may have to start several
* source lines below the top field. Unfortunately the ACCU
* registers will only hold the fractional part of the offset
* so the integer part must be added to the base address of the
* bottom field.
*/
if (!in_height || in_height == out_height)
field_offset = 0;
else
field_offset = in_height / out_height / 2;
}
/* Fields are independent but interleaved in memory. */
if (fieldmode)
field_offset = 1;
offset0 = 0;
offset1 = 0;
row_inc = 0;
pix_inc = 0;
if (plane == OMAP_DSS_WB)
frame_width = out_width;
else
frame_width = in_width;
calc_offset(screen_width, frame_width,
fourcc, fieldmode, field_offset,
&offset0, &offset1, &row_inc, &pix_inc,
x_predecim, y_predecim,
rotation_type, rotation);
DSSDBG("offset0 %u, offset1 %u, row_inc %d, pix_inc %d\n",
offset0, offset1, row_inc, pix_inc);
dispc_ovl_set_color_mode(dispc, plane, fourcc);
dispc_ovl_configure_burst_type(dispc, plane, rotation_type);
if (dispc->feat->reverse_ilace_field_order)
swap(offset0, offset1);
dispc_ovl_set_ba0(dispc, plane, paddr + offset0);
dispc_ovl_set_ba1(dispc, plane, paddr + offset1);
if (fourcc == DRM_FORMAT_NV12) {
dispc_ovl_set_ba0_uv(dispc, plane, p_uv_addr + offset0);
dispc_ovl_set_ba1_uv(dispc, plane, p_uv_addr + offset1);
}
if (dispc->feat->last_pixel_inc_missing)
row_inc += pix_inc - 1;
dispc_ovl_set_row_inc(dispc, plane, row_inc);
dispc_ovl_set_pix_inc(dispc, plane, pix_inc);
DSSDBG("%d,%d %dx%d -> %dx%d\n", pos_x, pos_y, in_width,
in_height, out_width, out_height);
dispc_ovl_set_pos(dispc, plane, caps, pos_x, pos_y);
dispc_ovl_set_input_size(dispc, plane, in_width, in_height);
if (caps & OMAP_DSS_OVL_CAP_SCALE) {
dispc_ovl_set_scaling(dispc, plane, in_width, in_height,
out_width, out_height, ilace, five_taps,
fieldmode, fourcc, rotation);
dispc_ovl_set_output_size(dispc, plane, out_width, out_height);
dispc_ovl_set_vid_color_conv(dispc, plane, cconv);
if (plane != OMAP_DSS_WB)
dispc_ovl_set_csc(dispc, plane, color_encoding, color_range);
}
dispc_ovl_set_rotation_attrs(dispc, plane, rotation, rotation_type,
fourcc);
dispc_ovl_set_zorder(dispc, plane, caps, zorder);
dispc_ovl_set_pre_mult_alpha(dispc, plane, caps, pre_mult_alpha);
dispc_ovl_setup_global_alpha(dispc, plane, caps, global_alpha);
dispc_ovl_enable_replication(dispc, plane, caps, replication);
return 0;
}
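/*
* Illustrative call, assuming 'oi' and 'vm' have already been filled in by
* the caller:
*
*   dispc_ovl_setup(dispc, OMAP_DSS_VIDEO1, &oi, &vm, false,
*                   OMAP_DSS_CHANNEL_LCD);
*/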
int dispc_ovl_setup(struct dispc_device *dispc,
enum omap_plane_id plane,
const struct omap_overlay_info *oi,
const struct videomode *vm, bool mem_to_mem,
enum omap_channel channel)
{
int r;
enum omap_overlay_caps caps = dispc->feat->overlay_caps[plane];
const bool replication = true;
DSSDBG("dispc_ovl_setup %d, pa %pad, pa_uv %pad, sw %d, %d,%d, %dx%d ->"
" %dx%d, cmode %x, rot %d, chan %d repl %d\n",
plane, &oi->paddr, &oi->p_uv_addr, oi->screen_width, oi->pos_x,
oi->pos_y, oi->width, oi->height, oi->out_width, oi->out_height,
oi->fourcc, oi->rotation, channel, replication);
dispc_ovl_set_channel_out(dispc, plane, channel);
r = dispc_ovl_setup_common(dispc, plane, caps, oi->paddr, oi->p_uv_addr,
oi->screen_width, oi->pos_x, oi->pos_y, oi->width, oi->height,
oi->out_width, oi->out_height, oi->fourcc, oi->rotation,
oi->zorder, oi->pre_mult_alpha, oi->global_alpha,
oi->rotation_type, replication, vm, mem_to_mem,
oi->color_encoding, oi->color_range);
return r;
}
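/*
* Writeback reuses dispc_ovl_setup_common() with the manager's active area
* as the input size and wi->width/height as the scaled output, then programs
* the WB-specific attribute bits (truncation, channel-in, capture mode) and
* the write-back delay counter.
*/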
int dispc_wb_setup(struct dispc_device *dispc,
const struct omap_dss_writeback_info *wi,
bool mem_to_mem, const struct videomode *vm,
enum dss_writeback_channel channel_in)
{
int r;
u32 l;
enum omap_plane_id plane = OMAP_DSS_WB;
const int pos_x = 0, pos_y = 0;
const u8 zorder = 0, global_alpha = 0;
const bool replication = true;
bool truncation;
int in_width = vm->hactive;
int in_height = vm->vactive;
enum omap_overlay_caps caps =
OMAP_DSS_OVL_CAP_SCALE | OMAP_DSS_OVL_CAP_PRE_MULT_ALPHA;
if (vm->flags & DISPLAY_FLAGS_INTERLACED)
in_height /= 2;
DSSDBG("dispc_wb_setup, pa %x, pa_uv %x, %d,%d -> %dx%d, cmode %x, "
"rot %d\n", wi->paddr, wi->p_uv_addr, in_width,
in_height, wi->width, wi->height, wi->fourcc, wi->rotation);
r = dispc_ovl_setup_common(dispc, plane, caps, wi->paddr, wi->p_uv_addr,
wi->buf_width, pos_x, pos_y, in_width, in_height, wi->width,
wi->height, wi->fourcc, wi->rotation, zorder,
wi->pre_mult_alpha, global_alpha, wi->rotation_type,
replication, vm, mem_to_mem, DRM_COLOR_YCBCR_BT601,
DRM_COLOR_YCBCR_LIMITED_RANGE);
if (r)
return r;
switch (wi->fourcc) {
case DRM_FORMAT_RGB565:
case DRM_FORMAT_RGB888:
case DRM_FORMAT_ARGB4444:
case DRM_FORMAT_RGBA4444:
case DRM_FORMAT_RGBX4444:
case DRM_FORMAT_ARGB1555:
case DRM_FORMAT_XRGB1555:
case DRM_FORMAT_XRGB4444:
truncation = true;
break;
default:
truncation = false;
break;
}
/* setup extra DISPC_WB_ATTRIBUTES */
l = dispc_read_reg(dispc, DISPC_OVL_ATTRIBUTES(plane));
l = FLD_MOD(l, truncation, 10, 10); /* TRUNCATIONENABLE */
l = FLD_MOD(l, channel_in, 18, 16); /* CHANNELIN */
l = FLD_MOD(l, mem_to_mem, 19, 19); /* WRITEBACKMODE */
if (mem_to_mem)
l = FLD_MOD(l, 1, 26, 24); /* CAPTUREMODE */
else
l = FLD_MOD(l, 0, 26, 24); /* CAPTUREMODE */
dispc_write_reg(dispc, DISPC_OVL_ATTRIBUTES(plane), l);
if (mem_to_mem) {
/* WBDELAYCOUNT */
REG_FLD_MOD(dispc, DISPC_OVL_ATTRIBUTES2(plane), 0, 7, 0);
} else {
u32 wbdelay;
if (channel_in == DSS_WB_TV_MGR)
wbdelay = vm->vsync_len + vm->vback_porch;
else
wbdelay = vm->vfront_porch + vm->vsync_len +
vm->vback_porch;
if (vm->flags & DISPLAY_FLAGS_INTERLACED)
wbdelay /= 2;
wbdelay = min(wbdelay, 255u);
/* WBDELAYCOUNT */
REG_FLD_MOD(dispc, DISPC_OVL_ATTRIBUTES2(plane), wbdelay, 7, 0);
}
return 0;
}
bool dispc_has_writeback(struct dispc_device *dispc)
{
return dispc->feat->has_writeback;
}
int dispc_ovl_enable(struct dispc_device *dispc,
enum omap_plane_id plane, bool enable)
{
DSSDBG("dispc_enable_plane %d, %d\n", plane, enable);
REG_FLD_MOD(dispc, DISPC_OVL_ATTRIBUTES(plane), enable ? 1 : 0, 0, 0);
return 0;
}
static void dispc_lcd_enable_signal_polarity(struct dispc_device *dispc,
bool act_high)
{
if (!dispc_has_feature(dispc, FEAT_LCDENABLEPOL))
return;
REG_FLD_MOD(dispc, DISPC_CONTROL, act_high ? 1 : 0, 29, 29);
}
void dispc_lcd_enable_signal(struct dispc_device *dispc, bool enable)
{
if (!dispc_has_feature(dispc, FEAT_LCDENABLESIGNAL))
return;
REG_FLD_MOD(dispc, DISPC_CONTROL, enable ? 1 : 0, 28, 28);
}
void dispc_pck_free_enable(struct dispc_device *dispc, bool enable)
{
if (!dispc_has_feature(dispc, FEAT_PCKFREEENABLE))
return;
REG_FLD_MOD(dispc, DISPC_CONTROL, enable ? 1 : 0, 27, 27);
}
static void dispc_mgr_enable_fifohandcheck(struct dispc_device *dispc,
enum omap_channel channel,
bool enable)
{
mgr_fld_write(dispc, channel, DISPC_MGR_FLD_FIFOHANDCHECK, enable);
}
static void dispc_mgr_set_lcd_type_tft(struct dispc_device *dispc,
enum omap_channel channel)
{
mgr_fld_write(dispc, channel, DISPC_MGR_FLD_STNTFT, 1);
}
static void dispc_set_loadmode(struct dispc_device *dispc,
enum omap_dss_load_mode mode)
{
REG_FLD_MOD(dispc, DISPC_CONFIG, mode, 2, 1);
}
static void dispc_mgr_set_default_color(struct dispc_device *dispc,
enum omap_channel channel, u32 color)
{
dispc_write_reg(dispc, DISPC_DEFAULT_COLOR(channel), color);
}
static void dispc_mgr_set_trans_key(struct dispc_device *dispc,
enum omap_channel ch,
enum omap_dss_trans_key_type type,
u32 trans_key)
{
mgr_fld_write(dispc, ch, DISPC_MGR_FLD_TCKSELECTION, type);
dispc_write_reg(dispc, DISPC_TRANS_COLOR(ch), trans_key);
}
static void dispc_mgr_enable_trans_key(struct dispc_device *dispc,
enum omap_channel ch, bool enable)
{
mgr_fld_write(dispc, ch, DISPC_MGR_FLD_TCKENABLE, enable);
}
static void dispc_mgr_enable_alpha_fixed_zorder(struct dispc_device *dispc,
enum omap_channel ch,
bool enable)
{
if (!dispc_has_feature(dispc, FEAT_ALPHA_FIXED_ZORDER))
return;
if (ch == OMAP_DSS_CHANNEL_LCD)
REG_FLD_MOD(dispc, DISPC_CONFIG, enable, 18, 18);
else if (ch == OMAP_DSS_CHANNEL_DIGIT)
REG_FLD_MOD(dispc, DISPC_CONFIG, enable, 19, 19);
}
void dispc_mgr_setup(struct dispc_device *dispc,
enum omap_channel channel,
const struct omap_overlay_manager_info *info)
{
dispc_mgr_set_default_color(dispc, channel, info->default_color);
dispc_mgr_set_trans_key(dispc, channel, info->trans_key_type,
info->trans_key);
dispc_mgr_enable_trans_key(dispc, channel, info->trans_enabled);
dispc_mgr_enable_alpha_fixed_zorder(dispc, channel,
info->partial_alpha_enabled);
if (dispc_has_feature(dispc, FEAT_CPR)) {
dispc_mgr_enable_cpr(dispc, channel, info->cpr_enable);
dispc_mgr_set_cpr_coef(dispc, channel, &info->cpr_coefs);
}
}
static void dispc_mgr_set_tft_data_lines(struct dispc_device *dispc,
enum omap_channel channel,
u8 data_lines)
{
int code;
switch (data_lines) {
case 12:
code = 0;
break;
case 16:
code = 1;
break;
case 18:
code = 2;
break;
case 24:
code = 3;
break;
default:
BUG();
return;
}
mgr_fld_write(dispc, channel, DISPC_MGR_FLD_TFTDATALINES, code);
}
static void dispc_mgr_set_io_pad_mode(struct dispc_device *dispc,
enum dss_io_pad_mode mode)
{
u32 l;
int gpout0, gpout1;
switch (mode) {
case DSS_IO_PAD_MODE_RESET:
gpout0 = 0;
gpout1 = 0;
break;
case DSS_IO_PAD_MODE_RFBI:
gpout0 = 1;
gpout1 = 0;
break;
case DSS_IO_PAD_MODE_BYPASS:
gpout0 = 1;
gpout1 = 1;
break;
default:
BUG();
return;
}
l = dispc_read_reg(dispc, DISPC_CONTROL);
l = FLD_MOD(l, gpout0, 15, 15);
l = FLD_MOD(l, gpout1, 16, 16);
dispc_write_reg(dispc, DISPC_CONTROL, l);
}
static void dispc_mgr_enable_stallmode(struct dispc_device *dispc,
enum omap_channel channel, bool enable)
{
mgr_fld_write(dispc, channel, DISPC_MGR_FLD_STALLMODE, enable);
}
void dispc_mgr_set_lcd_config(struct dispc_device *dispc,
enum omap_channel channel,
const struct dss_lcd_mgr_config *config)
{
dispc_mgr_set_io_pad_mode(dispc, config->io_pad_mode);
dispc_mgr_enable_stallmode(dispc, channel, config->stallmode);
dispc_mgr_enable_fifohandcheck(dispc, channel, config->fifohandcheck);
dispc_mgr_set_clock_div(dispc, channel, &config->clock_info);
dispc_mgr_set_tft_data_lines(dispc, channel, config->video_port_width);
dispc_lcd_enable_signal_polarity(dispc, config->lcden_sig_polarity);
dispc_mgr_set_lcd_type_tft(dispc, channel);
}
static bool _dispc_mgr_size_ok(struct dispc_device *dispc,
u16 width, u16 height)
{
return width <= dispc->feat->mgr_width_max &&
height <= dispc->feat->mgr_height_max;
}
static bool _dispc_lcd_timings_ok(struct dispc_device *dispc,
int hsync_len, int hfp, int hbp,
int vsw, int vfp, int vbp)
{
if (hsync_len < 1 || hsync_len > dispc->feat->sw_max ||
hfp < 1 || hfp > dispc->feat->hp_max ||
hbp < 1 || hbp > dispc->feat->hp_max ||
vsw < 1 || vsw > dispc->feat->sw_max ||
vfp < 0 || vfp > dispc->feat->vp_max ||
vbp < 0 || vbp > dispc->feat->vp_max)
return false;
return true;
}
static bool _dispc_mgr_pclk_ok(struct dispc_device *dispc,
enum omap_channel channel,
unsigned long pclk)
{
if (dss_mgr_is_lcd(channel))
return pclk <= dispc->feat->max_lcd_pclk;
else
return pclk <= dispc->feat->max_tv_pclk;
}
int dispc_mgr_check_timings(struct dispc_device *dispc,
enum omap_channel channel,
const struct videomode *vm)
{
if (!_dispc_mgr_size_ok(dispc, vm->hactive, vm->vactive))
return MODE_BAD;
if (!_dispc_mgr_pclk_ok(dispc, channel, vm->pixelclock))
return MODE_BAD;
if (dss_mgr_is_lcd(channel)) {
/* TODO: OMAP4+ supports interlace for LCD outputs */
if (vm->flags & DISPLAY_FLAGS_INTERLACED)
return MODE_BAD;
if (!_dispc_lcd_timings_ok(dispc, vm->hsync_len,
vm->hfront_porch, vm->hback_porch,
vm->vsync_len, vm->vfront_porch,
vm->vback_porch))
return MODE_BAD;
}
return MODE_OK;
}
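/*
* Program the blanking registers and the sync/data polarities for an LCD
* channel. On SoCs that expose a syscon-pol regmap the same polarity bits
* are mirrored into the control module as well.
*/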
static void _dispc_mgr_set_lcd_timings(struct dispc_device *dispc,
enum omap_channel channel,
const struct videomode *vm)
{
u32 timing_h, timing_v, l;
bool onoff, rf, ipc, vs, hs, de;
timing_h = FLD_VAL(vm->hsync_len - 1, dispc->feat->sw_start, 0) |
FLD_VAL(vm->hfront_porch - 1, dispc->feat->fp_start, 8) |
FLD_VAL(vm->hback_porch - 1, dispc->feat->bp_start, 20);
timing_v = FLD_VAL(vm->vsync_len - 1, dispc->feat->sw_start, 0) |
FLD_VAL(vm->vfront_porch, dispc->feat->fp_start, 8) |
FLD_VAL(vm->vback_porch, dispc->feat->bp_start, 20);
dispc_write_reg(dispc, DISPC_TIMING_H(channel), timing_h);
dispc_write_reg(dispc, DISPC_TIMING_V(channel), timing_v);
vs = !!(vm->flags & DISPLAY_FLAGS_VSYNC_LOW);
hs = !!(vm->flags & DISPLAY_FLAGS_HSYNC_LOW);
de = !!(vm->flags & DISPLAY_FLAGS_DE_LOW);
ipc = !!(vm->flags & DISPLAY_FLAGS_PIXDATA_NEGEDGE);
onoff = true; /* always use the 'rf' setting */
rf = !!(vm->flags & DISPLAY_FLAGS_SYNC_POSEDGE);
l = FLD_VAL(onoff, 17, 17) |
FLD_VAL(rf, 16, 16) |
FLD_VAL(de, 15, 15) |
FLD_VAL(ipc, 14, 14) |
FLD_VAL(hs, 13, 13) |
FLD_VAL(vs, 12, 12);
/* always set ALIGN bit when available */
if (dispc->feat->supports_sync_align)
l |= (1 << 18);
dispc_write_reg(dispc, DISPC_POL_FREQ(channel), l);
if (dispc->syscon_pol) {
const int shifts[] = {
[OMAP_DSS_CHANNEL_LCD] = 0,
[OMAP_DSS_CHANNEL_LCD2] = 1,
[OMAP_DSS_CHANNEL_LCD3] = 2,
};
u32 mask, val;
mask = (1 << 0) | (1 << 3) | (1 << 6);
val = (rf << 0) | (ipc << 3) | (onoff << 6);
mask <<= 16 + shifts[channel];
val <<= 16 + shifts[channel];
regmap_update_bits(dispc->syscon_pol, dispc->syscon_pol_offset,
mask, val);
}
}
static int vm_flag_to_int(enum display_flags flags, enum display_flags high,
enum display_flags low)
{
if (flags & high)
return 1;
if (flags & low)
return -1;
return 0;
}
/* change name to mode? */
void dispc_mgr_set_timings(struct dispc_device *dispc,
enum omap_channel channel,
const struct videomode *vm)
{
unsigned int xtot, ytot;
unsigned long ht, vt;
struct videomode t = *vm;
DSSDBG("channel %d xres %u yres %u\n", channel, t.hactive, t.vactive);
if (dispc_mgr_check_timings(dispc, channel, &t)) {
BUG();
return;
}
if (dss_mgr_is_lcd(channel)) {
_dispc_mgr_set_lcd_timings(dispc, channel, &t);
xtot = t.hactive + t.hfront_porch + t.hsync_len + t.hback_porch;
ytot = t.vactive + t.vfront_porch + t.vsync_len + t.vback_porch;
ht = vm->pixelclock / xtot;
vt = vm->pixelclock / xtot / ytot;
DSSDBG("pck %lu\n", vm->pixelclock);
DSSDBG("hsync_len %d hfp %d hbp %d vsw %d vfp %d vbp %d\n",
t.hsync_len, t.hfront_porch, t.hback_porch,
t.vsync_len, t.vfront_porch, t.vback_porch);
DSSDBG("vsync_level %d hsync_level %d data_pclk_edge %d de_level %d sync_pclk_edge %d\n",
vm_flag_to_int(t.flags, DISPLAY_FLAGS_VSYNC_HIGH, DISPLAY_FLAGS_VSYNC_LOW),
vm_flag_to_int(t.flags, DISPLAY_FLAGS_HSYNC_HIGH, DISPLAY_FLAGS_HSYNC_LOW),
vm_flag_to_int(t.flags, DISPLAY_FLAGS_PIXDATA_POSEDGE, DISPLAY_FLAGS_PIXDATA_NEGEDGE),
vm_flag_to_int(t.flags, DISPLAY_FLAGS_DE_HIGH, DISPLAY_FLAGS_DE_LOW),
vm_flag_to_int(t.flags, DISPLAY_FLAGS_SYNC_POSEDGE, DISPLAY_FLAGS_SYNC_NEGEDGE));
DSSDBG("hsync %luHz, vsync %luHz\n", ht, vt);
} else {
if (t.flags & DISPLAY_FLAGS_INTERLACED)
t.vactive /= 2;
if (dispc->feat->supports_double_pixel)
REG_FLD_MOD(dispc, DISPC_CONTROL,
!!(t.flags & DISPLAY_FLAGS_DOUBLECLK),
19, 17);
}
dispc_mgr_set_size(dispc, channel, t.hactive, t.vactive);
}
static void dispc_mgr_set_lcd_divisor(struct dispc_device *dispc,
enum omap_channel channel, u16 lck_div,
u16 pck_div)
{
BUG_ON(lck_div < 1);
BUG_ON(pck_div < 1);
dispc_write_reg(dispc, DISPC_DIVISORo(channel),
FLD_VAL(lck_div, 23, 16) | FLD_VAL(pck_div, 7, 0));
if (!dispc_has_feature(dispc, FEAT_CORE_CLK_DIV) &&
channel == OMAP_DSS_CHANNEL_LCD)
dispc->core_clk_rate = dispc_fclk_rate(dispc) / lck_div;
}
static void dispc_mgr_get_lcd_divisor(struct dispc_device *dispc,
enum omap_channel channel, int *lck_div,
int *pck_div)
{
u32 l;
l = dispc_read_reg(dispc, DISPC_DIVISORo(channel));
*lck_div = FLD_GET(l, 23, 16);
*pck_div = FLD_GET(l, 7, 0);
}
static unsigned long dispc_fclk_rate(struct dispc_device *dispc)
{
unsigned long r;
enum dss_clk_source src;
src = dss_get_dispc_clk_source(dispc->dss);
if (src == DSS_CLK_SRC_FCK) {
r = dss_get_dispc_clk_rate(dispc->dss);
} else {
struct dss_pll *pll;
unsigned int clkout_idx;
pll = dss_pll_find_by_src(dispc->dss, src);
clkout_idx = dss_pll_get_clkout_idx_for_src(src);
r = pll->cinfo.clkout[clkout_idx];
}
return r;
}
static unsigned long dispc_mgr_lclk_rate(struct dispc_device *dispc,
enum omap_channel channel)
{
int lcd;
unsigned long r;
enum dss_clk_source src;
/* for TV, LCLK rate is the FCLK rate */
if (!dss_mgr_is_lcd(channel))
return dispc_fclk_rate(dispc);
src = dss_get_lcd_clk_source(dispc->dss, channel);
if (src == DSS_CLK_SRC_FCK) {
r = dss_get_dispc_clk_rate(dispc->dss);
} else {
struct dss_pll *pll;
unsigned int clkout_idx;
pll = dss_pll_find_by_src(dispc->dss, src);
clkout_idx = dss_pll_get_clkout_idx_for_src(src);
r = pll->cinfo.clkout[clkout_idx];
}
lcd = REG_GET(dispc, DISPC_DIVISORo(channel), 23, 16);
return r / lcd;
}
static unsigned long dispc_mgr_pclk_rate(struct dispc_device *dispc,
enum omap_channel channel)
{
unsigned long r;
if (dss_mgr_is_lcd(channel)) {
int pcd;
u32 l;
l = dispc_read_reg(dispc, DISPC_DIVISORo(channel));
pcd = FLD_GET(l, 7, 0);
r = dispc_mgr_lclk_rate(dispc, channel);
return r / pcd;
} else {
return dispc->tv_pclk_rate;
}
}
void dispc_set_tv_pclk(struct dispc_device *dispc, unsigned long pclk)
{
dispc->tv_pclk_rate = pclk;
}
static unsigned long dispc_core_clk_rate(struct dispc_device *dispc)
{
return dispc->core_clk_rate;
}
static unsigned long dispc_plane_pclk_rate(struct dispc_device *dispc,
enum omap_plane_id plane)
{
enum omap_channel channel;
if (plane == OMAP_DSS_WB)
return 0;
channel = dispc_ovl_get_channel_out(dispc, plane);
return dispc_mgr_pclk_rate(dispc, channel);
}
static unsigned long dispc_plane_lclk_rate(struct dispc_device *dispc,
enum omap_plane_id plane)
{
enum omap_channel channel;
if (plane == OMAP_DSS_WB)
return 0;
channel = dispc_ovl_get_channel_out(dispc, plane);
return dispc_mgr_lclk_rate(dispc, channel);
}
static void dispc_dump_clocks_channel(struct dispc_device *dispc,
struct seq_file *s,
enum omap_channel channel)
{
int lcd, pcd;
enum dss_clk_source lcd_clk_src;
seq_printf(s, "- %s -\n", mgr_desc[channel].name);
lcd_clk_src = dss_get_lcd_clk_source(dispc->dss, channel);
seq_printf(s, "%s clk source = %s\n", mgr_desc[channel].name,
dss_get_clk_source_name(lcd_clk_src));
dispc_mgr_get_lcd_divisor(dispc, channel, &lcd, &pcd);
seq_printf(s, "lck\t\t%-16lulck div\t%u\n",
dispc_mgr_lclk_rate(dispc, channel), lcd);
seq_printf(s, "pck\t\t%-16lupck div\t%u\n",
dispc_mgr_pclk_rate(dispc, channel), pcd);
}
void dispc_dump_clocks(struct dispc_device *dispc, struct seq_file *s)
{
enum dss_clk_source dispc_clk_src;
int lcd;
u32 l;
if (dispc_runtime_get(dispc))
return;
seq_printf(s, "- DISPC -\n");
dispc_clk_src = dss_get_dispc_clk_source(dispc->dss);
seq_printf(s, "dispc fclk source = %s\n",
dss_get_clk_source_name(dispc_clk_src));
seq_printf(s, "fck\t\t%-16lu\n", dispc_fclk_rate(dispc));
if (dispc_has_feature(dispc, FEAT_CORE_CLK_DIV)) {
seq_printf(s, "- DISPC-CORE-CLK -\n");
l = dispc_read_reg(dispc, DISPC_DIVISOR);
lcd = FLD_GET(l, 23, 16);
seq_printf(s, "lck\t\t%-16lulck div\t%u\n",
(dispc_fclk_rate(dispc)/lcd), lcd);
}
dispc_dump_clocks_channel(dispc, s, OMAP_DSS_CHANNEL_LCD);
if (dispc_has_feature(dispc, FEAT_MGR_LCD2))
dispc_dump_clocks_channel(dispc, s, OMAP_DSS_CHANNEL_LCD2);
if (dispc_has_feature(dispc, FEAT_MGR_LCD3))
dispc_dump_clocks_channel(dispc, s, OMAP_DSS_CHANNEL_LCD3);
dispc_runtime_put(dispc);
}
static int dispc_dump_regs(struct seq_file *s, void *p)
{
struct dispc_device *dispc = s->private;
int i, j;
const char *mgr_names[] = {
[OMAP_DSS_CHANNEL_LCD] = "LCD",
[OMAP_DSS_CHANNEL_DIGIT] = "TV",
[OMAP_DSS_CHANNEL_LCD2] = "LCD2",
[OMAP_DSS_CHANNEL_LCD3] = "LCD3",
};
const char *ovl_names[] = {
[OMAP_DSS_GFX] = "GFX",
[OMAP_DSS_VIDEO1] = "VID1",
[OMAP_DSS_VIDEO2] = "VID2",
[OMAP_DSS_VIDEO3] = "VID3",
[OMAP_DSS_WB] = "WB",
};
const char **p_names;
#define DUMPREG(dispc, r) \
seq_printf(s, "%-50s %08x\n", #r, dispc_read_reg(dispc, r))
if (dispc_runtime_get(dispc))
return 0;
/* DISPC common registers */
DUMPREG(dispc, DISPC_REVISION);
DUMPREG(dispc, DISPC_SYSCONFIG);
DUMPREG(dispc, DISPC_SYSSTATUS);
DUMPREG(dispc, DISPC_IRQSTATUS);
DUMPREG(dispc, DISPC_IRQENABLE);
DUMPREG(dispc, DISPC_CONTROL);
DUMPREG(dispc, DISPC_CONFIG);
DUMPREG(dispc, DISPC_CAPABLE);
DUMPREG(dispc, DISPC_LINE_STATUS);
DUMPREG(dispc, DISPC_LINE_NUMBER);
if (dispc_has_feature(dispc, FEAT_ALPHA_FIXED_ZORDER) ||
dispc_has_feature(dispc, FEAT_ALPHA_FREE_ZORDER))
DUMPREG(dispc, DISPC_GLOBAL_ALPHA);
if (dispc_has_feature(dispc, FEAT_MGR_LCD2)) {
DUMPREG(dispc, DISPC_CONTROL2);
DUMPREG(dispc, DISPC_CONFIG2);
}
if (dispc_has_feature(dispc, FEAT_MGR_LCD3)) {
DUMPREG(dispc, DISPC_CONTROL3);
DUMPREG(dispc, DISPC_CONFIG3);
}
if (dispc_has_feature(dispc, FEAT_MFLAG))
DUMPREG(dispc, DISPC_GLOBAL_MFLAG_ATTRIBUTE);
#undef DUMPREG
#define DISPC_REG(i, name) name(i)
#define DUMPREG(dispc, i, r) seq_printf(s, "%s(%s)%*s %08x\n", #r, p_names[i], \
(int)(48 - strlen(#r) - strlen(p_names[i])), " ", \
dispc_read_reg(dispc, DISPC_REG(i, r)))
p_names = mgr_names;
/* DISPC channel specific registers */
for (i = 0; i < dispc_get_num_mgrs(dispc); i++) {
DUMPREG(dispc, i, DISPC_DEFAULT_COLOR);
DUMPREG(dispc, i, DISPC_TRANS_COLOR);
DUMPREG(dispc, i, DISPC_SIZE_MGR);
if (i == OMAP_DSS_CHANNEL_DIGIT)
continue;
DUMPREG(dispc, i, DISPC_TIMING_H);
DUMPREG(dispc, i, DISPC_TIMING_V);
DUMPREG(dispc, i, DISPC_POL_FREQ);
DUMPREG(dispc, i, DISPC_DIVISORo);
DUMPREG(dispc, i, DISPC_DATA_CYCLE1);
DUMPREG(dispc, i, DISPC_DATA_CYCLE2);
DUMPREG(dispc, i, DISPC_DATA_CYCLE3);
if (dispc_has_feature(dispc, FEAT_CPR)) {
DUMPREG(dispc, i, DISPC_CPR_COEF_R);
DUMPREG(dispc, i, DISPC_CPR_COEF_G);
DUMPREG(dispc, i, DISPC_CPR_COEF_B);
}
}
p_names = ovl_names;
for (i = 0; i < dispc_get_num_ovls(dispc); i++) {
DUMPREG(dispc, i, DISPC_OVL_BA0);
DUMPREG(dispc, i, DISPC_OVL_BA1);
DUMPREG(dispc, i, DISPC_OVL_POSITION);
DUMPREG(dispc, i, DISPC_OVL_SIZE);
DUMPREG(dispc, i, DISPC_OVL_ATTRIBUTES);
DUMPREG(dispc, i, DISPC_OVL_FIFO_THRESHOLD);
DUMPREG(dispc, i, DISPC_OVL_FIFO_SIZE_STATUS);
DUMPREG(dispc, i, DISPC_OVL_ROW_INC);
DUMPREG(dispc, i, DISPC_OVL_PIXEL_INC);
if (dispc_has_feature(dispc, FEAT_PRELOAD))
DUMPREG(dispc, i, DISPC_OVL_PRELOAD);
if (dispc_has_feature(dispc, FEAT_MFLAG))
DUMPREG(dispc, i, DISPC_OVL_MFLAG_THRESHOLD);
if (i == OMAP_DSS_GFX) {
DUMPREG(dispc, i, DISPC_OVL_WINDOW_SKIP);
DUMPREG(dispc, i, DISPC_OVL_TABLE_BA);
continue;
}
DUMPREG(dispc, i, DISPC_OVL_FIR);
DUMPREG(dispc, i, DISPC_OVL_PICTURE_SIZE);
DUMPREG(dispc, i, DISPC_OVL_ACCU0);
DUMPREG(dispc, i, DISPC_OVL_ACCU1);
if (dispc_has_feature(dispc, FEAT_HANDLE_UV_SEPARATE)) {
DUMPREG(dispc, i, DISPC_OVL_BA0_UV);
DUMPREG(dispc, i, DISPC_OVL_BA1_UV);
DUMPREG(dispc, i, DISPC_OVL_FIR2);
DUMPREG(dispc, i, DISPC_OVL_ACCU2_0);
DUMPREG(dispc, i, DISPC_OVL_ACCU2_1);
}
if (dispc_has_feature(dispc, FEAT_ATTR2))
DUMPREG(dispc, i, DISPC_OVL_ATTRIBUTES2);
}
if (dispc->feat->has_writeback) {
i = OMAP_DSS_WB;
DUMPREG(dispc, i, DISPC_OVL_BA0);
DUMPREG(dispc, i, DISPC_OVL_BA1);
DUMPREG(dispc, i, DISPC_OVL_SIZE);
DUMPREG(dispc, i, DISPC_OVL_ATTRIBUTES);
DUMPREG(dispc, i, DISPC_OVL_FIFO_THRESHOLD);
DUMPREG(dispc, i, DISPC_OVL_FIFO_SIZE_STATUS);
DUMPREG(dispc, i, DISPC_OVL_ROW_INC);
DUMPREG(dispc, i, DISPC_OVL_PIXEL_INC);
if (dispc_has_feature(dispc, FEAT_MFLAG))
DUMPREG(dispc, i, DISPC_OVL_MFLAG_THRESHOLD);
DUMPREG(dispc, i, DISPC_OVL_FIR);
DUMPREG(dispc, i, DISPC_OVL_PICTURE_SIZE);
DUMPREG(dispc, i, DISPC_OVL_ACCU0);
DUMPREG(dispc, i, DISPC_OVL_ACCU1);
if (dispc_has_feature(dispc, FEAT_HANDLE_UV_SEPARATE)) {
DUMPREG(dispc, i, DISPC_OVL_BA0_UV);
DUMPREG(dispc, i, DISPC_OVL_BA1_UV);
DUMPREG(dispc, i, DISPC_OVL_FIR2);
DUMPREG(dispc, i, DISPC_OVL_ACCU2_0);
DUMPREG(dispc, i, DISPC_OVL_ACCU2_1);
}
if (dispc_has_feature(dispc, FEAT_ATTR2))
DUMPREG(dispc, i, DISPC_OVL_ATTRIBUTES2);
}
#undef DISPC_REG
#undef DUMPREG
#define DISPC_REG(plane, name, i) name(plane, i)
#define DUMPREG(dispc, plane, name, i) \
seq_printf(s, "%s_%d(%s)%*s %08x\n", #name, i, p_names[plane], \
(int)(46 - strlen(#name) - strlen(p_names[plane])), " ", \
dispc_read_reg(dispc, DISPC_REG(plane, name, i)))
/* Video pipeline coefficient registers */
/* start from OMAP_DSS_VIDEO1 */
for (i = 1; i < dispc_get_num_ovls(dispc); i++) {
for (j = 0; j < 8; j++)
DUMPREG(dispc, i, DISPC_OVL_FIR_COEF_H, j);
for (j = 0; j < 8; j++)
DUMPREG(dispc, i, DISPC_OVL_FIR_COEF_HV, j);
for (j = 0; j < 5; j++)
DUMPREG(dispc, i, DISPC_OVL_CONV_COEF, j);
if (dispc_has_feature(dispc, FEAT_FIR_COEF_V)) {
for (j = 0; j < 8; j++)
DUMPREG(dispc, i, DISPC_OVL_FIR_COEF_V, j);
}
if (dispc_has_feature(dispc, FEAT_HANDLE_UV_SEPARATE)) {
for (j = 0; j < 8; j++)
DUMPREG(dispc, i, DISPC_OVL_FIR_COEF_H2, j);
for (j = 0; j < 8; j++)
DUMPREG(dispc, i, DISPC_OVL_FIR_COEF_HV2, j);
for (j = 0; j < 8; j++)
DUMPREG(dispc, i, DISPC_OVL_FIR_COEF_V2, j);
}
}
dispc_runtime_put(dispc);
#undef DISPC_REG
#undef DUMPREG
return 0;
}
/* calculate clock rates using dividers in cinfo */
int dispc_calc_clock_rates(struct dispc_device *dispc,
unsigned long dispc_fclk_rate,
struct dispc_clock_info *cinfo)
{
if (cinfo->lck_div > 255 || cinfo->lck_div == 0)
return -EINVAL;
if (cinfo->pck_div < 1 || cinfo->pck_div > 255)
return -EINVAL;
cinfo->lck = dispc_fclk_rate / cinfo->lck_div;
cinfo->pck = cinfo->lck / cinfo->pck_div;
return 0;
}
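/*
* Walk all valid lck/pck divider pairs for the given DISPC functional clock
* rate and call 'func' for every candidate whose pixel clock falls within
* [pck_min, pck_max]. The callback returns true to accept a candidate and
* stop the search, in which case dispc_div_calc() returns true as well.
*/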
bool dispc_div_calc(struct dispc_device *dispc, unsigned long dispc_freq,
unsigned long pck_min, unsigned long pck_max,
dispc_div_calc_func func, void *data)
{
int lckd, lckd_start, lckd_stop;
int pckd, pckd_start, pckd_stop;
unsigned long pck, lck;
unsigned long lck_max;
unsigned long pckd_hw_min, pckd_hw_max;
unsigned int min_fck_per_pck;
unsigned long fck;
#ifdef CONFIG_OMAP2_DSS_MIN_FCK_PER_PCK
min_fck_per_pck = CONFIG_OMAP2_DSS_MIN_FCK_PER_PCK;
#else
min_fck_per_pck = 0;
#endif
pckd_hw_min = dispc->feat->min_pcd;
pckd_hw_max = 255;
lck_max = dss_get_max_fck_rate(dispc->dss);
pck_min = pck_min ? pck_min : 1;
pck_max = pck_max ? pck_max : ULONG_MAX;
lckd_start = max(DIV_ROUND_UP(dispc_freq, lck_max), 1ul);
lckd_stop = min(dispc_freq / pck_min, 255ul);
for (lckd = lckd_start; lckd <= lckd_stop; ++lckd) {
lck = dispc_freq / lckd;
pckd_start = max(DIV_ROUND_UP(lck, pck_max), pckd_hw_min);
pckd_stop = min(lck / pck_min, pckd_hw_max);
for (pckd = pckd_start; pckd <= pckd_stop; ++pckd) {
pck = lck / pckd;
/*
* For OMAP2/3 the DISPC fclk is the same as LCD's logic
* clock, which means we're configuring DISPC fclk here
* also. Thus we need to use the calculated lck. For
* OMAP4+ the DISPC fclk is a separate clock.
*/
if (dispc_has_feature(dispc, FEAT_CORE_CLK_DIV))
fck = dispc_core_clk_rate(dispc);
else
fck = lck;
if (fck < pck * min_fck_per_pck)
continue;
if (func(lckd, pckd, lck, pck, data))
return true;
}
}
return false;
}
void dispc_mgr_set_clock_div(struct dispc_device *dispc,
enum omap_channel channel,
const struct dispc_clock_info *cinfo)
{
DSSDBG("lck = %lu (%u)\n", cinfo->lck, cinfo->lck_div);
DSSDBG("pck = %lu (%u)\n", cinfo->pck, cinfo->pck_div);
dispc_mgr_set_lcd_divisor(dispc, channel, cinfo->lck_div,
cinfo->pck_div);
}
int dispc_mgr_get_clock_div(struct dispc_device *dispc,
enum omap_channel channel,
struct dispc_clock_info *cinfo)
{
unsigned long fck;
fck = dispc_fclk_rate(dispc);
cinfo->lck_div = REG_GET(dispc, DISPC_DIVISORo(channel), 23, 16);
cinfo->pck_div = REG_GET(dispc, DISPC_DIVISORo(channel), 7, 0);
cinfo->lck = fck / cinfo->lck_div;
cinfo->pck = cinfo->lck / cinfo->pck_div;
return 0;
}
u32 dispc_read_irqstatus(struct dispc_device *dispc)
{
return dispc_read_reg(dispc, DISPC_IRQSTATUS);
}
void dispc_clear_irqstatus(struct dispc_device *dispc, u32 mask)
{
dispc_write_reg(dispc, DISPC_IRQSTATUS, mask);
}
void dispc_write_irqenable(struct dispc_device *dispc, u32 mask)
{
u32 old_mask = dispc_read_reg(dispc, DISPC_IRQENABLE);
/* clear the irqstatus for newly enabled irqs */
dispc_clear_irqstatus(dispc, (mask ^ old_mask) & mask);
dispc_write_reg(dispc, DISPC_IRQENABLE, mask);
/* flush posted write */
dispc_read_reg(dispc, DISPC_IRQENABLE);
}
void dispc_enable_sidle(struct dispc_device *dispc)
{
/* SIDLEMODE: smart idle */
REG_FLD_MOD(dispc, DISPC_SYSCONFIG, 2, 4, 3);
}
void dispc_disable_sidle(struct dispc_device *dispc)
{
REG_FLD_MOD(dispc, DISPC_SYSCONFIG, 1, 4, 3); /* SIDLEMODE: no idle */
}
u32 dispc_mgr_gamma_size(struct dispc_device *dispc,
enum omap_channel channel)
{
const struct dispc_gamma_desc *gdesc = &mgr_desc[channel].gamma;
if (!dispc->feat->has_gamma_table)
return 0;
return gdesc->len;
}
static void dispc_mgr_write_gamma_table(struct dispc_device *dispc,
enum omap_channel channel)
{
const struct dispc_gamma_desc *gdesc = &mgr_desc[channel].gamma;
u32 *table = dispc->gamma_table[channel];
unsigned int i;
DSSDBG("%s: channel %d\n", __func__, channel);
for (i = 0; i < gdesc->len; ++i) {
u32 v = table[i];
if (gdesc->has_index)
v |= i << 24;
else if (i == 0)
v |= 1 << 31;
dispc_write_reg(dispc, gdesc->reg, v);
}
}
static void dispc_restore_gamma_tables(struct dispc_device *dispc)
{
DSSDBG("%s()\n", __func__);
if (!dispc->feat->has_gamma_table)
return;
dispc_mgr_write_gamma_table(dispc, OMAP_DSS_CHANNEL_LCD);
dispc_mgr_write_gamma_table(dispc, OMAP_DSS_CHANNEL_DIGIT);
if (dispc_has_feature(dispc, FEAT_MGR_LCD2))
dispc_mgr_write_gamma_table(dispc, OMAP_DSS_CHANNEL_LCD2);
if (dispc_has_feature(dispc, FEAT_MGR_LCD3))
dispc_mgr_write_gamma_table(dispc, OMAP_DSS_CHANNEL_LCD3);
}
static const struct drm_color_lut dispc_mgr_gamma_default_lut[] = {
{ .red = 0, .green = 0, .blue = 0, },
{ .red = U16_MAX, .green = U16_MAX, .blue = U16_MAX, },
};
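/*
* Linearly interpolate a DRM gamma LUT of arbitrary length onto the
* fixed-size hardware gamma table of the given channel. A NULL or too-short
* LUT falls back to the identity ramp above. The table is written to the
* hardware immediately only if the DISPC is enabled; otherwise it is applied
* when the gamma tables are restored.
*/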
void dispc_mgr_set_gamma(struct dispc_device *dispc,
enum omap_channel channel,
const struct drm_color_lut *lut,
unsigned int length)
{
const struct dispc_gamma_desc *gdesc = &mgr_desc[channel].gamma;
u32 *table = dispc->gamma_table[channel];
uint i;
DSSDBG("%s: channel %d, lut len %u, hw len %u\n", __func__,
channel, length, gdesc->len);
if (!dispc->feat->has_gamma_table)
return;
if (lut == NULL || length < 2) {
lut = dispc_mgr_gamma_default_lut;
length = ARRAY_SIZE(dispc_mgr_gamma_default_lut);
}
for (i = 0; i < length - 1; ++i) {
uint first = i * (gdesc->len - 1) / (length - 1);
uint last = (i + 1) * (gdesc->len - 1) / (length - 1);
uint w = last - first;
u16 r, g, b;
uint j;
if (w == 0)
continue;
for (j = 0; j <= w; j++) {
r = (lut[i].red * (w - j) + lut[i+1].red * j) / w;
g = (lut[i].green * (w - j) + lut[i+1].green * j) / w;
b = (lut[i].blue * (w - j) + lut[i+1].blue * j) / w;
r >>= 16 - gdesc->bits;
g >>= 16 - gdesc->bits;
b >>= 16 - gdesc->bits;
table[first + j] = (r << (gdesc->bits * 2)) |
(g << gdesc->bits) | b;
}
}
if (dispc->is_enabled)
dispc_mgr_write_gamma_table(dispc, channel);
}
static int dispc_init_gamma_tables(struct dispc_device *dispc)
{
int channel;
if (!dispc->feat->has_gamma_table)
return 0;
for (channel = 0; channel < ARRAY_SIZE(dispc->gamma_table); channel++) {
const struct dispc_gamma_desc *gdesc = &mgr_desc[channel].gamma;
u32 *gt;
if (channel == OMAP_DSS_CHANNEL_LCD2 &&
!dispc_has_feature(dispc, FEAT_MGR_LCD2))
continue;
if (channel == OMAP_DSS_CHANNEL_LCD3 &&
!dispc_has_feature(dispc, FEAT_MGR_LCD3))
continue;
gt = devm_kmalloc_array(&dispc->pdev->dev, gdesc->len,
sizeof(u32), GFP_KERNEL);
if (!gt)
return -ENOMEM;
dispc->gamma_table[channel] = gt;
dispc_mgr_set_gamma(dispc, channel, NULL, 0);
}
return 0;
}
static void _omap_dispc_initial_config(struct dispc_device *dispc)
{
u32 l;
/* Exclusively enable DISPC_CORE_CLK and set divider to 1 */
if (dispc_has_feature(dispc, FEAT_CORE_CLK_DIV)) {
l = dispc_read_reg(dispc, DISPC_DIVISOR);
/* Use DISPC_DIVISOR.LCD, instead of DISPC_DIVISOR1.LCD */
l = FLD_MOD(l, 1, 0, 0);
l = FLD_MOD(l, 1, 23, 16);
dispc_write_reg(dispc, DISPC_DIVISOR, l);
dispc->core_clk_rate = dispc_fclk_rate(dispc);
}
/* Use gamma table mode, instead of palette mode */
if (dispc->feat->has_gamma_table)
REG_FLD_MOD(dispc, DISPC_CONFIG, 1, 3, 3);
/* For older DSS versions (FEAT_FUNCGATED) this enables
* func-clock auto-gating. For newer versions
* (dispc->feat->has_gamma_table) this enables tv-out gamma tables.
*/
if (dispc_has_feature(dispc, FEAT_FUNCGATED) ||
dispc->feat->has_gamma_table)
REG_FLD_MOD(dispc, DISPC_CONFIG, 1, 9, 9);
dispc_set_loadmode(dispc, OMAP_DSS_LOAD_FRAME_ONLY);
dispc_init_fifos(dispc);
dispc_configure_burst_sizes(dispc);
dispc_ovl_enable_zorder_planes(dispc);
if (dispc->feat->mstandby_workaround)
REG_FLD_MOD(dispc, DISPC_MSTANDBY_CTRL, 1, 0, 0);
if (dispc_has_feature(dispc, FEAT_MFLAG))
dispc_init_mflag(dispc);
}
static const enum dispc_feature_id omap2_dispc_features_list[] = {
FEAT_LCDENABLEPOL,
FEAT_LCDENABLESIGNAL,
FEAT_PCKFREEENABLE,
FEAT_FUNCGATED,
FEAT_ROWREPEATENABLE,
FEAT_RESIZECONF,
};
static const enum dispc_feature_id omap3_dispc_features_list[] = {
FEAT_LCDENABLEPOL,
FEAT_LCDENABLESIGNAL,
FEAT_PCKFREEENABLE,
FEAT_FUNCGATED,
FEAT_LINEBUFFERSPLIT,
FEAT_ROWREPEATENABLE,
FEAT_RESIZECONF,
FEAT_CPR,
FEAT_PRELOAD,
FEAT_FIR_COEF_V,
FEAT_ALPHA_FIXED_ZORDER,
FEAT_FIFO_MERGE,
FEAT_OMAP3_DSI_FIFO_BUG,
};
static const enum dispc_feature_id am43xx_dispc_features_list[] = {
FEAT_LCDENABLEPOL,
FEAT_LCDENABLESIGNAL,
FEAT_PCKFREEENABLE,
FEAT_FUNCGATED,
FEAT_LINEBUFFERSPLIT,
FEAT_ROWREPEATENABLE,
FEAT_RESIZECONF,
FEAT_CPR,
FEAT_PRELOAD,
FEAT_FIR_COEF_V,
FEAT_ALPHA_FIXED_ZORDER,
FEAT_FIFO_MERGE,
};
static const enum dispc_feature_id omap4_dispc_features_list[] = {
FEAT_MGR_LCD2,
FEAT_CORE_CLK_DIV,
FEAT_HANDLE_UV_SEPARATE,
FEAT_ATTR2,
FEAT_CPR,
FEAT_PRELOAD,
FEAT_FIR_COEF_V,
FEAT_ALPHA_FREE_ZORDER,
FEAT_FIFO_MERGE,
FEAT_BURST_2D,
};
static const enum dispc_feature_id omap5_dispc_features_list[] = {
FEAT_MGR_LCD2,
FEAT_MGR_LCD3,
FEAT_CORE_CLK_DIV,
FEAT_HANDLE_UV_SEPARATE,
FEAT_ATTR2,
FEAT_CPR,
FEAT_PRELOAD,
FEAT_FIR_COEF_V,
FEAT_ALPHA_FREE_ZORDER,
FEAT_FIFO_MERGE,
FEAT_BURST_2D,
FEAT_MFLAG,
};
static const struct dss_reg_field omap2_dispc_reg_fields[] = {
[FEAT_REG_FIRHINC] = { 11, 0 },
[FEAT_REG_FIRVINC] = { 27, 16 },
[FEAT_REG_FIFOLOWTHRESHOLD] = { 8, 0 },
[FEAT_REG_FIFOHIGHTHRESHOLD] = { 24, 16 },
[FEAT_REG_FIFOSIZE] = { 8, 0 },
[FEAT_REG_HORIZONTALACCU] = { 9, 0 },
[FEAT_REG_VERTICALACCU] = { 25, 16 },
};
static const struct dss_reg_field omap3_dispc_reg_fields[] = {
[FEAT_REG_FIRHINC] = { 12, 0 },
[FEAT_REG_FIRVINC] = { 28, 16 },
[FEAT_REG_FIFOLOWTHRESHOLD] = { 11, 0 },
[FEAT_REG_FIFOHIGHTHRESHOLD] = { 27, 16 },
[FEAT_REG_FIFOSIZE] = { 10, 0 },
[FEAT_REG_HORIZONTALACCU] = { 9, 0 },
[FEAT_REG_VERTICALACCU] = { 25, 16 },
};
static const struct dss_reg_field omap4_dispc_reg_fields[] = {
[FEAT_REG_FIRHINC] = { 12, 0 },
[FEAT_REG_FIRVINC] = { 28, 16 },
[FEAT_REG_FIFOLOWTHRESHOLD] = { 15, 0 },
[FEAT_REG_FIFOHIGHTHRESHOLD] = { 31, 16 },
[FEAT_REG_FIFOSIZE] = { 15, 0 },
[FEAT_REG_HORIZONTALACCU] = { 10, 0 },
[FEAT_REG_VERTICALACCU] = { 26, 16 },
};
static const enum omap_overlay_caps omap2_dispc_overlay_caps[] = {
/* OMAP_DSS_GFX */
OMAP_DSS_OVL_CAP_POS | OMAP_DSS_OVL_CAP_REPLICATION,
/* OMAP_DSS_VIDEO1 */
OMAP_DSS_OVL_CAP_SCALE | OMAP_DSS_OVL_CAP_POS |
OMAP_DSS_OVL_CAP_REPLICATION,
/* OMAP_DSS_VIDEO2 */
OMAP_DSS_OVL_CAP_SCALE | OMAP_DSS_OVL_CAP_POS |
OMAP_DSS_OVL_CAP_REPLICATION,
};
static const enum omap_overlay_caps omap3430_dispc_overlay_caps[] = {
/* OMAP_DSS_GFX */
OMAP_DSS_OVL_CAP_GLOBAL_ALPHA | OMAP_DSS_OVL_CAP_POS |
OMAP_DSS_OVL_CAP_REPLICATION,
/* OMAP_DSS_VIDEO1 */
OMAP_DSS_OVL_CAP_SCALE | OMAP_DSS_OVL_CAP_POS |
OMAP_DSS_OVL_CAP_REPLICATION,
/* OMAP_DSS_VIDEO2 */
OMAP_DSS_OVL_CAP_SCALE | OMAP_DSS_OVL_CAP_GLOBAL_ALPHA |
OMAP_DSS_OVL_CAP_POS | OMAP_DSS_OVL_CAP_REPLICATION,
};
static const enum omap_overlay_caps omap3630_dispc_overlay_caps[] = {
/* OMAP_DSS_GFX */
OMAP_DSS_OVL_CAP_GLOBAL_ALPHA | OMAP_DSS_OVL_CAP_PRE_MULT_ALPHA |
OMAP_DSS_OVL_CAP_POS | OMAP_DSS_OVL_CAP_REPLICATION,
/* OMAP_DSS_VIDEO1 */
OMAP_DSS_OVL_CAP_SCALE | OMAP_DSS_OVL_CAP_POS |
OMAP_DSS_OVL_CAP_REPLICATION,
/* OMAP_DSS_VIDEO2 */
OMAP_DSS_OVL_CAP_SCALE | OMAP_DSS_OVL_CAP_GLOBAL_ALPHA |
OMAP_DSS_OVL_CAP_PRE_MULT_ALPHA | OMAP_DSS_OVL_CAP_POS |
OMAP_DSS_OVL_CAP_REPLICATION,
};
static const enum omap_overlay_caps omap4_dispc_overlay_caps[] = {
/* OMAP_DSS_GFX */
OMAP_DSS_OVL_CAP_GLOBAL_ALPHA | OMAP_DSS_OVL_CAP_PRE_MULT_ALPHA |
OMAP_DSS_OVL_CAP_ZORDER | OMAP_DSS_OVL_CAP_POS |
OMAP_DSS_OVL_CAP_REPLICATION,
/* OMAP_DSS_VIDEO1 */
OMAP_DSS_OVL_CAP_SCALE | OMAP_DSS_OVL_CAP_GLOBAL_ALPHA |
OMAP_DSS_OVL_CAP_PRE_MULT_ALPHA | OMAP_DSS_OVL_CAP_ZORDER |
OMAP_DSS_OVL_CAP_POS | OMAP_DSS_OVL_CAP_REPLICATION,
/* OMAP_DSS_VIDEO2 */
OMAP_DSS_OVL_CAP_SCALE | OMAP_DSS_OVL_CAP_GLOBAL_ALPHA |
OMAP_DSS_OVL_CAP_PRE_MULT_ALPHA | OMAP_DSS_OVL_CAP_ZORDER |
OMAP_DSS_OVL_CAP_POS | OMAP_DSS_OVL_CAP_REPLICATION,
/* OMAP_DSS_VIDEO3 */
OMAP_DSS_OVL_CAP_SCALE | OMAP_DSS_OVL_CAP_GLOBAL_ALPHA |
OMAP_DSS_OVL_CAP_PRE_MULT_ALPHA | OMAP_DSS_OVL_CAP_ZORDER |
OMAP_DSS_OVL_CAP_POS | OMAP_DSS_OVL_CAP_REPLICATION,
};
#define COLOR_ARRAY(arr...) (const u32[]) { arr, 0 }
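/*
* COLOR_ARRAY() builds a zero-terminated list of DRM fourcc codes; the
* trailing 0 is the sentinel that the color-mode and scaler lookups
* iterate up to.
*/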
static const u32 *omap2_dispc_supported_color_modes[] = {
/* OMAP_DSS_GFX */
COLOR_ARRAY(
DRM_FORMAT_RGBX4444, DRM_FORMAT_RGB565,
DRM_FORMAT_XRGB8888, DRM_FORMAT_RGB888),
/* OMAP_DSS_VIDEO1 */
COLOR_ARRAY(
DRM_FORMAT_RGB565, DRM_FORMAT_XRGB8888,
DRM_FORMAT_RGB888, DRM_FORMAT_YUYV,
DRM_FORMAT_UYVY),
/* OMAP_DSS_VIDEO2 */
COLOR_ARRAY(
DRM_FORMAT_RGB565, DRM_FORMAT_XRGB8888,
DRM_FORMAT_RGB888, DRM_FORMAT_YUYV,
DRM_FORMAT_UYVY),
};
static const u32 *omap3_dispc_supported_color_modes[] = {
/* OMAP_DSS_GFX */
COLOR_ARRAY(
DRM_FORMAT_RGBX4444, DRM_FORMAT_ARGB4444,
DRM_FORMAT_RGB565, DRM_FORMAT_XRGB8888,
DRM_FORMAT_RGB888, DRM_FORMAT_ARGB8888,
DRM_FORMAT_RGBA8888, DRM_FORMAT_RGBX8888),
/* OMAP_DSS_VIDEO1 */
COLOR_ARRAY(
DRM_FORMAT_XRGB8888, DRM_FORMAT_RGB888,
DRM_FORMAT_RGBX4444, DRM_FORMAT_RGB565,
DRM_FORMAT_YUYV, DRM_FORMAT_UYVY),
/* OMAP_DSS_VIDEO2 */
COLOR_ARRAY(
DRM_FORMAT_RGBX4444, DRM_FORMAT_ARGB4444,
DRM_FORMAT_RGB565, DRM_FORMAT_XRGB8888,
DRM_FORMAT_RGB888, DRM_FORMAT_YUYV,
DRM_FORMAT_UYVY, DRM_FORMAT_ARGB8888,
DRM_FORMAT_RGBA8888, DRM_FORMAT_RGBX8888),
};
static const u32 *omap4_dispc_supported_color_modes[] = {
/* OMAP_DSS_GFX */
COLOR_ARRAY(
DRM_FORMAT_RGBX4444, DRM_FORMAT_ARGB4444,
DRM_FORMAT_RGB565, DRM_FORMAT_XRGB8888,
DRM_FORMAT_RGB888, DRM_FORMAT_ARGB8888,
DRM_FORMAT_RGBA8888, DRM_FORMAT_RGBX8888,
DRM_FORMAT_ARGB1555, DRM_FORMAT_XRGB4444,
DRM_FORMAT_RGBA4444, DRM_FORMAT_XRGB1555),
/* OMAP_DSS_VIDEO1 */
COLOR_ARRAY(
DRM_FORMAT_RGB565, DRM_FORMAT_RGBX4444,
DRM_FORMAT_YUYV, DRM_FORMAT_ARGB1555,
DRM_FORMAT_RGBA8888, DRM_FORMAT_NV12,
DRM_FORMAT_RGBA4444, DRM_FORMAT_XRGB8888,
DRM_FORMAT_RGB888, DRM_FORMAT_UYVY,
DRM_FORMAT_ARGB4444, DRM_FORMAT_XRGB1555,
DRM_FORMAT_ARGB8888, DRM_FORMAT_XRGB4444,
DRM_FORMAT_RGBX8888),
/* OMAP_DSS_VIDEO2 */
COLOR_ARRAY(
DRM_FORMAT_RGB565, DRM_FORMAT_RGBX4444,
DRM_FORMAT_YUYV, DRM_FORMAT_ARGB1555,
DRM_FORMAT_RGBA8888, DRM_FORMAT_NV12,
DRM_FORMAT_RGBA4444, DRM_FORMAT_XRGB8888,
DRM_FORMAT_RGB888, DRM_FORMAT_UYVY,
DRM_FORMAT_ARGB4444, DRM_FORMAT_XRGB1555,
DRM_FORMAT_ARGB8888, DRM_FORMAT_XRGB4444,
DRM_FORMAT_RGBX8888),
/* OMAP_DSS_VIDEO3 */
COLOR_ARRAY(
DRM_FORMAT_RGB565, DRM_FORMAT_RGBX4444,
DRM_FORMAT_YUYV, DRM_FORMAT_ARGB1555,
DRM_FORMAT_RGBA8888, DRM_FORMAT_NV12,
DRM_FORMAT_RGBA4444, DRM_FORMAT_XRGB8888,
DRM_FORMAT_RGB888, DRM_FORMAT_UYVY,
DRM_FORMAT_ARGB4444, DRM_FORMAT_XRGB1555,
DRM_FORMAT_ARGB8888, DRM_FORMAT_XRGB4444,
DRM_FORMAT_RGBX8888),
/* OMAP_DSS_WB */
COLOR_ARRAY(
DRM_FORMAT_RGB565, DRM_FORMAT_RGBX4444,
DRM_FORMAT_YUYV, DRM_FORMAT_ARGB1555,
DRM_FORMAT_RGBA8888, DRM_FORMAT_NV12,
DRM_FORMAT_RGBA4444, DRM_FORMAT_XRGB8888,
DRM_FORMAT_RGB888, DRM_FORMAT_UYVY,
DRM_FORMAT_ARGB4444, DRM_FORMAT_XRGB1555,
DRM_FORMAT_ARGB8888, DRM_FORMAT_XRGB4444,
DRM_FORMAT_RGBX8888),
};
static const u32 omap3_dispc_supported_scaler_color_modes[] = {
DRM_FORMAT_XRGB8888, DRM_FORMAT_RGB565, DRM_FORMAT_YUYV,
DRM_FORMAT_UYVY,
0,
};
static const struct dispc_features omap24xx_dispc_feats = {
.sw_start = 5,
.fp_start = 15,
.bp_start = 27,
.sw_max = 64,
.vp_max = 255,
.hp_max = 256,
.mgr_width_start = 10,
.mgr_height_start = 26,
.mgr_width_max = 2048,
.mgr_height_max = 2048,
.ovl_width_max = 2048,
.ovl_height_max = 2048,
.max_lcd_pclk = 66500000,
.max_downscale = 2,
/*
* Assume the line buffer width to be 768 pixels, as the OMAP2 DISPC
* scaler cannot scale an image wider than 768 pixels.
*/
.max_line_width = 768,
.min_pcd = 2,
.calc_scaling = dispc_ovl_calc_scaling_24xx,
.calc_core_clk = calc_core_clk_24xx,
.num_fifos = 3,
.features = omap2_dispc_features_list,
.num_features = ARRAY_SIZE(omap2_dispc_features_list),
.reg_fields = omap2_dispc_reg_fields,
.num_reg_fields = ARRAY_SIZE(omap2_dispc_reg_fields),
.overlay_caps = omap2_dispc_overlay_caps,
.supported_color_modes = omap2_dispc_supported_color_modes,
.supported_scaler_color_modes = COLOR_ARRAY(DRM_FORMAT_XRGB8888),
.num_mgrs = 2,
.num_ovls = 3,
.buffer_size_unit = 1,
.burst_size_unit = 8,
.no_framedone_tv = true,
.set_max_preload = false,
.last_pixel_inc_missing = true,
};
static const struct dispc_features omap34xx_rev1_0_dispc_feats = {
.sw_start = 5,
.fp_start = 15,
.bp_start = 27,
.sw_max = 64,
.vp_max = 255,
.hp_max = 256,
.mgr_width_start = 10,
.mgr_height_start = 26,
.mgr_width_max = 2048,
.mgr_height_max = 2048,
.ovl_width_max = 2048,
.ovl_height_max = 2048,
.max_lcd_pclk = 173000000,
.max_tv_pclk = 59000000,
.max_downscale = 4,
.max_line_width = 1024,
.min_pcd = 1,
.calc_scaling = dispc_ovl_calc_scaling_34xx,
.calc_core_clk = calc_core_clk_34xx,
.num_fifos = 3,
.features = omap3_dispc_features_list,
.num_features = ARRAY_SIZE(omap3_dispc_features_list),
.reg_fields = omap3_dispc_reg_fields,
.num_reg_fields = ARRAY_SIZE(omap3_dispc_reg_fields),
.overlay_caps = omap3430_dispc_overlay_caps,
.supported_color_modes = omap3_dispc_supported_color_modes,
.supported_scaler_color_modes = omap3_dispc_supported_scaler_color_modes,
.num_mgrs = 2,
.num_ovls = 3,
.buffer_size_unit = 1,
.burst_size_unit = 8,
.no_framedone_tv = true,
.set_max_preload = false,
.last_pixel_inc_missing = true,
};
static const struct dispc_features omap34xx_rev3_0_dispc_feats = {
.sw_start = 7,
.fp_start = 19,
.bp_start = 31,
.sw_max = 256,
.vp_max = 4095,
.hp_max = 4096,
.mgr_width_start = 10,
.mgr_height_start = 26,
.mgr_width_max = 2048,
.mgr_height_max = 2048,
.ovl_width_max = 2048,
.ovl_height_max = 2048,
.max_lcd_pclk = 173000000,
.max_tv_pclk = 59000000,
.max_downscale = 4,
.max_line_width = 1024,
.min_pcd = 1,
.calc_scaling = dispc_ovl_calc_scaling_34xx,
.calc_core_clk = calc_core_clk_34xx,
.num_fifos = 3,
.features = omap3_dispc_features_list,
.num_features = ARRAY_SIZE(omap3_dispc_features_list),
.reg_fields = omap3_dispc_reg_fields,
.num_reg_fields = ARRAY_SIZE(omap3_dispc_reg_fields),
.overlay_caps = omap3430_dispc_overlay_caps,
.supported_color_modes = omap3_dispc_supported_color_modes,
.supported_scaler_color_modes = omap3_dispc_supported_scaler_color_modes,
.num_mgrs = 2,
.num_ovls = 3,
.buffer_size_unit = 1,
.burst_size_unit = 8,
.no_framedone_tv = true,
.set_max_preload = false,
.last_pixel_inc_missing = true,
};
static const struct dispc_features omap36xx_dispc_feats = {
.sw_start = 7,
.fp_start = 19,
.bp_start = 31,
.sw_max = 256,
.vp_max = 4095,
.hp_max = 4096,
.mgr_width_start = 10,
.mgr_height_start = 26,
.mgr_width_max = 2048,
.mgr_height_max = 2048,
.ovl_width_max = 2048,
.ovl_height_max = 2048,
.max_lcd_pclk = 173000000,
.max_tv_pclk = 59000000,
.max_downscale = 4,
.max_line_width = 1024,
.min_pcd = 1,
.calc_scaling = dispc_ovl_calc_scaling_34xx,
.calc_core_clk = calc_core_clk_34xx,
.num_fifos = 3,
.features = omap3_dispc_features_list,
.num_features = ARRAY_SIZE(omap3_dispc_features_list),
.reg_fields = omap3_dispc_reg_fields,
.num_reg_fields = ARRAY_SIZE(omap3_dispc_reg_fields),
.overlay_caps = omap3630_dispc_overlay_caps,
.supported_color_modes = omap3_dispc_supported_color_modes,
.supported_scaler_color_modes = omap3_dispc_supported_scaler_color_modes,
.num_mgrs = 2,
.num_ovls = 3,
.buffer_size_unit = 1,
.burst_size_unit = 8,
.no_framedone_tv = true,
.set_max_preload = false,
.last_pixel_inc_missing = true,
};
static const struct dispc_features am43xx_dispc_feats = {
.sw_start = 7,
.fp_start = 19,
.bp_start = 31,
.sw_max = 256,
.vp_max = 4095,
.hp_max = 4096,
.mgr_width_start = 10,
.mgr_height_start = 26,
.mgr_width_max = 2048,
.mgr_height_max = 2048,
.ovl_width_max = 2048,
.ovl_height_max = 2048,
.max_lcd_pclk = 173000000,
.max_tv_pclk = 59000000,
.max_downscale = 4,
.max_line_width = 1024,
.min_pcd = 1,
.calc_scaling = dispc_ovl_calc_scaling_34xx,
.calc_core_clk = calc_core_clk_34xx,
.num_fifos = 3,
.features = am43xx_dispc_features_list,
.num_features = ARRAY_SIZE(am43xx_dispc_features_list),
.reg_fields = omap3_dispc_reg_fields,
.num_reg_fields = ARRAY_SIZE(omap3_dispc_reg_fields),
.overlay_caps = omap3430_dispc_overlay_caps,
.supported_color_modes = omap3_dispc_supported_color_modes,
.supported_scaler_color_modes = omap3_dispc_supported_scaler_color_modes,
.num_mgrs = 1,
.num_ovls = 3,
.buffer_size_unit = 1,
.burst_size_unit = 8,
.no_framedone_tv = true,
.set_max_preload = false,
.last_pixel_inc_missing = true,
};
static const struct dispc_features omap44xx_dispc_feats = {
.sw_start = 7,
.fp_start = 19,
.bp_start = 31,
.sw_max = 256,
.vp_max = 4095,
.hp_max = 4096,
.mgr_width_start = 10,
.mgr_height_start = 26,
.mgr_width_max = 2048,
.mgr_height_max = 2048,
.ovl_width_max = 2048,
.ovl_height_max = 2048,
.max_lcd_pclk = 170000000,
.max_tv_pclk = 185625000,
.max_downscale = 4,
.max_line_width = 2048,
.min_pcd = 1,
.calc_scaling = dispc_ovl_calc_scaling_44xx,
.calc_core_clk = calc_core_clk_44xx,
.num_fifos = 5,
.features = omap4_dispc_features_list,
.num_features = ARRAY_SIZE(omap4_dispc_features_list),
.reg_fields = omap4_dispc_reg_fields,
.num_reg_fields = ARRAY_SIZE(omap4_dispc_reg_fields),
.overlay_caps = omap4_dispc_overlay_caps,
.supported_color_modes = omap4_dispc_supported_color_modes,
.num_mgrs = 3,
.num_ovls = 4,
.buffer_size_unit = 16,
.burst_size_unit = 16,
.gfx_fifo_workaround = true,
.set_max_preload = true,
.supports_sync_align = true,
.has_writeback = true,
.supports_double_pixel = true,
.reverse_ilace_field_order = true,
.has_gamma_table = true,
.has_gamma_i734_bug = true,
};
static const struct dispc_features omap54xx_dispc_feats = {
.sw_start = 7,
.fp_start = 19,
.bp_start = 31,
.sw_max = 256,
.vp_max = 4095,
.hp_max = 4096,
.mgr_width_start = 11,
.mgr_height_start = 27,
.mgr_width_max = 4096,
.mgr_height_max = 4096,
.ovl_width_max = 2048,
.ovl_height_max = 4096,
.max_lcd_pclk = 170000000,
.max_tv_pclk = 192000000,
.max_downscale = 4,
.max_line_width = 2048,
.min_pcd = 1,
.calc_scaling = dispc_ovl_calc_scaling_44xx,
.calc_core_clk = calc_core_clk_44xx,
.num_fifos = 5,
.features = omap5_dispc_features_list,
.num_features = ARRAY_SIZE(omap5_dispc_features_list),
.reg_fields = omap4_dispc_reg_fields,
.num_reg_fields = ARRAY_SIZE(omap4_dispc_reg_fields),
.overlay_caps = omap4_dispc_overlay_caps,
.supported_color_modes = omap4_dispc_supported_color_modes,
.num_mgrs = 4,
.num_ovls = 4,
.buffer_size_unit = 16,
.burst_size_unit = 16,
.gfx_fifo_workaround = true,
.mstandby_workaround = true,
.set_max_preload = true,
.supports_sync_align = true,
.has_writeback = true,
.supports_double_pixel = true,
.reverse_ilace_field_order = true,
.has_gamma_table = true,
.has_gamma_i734_bug = true,
};
static irqreturn_t dispc_irq_handler(int irq, void *arg)
{
struct dispc_device *dispc = arg;
if (!dispc->is_enabled)
return IRQ_NONE;
return dispc->user_handler(irq, dispc->user_data);
}
int dispc_request_irq(struct dispc_device *dispc, irq_handler_t handler,
void *dev_id)
{
int r;
if (dispc->user_handler != NULL)
return -EBUSY;
dispc->user_handler = handler;
dispc->user_data = dev_id;
/* ensure the dispc_irq_handler sees the values above */
smp_wmb();
r = devm_request_irq(&dispc->pdev->dev, dispc->irq, dispc_irq_handler,
IRQF_SHARED, "OMAP DISPC", dispc);
if (r) {
dispc->user_handler = NULL;
dispc->user_data = NULL;
}
return r;
}
void dispc_free_irq(struct dispc_device *dispc, void *dev_id)
{
devm_free_irq(&dispc->pdev->dev, dispc->irq, dispc);
dispc->user_handler = NULL;
dispc->user_data = NULL;
}
u32 dispc_get_memory_bandwidth_limit(struct dispc_device *dispc)
{
u32 limit = 0;
/* Optional maximum memory bandwidth */
of_property_read_u32(dispc->pdev->dev.of_node, "max-memory-bandwidth",
&limit);
return limit;
}
/*
* Workaround for errata i734 in DSS dispc
* - LCD1 Gamma Correction Is Not Working When GFX Pipe Is Disabled
*
* For gamma tables to work on LCD1 the GFX plane has to be used at
* least once after DSS HW has come out of reset. The workaround
* sets up a minimal LCD setup with GFX plane and waits for one
* vertical sync irq before disabling the setup and continuing with
* the context restore. The physical outputs are gated during the
* operation. This workaround requires that gamma table's LOADMODE
* is set to 0x2 in DISPC_CONTROL1 register.
*
* For details see:
* OMAP543x Multimedia Device Silicon Revision 2.0 Silicon Errata
* Literature Number: SWPZ037E
* Or some other relevant errata document for the DSS IP version.
*/
static const struct dispc_errata_i734_data {
struct videomode vm;
struct omap_overlay_info ovli;
struct omap_overlay_manager_info mgri;
struct dss_lcd_mgr_config lcd_conf;
} i734 = {
.vm = {
.hactive = 8, .vactive = 1,
.pixelclock = 16000000,
.hsync_len = 8, .hfront_porch = 4, .hback_porch = 4,
.vsync_len = 1, .vfront_porch = 1, .vback_porch = 1,
.flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW |
DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_SYNC_POSEDGE |
DISPLAY_FLAGS_PIXDATA_POSEDGE,
},
.ovli = {
.screen_width = 1,
.width = 1, .height = 1,
.fourcc = DRM_FORMAT_XRGB8888,
.rotation = DRM_MODE_ROTATE_0,
.rotation_type = OMAP_DSS_ROT_NONE,
.pos_x = 0, .pos_y = 0,
.out_width = 0, .out_height = 0,
.global_alpha = 0xff,
.pre_mult_alpha = 0,
.zorder = 0,
},
.mgri = {
.default_color = 0,
.trans_enabled = false,
.partial_alpha_enabled = false,
.cpr_enable = false,
},
.lcd_conf = {
.io_pad_mode = DSS_IO_PAD_MODE_BYPASS,
.stallmode = false,
.fifohandcheck = false,
.clock_info = {
.lck_div = 1,
.pck_div = 2,
},
.video_port_width = 24,
.lcden_sig_polarity = 0,
},
};
static struct i734_buf {
size_t size;
dma_addr_t paddr;
void *vaddr;
} i734_buf;
static int dispc_errata_i734_wa_init(struct dispc_device *dispc)
{
if (!dispc->feat->has_gamma_i734_bug)
return 0;
i734_buf.size = i734.ovli.width * i734.ovli.height *
color_mode_to_bpp(i734.ovli.fourcc) / 8;
i734_buf.vaddr = dma_alloc_wc(&dispc->pdev->dev, i734_buf.size,
&i734_buf.paddr, GFP_KERNEL);
if (!i734_buf.vaddr) {
dev_err(&dispc->pdev->dev, "%s: dma_alloc_wc failed\n",
__func__);
return -ENOMEM;
}
return 0;
}
static void dispc_errata_i734_wa_fini(struct dispc_device *dispc)
{
if (!dispc->feat->has_gamma_i734_bug)
return;
dma_free_wc(&dispc->pdev->dev, i734_buf.size, i734_buf.vaddr,
i734_buf.paddr);
}
static void dispc_errata_i734_wa(struct dispc_device *dispc)
{
u32 framedone_irq = dispc_mgr_get_framedone_irq(dispc,
OMAP_DSS_CHANNEL_LCD);
struct omap_overlay_info ovli;
struct dss_lcd_mgr_config lcd_conf;
u32 gatestate;
unsigned int count;
if (!dispc->feat->has_gamma_i734_bug)
return;
gatestate = REG_GET(dispc, DISPC_CONFIG, 8, 4);
ovli = i734.ovli;
ovli.paddr = i734_buf.paddr;
lcd_conf = i734.lcd_conf;
/* Gate all LCD1 outputs */
REG_FLD_MOD(dispc, DISPC_CONFIG, 0x1f, 8, 4);
/* Setup and enable GFX plane */
dispc_ovl_setup(dispc, OMAP_DSS_GFX, &ovli, &i734.vm, false,
OMAP_DSS_CHANNEL_LCD);
dispc_ovl_enable(dispc, OMAP_DSS_GFX, true);
/* Set up and enable display manager for LCD1 */
dispc_mgr_setup(dispc, OMAP_DSS_CHANNEL_LCD, &i734.mgri);
dispc_calc_clock_rates(dispc, dss_get_dispc_clk_rate(dispc->dss),
&lcd_conf.clock_info);
dispc_mgr_set_lcd_config(dispc, OMAP_DSS_CHANNEL_LCD, &lcd_conf);
dispc_mgr_set_timings(dispc, OMAP_DSS_CHANNEL_LCD, &i734.vm);
dispc_clear_irqstatus(dispc, framedone_irq);
/* Enable and then immediately disable the channel to produce just one frame */
dispc_mgr_enable(dispc, OMAP_DSS_CHANNEL_LCD, true);
dispc_mgr_enable(dispc, OMAP_DSS_CHANNEL_LCD, false);
/* Busy wait for framedone. We can't fiddle with irq handlers
* in PM resume. Typically the loop runs less than 5 times and
* waits less than a microsecond.
*/
count = 0;
while (!(dispc_read_irqstatus(dispc) & framedone_irq)) {
if (count++ > 10000) {
dev_err(&dispc->pdev->dev, "%s: framedone timeout\n",
__func__);
break;
}
}
dispc_ovl_enable(dispc, OMAP_DSS_GFX, false);
/* Clear all irq bits before continuing */
dispc_clear_irqstatus(dispc, 0xffffffff);
/* Restore the original state to LCD1 output gates */
REG_FLD_MOD(dispc, DISPC_CONFIG, gatestate, 8, 4);
}
/* DISPC HW IP initialisation */
static const struct of_device_id dispc_of_match[] = {
{ .compatible = "ti,omap2-dispc", .data = &omap24xx_dispc_feats },
{ .compatible = "ti,omap3-dispc", .data = &omap36xx_dispc_feats },
{ .compatible = "ti,omap4-dispc", .data = &omap44xx_dispc_feats },
{ .compatible = "ti,omap5-dispc", .data = &omap54xx_dispc_feats },
{ .compatible = "ti,dra7-dispc", .data = &omap54xx_dispc_feats },
{},
};
static const struct soc_device_attribute dispc_soc_devices[] = {
{ .machine = "OMAP3[45]*",
.revision = "ES[12].?", .data = &omap34xx_rev1_0_dispc_feats },
{ .machine = "OMAP3[45]*", .data = &omap34xx_rev3_0_dispc_feats },
{ .machine = "AM35*", .data = &omap34xx_rev3_0_dispc_feats },
{ .machine = "AM43*", .data = &am43xx_dispc_feats },
{ /* sentinel */ }
};
static int dispc_bind(struct device *dev, struct device *master, void *data)
{
struct platform_device *pdev = to_platform_device(dev);
const struct soc_device_attribute *soc;
struct dss_device *dss = dss_get_device(master);
struct dispc_device *dispc;
u32 rev;
int r = 0;
struct device_node *np = pdev->dev.of_node;
dispc = kzalloc(sizeof(*dispc), GFP_KERNEL);
if (!dispc)
return -ENOMEM;
dispc->pdev = pdev;
platform_set_drvdata(pdev, dispc);
dispc->dss = dss;
/*
* The OMAP3-based models can't be told apart using the compatible
* string, so use SoC device matching instead.
*/
soc = soc_device_match(dispc_soc_devices);
if (soc)
dispc->feat = soc->data;
else
dispc->feat = of_match_device(dispc_of_match, &pdev->dev)->data;
r = dispc_errata_i734_wa_init(dispc);
if (r)
goto err_free;
dispc->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(dispc->base)) {
r = PTR_ERR(dispc->base);
goto err_free;
}
dispc->irq = platform_get_irq(dispc->pdev, 0);
if (dispc->irq < 0) {
DSSERR("platform_get_irq failed\n");
r = -ENODEV;
goto err_free;
}
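/*
 * The optional "syscon-pol" property carries a syscon phandle plus a
 * register offset (cell 1); the regmap is used elsewhere in the driver
 * to program output signal polarities through the SoC control module.
 */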
if (np && of_property_read_bool(np, "syscon-pol")) {
dispc->syscon_pol = syscon_regmap_lookup_by_phandle(np, "syscon-pol");
if (IS_ERR(dispc->syscon_pol)) {
dev_err(&pdev->dev, "failed to get syscon-pol regmap\n");
r = PTR_ERR(dispc->syscon_pol);
goto err_free;
}
if (of_property_read_u32_index(np, "syscon-pol", 1,
&dispc->syscon_pol_offset)) {
dev_err(&pdev->dev, "failed to get syscon-pol offset\n");
r = -EINVAL;
goto err_free;
}
}
r = dispc_init_gamma_tables(dispc);
if (r)
goto err_free;
pm_runtime_enable(&pdev->dev);
r = dispc_runtime_get(dispc);
if (r)
goto err_runtime_get;
_omap_dispc_initial_config(dispc);
rev = dispc_read_reg(dispc, DISPC_REVISION);
dev_dbg(&pdev->dev, "OMAP DISPC rev %d.%d\n",
FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0));
dispc_runtime_put(dispc);
dss->dispc = dispc;
dispc->debugfs = dss_debugfs_create_file(dss, "dispc", dispc_dump_regs,
dispc);
return 0;
err_runtime_get:
pm_runtime_disable(&pdev->dev);
err_free:
kfree(dispc);
return r;
}
static void dispc_unbind(struct device *dev, struct device *master, void *data)
{
struct dispc_device *dispc = dev_get_drvdata(dev);
struct dss_device *dss = dispc->dss;
dss_debugfs_remove_file(dispc->debugfs);
dss->dispc = NULL;
pm_runtime_disable(dev);
dispc_errata_i734_wa_fini(dispc);
kfree(dispc);
}
static const struct component_ops dispc_component_ops = {
.bind = dispc_bind,
.unbind = dispc_unbind,
};
static int dispc_probe(struct platform_device *pdev)
{
return component_add(&pdev->dev, &dispc_component_ops);
}
static void dispc_remove(struct platform_device *pdev)
{
component_del(&pdev->dev, &dispc_component_ops);
}
static __maybe_unused int dispc_runtime_suspend(struct device *dev)
{
struct dispc_device *dispc = dev_get_drvdata(dev);
dispc->is_enabled = false;
/* ensure the dispc_irq_handler sees the is_enabled value */
smp_wmb();
/* wait for current handler to finish before turning the DISPC off */
synchronize_irq(dispc->irq);
dispc_save_context(dispc);
return 0;
}
static __maybe_unused int dispc_runtime_resume(struct device *dev)
{
struct dispc_device *dispc = dev_get_drvdata(dev);
/*
* The reset value for load mode is 0 (OMAP_DSS_LOAD_CLUT_AND_FRAME)
* but we always initialize it to 2 (OMAP_DSS_LOAD_FRAME_ONLY) in
* _omap_dispc_initial_config(). We can thus use it to detect if
* we have lost register context.
*/
if (REG_GET(dispc, DISPC_CONFIG, 2, 1) != OMAP_DSS_LOAD_FRAME_ONLY) {
_omap_dispc_initial_config(dispc);
dispc_errata_i734_wa(dispc);
dispc_restore_context(dispc);
dispc_restore_gamma_tables(dispc);
}
dispc->is_enabled = true;
/* ensure the dispc_irq_handler sees the is_enabled value */
smp_wmb();
return 0;
}
static const struct dev_pm_ops dispc_pm_ops = {
SET_RUNTIME_PM_OPS(dispc_runtime_suspend, dispc_runtime_resume, NULL)
SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
};
struct platform_driver omap_dispchw_driver = {
.probe = dispc_probe,
.remove_new = dispc_remove,
.driver = {
.name = "omapdss_dispc",
.pm = &dispc_pm_ops,
.of_match_table = dispc_of_match,
.suppress_bind_attrs = true,
},
};
| linux-master | drivers/gpu/drm/omapdrm/dss/dispc.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* HDMI TI81xx, TI38xx, TI OMAP4 etc IP driver Library
*
* Copyright (C) 2010-2011 Texas Instruments Incorporated - https://www.ti.com/
* Authors: Yong Zhi
* Mythri pk <[email protected]>
*/
#define DSS_SUBSYS_NAME "HDMICORE"
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/string.h>
#include <linux/seq_file.h>
#include <linux/sys_soc.h>
#include <sound/asound.h>
#include <sound/asoundef.h>
#include "hdmi4_core.h"
#define HDMI_CORE_AV 0x500
static inline void __iomem *hdmi_av_base(struct hdmi_core_data *core)
{
return core->base + HDMI_CORE_AV;
}
int hdmi4_core_ddc_init(struct hdmi_core_data *core)
{
void __iomem *base = core->base;
/* Turn on CLK for DDC */
REG_FLD_MOD(base, HDMI_CORE_AV_DPD, 0x7, 2, 0);
/* IN_PROG */
if (REG_GET(base, HDMI_CORE_DDC_STATUS, 4, 4) == 1) {
/* Abort transaction */
REG_FLD_MOD(base, HDMI_CORE_DDC_CMD, 0xf, 3, 0);
/* IN_PROG */
if (hdmi_wait_for_bit_change(base, HDMI_CORE_DDC_STATUS,
4, 4, 0) != 0) {
DSSERR("Timeout aborting DDC transaction\n");
return -ETIMEDOUT;
}
}
/* Clk SCL Devices */
REG_FLD_MOD(base, HDMI_CORE_DDC_CMD, 0xA, 3, 0);
/* HDMI_CORE_DDC_STATUS_IN_PROG */
if (hdmi_wait_for_bit_change(base, HDMI_CORE_DDC_STATUS,
4, 4, 0) != 0) {
DSSERR("Timeout starting SCL clock\n");
return -ETIMEDOUT;
}
/* Clear FIFO */
REG_FLD_MOD(base, HDMI_CORE_DDC_CMD, 0x9, 3, 0);
/* HDMI_CORE_DDC_STATUS_IN_PROG */
if (hdmi_wait_for_bit_change(base, HDMI_CORE_DDC_STATUS,
4, 4, 0) != 0) {
DSSERR("Timeout clearing DDC fifo\n");
return -ETIMEDOUT;
}
return 0;
}
int hdmi4_core_ddc_read(void *data, u8 *buf, unsigned int block, size_t len)
{
struct hdmi_core_data *core = data;
void __iomem *base = core->base;
u32 i;
/* HDMI_CORE_DDC_STATUS_IN_PROG */
if (hdmi_wait_for_bit_change(base, HDMI_CORE_DDC_STATUS,
4, 4, 0) != 0) {
DSSERR("Timeout waiting DDC to be ready\n");
return -ETIMEDOUT;
}
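/*
 * E-DDC addressing: each EDID block is 128 bytes and two blocks share a
 * 256-byte segment, so the segment pointer is block / 2 and odd blocks
 * start at offset 0x80 within the segment.
 */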
/* Load Segment Address Register */
REG_FLD_MOD(base, HDMI_CORE_DDC_SEGM, block / 2, 7, 0);
/* Load Slave Address Register */
REG_FLD_MOD(base, HDMI_CORE_DDC_ADDR, 0xA0 >> 1, 7, 1);
/* Load Offset Address Register */
REG_FLD_MOD(base, HDMI_CORE_DDC_OFFSET, block % 2 ? 0x80 : 0, 7, 0);
/* Load Byte Count */
REG_FLD_MOD(base, HDMI_CORE_DDC_COUNT1, len, 7, 0);
REG_FLD_MOD(base, HDMI_CORE_DDC_COUNT2, 0x0, 1, 0);
/* Set DDC_CMD */
if (block)
REG_FLD_MOD(base, HDMI_CORE_DDC_CMD, 0x4, 3, 0);
else
REG_FLD_MOD(base, HDMI_CORE_DDC_CMD, 0x2, 3, 0);
/* HDMI_CORE_DDC_STATUS_BUS_LOW */
if (REG_GET(base, HDMI_CORE_DDC_STATUS, 6, 6) == 1) {
DSSERR("I2C Bus Low?\n");
return -EIO;
}
/* HDMI_CORE_DDC_STATUS_NO_ACK */
if (REG_GET(base, HDMI_CORE_DDC_STATUS, 5, 5) == 1) {
DSSERR("I2C No Ack\n");
return -EIO;
}
for (i = 0; i < len; ++i) {
int t;
/* IN_PROG */
if (REG_GET(base, HDMI_CORE_DDC_STATUS, 4, 4) == 0) {
DSSERR("operation stopped when reading edid\n");
return -EIO;
}
t = 0;
/* FIFO_EMPTY */
while (REG_GET(base, HDMI_CORE_DDC_STATUS, 2, 2) == 1) {
if (t++ > 10000) {
DSSERR("timeout reading edid\n");
return -ETIMEDOUT;
}
udelay(1);
}
buf[i] = REG_GET(base, HDMI_CORE_DDC_DATA, 7, 0);
}
return 0;
}
static void hdmi_core_init(struct hdmi_core_video_config *video_cfg)
{
DSSDBG("Enter hdmi_core_init\n");
/* video core */
video_cfg->ip_bus_width = HDMI_INPUT_8BIT;
video_cfg->op_dither_truc = HDMI_OUTPUTTRUNCATION_8BIT;
video_cfg->deep_color_pkt = HDMI_DEEPCOLORPACKECTDISABLE;
video_cfg->pkt_mode = HDMI_PACKETMODERESERVEDVALUE;
video_cfg->hdmi_dvi = HDMI_DVI;
video_cfg->tclk_sel_clkmult = HDMI_FPLL10IDCK;
}
void hdmi4_core_powerdown_disable(struct hdmi_core_data *core)
{
DSSDBG("Enter hdmi4_core_powerdown_disable\n");
REG_FLD_MOD(core->base, HDMI_CORE_SYS_SYS_CTRL1, 0x1, 0, 0);
}
static void hdmi_core_swreset_release(struct hdmi_core_data *core)
{
DSSDBG("Enter hdmi_core_swreset_release\n");
REG_FLD_MOD(core->base, HDMI_CORE_SYS_SRST, 0x0, 0, 0);
}
static void hdmi_core_swreset_assert(struct hdmi_core_data *core)
{
DSSDBG("Enter hdmi_core_swreset_assert\n");
REG_FLD_MOD(core->base, HDMI_CORE_SYS_SRST, 0x1, 0, 0);
}
/* HDMI_CORE_VIDEO_CONFIG */
static void hdmi_core_video_config(struct hdmi_core_data *core,
struct hdmi_core_video_config *cfg)
{
u32 r = 0;
void __iomem *core_sys_base = core->base;
void __iomem *core_av_base = hdmi_av_base(core);
/* sys_ctrl1 default configuration not tunable */
r = hdmi_read_reg(core_sys_base, HDMI_CORE_SYS_SYS_CTRL1);
r = FLD_MOD(r, HDMI_CORE_SYS_SYS_CTRL1_VEN_FOLLOWVSYNC, 5, 5);
r = FLD_MOD(r, HDMI_CORE_SYS_SYS_CTRL1_HEN_FOLLOWHSYNC, 4, 4);
r = FLD_MOD(r, HDMI_CORE_SYS_SYS_CTRL1_BSEL_24BITBUS, 2, 2);
r = FLD_MOD(r, HDMI_CORE_SYS_SYS_CTRL1_EDGE_RISINGEDGE, 1, 1);
hdmi_write_reg(core_sys_base, HDMI_CORE_SYS_SYS_CTRL1, r);
REG_FLD_MOD(core_sys_base,
HDMI_CORE_SYS_VID_ACEN, cfg->ip_bus_width, 7, 6);
/* Vid_Mode */
r = hdmi_read_reg(core_sys_base, HDMI_CORE_SYS_VID_MODE);
/* dither truncation configuration */
if (cfg->op_dither_truc > HDMI_OUTPUTTRUNCATION_12BIT) {
r = FLD_MOD(r, cfg->op_dither_truc - 3, 7, 6);
r = FLD_MOD(r, 1, 5, 5);
} else {
r = FLD_MOD(r, cfg->op_dither_truc, 7, 6);
r = FLD_MOD(r, 0, 5, 5);
}
hdmi_write_reg(core_sys_base, HDMI_CORE_SYS_VID_MODE, r);
/* HDMI_Ctrl */
r = hdmi_read_reg(core_av_base, HDMI_CORE_AV_HDMI_CTRL);
r = FLD_MOD(r, cfg->deep_color_pkt, 6, 6);
r = FLD_MOD(r, cfg->pkt_mode, 5, 3);
r = FLD_MOD(r, cfg->hdmi_dvi, 0, 0);
hdmi_write_reg(core_av_base, HDMI_CORE_AV_HDMI_CTRL, r);
/* TMDS_CTRL */
REG_FLD_MOD(core_sys_base,
HDMI_CORE_SYS_TMDS_CTRL, cfg->tclk_sel_clkmult, 6, 5);
}
static void hdmi_core_write_avi_infoframe(struct hdmi_core_data *core,
struct hdmi_avi_infoframe *frame)
{
void __iomem *av_base = hdmi_av_base(core);
u8 data[HDMI_INFOFRAME_SIZE(AVI)];
int i;
hdmi_avi_infoframe_pack(frame, data, sizeof(data));
print_hex_dump_debug("AVI: ", DUMP_PREFIX_NONE, 16, 1, data,
HDMI_INFOFRAME_SIZE(AVI), false);
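/*
 * Each packed infoframe byte occupies its own 32-bit register slot,
 * hence the 4-byte stride when writing the AVI data below.
 */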
for (i = 0; i < sizeof(data); ++i) {
hdmi_write_reg(av_base, HDMI_CORE_AV_AVI_BASE + i * 4,
data[i]);
}
}
static void hdmi_core_av_packet_config(struct hdmi_core_data *core,
struct hdmi_core_packet_enable_repeat repeat_cfg)
{
/* enable/repeat the infoframe */
hdmi_write_reg(hdmi_av_base(core), HDMI_CORE_AV_PB_CTRL1,
(repeat_cfg.audio_pkt << 5) |
(repeat_cfg.audio_pkt_repeat << 4) |
(repeat_cfg.avi_infoframe << 1) |
(repeat_cfg.avi_infoframe_repeat));
/* enable/repeat the packet */
hdmi_write_reg(hdmi_av_base(core), HDMI_CORE_AV_PB_CTRL2,
(repeat_cfg.gen_cntrl_pkt << 3) |
(repeat_cfg.gen_cntrl_pkt_repeat << 2) |
(repeat_cfg.generic_pkt << 1) |
(repeat_cfg.generic_pkt_repeat));
}
void hdmi4_configure(struct hdmi_core_data *core,
struct hdmi_wp_data *wp, struct hdmi_config *cfg)
{
/* HDMI */
struct videomode vm;
struct hdmi_video_format video_format;
/* HDMI core */
struct hdmi_core_video_config v_core_cfg;
struct hdmi_core_packet_enable_repeat repeat_cfg = { 0 };
hdmi_core_init(&v_core_cfg);
hdmi_wp_init_vid_fmt_timings(&video_format, &vm, cfg);
hdmi_wp_video_config_timing(wp, &vm);
/* video config */
video_format.packing_mode = HDMI_PACK_24b_RGB_YUV444_YUV422;
hdmi_wp_video_config_format(wp, &video_format);
hdmi_wp_video_config_interface(wp, &vm);
/*
* configure core video part
* set software reset in the core
*/
hdmi_core_swreset_assert(core);
v_core_cfg.pkt_mode = HDMI_PACKETMODE24BITPERPIXEL;
v_core_cfg.hdmi_dvi = cfg->hdmi_dvi_mode;
hdmi_core_video_config(core, &v_core_cfg);
/* release software reset in the core */
hdmi_core_swreset_release(core);
if (cfg->hdmi_dvi_mode == HDMI_HDMI) {
hdmi_core_write_avi_infoframe(core, &cfg->infoframe);
/* enable/repeat the infoframe */
repeat_cfg.avi_infoframe = HDMI_PACKETENABLE;
repeat_cfg.avi_infoframe_repeat = HDMI_PACKETREPEATON;
/* wakeup */
repeat_cfg.audio_pkt = HDMI_PACKETENABLE;
repeat_cfg.audio_pkt_repeat = HDMI_PACKETREPEATON;
}
hdmi_core_av_packet_config(core, repeat_cfg);
}
void hdmi4_core_dump(struct hdmi_core_data *core, struct seq_file *s)
{
int i;
#define CORE_REG(i, name) name(i)
#define DUMPCORE(r) seq_printf(s, "%-35s %08x\n", #r,\
hdmi_read_reg(core->base, r))
#define DUMPCOREAV(r) seq_printf(s, "%-35s %08x\n", #r,\
hdmi_read_reg(hdmi_av_base(core), r))
#define DUMPCOREAV2(i, r) seq_printf(s, "%s[%d]%*s %08x\n", #r, i, \
(i < 10) ? 32 - (int)strlen(#r) : 31 - (int)strlen(#r), " ", \
hdmi_read_reg(hdmi_av_base(core), CORE_REG(i, r)))
DUMPCORE(HDMI_CORE_SYS_VND_IDL);
DUMPCORE(HDMI_CORE_SYS_DEV_IDL);
DUMPCORE(HDMI_CORE_SYS_DEV_IDH);
DUMPCORE(HDMI_CORE_SYS_DEV_REV);
DUMPCORE(HDMI_CORE_SYS_SRST);
DUMPCORE(HDMI_CORE_SYS_SYS_CTRL1);
DUMPCORE(HDMI_CORE_SYS_SYS_STAT);
DUMPCORE(HDMI_CORE_SYS_SYS_CTRL3);
DUMPCORE(HDMI_CORE_SYS_DE_DLY);
DUMPCORE(HDMI_CORE_SYS_DE_CTRL);
DUMPCORE(HDMI_CORE_SYS_DE_TOP);
DUMPCORE(HDMI_CORE_SYS_DE_CNTL);
DUMPCORE(HDMI_CORE_SYS_DE_CNTH);
DUMPCORE(HDMI_CORE_SYS_DE_LINL);
DUMPCORE(HDMI_CORE_SYS_DE_LINH_1);
DUMPCORE(HDMI_CORE_SYS_HRES_L);
DUMPCORE(HDMI_CORE_SYS_HRES_H);
DUMPCORE(HDMI_CORE_SYS_VRES_L);
DUMPCORE(HDMI_CORE_SYS_VRES_H);
DUMPCORE(HDMI_CORE_SYS_IADJUST);
DUMPCORE(HDMI_CORE_SYS_POLDETECT);
DUMPCORE(HDMI_CORE_SYS_HWIDTH1);
DUMPCORE(HDMI_CORE_SYS_HWIDTH2);
DUMPCORE(HDMI_CORE_SYS_VWIDTH);
DUMPCORE(HDMI_CORE_SYS_VID_CTRL);
DUMPCORE(HDMI_CORE_SYS_VID_ACEN);
DUMPCORE(HDMI_CORE_SYS_VID_MODE);
DUMPCORE(HDMI_CORE_SYS_VID_BLANK1);
DUMPCORE(HDMI_CORE_SYS_VID_BLANK3);
DUMPCORE(HDMI_CORE_SYS_VID_BLANK1);
DUMPCORE(HDMI_CORE_SYS_DC_HEADER);
DUMPCORE(HDMI_CORE_SYS_VID_DITHER);
DUMPCORE(HDMI_CORE_SYS_RGB2XVYCC_CT);
DUMPCORE(HDMI_CORE_SYS_R2Y_COEFF_LOW);
DUMPCORE(HDMI_CORE_SYS_R2Y_COEFF_UP);
DUMPCORE(HDMI_CORE_SYS_G2Y_COEFF_LOW);
DUMPCORE(HDMI_CORE_SYS_G2Y_COEFF_UP);
DUMPCORE(HDMI_CORE_SYS_B2Y_COEFF_LOW);
DUMPCORE(HDMI_CORE_SYS_B2Y_COEFF_UP);
DUMPCORE(HDMI_CORE_SYS_R2CB_COEFF_LOW);
DUMPCORE(HDMI_CORE_SYS_R2CB_COEFF_UP);
DUMPCORE(HDMI_CORE_SYS_G2CB_COEFF_LOW);
DUMPCORE(HDMI_CORE_SYS_G2CB_COEFF_UP);
DUMPCORE(HDMI_CORE_SYS_B2CB_COEFF_LOW);
DUMPCORE(HDMI_CORE_SYS_B2CB_COEFF_UP);
DUMPCORE(HDMI_CORE_SYS_R2CR_COEFF_LOW);
DUMPCORE(HDMI_CORE_SYS_R2CR_COEFF_UP);
DUMPCORE(HDMI_CORE_SYS_G2CR_COEFF_LOW);
DUMPCORE(HDMI_CORE_SYS_G2CR_COEFF_UP);
DUMPCORE(HDMI_CORE_SYS_B2CR_COEFF_LOW);
DUMPCORE(HDMI_CORE_SYS_B2CR_COEFF_UP);
DUMPCORE(HDMI_CORE_SYS_RGB_OFFSET_LOW);
DUMPCORE(HDMI_CORE_SYS_RGB_OFFSET_UP);
DUMPCORE(HDMI_CORE_SYS_Y_OFFSET_LOW);
DUMPCORE(HDMI_CORE_SYS_Y_OFFSET_UP);
DUMPCORE(HDMI_CORE_SYS_CBCR_OFFSET_LOW);
DUMPCORE(HDMI_CORE_SYS_CBCR_OFFSET_UP);
DUMPCORE(HDMI_CORE_SYS_INTR_STATE);
DUMPCORE(HDMI_CORE_SYS_INTR1);
DUMPCORE(HDMI_CORE_SYS_INTR2);
DUMPCORE(HDMI_CORE_SYS_INTR3);
DUMPCORE(HDMI_CORE_SYS_INTR4);
DUMPCORE(HDMI_CORE_SYS_INTR_UNMASK1);
DUMPCORE(HDMI_CORE_SYS_INTR_UNMASK2);
DUMPCORE(HDMI_CORE_SYS_INTR_UNMASK3);
DUMPCORE(HDMI_CORE_SYS_INTR_UNMASK4);
DUMPCORE(HDMI_CORE_SYS_INTR_CTRL);
DUMPCORE(HDMI_CORE_SYS_TMDS_CTRL);
DUMPCORE(HDMI_CORE_DDC_ADDR);
DUMPCORE(HDMI_CORE_DDC_SEGM);
DUMPCORE(HDMI_CORE_DDC_OFFSET);
DUMPCORE(HDMI_CORE_DDC_COUNT1);
DUMPCORE(HDMI_CORE_DDC_COUNT2);
DUMPCORE(HDMI_CORE_DDC_STATUS);
DUMPCORE(HDMI_CORE_DDC_CMD);
DUMPCORE(HDMI_CORE_DDC_DATA);
DUMPCOREAV(HDMI_CORE_AV_ACR_CTRL);
DUMPCOREAV(HDMI_CORE_AV_FREQ_SVAL);
DUMPCOREAV(HDMI_CORE_AV_N_SVAL1);
DUMPCOREAV(HDMI_CORE_AV_N_SVAL2);
DUMPCOREAV(HDMI_CORE_AV_N_SVAL3);
DUMPCOREAV(HDMI_CORE_AV_CTS_SVAL1);
DUMPCOREAV(HDMI_CORE_AV_CTS_SVAL2);
DUMPCOREAV(HDMI_CORE_AV_CTS_SVAL3);
DUMPCOREAV(HDMI_CORE_AV_CTS_HVAL1);
DUMPCOREAV(HDMI_CORE_AV_CTS_HVAL2);
DUMPCOREAV(HDMI_CORE_AV_CTS_HVAL3);
DUMPCOREAV(HDMI_CORE_AV_AUD_MODE);
DUMPCOREAV(HDMI_CORE_AV_SPDIF_CTRL);
DUMPCOREAV(HDMI_CORE_AV_HW_SPDIF_FS);
DUMPCOREAV(HDMI_CORE_AV_SWAP_I2S);
DUMPCOREAV(HDMI_CORE_AV_SPDIF_ERTH);
DUMPCOREAV(HDMI_CORE_AV_I2S_IN_MAP);
DUMPCOREAV(HDMI_CORE_AV_I2S_IN_CTRL);
DUMPCOREAV(HDMI_CORE_AV_I2S_CHST0);
DUMPCOREAV(HDMI_CORE_AV_I2S_CHST1);
DUMPCOREAV(HDMI_CORE_AV_I2S_CHST2);
DUMPCOREAV(HDMI_CORE_AV_I2S_CHST4);
DUMPCOREAV(HDMI_CORE_AV_I2S_CHST5);
DUMPCOREAV(HDMI_CORE_AV_ASRC);
DUMPCOREAV(HDMI_CORE_AV_I2S_IN_LEN);
DUMPCOREAV(HDMI_CORE_AV_HDMI_CTRL);
DUMPCOREAV(HDMI_CORE_AV_AUDO_TXSTAT);
DUMPCOREAV(HDMI_CORE_AV_AUD_PAR_BUSCLK_1);
DUMPCOREAV(HDMI_CORE_AV_AUD_PAR_BUSCLK_2);
DUMPCOREAV(HDMI_CORE_AV_AUD_PAR_BUSCLK_3);
DUMPCOREAV(HDMI_CORE_AV_TEST_TXCTRL);
DUMPCOREAV(HDMI_CORE_AV_DPD);
DUMPCOREAV(HDMI_CORE_AV_PB_CTRL1);
DUMPCOREAV(HDMI_CORE_AV_PB_CTRL2);
DUMPCOREAV(HDMI_CORE_AV_AVI_TYPE);
DUMPCOREAV(HDMI_CORE_AV_AVI_VERS);
DUMPCOREAV(HDMI_CORE_AV_AVI_LEN);
DUMPCOREAV(HDMI_CORE_AV_AVI_CHSUM);
for (i = 0; i < HDMI_CORE_AV_AVI_DBYTE_NELEMS; i++)
DUMPCOREAV2(i, HDMI_CORE_AV_AVI_DBYTE);
DUMPCOREAV(HDMI_CORE_AV_SPD_TYPE);
DUMPCOREAV(HDMI_CORE_AV_SPD_VERS);
DUMPCOREAV(HDMI_CORE_AV_SPD_LEN);
DUMPCOREAV(HDMI_CORE_AV_SPD_CHSUM);
for (i = 0; i < HDMI_CORE_AV_SPD_DBYTE_NELEMS; i++)
DUMPCOREAV2(i, HDMI_CORE_AV_SPD_DBYTE);
DUMPCOREAV(HDMI_CORE_AV_AUDIO_TYPE);
DUMPCOREAV(HDMI_CORE_AV_AUDIO_VERS);
DUMPCOREAV(HDMI_CORE_AV_AUDIO_LEN);
DUMPCOREAV(HDMI_CORE_AV_AUDIO_CHSUM);
for (i = 0; i < HDMI_CORE_AV_AUD_DBYTE_NELEMS; i++)
DUMPCOREAV2(i, HDMI_CORE_AV_AUD_DBYTE);
DUMPCOREAV(HDMI_CORE_AV_MPEG_TYPE);
DUMPCOREAV(HDMI_CORE_AV_MPEG_VERS);
DUMPCOREAV(HDMI_CORE_AV_MPEG_LEN);
DUMPCOREAV(HDMI_CORE_AV_MPEG_CHSUM);
for (i = 0; i < HDMI_CORE_AV_MPEG_DBYTE_NELEMS; i++)
DUMPCOREAV2(i, HDMI_CORE_AV_MPEG_DBYTE);
for (i = 0; i < HDMI_CORE_AV_GEN_DBYTE_NELEMS; i++)
DUMPCOREAV2(i, HDMI_CORE_AV_GEN_DBYTE);
DUMPCOREAV(HDMI_CORE_AV_CP_BYTE1);
for (i = 0; i < HDMI_CORE_AV_GEN2_DBYTE_NELEMS; i++)
DUMPCOREAV2(i, HDMI_CORE_AV_GEN2_DBYTE);
DUMPCOREAV(HDMI_CORE_AV_CEC_ADDR_ID);
}
static void hdmi_core_audio_config(struct hdmi_core_data *core,
struct hdmi_core_audio_config *cfg)
{
u32 r;
void __iomem *av_base = hdmi_av_base(core);
/*
* Parameters for generation of Audio Clock Recovery packets
*/
REG_FLD_MOD(av_base, HDMI_CORE_AV_N_SVAL1, cfg->n, 7, 0);
REG_FLD_MOD(av_base, HDMI_CORE_AV_N_SVAL2, cfg->n >> 8, 7, 0);
REG_FLD_MOD(av_base, HDMI_CORE_AV_N_SVAL3, cfg->n >> 16, 7, 0);
if (cfg->cts_mode == HDMI_AUDIO_CTS_MODE_SW) {
REG_FLD_MOD(av_base, HDMI_CORE_AV_CTS_SVAL1, cfg->cts, 7, 0);
REG_FLD_MOD(av_base,
HDMI_CORE_AV_CTS_SVAL2, cfg->cts >> 8, 7, 0);
REG_FLD_MOD(av_base,
HDMI_CORE_AV_CTS_SVAL3, cfg->cts >> 16, 7, 0);
} else {
REG_FLD_MOD(av_base, HDMI_CORE_AV_AUD_PAR_BUSCLK_1,
cfg->aud_par_busclk, 7, 0);
REG_FLD_MOD(av_base, HDMI_CORE_AV_AUD_PAR_BUSCLK_2,
(cfg->aud_par_busclk >> 8), 7, 0);
REG_FLD_MOD(av_base, HDMI_CORE_AV_AUD_PAR_BUSCLK_3,
(cfg->aud_par_busclk >> 16), 7, 0);
}
/* Set ACR clock divisor */
if (cfg->use_mclk)
REG_FLD_MOD(av_base, HDMI_CORE_AV_FREQ_SVAL,
cfg->mclk_mode, 2, 0);
r = hdmi_read_reg(av_base, HDMI_CORE_AV_ACR_CTRL);
/*
* Use TMDS clock for ACR packets. For devices that use
* the MCLK, this is the first part of the MCLK initialization.
*/
r = FLD_MOD(r, 0, 2, 2);
r = FLD_MOD(r, cfg->en_acr_pkt, 1, 1);
r = FLD_MOD(r, cfg->cts_mode, 0, 0);
hdmi_write_reg(av_base, HDMI_CORE_AV_ACR_CTRL, r);
/* For devices using MCLK, this completes its initialization. */
if (cfg->use_mclk)
REG_FLD_MOD(av_base, HDMI_CORE_AV_ACR_CTRL, 1, 2, 2);
/* Override of SPDIF sample frequency with value in I2S_CHST4 */
REG_FLD_MOD(av_base, HDMI_CORE_AV_SPDIF_CTRL,
cfg->fs_override, 1, 1);
/*
* Set IEC-60958-3 channel status word. It is passed to the IP
* just as it is received. The user of the driver is responsible
* for its contents.
*/
hdmi_write_reg(av_base, HDMI_CORE_AV_I2S_CHST0,
cfg->iec60958_cfg->status[0]);
hdmi_write_reg(av_base, HDMI_CORE_AV_I2S_CHST1,
cfg->iec60958_cfg->status[1]);
hdmi_write_reg(av_base, HDMI_CORE_AV_I2S_CHST2,
cfg->iec60958_cfg->status[2]);
/* yes, this is correct: status[3] goes to CHST4 register */
hdmi_write_reg(av_base, HDMI_CORE_AV_I2S_CHST4,
cfg->iec60958_cfg->status[3]);
/* yes, this is correct: status[4] goes to CHST5 register */
hdmi_write_reg(av_base, HDMI_CORE_AV_I2S_CHST5,
cfg->iec60958_cfg->status[4]);
/* set I2S parameters */
r = hdmi_read_reg(av_base, HDMI_CORE_AV_I2S_IN_CTRL);
r = FLD_MOD(r, cfg->i2s_cfg.sck_edge_mode, 6, 6);
r = FLD_MOD(r, cfg->i2s_cfg.vbit, 4, 4);
r = FLD_MOD(r, cfg->i2s_cfg.justification, 2, 2);
r = FLD_MOD(r, cfg->i2s_cfg.direction, 1, 1);
r = FLD_MOD(r, cfg->i2s_cfg.shift, 0, 0);
hdmi_write_reg(av_base, HDMI_CORE_AV_I2S_IN_CTRL, r);
REG_FLD_MOD(av_base, HDMI_CORE_AV_I2S_IN_LEN,
cfg->i2s_cfg.in_length_bits, 3, 0);
/* Audio channels and mode parameters */
REG_FLD_MOD(av_base, HDMI_CORE_AV_HDMI_CTRL, cfg->layout, 2, 1);
r = hdmi_read_reg(av_base, HDMI_CORE_AV_AUD_MODE);
r = FLD_MOD(r, cfg->i2s_cfg.active_sds, 7, 4);
r = FLD_MOD(r, cfg->en_dsd_audio, 3, 3);
r = FLD_MOD(r, cfg->en_parallel_aud_input, 2, 2);
r = FLD_MOD(r, cfg->en_spdif, 1, 1);
hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_MODE, r);
/* Audio channel mappings */
/* TODO: Make channel mapping dynamic. For now, map channels
* in the ALSA order: FL/FR/RL/RR/C/LFE/SL/SR. Remapping is needed as
* HDMI speaker order is different. See CEA-861 Section 6.6.2.
*/
hdmi_write_reg(av_base, HDMI_CORE_AV_I2S_IN_MAP, 0x78);
REG_FLD_MOD(av_base, HDMI_CORE_AV_SWAP_I2S, 1, 5, 5);
}
static void hdmi_core_audio_infoframe_cfg(struct hdmi_core_data *core,
struct snd_cea_861_aud_if *info_aud)
{
u8 sum = 0, checksum = 0;
void __iomem *av_base = hdmi_av_base(core);
/*
* Set audio info frame type, version and length as
* described in HDMI 1.4a Section 8.2.2 specification.
* Checksum calculation is defined in Section 5.3.5.
*/
hdmi_write_reg(av_base, HDMI_CORE_AV_AUDIO_TYPE, 0x84);
hdmi_write_reg(av_base, HDMI_CORE_AV_AUDIO_VERS, 0x01);
hdmi_write_reg(av_base, HDMI_CORE_AV_AUDIO_LEN, 0x0a);
sum += 0x84 + 0x001 + 0x00a;
hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(0),
info_aud->db1_ct_cc);
sum += info_aud->db1_ct_cc;
hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(1),
info_aud->db2_sf_ss);
sum += info_aud->db2_sf_ss;
hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(2), info_aud->db3);
sum += info_aud->db3;
/*
* The OMAP HDMI IP requires using the 8-channel channel code when
* transmitting more than two channels.
*/
if (info_aud->db4_ca != 0x00)
info_aud->db4_ca = 0x13;
hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(3), info_aud->db4_ca);
sum += info_aud->db4_ca;
hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(4),
info_aud->db5_dminh_lsv);
sum += info_aud->db5_dminh_lsv;
hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(5), 0x00);
hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(6), 0x00);
hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(7), 0x00);
hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(8), 0x00);
hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(9), 0x00);
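/*
 * The checksum byte makes the infoframe header and payload bytes sum to
 * zero modulo 256.
 */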
checksum = 0x100 - sum;
hdmi_write_reg(av_base,
HDMI_CORE_AV_AUDIO_CHSUM, checksum);
/*
* TODO: Add MPEG and SPD enable and repeat cfg when EDID parsing
* is available.
*/
}
int hdmi4_audio_config(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
struct omap_dss_audio *audio, u32 pclk)
{
struct hdmi_audio_format audio_format;
struct hdmi_audio_dma audio_dma;
struct hdmi_core_audio_config acore;
int n, cts, channel_count;
unsigned int fs_nr;
bool word_length_16b = false;
if (!audio || !audio->iec || !audio->cea || !core)
return -EINVAL;
acore.iec60958_cfg = audio->iec;
/*
* In the IEC-60958 status word, check if the audio sample word length
* is 16-bit as several optimizations can be performed in such case.
*/
if (!(audio->iec->status[4] & IEC958_AES4_CON_MAX_WORDLEN_24))
if (audio->iec->status[4] & IEC958_AES4_CON_WORDLEN_20_16)
word_length_16b = true;
/* I2S configuration. See Philips' I2S bus specification */
if (word_length_16b)
acore.i2s_cfg.justification = HDMI_AUDIO_JUSTIFY_LEFT;
else
acore.i2s_cfg.justification = HDMI_AUDIO_JUSTIFY_RIGHT;
/*
* The I2S input word length is twice the length given in the IEC-60958
* status word. If the word size is greater than
* 20 bits, increment by one.
*/
acore.i2s_cfg.in_length_bits = audio->iec->status[4]
& IEC958_AES4_CON_WORDLEN;
if (audio->iec->status[4] & IEC958_AES4_CON_MAX_WORDLEN_24)
acore.i2s_cfg.in_length_bits++;
acore.i2s_cfg.sck_edge_mode = HDMI_AUDIO_I2S_SCK_EDGE_RISING;
acore.i2s_cfg.vbit = HDMI_AUDIO_I2S_VBIT_FOR_PCM;
acore.i2s_cfg.direction = HDMI_AUDIO_I2S_MSB_SHIFTED_FIRST;
acore.i2s_cfg.shift = HDMI_AUDIO_I2S_FIRST_BIT_SHIFT;
/* convert sample frequency to a number */
switch (audio->iec->status[3] & IEC958_AES3_CON_FS) {
case IEC958_AES3_CON_FS_32000:
fs_nr = 32000;
break;
case IEC958_AES3_CON_FS_44100:
fs_nr = 44100;
break;
case IEC958_AES3_CON_FS_48000:
fs_nr = 48000;
break;
case IEC958_AES3_CON_FS_88200:
fs_nr = 88200;
break;
case IEC958_AES3_CON_FS_96000:
fs_nr = 96000;
break;
case IEC958_AES3_CON_FS_176400:
fs_nr = 176400;
break;
case IEC958_AES3_CON_FS_192000:
fs_nr = 192000;
break;
default:
return -EINVAL;
}
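/*
 * Audio Clock Regeneration: the sink recovers the audio clock from the
 * N and CTS values such that 128 * fs = f_TMDS * N / CTS, so both are
 * computed here from the pixel clock and the sample rate.
 */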
hdmi_compute_acr(pclk, fs_nr, &n, &cts);
/* Audio clock regeneration settings */
acore.n = n;
acore.cts = cts;
if (core->cts_swmode) {
acore.aud_par_busclk = 0;
acore.cts_mode = HDMI_AUDIO_CTS_MODE_SW;
acore.use_mclk = core->audio_use_mclk;
} else {
acore.aud_par_busclk = (((128 * 31) - 1) << 8);
acore.cts_mode = HDMI_AUDIO_CTS_MODE_HW;
acore.use_mclk = true;
}
if (acore.use_mclk)
acore.mclk_mode = HDMI_AUDIO_MCLK_128FS;
/* Audio channels settings */
channel_count = (audio->cea->db1_ct_cc &
CEA861_AUDIO_INFOFRAME_DB1CC) + 1;
switch (channel_count) {
case 2:
audio_format.active_chnnls_msk = 0x03;
break;
case 3:
audio_format.active_chnnls_msk = 0x07;
break;
case 4:
audio_format.active_chnnls_msk = 0x0f;
break;
case 5:
audio_format.active_chnnls_msk = 0x1f;
break;
case 6:
audio_format.active_chnnls_msk = 0x3f;
break;
case 7:
audio_format.active_chnnls_msk = 0x7f;
break;
case 8:
audio_format.active_chnnls_msk = 0xff;
break;
default:
return -EINVAL;
}
/*
* the HDMI IP needs to enable four stereo channels when transmitting
* more than 2 audio channels. Similarly, the channel count in the
* Audio InfoFrame has to match the sample_present bits (some channels
* are padded with zeroes)
*/
if (channel_count == 2) {
audio_format.stereo_channels = HDMI_AUDIO_STEREO_ONECHANNEL;
acore.i2s_cfg.active_sds = HDMI_AUDIO_I2S_SD0_EN;
acore.layout = HDMI_AUDIO_LAYOUT_2CH;
} else {
audio_format.stereo_channels = HDMI_AUDIO_STEREO_FOURCHANNELS;
acore.i2s_cfg.active_sds = HDMI_AUDIO_I2S_SD0_EN |
HDMI_AUDIO_I2S_SD1_EN | HDMI_AUDIO_I2S_SD2_EN |
HDMI_AUDIO_I2S_SD3_EN;
acore.layout = HDMI_AUDIO_LAYOUT_8CH;
audio->cea->db1_ct_cc = 7;
}
acore.en_spdif = false;
/* use sample frequency from channel status word */
acore.fs_override = true;
/* enable ACR packets */
acore.en_acr_pkt = true;
/* disable direct streaming digital audio */
acore.en_dsd_audio = false;
/* use parallel audio interface */
acore.en_parallel_aud_input = true;
/* DMA settings */
if (word_length_16b)
audio_dma.transfer_size = 0x10;
else
audio_dma.transfer_size = 0x20;
audio_dma.block_size = 0xC0;
audio_dma.mode = HDMI_AUDIO_TRANSF_DMA;
audio_dma.fifo_threshold = 0x20; /* in number of samples */
/* audio FIFO format settings */
if (word_length_16b) {
audio_format.samples_per_word = HDMI_AUDIO_ONEWORD_TWOSAMPLES;
audio_format.sample_size = HDMI_AUDIO_SAMPLE_16BITS;
audio_format.justification = HDMI_AUDIO_JUSTIFY_LEFT;
} else {
audio_format.samples_per_word = HDMI_AUDIO_ONEWORD_ONESAMPLE;
audio_format.sample_size = HDMI_AUDIO_SAMPLE_24BITS;
audio_format.justification = HDMI_AUDIO_JUSTIFY_RIGHT;
}
audio_format.type = HDMI_AUDIO_TYPE_LPCM;
audio_format.sample_order = HDMI_AUDIO_SAMPLE_LEFT_FIRST;
/* disable start/stop signals of IEC 60958 blocks */
audio_format.en_sig_blk_strt_end = HDMI_AUDIO_BLOCK_SIG_STARTEND_ON;
/* configure DMA and audio FIFO format*/
hdmi_wp_audio_config_dma(wp, &audio_dma);
hdmi_wp_audio_config_format(wp, &audio_format);
/* configure the core*/
hdmi_core_audio_config(core, &acore);
/* configure CEA 861 audio infoframe*/
hdmi_core_audio_infoframe_cfg(core, audio->cea);
return 0;
}
int hdmi4_audio_start(struct hdmi_core_data *core, struct hdmi_wp_data *wp)
{
REG_FLD_MOD(hdmi_av_base(core),
HDMI_CORE_AV_AUD_MODE, true, 0, 0);
hdmi_wp_audio_core_req_enable(wp, true);
return 0;
}
void hdmi4_audio_stop(struct hdmi_core_data *core, struct hdmi_wp_data *wp)
{
REG_FLD_MOD(hdmi_av_base(core),
HDMI_CORE_AV_AUD_MODE, false, 0, 0);
hdmi_wp_audio_core_req_enable(wp, false);
}
struct hdmi4_features {
bool cts_swmode;
bool audio_use_mclk;
};
static const struct hdmi4_features hdmi4430_es1_features = {
.cts_swmode = false,
.audio_use_mclk = false,
};
static const struct hdmi4_features hdmi4430_es2_features = {
.cts_swmode = true,
.audio_use_mclk = false,
};
static const struct hdmi4_features hdmi4_features = {
.cts_swmode = true,
.audio_use_mclk = true,
};
static const struct soc_device_attribute hdmi4_soc_devices[] = {
{
.machine = "OMAP4430",
.revision = "ES1.?",
.data = &hdmi4430_es1_features,
},
{
.machine = "OMAP4430",
.revision = "ES2.?",
.data = &hdmi4430_es2_features,
},
{
.family = "OMAP4",
.data = &hdmi4_features,
},
{ /* sentinel */ }
};
int hdmi4_core_init(struct platform_device *pdev, struct hdmi_core_data *core)
{
const struct hdmi4_features *features;
const struct soc_device_attribute *soc;
soc = soc_device_match(hdmi4_soc_devices);
if (!soc)
return -ENODEV;
features = soc->data;
core->cts_swmode = features->cts_swmode;
core->audio_use_mclk = features->audio_use_mclk;
core->base = devm_platform_ioremap_resource_byname(pdev, "core");
if (IS_ERR(core->base))
return PTR_ERR(core->base);
return 0;
}
| linux-master | drivers/gpu/drm/omapdrm/dss/hdmi4_core.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2009 Nokia Corporation
* Author: Tomi Valkeinen <[email protected]>
*/
#define DSS_SUBSYS_NAME "SDI"
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/string.h>
#include <drm/drm_bridge.h>
#include "dss.h"
#include "omapdss.h"
struct sdi_device {
struct platform_device *pdev;
struct dss_device *dss;
bool update_enabled;
struct regulator *vdds_sdi_reg;
struct dss_lcd_mgr_config mgr_config;
unsigned long pixelclock;
int datapairs;
struct omap_dss_device output;
struct drm_bridge bridge;
};
#define drm_bridge_to_sdi(bridge) \
container_of(bridge, struct sdi_device, bridge)
struct sdi_clk_calc_ctx {
struct sdi_device *sdi;
unsigned long pck_min, pck_max;
unsigned long fck;
struct dispc_clock_info dispc_cinfo;
};
static bool dpi_calc_dispc_cb(int lckd, int pckd, unsigned long lck,
unsigned long pck, void *data)
{
struct sdi_clk_calc_ctx *ctx = data;
ctx->dispc_cinfo.lck_div = lckd;
ctx->dispc_cinfo.pck_div = pckd;
ctx->dispc_cinfo.lck = lck;
ctx->dispc_cinfo.pck = pck;
return true;
}
static bool dpi_calc_dss_cb(unsigned long fck, void *data)
{
struct sdi_clk_calc_ctx *ctx = data;
ctx->fck = fck;
return dispc_div_calc(ctx->sdi->dss->dispc, fck,
ctx->pck_min, ctx->pck_max,
dpi_calc_dispc_cb, ctx);
}
static int sdi_calc_clock_div(struct sdi_device *sdi, unsigned long pclk,
unsigned long *fck,
struct dispc_clock_info *dispc_cinfo)
{
int i;
struct sdi_clk_calc_ctx ctx;
/*
* DSS fclk gives us very few possibilities, so finding a good pixel
* clock may not be possible. We try multiple times to find the clock,
* each time widening the pixel clock range we look for, up to
* +/- 1MHz.
*/
for (i = 0; i < 10; ++i) {
bool ok;
memset(&ctx, 0, sizeof(ctx));
ctx.sdi = sdi;
if (pclk > 1000 * i * i * i)
ctx.pck_min = max(pclk - 1000 * i * i * i, 0lu);
else
ctx.pck_min = 0;
ctx.pck_max = pclk + 1000 * i * i * i;
ok = dss_div_calc(sdi->dss, pclk, ctx.pck_min,
dpi_calc_dss_cb, &ctx);
if (ok) {
*fck = ctx.fck;
*dispc_cinfo = ctx.dispc_cinfo;
return 0;
}
}
return -EINVAL;
}
static void sdi_config_lcd_manager(struct sdi_device *sdi)
{
sdi->mgr_config.io_pad_mode = DSS_IO_PAD_MODE_BYPASS;
sdi->mgr_config.stallmode = false;
sdi->mgr_config.fifohandcheck = false;
sdi->mgr_config.video_port_width = 24;
sdi->mgr_config.lcden_sig_polarity = 1;
dss_mgr_set_lcd_config(&sdi->output, &sdi->mgr_config);
}
/* -----------------------------------------------------------------------------
* DRM Bridge Operations
*/
static int sdi_bridge_attach(struct drm_bridge *bridge,
enum drm_bridge_attach_flags flags)
{
struct sdi_device *sdi = drm_bridge_to_sdi(bridge);
if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
return -EINVAL;
return drm_bridge_attach(bridge->encoder, sdi->output.next_bridge,
bridge, flags);
}
static enum drm_mode_status
sdi_bridge_mode_valid(struct drm_bridge *bridge,
const struct drm_display_info *info,
const struct drm_display_mode *mode)
{
struct sdi_device *sdi = drm_bridge_to_sdi(bridge);
unsigned long pixelclock = mode->clock * 1000;
struct dispc_clock_info dispc_cinfo;
unsigned long fck;
int ret;
if (pixelclock == 0)
return MODE_NOCLOCK;
ret = sdi_calc_clock_div(sdi, pixelclock, &fck, &dispc_cinfo);
if (ret < 0)
return MODE_CLOCK_RANGE;
return MODE_OK;
}
static bool sdi_bridge_mode_fixup(struct drm_bridge *bridge,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct sdi_device *sdi = drm_bridge_to_sdi(bridge);
unsigned long pixelclock = mode->clock * 1000;
struct dispc_clock_info dispc_cinfo;
unsigned long fck;
unsigned long pck;
int ret;
ret = sdi_calc_clock_div(sdi, pixelclock, &fck, &dispc_cinfo);
if (ret < 0)
return false;
pck = fck / dispc_cinfo.lck_div / dispc_cinfo.pck_div;
if (pck != pixelclock)
dev_dbg(&sdi->pdev->dev,
"pixel clock adjusted from %lu Hz to %lu Hz\n",
pixelclock, pck);
adjusted_mode->clock = pck / 1000;
return true;
}
static void sdi_bridge_mode_set(struct drm_bridge *bridge,
const struct drm_display_mode *mode,
const struct drm_display_mode *adjusted_mode)
{
struct sdi_device *sdi = drm_bridge_to_sdi(bridge);
sdi->pixelclock = adjusted_mode->clock * 1000;
}
static void sdi_bridge_enable(struct drm_bridge *bridge)
{
struct sdi_device *sdi = drm_bridge_to_sdi(bridge);
struct dispc_clock_info dispc_cinfo;
unsigned long fck;
int r;
r = regulator_enable(sdi->vdds_sdi_reg);
if (r)
return;
r = dispc_runtime_get(sdi->dss->dispc);
if (r)
goto err_get_dispc;
r = sdi_calc_clock_div(sdi, sdi->pixelclock, &fck, &dispc_cinfo);
if (r)
goto err_calc_clock_div;
sdi->mgr_config.clock_info = dispc_cinfo;
r = dss_set_fck_rate(sdi->dss, fck);
if (r)
goto err_set_dss_clock_div;
sdi_config_lcd_manager(sdi);
/*
* LCLK and PCLK divisors are located in shadow registers, and we
* normally write them to DISPC registers when enabling the output.
* However, SDI uses pck-free as source clock for its PLL, and pck-free
* is affected by the divisors. And as we need the PLL before enabling
* the output, we need to write the divisors early.
*
* It seems just writing to the DISPC register is enough, and we don't
* need to care about the shadow register mechanism for pck-free. The
* exact reason for this is unknown.
*/
dispc_mgr_set_clock_div(sdi->dss->dispc, sdi->output.dispc_channel,
&sdi->mgr_config.clock_info);
dss_sdi_init(sdi->dss, sdi->datapairs);
r = dss_sdi_enable(sdi->dss);
if (r)
goto err_sdi_enable;
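/* Give the SDI interface a short while to settle before enabling the output manager. */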
mdelay(2);
r = dss_mgr_enable(&sdi->output);
if (r)
goto err_mgr_enable;
return;
err_mgr_enable:
dss_sdi_disable(sdi->dss);
err_sdi_enable:
err_set_dss_clock_div:
err_calc_clock_div:
dispc_runtime_put(sdi->dss->dispc);
err_get_dispc:
regulator_disable(sdi->vdds_sdi_reg);
}
static void sdi_bridge_disable(struct drm_bridge *bridge)
{
struct sdi_device *sdi = drm_bridge_to_sdi(bridge);
dss_mgr_disable(&sdi->output);
dss_sdi_disable(sdi->dss);
dispc_runtime_put(sdi->dss->dispc);
regulator_disable(sdi->vdds_sdi_reg);
}
static const struct drm_bridge_funcs sdi_bridge_funcs = {
.attach = sdi_bridge_attach,
.mode_valid = sdi_bridge_mode_valid,
.mode_fixup = sdi_bridge_mode_fixup,
.mode_set = sdi_bridge_mode_set,
.enable = sdi_bridge_enable,
.disable = sdi_bridge_disable,
};
static void sdi_bridge_init(struct sdi_device *sdi)
{
sdi->bridge.funcs = &sdi_bridge_funcs;
sdi->bridge.of_node = sdi->pdev->dev.of_node;
sdi->bridge.type = DRM_MODE_CONNECTOR_LVDS;
drm_bridge_add(&sdi->bridge);
}
static void sdi_bridge_cleanup(struct sdi_device *sdi)
{
drm_bridge_remove(&sdi->bridge);
}
/* -----------------------------------------------------------------------------
* Initialisation and Cleanup
*/
static int sdi_init_output(struct sdi_device *sdi)
{
struct omap_dss_device *out = &sdi->output;
int r;
sdi_bridge_init(sdi);
out->dev = &sdi->pdev->dev;
out->id = OMAP_DSS_OUTPUT_SDI;
out->type = OMAP_DISPLAY_TYPE_SDI;
out->name = "sdi.0";
out->dispc_channel = OMAP_DSS_CHANNEL_LCD;
/* We have SDI only on OMAP3, where it's on port 1 */
out->of_port = 1;
out->bus_flags = DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE /* 15.5.9.1.2 */
| DRM_BUS_FLAG_SYNC_DRIVE_POSEDGE;
r = omapdss_device_init_output(out, &sdi->bridge);
if (r < 0) {
sdi_bridge_cleanup(sdi);
return r;
}
omapdss_device_register(out);
return 0;
}
static void sdi_uninit_output(struct sdi_device *sdi)
{
omapdss_device_unregister(&sdi->output);
omapdss_device_cleanup_output(&sdi->output);
sdi_bridge_cleanup(sdi);
}
int sdi_init_port(struct dss_device *dss, struct platform_device *pdev,
struct device_node *port)
{
struct sdi_device *sdi;
struct device_node *ep;
u32 datapairs;
int r;
sdi = kzalloc(sizeof(*sdi), GFP_KERNEL);
if (!sdi)
return -ENOMEM;
ep = of_get_next_child(port, NULL);
if (!ep) {
r = 0;
goto err_free;
}
r = of_property_read_u32(ep, "datapairs", &datapairs);
of_node_put(ep);
if (r) {
DSSERR("failed to parse datapairs\n");
goto err_free;
}
sdi->datapairs = datapairs;
sdi->dss = dss;
sdi->pdev = pdev;
port->data = sdi;
sdi->vdds_sdi_reg = devm_regulator_get(&pdev->dev, "vdds_sdi");
if (IS_ERR(sdi->vdds_sdi_reg)) {
r = PTR_ERR(sdi->vdds_sdi_reg);
if (r != -EPROBE_DEFER)
DSSERR("can't get VDDS_SDI regulator\n");
goto err_free;
}
r = sdi_init_output(sdi);
if (r)
goto err_free;
return 0;
err_free:
kfree(sdi);
return r;
}
void sdi_uninit_port(struct device_node *port)
{
struct sdi_device *sdi = port->data;
if (!sdi)
return;
sdi_uninit_output(sdi);
kfree(sdi);
}
| linux-master | drivers/gpu/drm/omapdrm/dss/sdi.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* HDMI interface DSS driver for TI's OMAP4 family of SoCs.
*
* Copyright (C) 2010-2011 Texas Instruments Incorporated - https://www.ti.com/
* Authors: Yong Zhi
* Mythri pk <[email protected]>
*/
#define DSS_SUBSYS_NAME "HDMI"
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/clk.h>
#include <linux/regulator/consumer.h>
#include <linux/component.h>
#include <linux/of.h>
#include <linux/of_graph.h>
#include <sound/omap-hdmi-audio.h>
#include <media/cec.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_state_helper.h>
#include <drm/drm_edid.h>
#include "omapdss.h"
#include "hdmi4_core.h"
#include "hdmi4_cec.h"
#include "dss.h"
#include "hdmi.h"
static int hdmi_runtime_get(struct omap_hdmi *hdmi)
{
int r;
DSSDBG("hdmi_runtime_get\n");
r = pm_runtime_get_sync(&hdmi->pdev->dev);
if (WARN_ON(r < 0)) {
pm_runtime_put_noidle(&hdmi->pdev->dev);
return r;
}
return 0;
}
static void hdmi_runtime_put(struct omap_hdmi *hdmi)
{
int r;
DSSDBG("hdmi_runtime_put\n");
r = pm_runtime_put_sync(&hdmi->pdev->dev);
WARN_ON(r < 0 && r != -ENOSYS);
}
static irqreturn_t hdmi_irq_handler(int irq, void *data)
{
struct omap_hdmi *hdmi = data;
struct hdmi_wp_data *wp = &hdmi->wp;
u32 irqstatus;
irqstatus = hdmi_wp_get_irqstatus(wp);
hdmi_wp_set_irqstatus(wp, irqstatus);
if ((irqstatus & HDMI_IRQ_LINK_CONNECT) &&
irqstatus & HDMI_IRQ_LINK_DISCONNECT) {
/*
* If we get both connect and disconnect interrupts at the same
* time, turn off the PHY, clear interrupts, and restart, which
* raises a connect interrupt if a cable is connected, or nothing
* if the cable is not connected.
*/
hdmi_wp_set_phy_pwr(wp, HDMI_PHYPWRCMD_OFF);
hdmi_wp_set_irqstatus(wp, HDMI_IRQ_LINK_CONNECT |
HDMI_IRQ_LINK_DISCONNECT);
hdmi_wp_set_phy_pwr(wp, HDMI_PHYPWRCMD_LDOON);
} else if (irqstatus & HDMI_IRQ_LINK_CONNECT) {
hdmi_wp_set_phy_pwr(wp, HDMI_PHYPWRCMD_TXON);
} else if (irqstatus & HDMI_IRQ_LINK_DISCONNECT) {
hdmi_wp_set_phy_pwr(wp, HDMI_PHYPWRCMD_LDOON);
}
if (irqstatus & HDMI_IRQ_CORE) {
u32 intr4 = hdmi_read_reg(hdmi->core.base, HDMI_CORE_SYS_INTR4);
hdmi_write_reg(hdmi->core.base, HDMI_CORE_SYS_INTR4, intr4);
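/* Bit 3 of INTR4 flags a pending CEC event. */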
if (intr4 & 8)
hdmi4_cec_irq(&hdmi->core);
}
return IRQ_HANDLED;
}
static int hdmi_power_on_core(struct omap_hdmi *hdmi)
{
int r;
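/*
 * Core power is reference counted: only the first user powers up the
 * core, later callers just increment the count.
 */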
if (hdmi->core.core_pwr_cnt++)
return 0;
r = regulator_enable(hdmi->vdda_reg);
if (r)
goto err_reg_enable;
r = hdmi_runtime_get(hdmi);
if (r)
goto err_runtime_get;
hdmi4_core_powerdown_disable(&hdmi->core);
/* Make selection of HDMI in DSS */
dss_select_hdmi_venc_clk_source(hdmi->dss, DSS_HDMI_M_PCLK);
hdmi->core_enabled = true;
return 0;
err_runtime_get:
regulator_disable(hdmi->vdda_reg);
err_reg_enable:
hdmi->core.core_pwr_cnt--;
return r;
}
static void hdmi_power_off_core(struct omap_hdmi *hdmi)
{
if (--hdmi->core.core_pwr_cnt)
return;
hdmi->core_enabled = false;
hdmi_runtime_put(hdmi);
regulator_disable(hdmi->vdda_reg);
}
static int hdmi_power_on_full(struct omap_hdmi *hdmi)
{
int r;
const struct videomode *vm;
struct hdmi_wp_data *wp = &hdmi->wp;
struct dss_pll_clock_info hdmi_cinfo = { 0 };
unsigned int pc;
r = hdmi_power_on_core(hdmi);
if (r)
return r;
/* disable and clear irqs */
hdmi_wp_clear_irqenable(wp, ~HDMI_IRQ_CORE);
hdmi_wp_set_irqstatus(wp, ~HDMI_IRQ_CORE);
vm = &hdmi->cfg.vm;
DSSDBG("hdmi_power_on hactive= %d vactive = %d\n", vm->hactive,
vm->vactive);
pc = vm->pixelclock;
if (vm->flags & DISPLAY_FLAGS_DOUBLECLK)
pc *= 2;
/* DSS_HDMI_TCLK is bitclk / 10 */
pc *= 10;
dss_pll_calc_b(&hdmi->pll.pll, clk_get_rate(hdmi->pll.pll.clkin),
pc, &hdmi_cinfo);
r = dss_pll_enable(&hdmi->pll.pll);
if (r) {
DSSERR("Failed to enable PLL\n");
goto err_pll_enable;
}
r = dss_pll_set_config(&hdmi->pll.pll, &hdmi_cinfo);
if (r) {
DSSERR("Failed to configure PLL\n");
goto err_pll_cfg;
}
r = hdmi_phy_configure(&hdmi->phy, hdmi_cinfo.clkdco,
hdmi_cinfo.clkout[0]);
if (r) {
DSSDBG("Failed to configure PHY\n");
goto err_phy_cfg;
}
r = hdmi_wp_set_phy_pwr(wp, HDMI_PHYPWRCMD_LDOON);
if (r)
goto err_phy_pwr;
hdmi4_configure(&hdmi->core, &hdmi->wp, &hdmi->cfg);
r = dss_mgr_enable(&hdmi->output);
if (r)
goto err_mgr_enable;
r = hdmi_wp_video_start(&hdmi->wp);
if (r)
goto err_vid_enable;
hdmi_wp_set_irqenable(wp,
HDMI_IRQ_LINK_CONNECT | HDMI_IRQ_LINK_DISCONNECT);
return 0;
err_vid_enable:
dss_mgr_disable(&hdmi->output);
err_mgr_enable:
hdmi_wp_set_phy_pwr(&hdmi->wp, HDMI_PHYPWRCMD_OFF);
err_phy_pwr:
err_phy_cfg:
err_pll_cfg:
dss_pll_disable(&hdmi->pll.pll);
err_pll_enable:
hdmi_power_off_core(hdmi);
return -EIO;
}
static void hdmi_power_off_full(struct omap_hdmi *hdmi)
{
hdmi_wp_clear_irqenable(&hdmi->wp, ~HDMI_IRQ_CORE);
hdmi_wp_video_stop(&hdmi->wp);
dss_mgr_disable(&hdmi->output);
hdmi_wp_set_phy_pwr(&hdmi->wp, HDMI_PHYPWRCMD_OFF);
dss_pll_disable(&hdmi->pll.pll);
hdmi_power_off_core(hdmi);
}
static int hdmi_dump_regs(struct seq_file *s, void *p)
{
struct omap_hdmi *hdmi = s->private;
mutex_lock(&hdmi->lock);
if (hdmi_runtime_get(hdmi)) {
mutex_unlock(&hdmi->lock);
return 0;
}
hdmi_wp_dump(&hdmi->wp, s);
hdmi_pll_dump(&hdmi->pll, s);
hdmi_phy_dump(&hdmi->phy, s);
hdmi4_core_dump(&hdmi->core, s);
hdmi_runtime_put(hdmi);
mutex_unlock(&hdmi->lock);
return 0;
}
static void hdmi_start_audio_stream(struct omap_hdmi *hd)
{
hdmi_wp_audio_enable(&hd->wp, true);
hdmi4_audio_start(&hd->core, &hd->wp);
}
static void hdmi_stop_audio_stream(struct omap_hdmi *hd)
{
hdmi4_audio_stop(&hd->core, &hd->wp);
hdmi_wp_audio_enable(&hd->wp, false);
}
int hdmi4_core_enable(struct hdmi_core_data *core)
{
struct omap_hdmi *hdmi = container_of(core, struct omap_hdmi, core);
int r = 0;
DSSDBG("ENTER omapdss_hdmi4_core_enable\n");
mutex_lock(&hdmi->lock);
r = hdmi_power_on_core(hdmi);
if (r) {
DSSERR("failed to power on device\n");
goto err0;
}
mutex_unlock(&hdmi->lock);
return 0;
err0:
mutex_unlock(&hdmi->lock);
return r;
}
void hdmi4_core_disable(struct hdmi_core_data *core)
{
struct omap_hdmi *hdmi = container_of(core, struct omap_hdmi, core);
DSSDBG("Enter omapdss_hdmi4_core_disable\n");
mutex_lock(&hdmi->lock);
hdmi_power_off_core(hdmi);
mutex_unlock(&hdmi->lock);
}
/* -----------------------------------------------------------------------------
* DRM Bridge Operations
*/
static int hdmi4_bridge_attach(struct drm_bridge *bridge,
enum drm_bridge_attach_flags flags)
{
struct omap_hdmi *hdmi = drm_bridge_to_hdmi(bridge);
if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
return -EINVAL;
return drm_bridge_attach(bridge->encoder, hdmi->output.next_bridge,
bridge, flags);
}
static void hdmi4_bridge_mode_set(struct drm_bridge *bridge,
const struct drm_display_mode *mode,
const struct drm_display_mode *adjusted_mode)
{
struct omap_hdmi *hdmi = drm_bridge_to_hdmi(bridge);
mutex_lock(&hdmi->lock);
drm_display_mode_to_videomode(adjusted_mode, &hdmi->cfg.vm);
dispc_set_tv_pclk(hdmi->dss->dispc, adjusted_mode->clock * 1000);
mutex_unlock(&hdmi->lock);
}
static void hdmi4_bridge_enable(struct drm_bridge *bridge,
struct drm_bridge_state *bridge_state)
{
struct omap_hdmi *hdmi = drm_bridge_to_hdmi(bridge);
struct drm_atomic_state *state = bridge_state->base.state;
struct drm_connector_state *conn_state;
struct drm_connector *connector;
struct drm_crtc_state *crtc_state;
unsigned long flags;
int ret;
/*
* None of these should fail, as the bridge can't be enabled without a
* valid CRTC to connector path with fully populated new states.
*/
connector = drm_atomic_get_new_connector_for_encoder(state,
bridge->encoder);
if (WARN_ON(!connector))
return;
conn_state = drm_atomic_get_new_connector_state(state, connector);
if (WARN_ON(!conn_state))
return;
crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc);
if (WARN_ON(!crtc_state))
return;
hdmi->cfg.hdmi_dvi_mode = connector->display_info.is_hdmi
? HDMI_HDMI : HDMI_DVI;
if (connector->display_info.is_hdmi) {
const struct drm_display_mode *mode;
struct hdmi_avi_infoframe avi;
mode = &crtc_state->adjusted_mode;
ret = drm_hdmi_avi_infoframe_from_display_mode(&avi, connector,
mode);
if (ret == 0)
hdmi->cfg.infoframe = avi;
}
mutex_lock(&hdmi->lock);
ret = hdmi_power_on_full(hdmi);
if (ret) {
DSSERR("failed to power on device\n");
goto done;
}
if (hdmi->audio_configured) {
ret = hdmi4_audio_config(&hdmi->core, &hdmi->wp,
&hdmi->audio_config,
hdmi->cfg.vm.pixelclock);
if (ret) {
DSSERR("Error restoring audio configuration: %d", ret);
hdmi->audio_abort_cb(&hdmi->pdev->dev);
hdmi->audio_configured = false;
}
}
spin_lock_irqsave(&hdmi->audio_playing_lock, flags);
if (hdmi->audio_configured && hdmi->audio_playing)
hdmi_start_audio_stream(hdmi);
hdmi->display_enabled = true;
spin_unlock_irqrestore(&hdmi->audio_playing_lock, flags);
done:
mutex_unlock(&hdmi->lock);
}
static void hdmi4_bridge_disable(struct drm_bridge *bridge,
struct drm_bridge_state *bridge_state)
{
struct omap_hdmi *hdmi = drm_bridge_to_hdmi(bridge);
unsigned long flags;
mutex_lock(&hdmi->lock);
spin_lock_irqsave(&hdmi->audio_playing_lock, flags);
hdmi_stop_audio_stream(hdmi);
hdmi->display_enabled = false;
spin_unlock_irqrestore(&hdmi->audio_playing_lock, flags);
hdmi_power_off_full(hdmi);
mutex_unlock(&hdmi->lock);
}
static void hdmi4_bridge_hpd_notify(struct drm_bridge *bridge,
enum drm_connector_status status)
{
struct omap_hdmi *hdmi = drm_bridge_to_hdmi(bridge);
if (status == connector_status_disconnected)
hdmi4_cec_set_phys_addr(&hdmi->core, CEC_PHYS_ADDR_INVALID);
}
static struct edid *hdmi4_bridge_get_edid(struct drm_bridge *bridge,
struct drm_connector *connector)
{
struct omap_hdmi *hdmi = drm_bridge_to_hdmi(bridge);
struct edid *edid = NULL;
unsigned int cec_addr;
bool need_enable;
int r;
need_enable = hdmi->core_enabled == false;
if (need_enable) {
r = hdmi4_core_enable(&hdmi->core);
if (r)
return NULL;
}
mutex_lock(&hdmi->lock);
r = hdmi_runtime_get(hdmi);
BUG_ON(r);
r = hdmi4_core_ddc_init(&hdmi->core);
if (r)
goto done;
edid = drm_do_get_edid(connector, hdmi4_core_ddc_read, &hdmi->core);
done:
hdmi_runtime_put(hdmi);
mutex_unlock(&hdmi->lock);
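/*
 * Derive the CEC physical address from the EDID extension blocks; with
 * no usable EDID the address is marked invalid.
 */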
if (edid && edid->extensions) {
unsigned int len = (edid->extensions + 1) * EDID_LENGTH;
cec_addr = cec_get_edid_phys_addr((u8 *)edid, len, NULL);
} else {
cec_addr = CEC_PHYS_ADDR_INVALID;
}
hdmi4_cec_set_phys_addr(&hdmi->core, cec_addr);
if (need_enable)
hdmi4_core_disable(&hdmi->core);
return edid;
}
static const struct drm_bridge_funcs hdmi4_bridge_funcs = {
.attach = hdmi4_bridge_attach,
.mode_set = hdmi4_bridge_mode_set,
.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
.atomic_reset = drm_atomic_helper_bridge_reset,
.atomic_enable = hdmi4_bridge_enable,
.atomic_disable = hdmi4_bridge_disable,
.hpd_notify = hdmi4_bridge_hpd_notify,
.get_edid = hdmi4_bridge_get_edid,
};
static void hdmi4_bridge_init(struct omap_hdmi *hdmi)
{
hdmi->bridge.funcs = &hdmi4_bridge_funcs;
hdmi->bridge.of_node = hdmi->pdev->dev.of_node;
hdmi->bridge.ops = DRM_BRIDGE_OP_EDID;
hdmi->bridge.type = DRM_MODE_CONNECTOR_HDMIA;
drm_bridge_add(&hdmi->bridge);
}
static void hdmi4_bridge_cleanup(struct omap_hdmi *hdmi)
{
drm_bridge_remove(&hdmi->bridge);
}
/* -----------------------------------------------------------------------------
* Audio Callbacks
*/
static int hdmi_audio_startup(struct device *dev,
void (*abort_cb)(struct device *dev))
{
struct omap_hdmi *hd = dev_get_drvdata(dev);
mutex_lock(&hd->lock);
WARN_ON(hd->audio_abort_cb != NULL);
hd->audio_abort_cb = abort_cb;
mutex_unlock(&hd->lock);
return 0;
}
static int hdmi_audio_shutdown(struct device *dev)
{
struct omap_hdmi *hd = dev_get_drvdata(dev);
mutex_lock(&hd->lock);
hd->audio_abort_cb = NULL;
hd->audio_configured = false;
hd->audio_playing = false;
mutex_unlock(&hd->lock);
return 0;
}
static int hdmi_audio_start(struct device *dev)
{
struct omap_hdmi *hd = dev_get_drvdata(dev);
unsigned long flags;
spin_lock_irqsave(&hd->audio_playing_lock, flags);
if (hd->display_enabled) {
if (!hdmi_mode_has_audio(&hd->cfg))
DSSERR("%s: Video mode does not support audio\n",
__func__);
hdmi_start_audio_stream(hd);
}
hd->audio_playing = true;
spin_unlock_irqrestore(&hd->audio_playing_lock, flags);
return 0;
}
static void hdmi_audio_stop(struct device *dev)
{
struct omap_hdmi *hd = dev_get_drvdata(dev);
unsigned long flags;
WARN_ON(!hdmi_mode_has_audio(&hd->cfg));
spin_lock_irqsave(&hd->audio_playing_lock, flags);
if (hd->display_enabled)
hdmi_stop_audio_stream(hd);
hd->audio_playing = false;
spin_unlock_irqrestore(&hd->audio_playing_lock, flags);
}
static int hdmi_audio_config(struct device *dev,
struct omap_dss_audio *dss_audio)
{
struct omap_hdmi *hd = dev_get_drvdata(dev);
int ret = 0;
mutex_lock(&hd->lock);
if (hd->display_enabled) {
ret = hdmi4_audio_config(&hd->core, &hd->wp, dss_audio,
hd->cfg.vm.pixelclock);
if (ret)
goto out;
}
hd->audio_configured = true;
hd->audio_config = *dss_audio;
out:
mutex_unlock(&hd->lock);
return ret;
}
static const struct omap_hdmi_audio_ops hdmi_audio_ops = {
.audio_startup = hdmi_audio_startup,
.audio_shutdown = hdmi_audio_shutdown,
.audio_start = hdmi_audio_start,
.audio_stop = hdmi_audio_stop,
.audio_config = hdmi_audio_config,
};
static int hdmi_audio_register(struct omap_hdmi *hdmi)
{
struct omap_hdmi_audio_pdata pdata = {
.dev = &hdmi->pdev->dev,
.version = 4,
.audio_dma_addr = hdmi_wp_get_audio_dma_addr(&hdmi->wp),
.ops = &hdmi_audio_ops,
};
hdmi->audio_pdev = platform_device_register_data(
&hdmi->pdev->dev, "omap-hdmi-audio", PLATFORM_DEVID_AUTO,
&pdata, sizeof(pdata));
if (IS_ERR(hdmi->audio_pdev))
return PTR_ERR(hdmi->audio_pdev);
return 0;
}
/* -----------------------------------------------------------------------------
* Component Bind & Unbind
*/
static int hdmi4_bind(struct device *dev, struct device *master, void *data)
{
struct dss_device *dss = dss_get_device(master);
struct omap_hdmi *hdmi = dev_get_drvdata(dev);
int r;
hdmi->dss = dss;
r = hdmi_runtime_get(hdmi);
if (r)
return r;
r = hdmi_pll_init(dss, hdmi->pdev, &hdmi->pll, &hdmi->wp);
if (r)
goto err_runtime_put;
r = hdmi4_cec_init(hdmi->pdev, &hdmi->core, &hdmi->wp);
if (r)
goto err_pll_uninit;
r = hdmi_audio_register(hdmi);
if (r) {
DSSERR("Registering HDMI audio failed\n");
goto err_cec_uninit;
}
hdmi->debugfs = dss_debugfs_create_file(dss, "hdmi", hdmi_dump_regs,
hdmi);
hdmi_runtime_put(hdmi);
return 0;
err_cec_uninit:
hdmi4_cec_uninit(&hdmi->core);
err_pll_uninit:
hdmi_pll_uninit(&hdmi->pll);
err_runtime_put:
hdmi_runtime_put(hdmi);
return r;
}
static void hdmi4_unbind(struct device *dev, struct device *master, void *data)
{
struct omap_hdmi *hdmi = dev_get_drvdata(dev);
dss_debugfs_remove_file(hdmi->debugfs);
if (hdmi->audio_pdev)
platform_device_unregister(hdmi->audio_pdev);
hdmi4_cec_uninit(&hdmi->core);
hdmi_pll_uninit(&hdmi->pll);
}
static const struct component_ops hdmi4_component_ops = {
.bind = hdmi4_bind,
.unbind = hdmi4_unbind,
};
/* -----------------------------------------------------------------------------
* Probe & Remove, Suspend & Resume
*/
static int hdmi4_init_output(struct omap_hdmi *hdmi)
{
struct omap_dss_device *out = &hdmi->output;
int r;
hdmi4_bridge_init(hdmi);
out->dev = &hdmi->pdev->dev;
out->id = OMAP_DSS_OUTPUT_HDMI;
out->type = OMAP_DISPLAY_TYPE_HDMI;
out->name = "hdmi.0";
out->dispc_channel = OMAP_DSS_CHANNEL_DIGIT;
out->of_port = 0;
r = omapdss_device_init_output(out, &hdmi->bridge);
if (r < 0) {
hdmi4_bridge_cleanup(hdmi);
return r;
}
omapdss_device_register(out);
return 0;
}
static void hdmi4_uninit_output(struct omap_hdmi *hdmi)
{
struct omap_dss_device *out = &hdmi->output;
omapdss_device_unregister(out);
omapdss_device_cleanup_output(out);
hdmi4_bridge_cleanup(hdmi);
}
static int hdmi4_probe_of(struct omap_hdmi *hdmi)
{
struct platform_device *pdev = hdmi->pdev;
struct device_node *node = pdev->dev.of_node;
struct device_node *ep;
int r;
ep = of_graph_get_endpoint_by_regs(node, 0, 0);
if (!ep)
return 0;
r = hdmi_parse_lanes_of(pdev, ep, &hdmi->phy);
of_node_put(ep);
return r;
}
static int hdmi4_probe(struct platform_device *pdev)
{
struct omap_hdmi *hdmi;
int irq;
int r;
hdmi = kzalloc(sizeof(*hdmi), GFP_KERNEL);
if (!hdmi)
return -ENOMEM;
hdmi->pdev = pdev;
dev_set_drvdata(&pdev->dev, hdmi);
mutex_init(&hdmi->lock);
spin_lock_init(&hdmi->audio_playing_lock);
r = hdmi4_probe_of(hdmi);
if (r)
goto err_free;
r = hdmi_wp_init(pdev, &hdmi->wp, 4);
if (r)
goto err_free;
r = hdmi_phy_init(pdev, &hdmi->phy, 4);
if (r)
goto err_free;
r = hdmi4_core_init(pdev, &hdmi->core);
if (r)
goto err_free;
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
DSSERR("platform_get_irq failed\n");
r = -ENODEV;
goto err_free;
}
r = devm_request_threaded_irq(&pdev->dev, irq,
NULL, hdmi_irq_handler,
IRQF_ONESHOT, "OMAP HDMI", hdmi);
if (r) {
DSSERR("HDMI IRQ request failed\n");
goto err_free;
}
hdmi->vdda_reg = devm_regulator_get(&pdev->dev, "vdda");
if (IS_ERR(hdmi->vdda_reg)) {
r = PTR_ERR(hdmi->vdda_reg);
if (r != -EPROBE_DEFER)
DSSERR("can't get VDDA regulator\n");
goto err_free;
}
pm_runtime_enable(&pdev->dev);
r = hdmi4_init_output(hdmi);
if (r)
goto err_pm_disable;
r = component_add(&pdev->dev, &hdmi4_component_ops);
if (r)
goto err_uninit_output;
return 0;
err_uninit_output:
hdmi4_uninit_output(hdmi);
err_pm_disable:
pm_runtime_disable(&pdev->dev);
err_free:
kfree(hdmi);
return r;
}
static void hdmi4_remove(struct platform_device *pdev)
{
struct omap_hdmi *hdmi = platform_get_drvdata(pdev);
component_del(&pdev->dev, &hdmi4_component_ops);
hdmi4_uninit_output(hdmi);
pm_runtime_disable(&pdev->dev);
kfree(hdmi);
}
static const struct of_device_id hdmi_of_match[] = {
{ .compatible = "ti,omap4-hdmi", },
{},
};
struct platform_driver omapdss_hdmi4hw_driver = {
.probe = hdmi4_probe,
.remove_new = hdmi4_remove,
.driver = {
.name = "omapdss_hdmi",
.of_match_table = hdmi_of_match,
.suppress_bind_attrs = true,
},
};
| linux-master | drivers/gpu/drm/omapdrm/dss/hdmi4.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2009 Nokia Corporation
* Author: Tomi Valkeinen <[email protected]>
*/
#define DSS_SUBSYS_NAME "DSI"
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/mutex.h>
#include <linux/module.h>
#include <linux/semaphore.h>
#include <linux/seq_file.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/of_graph.h>
#include <linux/of_platform.h>
#include <linux/component.h>
#include <linux/sys_soc.h>
#include <drm/drm_bridge.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_panel.h>
#include <video/mipi_display.h>
#include "omapdss.h"
#include "dss.h"
#define DSI_CATCH_MISSING_TE
#include "dsi.h"
#define REG_GET(dsi, idx, start, end) \
FLD_GET(dsi_read_reg(dsi, idx), start, end)
#define REG_FLD_MOD(dsi, idx, val, start, end) \
dsi_write_reg(dsi, idx, FLD_MOD(dsi_read_reg(dsi, idx), val, start, end))
static int dsi_init_dispc(struct dsi_data *dsi);
static void dsi_uninit_dispc(struct dsi_data *dsi);
static int dsi_vc_send_null(struct dsi_data *dsi, int vc, int channel);
static ssize_t _omap_dsi_host_transfer(struct dsi_data *dsi, int vc,
const struct mipi_dsi_msg *msg);
#ifdef DSI_PERF_MEASURE
static bool dsi_perf;
module_param(dsi_perf, bool, 0644);
#endif
/* Note: for some reason video mode seems to work only if VC_VIDEO is 0 */
#define VC_VIDEO 0
#define VC_CMD 1
#define drm_bridge_to_dsi(bridge) \
container_of(bridge, struct dsi_data, bridge)
static inline struct dsi_data *to_dsi_data(struct omap_dss_device *dssdev)
{
return dev_get_drvdata(dssdev->dev);
}
static inline struct dsi_data *host_to_omap(struct mipi_dsi_host *host)
{
return container_of(host, struct dsi_data, host);
}
static inline void dsi_write_reg(struct dsi_data *dsi,
const struct dsi_reg idx, u32 val)
{
void __iomem *base;
switch (idx.module) {
case DSI_PROTO: base = dsi->proto_base; break;
case DSI_PHY: base = dsi->phy_base; break;
case DSI_PLL: base = dsi->pll_base; break;
default: return;
}
__raw_writel(val, base + idx.idx);
}
static inline u32 dsi_read_reg(struct dsi_data *dsi, const struct dsi_reg idx)
{
void __iomem *base;
switch (idx.module) {
case DSI_PROTO: base = dsi->proto_base; break;
case DSI_PHY: base = dsi->phy_base; break;
case DSI_PLL: base = dsi->pll_base; break;
default: return 0;
}
return __raw_readl(base + idx.idx);
}
static void dsi_bus_lock(struct dsi_data *dsi)
{
down(&dsi->bus_lock);
}
static void dsi_bus_unlock(struct dsi_data *dsi)
{
up(&dsi->bus_lock);
}
static bool dsi_bus_is_locked(struct dsi_data *dsi)
{
return dsi->bus_lock.count == 0;
}
static void dsi_completion_handler(void *data, u32 mask)
{
complete((struct completion *)data);
}
static inline bool wait_for_bit_change(struct dsi_data *dsi,
const struct dsi_reg idx,
int bitnum, int value)
{
unsigned long timeout;
ktime_t wait;
int t;
/* first busyloop to see if the bit changes right away */
t = 100;
while (t-- > 0) {
if (REG_GET(dsi, idx, bitnum, bitnum) == value)
return true;
}
/* then loop for 500ms, sleeping for 1ms in between */
timeout = jiffies + msecs_to_jiffies(500);
while (time_before(jiffies, timeout)) {
if (REG_GET(dsi, idx, bitnum, bitnum) == value)
return true;
wait = ns_to_ktime(1000 * 1000);
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_hrtimeout(&wait, HRTIMER_MODE_REL);
}
return false;
}
#ifdef DSI_PERF_MEASURE
static void dsi_perf_mark_setup(struct dsi_data *dsi)
{
dsi->perf_setup_time = ktime_get();
}
static void dsi_perf_mark_start(struct dsi_data *dsi)
{
dsi->perf_start_time = ktime_get();
}
static void dsi_perf_show(struct dsi_data *dsi, const char *name)
{
ktime_t t, setup_time, trans_time;
u32 total_bytes;
u32 setup_us, trans_us, total_us;
if (!dsi_perf)
return;
t = ktime_get();
setup_time = ktime_sub(dsi->perf_start_time, dsi->perf_setup_time);
setup_us = (u32)ktime_to_us(setup_time);
if (setup_us == 0)
setup_us = 1;
trans_time = ktime_sub(t, dsi->perf_start_time);
trans_us = (u32)ktime_to_us(trans_time);
if (trans_us == 0)
trans_us = 1;
total_us = setup_us + trans_us;
total_bytes = dsi->update_bytes;
pr_info("DSI(%s): %u us + %u us = %u us (%uHz), %u bytes, %u kbytes/sec\n",
name,
setup_us,
trans_us,
total_us,
1000 * 1000 / total_us,
total_bytes,
total_bytes * 1000 / total_us);
}
#else
static inline void dsi_perf_mark_setup(struct dsi_data *dsi)
{
}
static inline void dsi_perf_mark_start(struct dsi_data *dsi)
{
}
static inline void dsi_perf_show(struct dsi_data *dsi, const char *name)
{
}
#endif
static int verbose_irq;
static void print_irq_status(u32 status)
{
if (status == 0)
return;
if (!verbose_irq && (status & ~DSI_IRQ_CHANNEL_MASK) == 0)
return;
#define PIS(x) (status & DSI_IRQ_##x) ? (#x " ") : ""
pr_debug("DSI IRQ: 0x%x: %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
status,
verbose_irq ? PIS(VC0) : "",
verbose_irq ? PIS(VC1) : "",
verbose_irq ? PIS(VC2) : "",
verbose_irq ? PIS(VC3) : "",
PIS(WAKEUP),
PIS(RESYNC),
PIS(PLL_LOCK),
PIS(PLL_UNLOCK),
PIS(PLL_RECALL),
PIS(COMPLEXIO_ERR),
PIS(HS_TX_TIMEOUT),
PIS(LP_RX_TIMEOUT),
PIS(TE_TRIGGER),
PIS(ACK_TRIGGER),
PIS(SYNC_LOST),
PIS(LDO_POWER_GOOD),
PIS(TA_TIMEOUT));
#undef PIS
}
static void print_irq_status_vc(int vc, u32 status)
{
if (status == 0)
return;
if (!verbose_irq && (status & ~DSI_VC_IRQ_PACKET_SENT) == 0)
return;
#define PIS(x) (status & DSI_VC_IRQ_##x) ? (#x " ") : ""
pr_debug("DSI VC(%d) IRQ 0x%x: %s%s%s%s%s%s%s%s%s\n",
vc,
status,
PIS(CS),
PIS(ECC_CORR),
PIS(ECC_NO_CORR),
verbose_irq ? PIS(PACKET_SENT) : "",
PIS(BTA),
PIS(FIFO_TX_OVF),
PIS(FIFO_RX_OVF),
PIS(FIFO_TX_UDF),
PIS(PP_BUSY_CHANGE));
#undef PIS
}
static void print_irq_status_cio(u32 status)
{
if (status == 0)
return;
#define PIS(x) (status & DSI_CIO_IRQ_##x) ? (#x " ") : ""
pr_debug("DSI CIO IRQ 0x%x: %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
status,
PIS(ERRSYNCESC1),
PIS(ERRSYNCESC2),
PIS(ERRSYNCESC3),
PIS(ERRESC1),
PIS(ERRESC2),
PIS(ERRESC3),
PIS(ERRCONTROL1),
PIS(ERRCONTROL2),
PIS(ERRCONTROL3),
PIS(STATEULPS1),
PIS(STATEULPS2),
PIS(STATEULPS3),
PIS(ERRCONTENTIONLP0_1),
PIS(ERRCONTENTIONLP1_1),
PIS(ERRCONTENTIONLP0_2),
PIS(ERRCONTENTIONLP1_2),
PIS(ERRCONTENTIONLP0_3),
PIS(ERRCONTENTIONLP1_3),
PIS(ULPSACTIVENOT_ALL0),
PIS(ULPSACTIVENOT_ALL1));
#undef PIS
}
#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
static void dsi_collect_irq_stats(struct dsi_data *dsi, u32 irqstatus,
u32 *vcstatus, u32 ciostatus)
{
int i;
spin_lock(&dsi->irq_stats_lock);
dsi->irq_stats.irq_count++;
dss_collect_irq_stats(irqstatus, dsi->irq_stats.dsi_irqs);
for (i = 0; i < 4; ++i)
dss_collect_irq_stats(vcstatus[i], dsi->irq_stats.vc_irqs[i]);
dss_collect_irq_stats(ciostatus, dsi->irq_stats.cio_irqs);
spin_unlock(&dsi->irq_stats_lock);
}
#else
#define dsi_collect_irq_stats(dsi, irqstatus, vcstatus, ciostatus)
#endif
static int debug_irq;
static void dsi_handle_irq_errors(struct dsi_data *dsi, u32 irqstatus,
u32 *vcstatus, u32 ciostatus)
{
int i;
if (irqstatus & DSI_IRQ_ERROR_MASK) {
DSSERR("DSI error, irqstatus %x\n", irqstatus);
print_irq_status(irqstatus);
spin_lock(&dsi->errors_lock);
dsi->errors |= irqstatus & DSI_IRQ_ERROR_MASK;
spin_unlock(&dsi->errors_lock);
} else if (debug_irq) {
print_irq_status(irqstatus);
}
for (i = 0; i < 4; ++i) {
if (vcstatus[i] & DSI_VC_IRQ_ERROR_MASK) {
DSSERR("DSI VC(%d) error, vc irqstatus %x\n",
i, vcstatus[i]);
print_irq_status_vc(i, vcstatus[i]);
} else if (debug_irq) {
print_irq_status_vc(i, vcstatus[i]);
}
}
if (ciostatus & DSI_CIO_IRQ_ERROR_MASK) {
DSSERR("DSI CIO error, cio irqstatus %x\n", ciostatus);
print_irq_status_cio(ciostatus);
} else if (debug_irq) {
print_irq_status_cio(ciostatus);
}
}
static void dsi_call_isrs(struct dsi_isr_data *isr_array,
unsigned int isr_array_size, u32 irqstatus)
{
struct dsi_isr_data *isr_data;
int i;
for (i = 0; i < isr_array_size; i++) {
isr_data = &isr_array[i];
if (isr_data->isr && isr_data->mask & irqstatus)
isr_data->isr(isr_data->arg, irqstatus);
}
}
static void dsi_handle_isrs(struct dsi_isr_tables *isr_tables,
u32 irqstatus, u32 *vcstatus, u32 ciostatus)
{
int i;
dsi_call_isrs(isr_tables->isr_table,
ARRAY_SIZE(isr_tables->isr_table),
irqstatus);
for (i = 0; i < 4; ++i) {
if (vcstatus[i] == 0)
continue;
dsi_call_isrs(isr_tables->isr_table_vc[i],
ARRAY_SIZE(isr_tables->isr_table_vc[i]),
vcstatus[i]);
}
if (ciostatus != 0)
dsi_call_isrs(isr_tables->isr_table_cio,
ARRAY_SIZE(isr_tables->isr_table_cio),
ciostatus);
}
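/*
 * Top-level DSI interrupt handler: read and ack the main, per-VC and
 * ComplexIO status registers (flushing posted writes after each ack), then
 * dispatch to the registered ISRs from a snapshot of the ISR tables so
 * handlers may unregister themselves, and finally report errors and update
 * the IRQ statistics.
 */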
static irqreturn_t omap_dsi_irq_handler(int irq, void *arg)
{
struct dsi_data *dsi = arg;
u32 irqstatus, vcstatus[4], ciostatus;
int i;
if (!dsi->is_enabled)
return IRQ_NONE;
spin_lock(&dsi->irq_lock);
irqstatus = dsi_read_reg(dsi, DSI_IRQSTATUS);
/* IRQ is not for us */
if (!irqstatus) {
spin_unlock(&dsi->irq_lock);
return IRQ_NONE;
}
dsi_write_reg(dsi, DSI_IRQSTATUS, irqstatus & ~DSI_IRQ_CHANNEL_MASK);
/* flush posted write */
dsi_read_reg(dsi, DSI_IRQSTATUS);
for (i = 0; i < 4; ++i) {
if ((irqstatus & (1 << i)) == 0) {
vcstatus[i] = 0;
continue;
}
vcstatus[i] = dsi_read_reg(dsi, DSI_VC_IRQSTATUS(i));
dsi_write_reg(dsi, DSI_VC_IRQSTATUS(i), vcstatus[i]);
/* flush posted write */
dsi_read_reg(dsi, DSI_VC_IRQSTATUS(i));
}
if (irqstatus & DSI_IRQ_COMPLEXIO_ERR) {
ciostatus = dsi_read_reg(dsi, DSI_COMPLEXIO_IRQ_STATUS);
dsi_write_reg(dsi, DSI_COMPLEXIO_IRQ_STATUS, ciostatus);
/* flush posted write */
dsi_read_reg(dsi, DSI_COMPLEXIO_IRQ_STATUS);
} else {
ciostatus = 0;
}
#ifdef DSI_CATCH_MISSING_TE
if (irqstatus & DSI_IRQ_TE_TRIGGER)
del_timer(&dsi->te_timer);
#endif
/* make a copy and unlock, so that isrs can unregister
* themselves */
memcpy(&dsi->isr_tables_copy, &dsi->isr_tables,
sizeof(dsi->isr_tables));
spin_unlock(&dsi->irq_lock);
dsi_handle_isrs(&dsi->isr_tables_copy, irqstatus, vcstatus, ciostatus);
dsi_handle_irq_errors(dsi, irqstatus, vcstatus, ciostatus);
dsi_collect_irq_stats(dsi, irqstatus, vcstatus, ciostatus);
return IRQ_HANDLED;
}
/* dsi->irq_lock has to be locked by the caller */
static void _omap_dsi_configure_irqs(struct dsi_data *dsi,
struct dsi_isr_data *isr_array,
unsigned int isr_array_size,
u32 default_mask,
const struct dsi_reg enable_reg,
const struct dsi_reg status_reg)
{
struct dsi_isr_data *isr_data;
u32 mask;
u32 old_mask;
int i;
mask = default_mask;
for (i = 0; i < isr_array_size; i++) {
isr_data = &isr_array[i];
if (isr_data->isr == NULL)
continue;
mask |= isr_data->mask;
}
old_mask = dsi_read_reg(dsi, enable_reg);
/* clear the irqstatus for newly enabled irqs */
dsi_write_reg(dsi, status_reg, (mask ^ old_mask) & mask);
dsi_write_reg(dsi, enable_reg, mask);
/* flush posted writes */
dsi_read_reg(dsi, enable_reg);
dsi_read_reg(dsi, status_reg);
}
/* dsi->irq_lock has to be locked by the caller */
static void _omap_dsi_set_irqs(struct dsi_data *dsi)
{
u32 mask = DSI_IRQ_ERROR_MASK;
#ifdef DSI_CATCH_MISSING_TE
mask |= DSI_IRQ_TE_TRIGGER;
#endif
_omap_dsi_configure_irqs(dsi, dsi->isr_tables.isr_table,
ARRAY_SIZE(dsi->isr_tables.isr_table), mask,
DSI_IRQENABLE, DSI_IRQSTATUS);
}
/* dsi->irq_lock has to be locked by the caller */
static void _omap_dsi_set_irqs_vc(struct dsi_data *dsi, int vc)
{
_omap_dsi_configure_irqs(dsi, dsi->isr_tables.isr_table_vc[vc],
ARRAY_SIZE(dsi->isr_tables.isr_table_vc[vc]),
DSI_VC_IRQ_ERROR_MASK,
DSI_VC_IRQENABLE(vc), DSI_VC_IRQSTATUS(vc));
}
/* dsi->irq_lock has to be locked by the caller */
static void _omap_dsi_set_irqs_cio(struct dsi_data *dsi)
{
_omap_dsi_configure_irqs(dsi, dsi->isr_tables.isr_table_cio,
ARRAY_SIZE(dsi->isr_tables.isr_table_cio),
DSI_CIO_IRQ_ERROR_MASK,
DSI_COMPLEXIO_IRQ_ENABLE, DSI_COMPLEXIO_IRQ_STATUS);
}
static void _dsi_initialize_irq(struct dsi_data *dsi)
{
unsigned long flags;
int vc;
spin_lock_irqsave(&dsi->irq_lock, flags);
memset(&dsi->isr_tables, 0, sizeof(dsi->isr_tables));
_omap_dsi_set_irqs(dsi);
for (vc = 0; vc < 4; ++vc)
_omap_dsi_set_irqs_vc(dsi, vc);
_omap_dsi_set_irqs_cio(dsi);
spin_unlock_irqrestore(&dsi->irq_lock, flags);
}
static int _dsi_register_isr(omap_dsi_isr_t isr, void *arg, u32 mask,
struct dsi_isr_data *isr_array, unsigned int isr_array_size)
{
struct dsi_isr_data *isr_data;
int free_idx;
int i;
BUG_ON(isr == NULL);
/* check for duplicate entry and find a free slot */
free_idx = -1;
for (i = 0; i < isr_array_size; i++) {
isr_data = &isr_array[i];
if (isr_data->isr == isr && isr_data->arg == arg &&
isr_data->mask == mask) {
return -EINVAL;
}
if (isr_data->isr == NULL && free_idx == -1)
free_idx = i;
}
if (free_idx == -1)
return -EBUSY;
isr_data = &isr_array[free_idx];
isr_data->isr = isr;
isr_data->arg = arg;
isr_data->mask = mask;
return 0;
}
static int _dsi_unregister_isr(omap_dsi_isr_t isr, void *arg, u32 mask,
struct dsi_isr_data *isr_array, unsigned int isr_array_size)
{
struct dsi_isr_data *isr_data;
int i;
for (i = 0; i < isr_array_size; i++) {
isr_data = &isr_array[i];
if (isr_data->isr != isr || isr_data->arg != arg ||
isr_data->mask != mask)
continue;
isr_data->isr = NULL;
isr_data->arg = NULL;
isr_data->mask = 0;
return 0;
}
return -EINVAL;
}
static int dsi_register_isr(struct dsi_data *dsi, omap_dsi_isr_t isr,
void *arg, u32 mask)
{
unsigned long flags;
int r;
spin_lock_irqsave(&dsi->irq_lock, flags);
r = _dsi_register_isr(isr, arg, mask, dsi->isr_tables.isr_table,
ARRAY_SIZE(dsi->isr_tables.isr_table));
if (r == 0)
_omap_dsi_set_irqs(dsi);
spin_unlock_irqrestore(&dsi->irq_lock, flags);
return r;
}
static int dsi_unregister_isr(struct dsi_data *dsi, omap_dsi_isr_t isr,
void *arg, u32 mask)
{
unsigned long flags;
int r;
spin_lock_irqsave(&dsi->irq_lock, flags);
r = _dsi_unregister_isr(isr, arg, mask, dsi->isr_tables.isr_table,
ARRAY_SIZE(dsi->isr_tables.isr_table));
if (r == 0)
_omap_dsi_set_irqs(dsi);
spin_unlock_irqrestore(&dsi->irq_lock, flags);
return r;
}
static int dsi_register_isr_vc(struct dsi_data *dsi, int vc,
omap_dsi_isr_t isr, void *arg, u32 mask)
{
unsigned long flags;
int r;
spin_lock_irqsave(&dsi->irq_lock, flags);
r = _dsi_register_isr(isr, arg, mask,
dsi->isr_tables.isr_table_vc[vc],
ARRAY_SIZE(dsi->isr_tables.isr_table_vc[vc]));
if (r == 0)
_omap_dsi_set_irqs_vc(dsi, vc);
spin_unlock_irqrestore(&dsi->irq_lock, flags);
return r;
}
static int dsi_unregister_isr_vc(struct dsi_data *dsi, int vc,
omap_dsi_isr_t isr, void *arg, u32 mask)
{
unsigned long flags;
int r;
spin_lock_irqsave(&dsi->irq_lock, flags);
r = _dsi_unregister_isr(isr, arg, mask,
dsi->isr_tables.isr_table_vc[vc],
ARRAY_SIZE(dsi->isr_tables.isr_table_vc[vc]));
if (r == 0)
_omap_dsi_set_irqs_vc(dsi, vc);
spin_unlock_irqrestore(&dsi->irq_lock, flags);
return r;
}
static u32 dsi_get_errors(struct dsi_data *dsi)
{
unsigned long flags;
u32 e;
spin_lock_irqsave(&dsi->errors_lock, flags);
e = dsi->errors;
dsi->errors = 0;
spin_unlock_irqrestore(&dsi->errors_lock, flags);
return e;
}
static int dsi_runtime_get(struct dsi_data *dsi)
{
int r;
DSSDBG("dsi_runtime_get\n");
r = pm_runtime_get_sync(dsi->dev);
if (WARN_ON(r < 0)) {
pm_runtime_put_noidle(dsi->dev);
return r;
}
return 0;
}
static void dsi_runtime_put(struct dsi_data *dsi)
{
int r;
DSSDBG("dsi_runtime_put\n");
r = pm_runtime_put_sync(dsi->dev);
WARN_ON(r < 0 && r != -ENOSYS);
}
static void _dsi_print_reset_status(struct dsi_data *dsi)
{
int b0, b1, b2;
/* A dummy read using the SCP interface to any DSIPHY register is
* required after DSIPHY reset to complete the reset of the DSI complex
* I/O. */
dsi_read_reg(dsi, DSI_DSIPHY_CFG5);
if (dsi->data->quirks & DSI_QUIRK_REVERSE_TXCLKESC) {
b0 = 28;
b1 = 27;
b2 = 26;
} else {
b0 = 24;
b1 = 25;
b2 = 26;
}
#define DSI_FLD_GET(fld, start, end)\
FLD_GET(dsi_read_reg(dsi, DSI_##fld), start, end)
pr_debug("DSI resets: PLL (%d) CIO (%d) PHY (%x%x%x, %d, %d, %d)\n",
DSI_FLD_GET(PLL_STATUS, 0, 0),
DSI_FLD_GET(COMPLEXIO_CFG1, 29, 29),
DSI_FLD_GET(DSIPHY_CFG5, b0, b0),
DSI_FLD_GET(DSIPHY_CFG5, b1, b1),
DSI_FLD_GET(DSIPHY_CFG5, b2, b2),
DSI_FLD_GET(DSIPHY_CFG5, 29, 29),
DSI_FLD_GET(DSIPHY_CFG5, 30, 30),
DSI_FLD_GET(DSIPHY_CFG5, 31, 31));
#undef DSI_FLD_GET
}
static inline int dsi_if_enable(struct dsi_data *dsi, bool enable)
{
DSSDBG("dsi_if_enable(%d)\n", enable);
enable = enable ? 1 : 0;
REG_FLD_MOD(dsi, DSI_CTRL, enable, 0, 0); /* IF_EN */
if (!wait_for_bit_change(dsi, DSI_CTRL, 0, enable)) {
DSSERR("Failed to set dsi_if_enable to %d\n", enable);
return -EIO;
}
return 0;
}
static unsigned long dsi_get_pll_hsdiv_dispc_rate(struct dsi_data *dsi)
{
return dsi->pll.cinfo.clkout[HSDIV_DISPC];
}
static unsigned long dsi_get_pll_hsdiv_dsi_rate(struct dsi_data *dsi)
{
return dsi->pll.cinfo.clkout[HSDIV_DSI];
}
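/*
 * The PLL DCO output (CLKIN4DDR) is four times the DDR clock, and one byte
 * takes four DDR cycles per lane (two bits per DDR cycle), so the
 * high-speed TX byte clock is clkdco / 16.
 */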
static unsigned long dsi_get_txbyteclkhs(struct dsi_data *dsi)
{
return dsi->pll.cinfo.clkdco / 16;
}
static unsigned long dsi_fclk_rate(struct dsi_data *dsi)
{
unsigned long r;
enum dss_clk_source source;
source = dss_get_dsi_clk_source(dsi->dss, dsi->module_id);
if (source == DSS_CLK_SRC_FCK) {
/* DSI FCLK source is DSS_CLK_FCK */
r = clk_get_rate(dsi->dss_clk);
} else {
/* DSI FCLK source is dsi_pll_hsdiv_dsi_clk */
r = dsi_get_pll_hsdiv_dsi_rate(dsi);
}
return r;
}
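/*
 * Pick the smallest LP clock divider that keeps the LP clock at or below
 * lp_clk_max; the resulting LP clock is dsi_fclk / 2 / lp_clk_div. As an
 * illustrative example only: with dsi_fclk = 38.4 MHz and lp_clk_max =
 * 10 MHz this yields lp_clk_div = 2 and an LP clock of 9.6 MHz.
 */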
static int dsi_lp_clock_calc(unsigned long dsi_fclk,
unsigned long lp_clk_min, unsigned long lp_clk_max,
struct dsi_lp_clock_info *lp_cinfo)
{
unsigned int lp_clk_div;
unsigned long lp_clk;
lp_clk_div = DIV_ROUND_UP(dsi_fclk, lp_clk_max * 2);
lp_clk = dsi_fclk / 2 / lp_clk_div;
if (lp_clk < lp_clk_min || lp_clk > lp_clk_max)
return -EINVAL;
lp_cinfo->lp_clk_div = lp_clk_div;
lp_cinfo->lp_clk = lp_clk;
return 0;
}
static int dsi_set_lp_clk_divisor(struct dsi_data *dsi)
{
unsigned long dsi_fclk;
unsigned int lp_clk_div;
unsigned long lp_clk;
unsigned int lpdiv_max = dsi->data->max_pll_lpdiv;
lp_clk_div = dsi->user_lp_cinfo.lp_clk_div;
if (lp_clk_div == 0 || lp_clk_div > lpdiv_max)
return -EINVAL;
dsi_fclk = dsi_fclk_rate(dsi);
lp_clk = dsi_fclk / 2 / lp_clk_div;
DSSDBG("LP_CLK_DIV %u, LP_CLK %lu\n", lp_clk_div, lp_clk);
dsi->current_lp_cinfo.lp_clk = lp_clk;
dsi->current_lp_cinfo.lp_clk_div = lp_clk_div;
/* LP_CLK_DIVISOR */
REG_FLD_MOD(dsi, DSI_CLK_CTRL, lp_clk_div, 12, 0);
/* LP_RX_SYNCHRO_ENABLE */
REG_FLD_MOD(dsi, DSI_CLK_CTRL, dsi_fclk > 30000000 ? 1 : 0, 21, 21);
return 0;
}
static void dsi_enable_scp_clk(struct dsi_data *dsi)
{
if (dsi->scp_clk_refcount++ == 0)
REG_FLD_MOD(dsi, DSI_CLK_CTRL, 1, 14, 14); /* CIO_CLK_ICG */
}
static void dsi_disable_scp_clk(struct dsi_data *dsi)
{
WARN_ON(dsi->scp_clk_refcount == 0);
if (--dsi->scp_clk_refcount == 0)
REG_FLD_MOD(dsi, DSI_CLK_CTRL, 0, 14, 14); /* CIO_CLK_ICG */
}
enum dsi_pll_power_state {
DSI_PLL_POWER_OFF = 0x0,
DSI_PLL_POWER_ON_HSCLK = 0x1,
DSI_PLL_POWER_ON_ALL = 0x2,
DSI_PLL_POWER_ON_DIV = 0x3,
};
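/*
 * Request a PLL power state via PLL_PWR_CMD and busy-wait (up to ~1 ms in
 * 1 us steps) for PLL_PWR_STATUS to report it. On SoCs with the
 * DSI_QUIRK_PLL_PWR_BUG quirk the DSI_PLL_POWER_ON_DIV command does not
 * work and is silently promoted to DSI_PLL_POWER_ON_ALL.
 */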
static int dsi_pll_power(struct dsi_data *dsi, enum dsi_pll_power_state state)
{
int t = 0;
/* DSI-PLL power command 0x3 is not working */
if ((dsi->data->quirks & DSI_QUIRK_PLL_PWR_BUG) &&
state == DSI_PLL_POWER_ON_DIV)
state = DSI_PLL_POWER_ON_ALL;
/* PLL_PWR_CMD */
REG_FLD_MOD(dsi, DSI_CLK_CTRL, state, 31, 30);
/* PLL_PWR_STATUS */
while (FLD_GET(dsi_read_reg(dsi, DSI_CLK_CTRL), 29, 28) != state) {
if (++t > 1000) {
DSSERR("Failed to set DSI PLL power mode to %d\n",
state);
return -ENODEV;
}
udelay(1);
}
return 0;
}
static void dsi_pll_calc_dsi_fck(struct dsi_data *dsi,
struct dss_pll_clock_info *cinfo)
{
unsigned long max_dsi_fck;
max_dsi_fck = dsi->data->max_fck_freq;
cinfo->mX[HSDIV_DSI] = DIV_ROUND_UP(cinfo->clkdco, max_dsi_fck);
cinfo->clkout[HSDIV_DSI] = cinfo->clkdco / cinfo->mX[HSDIV_DSI];
}
static int dsi_pll_enable(struct dss_pll *pll)
{
struct dsi_data *dsi = container_of(pll, struct dsi_data, pll);
int r = 0;
DSSDBG("PLL init\n");
r = dsi_runtime_get(dsi);
if (r)
return r;
/*
* Note: SCP CLK is not required on OMAP3, but it is required on OMAP4.
*/
dsi_enable_scp_clk(dsi);
r = regulator_enable(dsi->vdds_dsi_reg);
if (r)
goto err0;
/* XXX PLL does not come out of reset without this... */
dispc_pck_free_enable(dsi->dss->dispc, 1);
if (!wait_for_bit_change(dsi, DSI_PLL_STATUS, 0, 1)) {
DSSERR("PLL not coming out of reset.\n");
r = -ENODEV;
dispc_pck_free_enable(dsi->dss->dispc, 0);
goto err1;
}
/* XXX ... but if left on, we get problems when planes do not
* fill the whole display. No idea about this */
dispc_pck_free_enable(dsi->dss->dispc, 0);
r = dsi_pll_power(dsi, DSI_PLL_POWER_ON_ALL);
if (r)
goto err1;
DSSDBG("PLL init done\n");
return 0;
err1:
regulator_disable(dsi->vdds_dsi_reg);
err0:
dsi_disable_scp_clk(dsi);
dsi_runtime_put(dsi);
return r;
}
static void dsi_pll_disable(struct dss_pll *pll)
{
struct dsi_data *dsi = container_of(pll, struct dsi_data, pll);
dsi_pll_power(dsi, DSI_PLL_POWER_OFF);
regulator_disable(dsi->vdds_dsi_reg);
dsi_disable_scp_clk(dsi);
dsi_runtime_put(dsi);
DSSDBG("PLL disable done\n");
}
static int dsi_dump_dsi_clocks(struct seq_file *s, void *p)
{
struct dsi_data *dsi = s->private;
struct dss_pll_clock_info *cinfo = &dsi->pll.cinfo;
enum dss_clk_source dispc_clk_src, dsi_clk_src;
int dsi_module = dsi->module_id;
struct dss_pll *pll = &dsi->pll;
dispc_clk_src = dss_get_dispc_clk_source(dsi->dss);
dsi_clk_src = dss_get_dsi_clk_source(dsi->dss, dsi_module);
if (dsi_runtime_get(dsi))
return 0;
seq_printf(s, "- DSI%d PLL -\n", dsi_module + 1);
seq_printf(s, "dsi pll clkin\t%lu\n", clk_get_rate(pll->clkin));
seq_printf(s, "Fint\t\t%-16lun %u\n", cinfo->fint, cinfo->n);
seq_printf(s, "CLKIN4DDR\t%-16lum %u\n",
cinfo->clkdco, cinfo->m);
seq_printf(s, "DSI_PLL_HSDIV_DISPC (%s)\t%-16lum_dispc %u\t(%s)\n",
dss_get_clk_source_name(dsi_module == 0 ?
DSS_CLK_SRC_PLL1_1 :
DSS_CLK_SRC_PLL2_1),
cinfo->clkout[HSDIV_DISPC],
cinfo->mX[HSDIV_DISPC],
dispc_clk_src == DSS_CLK_SRC_FCK ?
"off" : "on");
seq_printf(s, "DSI_PLL_HSDIV_DSI (%s)\t%-16lum_dsi %u\t(%s)\n",
dss_get_clk_source_name(dsi_module == 0 ?
DSS_CLK_SRC_PLL1_2 :
DSS_CLK_SRC_PLL2_2),
cinfo->clkout[HSDIV_DSI],
cinfo->mX[HSDIV_DSI],
dsi_clk_src == DSS_CLK_SRC_FCK ?
"off" : "on");
seq_printf(s, "- DSI%d -\n", dsi_module + 1);
seq_printf(s, "dsi fclk source = %s\n",
dss_get_clk_source_name(dsi_clk_src));
seq_printf(s, "DSI_FCLK\t%lu\n", dsi_fclk_rate(dsi));
seq_printf(s, "DDR_CLK\t\t%lu\n",
cinfo->clkdco / 4);
seq_printf(s, "TxByteClkHS\t%lu\n", dsi_get_txbyteclkhs(dsi));
seq_printf(s, "LP_CLK\t\t%lu\n", dsi->current_lp_cinfo.lp_clk);
dsi_runtime_put(dsi);
return 0;
}
#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
static int dsi_dump_dsi_irqs(struct seq_file *s, void *p)
{
struct dsi_data *dsi = s->private;
unsigned long flags;
struct dsi_irq_stats *stats;
stats = kmalloc(sizeof(*stats), GFP_KERNEL);
if (!stats)
return -ENOMEM;
spin_lock_irqsave(&dsi->irq_stats_lock, flags);
*stats = dsi->irq_stats;
memset(&dsi->irq_stats, 0, sizeof(dsi->irq_stats));
dsi->irq_stats.last_reset = jiffies;
spin_unlock_irqrestore(&dsi->irq_stats_lock, flags);
seq_printf(s, "period %u ms\n",
jiffies_to_msecs(jiffies - stats->last_reset));
seq_printf(s, "irqs %d\n", stats->irq_count);
#define PIS(x) \
seq_printf(s, "%-20s %10d\n", #x, stats->dsi_irqs[ffs(DSI_IRQ_##x)-1]);
seq_printf(s, "-- DSI%d interrupts --\n", dsi->module_id + 1);
PIS(VC0);
PIS(VC1);
PIS(VC2);
PIS(VC3);
PIS(WAKEUP);
PIS(RESYNC);
PIS(PLL_LOCK);
PIS(PLL_UNLOCK);
PIS(PLL_RECALL);
PIS(COMPLEXIO_ERR);
PIS(HS_TX_TIMEOUT);
PIS(LP_RX_TIMEOUT);
PIS(TE_TRIGGER);
PIS(ACK_TRIGGER);
PIS(SYNC_LOST);
PIS(LDO_POWER_GOOD);
PIS(TA_TIMEOUT);
#undef PIS
#define PIS(x) \
seq_printf(s, "%-20s %10d %10d %10d %10d\n", #x, \
stats->vc_irqs[0][ffs(DSI_VC_IRQ_##x)-1], \
stats->vc_irqs[1][ffs(DSI_VC_IRQ_##x)-1], \
stats->vc_irqs[2][ffs(DSI_VC_IRQ_##x)-1], \
stats->vc_irqs[3][ffs(DSI_VC_IRQ_##x)-1]);
seq_printf(s, "-- VC interrupts --\n");
PIS(CS);
PIS(ECC_CORR);
PIS(PACKET_SENT);
PIS(FIFO_TX_OVF);
PIS(FIFO_RX_OVF);
PIS(BTA);
PIS(ECC_NO_CORR);
PIS(FIFO_TX_UDF);
PIS(PP_BUSY_CHANGE);
#undef PIS
#define PIS(x) \
seq_printf(s, "%-20s %10d\n", #x, \
stats->cio_irqs[ffs(DSI_CIO_IRQ_##x)-1]);
seq_printf(s, "-- CIO interrupts --\n");
PIS(ERRSYNCESC1);
PIS(ERRSYNCESC2);
PIS(ERRSYNCESC3);
PIS(ERRESC1);
PIS(ERRESC2);
PIS(ERRESC3);
PIS(ERRCONTROL1);
PIS(ERRCONTROL2);
PIS(ERRCONTROL3);
PIS(STATEULPS1);
PIS(STATEULPS2);
PIS(STATEULPS3);
PIS(ERRCONTENTIONLP0_1);
PIS(ERRCONTENTIONLP1_1);
PIS(ERRCONTENTIONLP0_2);
PIS(ERRCONTENTIONLP1_2);
PIS(ERRCONTENTIONLP0_3);
PIS(ERRCONTENTIONLP1_3);
PIS(ULPSACTIVENOT_ALL0);
PIS(ULPSACTIVENOT_ALL1);
#undef PIS
kfree(stats);
return 0;
}
#endif
static int dsi_dump_dsi_regs(struct seq_file *s, void *p)
{
struct dsi_data *dsi = s->private;
if (dsi_runtime_get(dsi))
return 0;
dsi_enable_scp_clk(dsi);
#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, dsi_read_reg(dsi, r))
DUMPREG(DSI_REVISION);
DUMPREG(DSI_SYSCONFIG);
DUMPREG(DSI_SYSSTATUS);
DUMPREG(DSI_IRQSTATUS);
DUMPREG(DSI_IRQENABLE);
DUMPREG(DSI_CTRL);
DUMPREG(DSI_COMPLEXIO_CFG1);
DUMPREG(DSI_COMPLEXIO_IRQ_STATUS);
DUMPREG(DSI_COMPLEXIO_IRQ_ENABLE);
DUMPREG(DSI_CLK_CTRL);
DUMPREG(DSI_TIMING1);
DUMPREG(DSI_TIMING2);
DUMPREG(DSI_VM_TIMING1);
DUMPREG(DSI_VM_TIMING2);
DUMPREG(DSI_VM_TIMING3);
DUMPREG(DSI_CLK_TIMING);
DUMPREG(DSI_TX_FIFO_VC_SIZE);
DUMPREG(DSI_RX_FIFO_VC_SIZE);
DUMPREG(DSI_COMPLEXIO_CFG2);
DUMPREG(DSI_RX_FIFO_VC_FULLNESS);
DUMPREG(DSI_VM_TIMING4);
DUMPREG(DSI_TX_FIFO_VC_EMPTINESS);
DUMPREG(DSI_VM_TIMING5);
DUMPREG(DSI_VM_TIMING6);
DUMPREG(DSI_VM_TIMING7);
DUMPREG(DSI_STOPCLK_TIMING);
DUMPREG(DSI_VC_CTRL(0));
DUMPREG(DSI_VC_TE(0));
DUMPREG(DSI_VC_LONG_PACKET_HEADER(0));
DUMPREG(DSI_VC_LONG_PACKET_PAYLOAD(0));
DUMPREG(DSI_VC_SHORT_PACKET_HEADER(0));
DUMPREG(DSI_VC_IRQSTATUS(0));
DUMPREG(DSI_VC_IRQENABLE(0));
DUMPREG(DSI_VC_CTRL(1));
DUMPREG(DSI_VC_TE(1));
DUMPREG(DSI_VC_LONG_PACKET_HEADER(1));
DUMPREG(DSI_VC_LONG_PACKET_PAYLOAD(1));
DUMPREG(DSI_VC_SHORT_PACKET_HEADER(1));
DUMPREG(DSI_VC_IRQSTATUS(1));
DUMPREG(DSI_VC_IRQENABLE(1));
DUMPREG(DSI_VC_CTRL(2));
DUMPREG(DSI_VC_TE(2));
DUMPREG(DSI_VC_LONG_PACKET_HEADER(2));
DUMPREG(DSI_VC_LONG_PACKET_PAYLOAD(2));
DUMPREG(DSI_VC_SHORT_PACKET_HEADER(2));
DUMPREG(DSI_VC_IRQSTATUS(2));
DUMPREG(DSI_VC_IRQENABLE(2));
DUMPREG(DSI_VC_CTRL(3));
DUMPREG(DSI_VC_TE(3));
DUMPREG(DSI_VC_LONG_PACKET_HEADER(3));
DUMPREG(DSI_VC_LONG_PACKET_PAYLOAD(3));
DUMPREG(DSI_VC_SHORT_PACKET_HEADER(3));
DUMPREG(DSI_VC_IRQSTATUS(3));
DUMPREG(DSI_VC_IRQENABLE(3));
DUMPREG(DSI_DSIPHY_CFG0);
DUMPREG(DSI_DSIPHY_CFG1);
DUMPREG(DSI_DSIPHY_CFG2);
DUMPREG(DSI_DSIPHY_CFG5);
DUMPREG(DSI_PLL_CONTROL);
DUMPREG(DSI_PLL_STATUS);
DUMPREG(DSI_PLL_GO);
DUMPREG(DSI_PLL_CONFIGURATION1);
DUMPREG(DSI_PLL_CONFIGURATION2);
#undef DUMPREG
dsi_disable_scp_clk(dsi);
dsi_runtime_put(dsi);
return 0;
}
enum dsi_cio_power_state {
DSI_COMPLEXIO_POWER_OFF = 0x0,
DSI_COMPLEXIO_POWER_ON = 0x1,
DSI_COMPLEXIO_POWER_ULPS = 0x2,
};
static int dsi_cio_power(struct dsi_data *dsi, enum dsi_cio_power_state state)
{
int t = 0;
/* PWR_CMD */
REG_FLD_MOD(dsi, DSI_COMPLEXIO_CFG1, state, 28, 27);
/* PWR_STATUS */
while (FLD_GET(dsi_read_reg(dsi, DSI_COMPLEXIO_CFG1),
26, 25) != state) {
if (++t > 1000) {
DSSERR("failed to set complexio power state to "
"%d\n", state);
return -ENODEV;
}
udelay(1);
}
return 0;
}
static unsigned int dsi_get_line_buf_size(struct dsi_data *dsi)
{
int val;
/* line buffer on OMAP3 is 1024 x 24bits */
/* XXX: for some reason using full buffer size causes
* considerable TX slowdown with update sizes that fill the
* whole buffer */
if (!(dsi->data->quirks & DSI_QUIRK_GNQ))
return 1023 * 3;
val = REG_GET(dsi, DSI_GNQ, 14, 12); /* VP1_LINE_BUFFER_SIZE */
switch (val) {
case 1:
return 512 * 3; /* 512x24 bits */
case 2:
return 682 * 3; /* 682x24 bits */
case 3:
return 853 * 3; /* 853x24 bits */
case 4:
return 1024 * 3; /* 1024x24 bits */
case 5:
return 1194 * 3; /* 1194x24 bits */
case 6:
return 1365 * 3; /* 1365x24 bits */
case 7:
return 1920 * 3; /* 1920x24 bits */
default:
BUG();
return 0;
}
}
static int dsi_set_lane_config(struct dsi_data *dsi)
{
static const u8 offsets[] = { 0, 4, 8, 12, 16 };
static const enum dsi_lane_function functions[] = {
DSI_LANE_CLK,
DSI_LANE_DATA1,
DSI_LANE_DATA2,
DSI_LANE_DATA3,
DSI_LANE_DATA4,
};
u32 r;
int i;
r = dsi_read_reg(dsi, DSI_COMPLEXIO_CFG1);
for (i = 0; i < dsi->num_lanes_used; ++i) {
unsigned int offset = offsets[i];
unsigned int polarity, lane_number;
unsigned int t;
for (t = 0; t < dsi->num_lanes_supported; ++t)
if (dsi->lanes[t].function == functions[i])
break;
if (t == dsi->num_lanes_supported)
return -EINVAL;
lane_number = t;
polarity = dsi->lanes[t].polarity;
r = FLD_MOD(r, lane_number + 1, offset + 2, offset);
r = FLD_MOD(r, polarity, offset + 3, offset + 3);
}
/* clear the unused lanes */
for (; i < dsi->num_lanes_supported; ++i) {
unsigned int offset = offsets[i];
r = FLD_MOD(r, 0, offset + 2, offset);
r = FLD_MOD(r, 0, offset + 3, offset + 3);
}
dsi_write_reg(dsi, DSI_COMPLEXIO_CFG1, r);
return 0;
}
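/*
 * Helpers converting between nanoseconds and DDR clock ticks (the DDR clock
 * is clkdco / 4); ns2ddr() rounds up. As an illustrative example only: with
 * clkdco = 480 MHz the DDR clock is 120 MHz, so ns2ddr(dsi, 70) gives
 * ceil(70 * 120 / 1000) = 9 ticks.
 */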
static inline unsigned int ns2ddr(struct dsi_data *dsi, unsigned int ns)
{
/* convert time in ns to ddr ticks, rounding up */
unsigned long ddr_clk = dsi->pll.cinfo.clkdco / 4;
return (ns * (ddr_clk / 1000 / 1000) + 999) / 1000;
}
static inline unsigned int ddr2ns(struct dsi_data *dsi, unsigned int ddr)
{
unsigned long ddr_clk = dsi->pll.cinfo.clkdco / 4;
return ddr * 1000 * 1000 / (ddr_clk / 1000);
}
static void dsi_cio_timings(struct dsi_data *dsi)
{
u32 r;
u32 ths_prepare, ths_prepare_ths_zero, ths_trail, ths_exit;
u32 tlpx_half, tclk_trail, tclk_zero;
u32 tclk_prepare;
/* calculate timings */
/* 1 * DDR_CLK = 2 * UI */
/* min 40ns + 4*UI max 85ns + 6*UI */
ths_prepare = ns2ddr(dsi, 70) + 2;
/* min 145ns + 10*UI */
ths_prepare_ths_zero = ns2ddr(dsi, 175) + 2;
/* min max(8*UI, 60ns+4*UI) */
ths_trail = ns2ddr(dsi, 60) + 5;
/* min 100ns */
ths_exit = ns2ddr(dsi, 145);
/* tlpx min 50ns */
tlpx_half = ns2ddr(dsi, 25);
/* min 60ns */
tclk_trail = ns2ddr(dsi, 60) + 2;
/* min 38ns, max 95ns */
tclk_prepare = ns2ddr(dsi, 65);
/* min tclk-prepare + tclk-zero = 300ns */
tclk_zero = ns2ddr(dsi, 260);
DSSDBG("ths_prepare %u (%uns), ths_prepare_ths_zero %u (%uns)\n",
ths_prepare, ddr2ns(dsi, ths_prepare),
ths_prepare_ths_zero, ddr2ns(dsi, ths_prepare_ths_zero));
DSSDBG("ths_trail %u (%uns), ths_exit %u (%uns)\n",
ths_trail, ddr2ns(dsi, ths_trail),
ths_exit, ddr2ns(dsi, ths_exit));
DSSDBG("tlpx_half %u (%uns), tclk_trail %u (%uns), "
"tclk_zero %u (%uns)\n",
tlpx_half, ddr2ns(dsi, tlpx_half),
tclk_trail, ddr2ns(dsi, tclk_trail),
tclk_zero, ddr2ns(dsi, tclk_zero));
DSSDBG("tclk_prepare %u (%uns)\n",
tclk_prepare, ddr2ns(dsi, tclk_prepare));
/* program timings */
r = dsi_read_reg(dsi, DSI_DSIPHY_CFG0);
r = FLD_MOD(r, ths_prepare, 31, 24);
r = FLD_MOD(r, ths_prepare_ths_zero, 23, 16);
r = FLD_MOD(r, ths_trail, 15, 8);
r = FLD_MOD(r, ths_exit, 7, 0);
dsi_write_reg(dsi, DSI_DSIPHY_CFG0, r);
r = dsi_read_reg(dsi, DSI_DSIPHY_CFG1);
r = FLD_MOD(r, tlpx_half, 20, 16);
r = FLD_MOD(r, tclk_trail, 15, 8);
r = FLD_MOD(r, tclk_zero, 7, 0);
if (dsi->data->quirks & DSI_QUIRK_PHY_DCC) {
r = FLD_MOD(r, 0, 21, 21); /* DCCEN = disable */
r = FLD_MOD(r, 1, 22, 22); /* CLKINP_DIVBY2EN = enable */
r = FLD_MOD(r, 1, 23, 23); /* CLKINP_SEL = enable */
}
dsi_write_reg(dsi, DSI_DSIPHY_CFG1, r);
r = dsi_read_reg(dsi, DSI_DSIPHY_CFG2);
r = FLD_MOD(r, tclk_prepare, 7, 0);
dsi_write_reg(dsi, DSI_DSIPHY_CFG2, r);
}
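/*
 * Wait, with a bounded busy loop, until the TXCLKESC domain of every lane
 * in use reports in DSI_DSIPHY_CFG5 that it has come out of reset. The
 * status bit positions depend on the DSI_QUIRK_REVERSE_TXCLKESC quirk.
 */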
static int dsi_cio_wait_tx_clk_esc_reset(struct dsi_data *dsi)
{
int t, i;
bool in_use[DSI_MAX_NR_LANES];
static const u8 offsets_old[] = { 28, 27, 26 };
static const u8 offsets_new[] = { 24, 25, 26, 27, 28 };
const u8 *offsets;
if (dsi->data->quirks & DSI_QUIRK_REVERSE_TXCLKESC)
offsets = offsets_old;
else
offsets = offsets_new;
for (i = 0; i < dsi->num_lanes_supported; ++i)
in_use[i] = dsi->lanes[i].function != DSI_LANE_UNUSED;
t = 100000;
while (true) {
u32 l;
int ok;
l = dsi_read_reg(dsi, DSI_DSIPHY_CFG5);
ok = 0;
for (i = 0; i < dsi->num_lanes_supported; ++i) {
if (!in_use[i] || (l & (1 << offsets[i])))
ok++;
}
if (ok == dsi->num_lanes_supported)
break;
if (--t == 0) {
for (i = 0; i < dsi->num_lanes_supported; ++i) {
if (!in_use[i] || (l & (1 << offsets[i])))
continue;
DSSERR("CIO TXCLKESC%d domain not coming " \
"out of reset\n", i);
}
return -EIO;
}
}
return 0;
}
/* return bitmask of enabled lanes, lane0 being the lsb */
static unsigned int dsi_get_lane_mask(struct dsi_data *dsi)
{
unsigned int mask = 0;
int i;
for (i = 0; i < dsi->num_lanes_supported; ++i) {
if (dsi->lanes[i].function != DSI_LANE_UNUSED)
mask |= 1 << i;
}
return mask;
}
/* OMAP4 CONTROL_DSIPHY */
#define OMAP4_DSIPHY_SYSCON_OFFSET 0x78
#define OMAP4_DSI2_LANEENABLE_SHIFT 29
#define OMAP4_DSI2_LANEENABLE_MASK (0x7 << 29)
#define OMAP4_DSI1_LANEENABLE_SHIFT 24
#define OMAP4_DSI1_LANEENABLE_MASK (0x1f << 24)
#define OMAP4_DSI1_PIPD_SHIFT 19
#define OMAP4_DSI1_PIPD_MASK (0x1f << 19)
#define OMAP4_DSI2_PIPD_SHIFT 14
#define OMAP4_DSI2_PIPD_MASK (0x1f << 14)
static int dsi_omap4_mux_pads(struct dsi_data *dsi, unsigned int lanes)
{
u32 enable_mask, enable_shift;
u32 pipd_mask, pipd_shift;
if (dsi->module_id == 0) {
enable_mask = OMAP4_DSI1_LANEENABLE_MASK;
enable_shift = OMAP4_DSI1_LANEENABLE_SHIFT;
pipd_mask = OMAP4_DSI1_PIPD_MASK;
pipd_shift = OMAP4_DSI1_PIPD_SHIFT;
} else if (dsi->module_id == 1) {
enable_mask = OMAP4_DSI2_LANEENABLE_MASK;
enable_shift = OMAP4_DSI2_LANEENABLE_SHIFT;
pipd_mask = OMAP4_DSI2_PIPD_MASK;
pipd_shift = OMAP4_DSI2_PIPD_SHIFT;
} else {
return -ENODEV;
}
return regmap_update_bits(dsi->syscon, OMAP4_DSIPHY_SYSCON_OFFSET,
enable_mask | pipd_mask,
(lanes << enable_shift) | (lanes << pipd_shift));
}
/* OMAP5 CONTROL_DSIPHY */
#define OMAP5_DSIPHY_SYSCON_OFFSET 0x74
#define OMAP5_DSI1_LANEENABLE_SHIFT 24
#define OMAP5_DSI2_LANEENABLE_SHIFT 19
#define OMAP5_DSI_LANEENABLE_MASK 0x1f
static int dsi_omap5_mux_pads(struct dsi_data *dsi, unsigned int lanes)
{
u32 enable_shift;
if (dsi->module_id == 0)
enable_shift = OMAP5_DSI1_LANEENABLE_SHIFT;
else if (dsi->module_id == 1)
enable_shift = OMAP5_DSI2_LANEENABLE_SHIFT;
else
return -ENODEV;
return regmap_update_bits(dsi->syscon, OMAP5_DSIPHY_SYSCON_OFFSET,
OMAP5_DSI_LANEENABLE_MASK << enable_shift,
lanes << enable_shift);
}
static int dsi_enable_pads(struct dsi_data *dsi, unsigned int lane_mask)
{
if (dsi->data->model == DSI_MODEL_OMAP4)
return dsi_omap4_mux_pads(dsi, lane_mask);
if (dsi->data->model == DSI_MODEL_OMAP5)
return dsi_omap5_mux_pads(dsi, lane_mask);
return 0;
}
static void dsi_disable_pads(struct dsi_data *dsi)
{
if (dsi->data->model == DSI_MODEL_OMAP4)
dsi_omap4_mux_pads(dsi, 0);
else if (dsi->data->model == DSI_MODEL_OMAP5)
dsi_omap5_mux_pads(dsi, 0);
}
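/*
 * Bring up the DSI complex I/O: mux the pads for the lanes in use, enable
 * the SCP clock, program the lane configuration, power the CIO block on and
 * wait for its clock domains to leave reset before setting the PHY timings.
 */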
static int dsi_cio_init(struct dsi_data *dsi)
{
int r;
u32 l;
DSSDBG("DSI CIO init starts");
r = dsi_enable_pads(dsi, dsi_get_lane_mask(dsi));
if (r)
return r;
dsi_enable_scp_clk(dsi);
/* A dummy read using the SCP interface to any DSIPHY register is
* required after DSIPHY reset to complete the reset of the DSI complex
* I/O. */
dsi_read_reg(dsi, DSI_DSIPHY_CFG5);
if (!wait_for_bit_change(dsi, DSI_DSIPHY_CFG5, 30, 1)) {
DSSERR("CIO SCP Clock domain not coming out of reset.\n");
r = -EIO;
goto err_scp_clk_dom;
}
r = dsi_set_lane_config(dsi);
if (r)
goto err_scp_clk_dom;
/* set TX STOP MODE timer to maximum for this operation */
l = dsi_read_reg(dsi, DSI_TIMING1);
l = FLD_MOD(l, 1, 15, 15); /* FORCE_TX_STOP_MODE_IO */
l = FLD_MOD(l, 1, 14, 14); /* STOP_STATE_X16_IO */
l = FLD_MOD(l, 1, 13, 13); /* STOP_STATE_X4_IO */
l = FLD_MOD(l, 0x1fff, 12, 0); /* STOP_STATE_COUNTER_IO */
dsi_write_reg(dsi, DSI_TIMING1, l);
r = dsi_cio_power(dsi, DSI_COMPLEXIO_POWER_ON);
if (r)
goto err_cio_pwr;
if (!wait_for_bit_change(dsi, DSI_COMPLEXIO_CFG1, 29, 1)) {
DSSERR("CIO PWR clock domain not coming out of reset.\n");
r = -ENODEV;
goto err_cio_pwr_dom;
}
dsi_if_enable(dsi, true);
dsi_if_enable(dsi, false);
REG_FLD_MOD(dsi, DSI_CLK_CTRL, 1, 20, 20); /* LP_CLK_ENABLE */
r = dsi_cio_wait_tx_clk_esc_reset(dsi);
if (r)
goto err_tx_clk_esc_rst;
/* FORCE_TX_STOP_MODE_IO */
REG_FLD_MOD(dsi, DSI_TIMING1, 0, 15, 15);
dsi_cio_timings(dsi);
/* DDR_CLK_ALWAYS_ON */
REG_FLD_MOD(dsi, DSI_CLK_CTRL,
!(dsi->dsidev->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS),
13, 13);
DSSDBG("CIO init done\n");
return 0;
err_tx_clk_esc_rst:
REG_FLD_MOD(dsi, DSI_CLK_CTRL, 0, 20, 20); /* LP_CLK_ENABLE */
err_cio_pwr_dom:
dsi_cio_power(dsi, DSI_COMPLEXIO_POWER_OFF);
err_cio_pwr:
err_scp_clk_dom:
dsi_disable_scp_clk(dsi);
dsi_disable_pads(dsi);
return r;
}
static void dsi_cio_uninit(struct dsi_data *dsi)
{
/* DDR_CLK_ALWAYS_ON */
REG_FLD_MOD(dsi, DSI_CLK_CTRL, 0, 13, 13);
dsi_cio_power(dsi, DSI_COMPLEXIO_POWER_OFF);
dsi_disable_scp_clk(dsi);
dsi_disable_pads(dsi);
}
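/*
 * The TX and RX FIFOs consist of four blocks that are handed out to the
 * virtual channels as contiguous (add, size) slices, so the per-VC sizes
 * must sum to at most four blocks; the "Illegal FIFO configuration" checks
 * below enforce this.
 */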
static void dsi_config_tx_fifo(struct dsi_data *dsi,
enum fifo_size size1, enum fifo_size size2,
enum fifo_size size3, enum fifo_size size4)
{
u32 r = 0;
int add = 0;
int i;
dsi->vc[0].tx_fifo_size = size1;
dsi->vc[1].tx_fifo_size = size2;
dsi->vc[2].tx_fifo_size = size3;
dsi->vc[3].tx_fifo_size = size4;
for (i = 0; i < 4; i++) {
u8 v;
int size = dsi->vc[i].tx_fifo_size;
if (add + size > 4) {
DSSERR("Illegal FIFO configuration\n");
BUG();
return;
}
v = FLD_VAL(add, 2, 0) | FLD_VAL(size, 7, 4);
r |= v << (8 * i);
/*DSSDBG("TX FIFO vc %d: size %d, add %d\n", i, size, add); */
add += size;
}
dsi_write_reg(dsi, DSI_TX_FIFO_VC_SIZE, r);
}
static void dsi_config_rx_fifo(struct dsi_data *dsi,
enum fifo_size size1, enum fifo_size size2,
enum fifo_size size3, enum fifo_size size4)
{
u32 r = 0;
int add = 0;
int i;
dsi->vc[0].rx_fifo_size = size1;
dsi->vc[1].rx_fifo_size = size2;
dsi->vc[2].rx_fifo_size = size3;
dsi->vc[3].rx_fifo_size = size4;
for (i = 0; i < 4; i++) {
u8 v;
int size = dsi->vc[i].rx_fifo_size;
if (add + size > 4) {
DSSERR("Illegal FIFO configuration\n");
BUG();
return;
}
v = FLD_VAL(add, 2, 0) | FLD_VAL(size, 7, 4);
r |= v << (8 * i);
/*DSSDBG("RX FIFO vc %d: size %d, add %d\n", i, size, add); */
add += size;
}
dsi_write_reg(dsi, DSI_RX_FIFO_VC_SIZE, r);
}
static int dsi_force_tx_stop_mode_io(struct dsi_data *dsi)
{
u32 r;
r = dsi_read_reg(dsi, DSI_TIMING1);
r = FLD_MOD(r, 1, 15, 15); /* FORCE_TX_STOP_MODE_IO */
dsi_write_reg(dsi, DSI_TIMING1, r);
if (!wait_for_bit_change(dsi, DSI_TIMING1, 15, 0)) {
DSSERR("TX_STOP bit not going down\n");
return -EIO;
}
return 0;
}
static bool dsi_vc_is_enabled(struct dsi_data *dsi, int vc)
{
return REG_GET(dsi, DSI_VC_CTRL(vc), 0, 0);
}
static void dsi_packet_sent_handler_vp(void *data, u32 mask)
{
struct dsi_packet_sent_handler_data *vp_data =
(struct dsi_packet_sent_handler_data *) data;
struct dsi_data *dsi = vp_data->dsi;
const int vc = dsi->update_vc;
u8 bit = dsi->te_enabled ? 30 : 31;
if (REG_GET(dsi, DSI_VC_TE(vc), bit, bit) == 0)
complete(vp_data->completion);
}
static int dsi_sync_vc_vp(struct dsi_data *dsi, int vc)
{
DECLARE_COMPLETION_ONSTACK(completion);
struct dsi_packet_sent_handler_data vp_data = {
.dsi = dsi,
.completion = &completion
};
int r = 0;
u8 bit;
bit = dsi->te_enabled ? 30 : 31;
r = dsi_register_isr_vc(dsi, vc, dsi_packet_sent_handler_vp,
&vp_data, DSI_VC_IRQ_PACKET_SENT);
if (r)
goto err0;
/* Wait for completion only if TE_EN/TE_START is still set */
if (REG_GET(dsi, DSI_VC_TE(vc), bit, bit)) {
if (wait_for_completion_timeout(&completion,
msecs_to_jiffies(10)) == 0) {
DSSERR("Failed to complete previous frame transfer\n");
r = -EIO;
goto err1;
}
}
dsi_unregister_isr_vc(dsi, vc, dsi_packet_sent_handler_vp,
&vp_data, DSI_VC_IRQ_PACKET_SENT);
return 0;
err1:
dsi_unregister_isr_vc(dsi, vc, dsi_packet_sent_handler_vp,
&vp_data, DSI_VC_IRQ_PACKET_SENT);
err0:
return r;
}
static void dsi_packet_sent_handler_l4(void *data, u32 mask)
{
struct dsi_packet_sent_handler_data *l4_data =
(struct dsi_packet_sent_handler_data *) data;
struct dsi_data *dsi = l4_data->dsi;
const int vc = dsi->update_vc;
if (REG_GET(dsi, DSI_VC_CTRL(vc), 5, 5) == 0)
complete(l4_data->completion);
}
static int dsi_sync_vc_l4(struct dsi_data *dsi, int vc)
{
DECLARE_COMPLETION_ONSTACK(completion);
struct dsi_packet_sent_handler_data l4_data = {
.dsi = dsi,
.completion = &completion
};
int r = 0;
r = dsi_register_isr_vc(dsi, vc, dsi_packet_sent_handler_l4,
&l4_data, DSI_VC_IRQ_PACKET_SENT);
if (r)
goto err0;
/* Wait for completion only if TX_FIFO_NOT_EMPTY is still set */
if (REG_GET(dsi, DSI_VC_CTRL(vc), 5, 5)) {
if (wait_for_completion_timeout(&completion,
msecs_to_jiffies(10)) == 0) {
DSSERR("Failed to complete previous l4 transfer\n");
r = -EIO;
goto err1;
}
}
dsi_unregister_isr_vc(dsi, vc, dsi_packet_sent_handler_l4,
&l4_data, DSI_VC_IRQ_PACKET_SENT);
return 0;
err1:
dsi_unregister_isr_vc(dsi, vc, dsi_packet_sent_handler_l4,
&l4_data, DSI_VC_IRQ_PACKET_SENT);
err0:
return r;
}
static int dsi_sync_vc(struct dsi_data *dsi, int vc)
{
WARN_ON(!dsi_bus_is_locked(dsi));
WARN_ON(in_interrupt());
if (!dsi_vc_is_enabled(dsi, vc))
return 0;
switch (dsi->vc[vc].source) {
case DSI_VC_SOURCE_VP:
return dsi_sync_vc_vp(dsi, vc);
case DSI_VC_SOURCE_L4:
return dsi_sync_vc_l4(dsi, vc);
default:
BUG();
return -EINVAL;
}
}
static int dsi_vc_enable(struct dsi_data *dsi, int vc, bool enable)
{
DSSDBG("dsi_vc_enable vc %d, enable %d\n",
vc, enable);
enable = enable ? 1 : 0;
REG_FLD_MOD(dsi, DSI_VC_CTRL(vc), enable, 0, 0);
if (!wait_for_bit_change(dsi, DSI_VC_CTRL(vc), 0, enable)) {
DSSERR("Failed to set dsi_vc_enable to %d\n", enable);
return -EIO;
}
return 0;
}
static void dsi_vc_initial_config(struct dsi_data *dsi, int vc)
{
u32 r;
DSSDBG("Initial config of VC %d", vc);
r = dsi_read_reg(dsi, DSI_VC_CTRL(vc));
if (FLD_GET(r, 15, 15)) /* VC_BUSY */
DSSERR("VC(%d) busy when trying to configure it!\n",
vc);
r = FLD_MOD(r, 0, 1, 1); /* SOURCE, 0 = L4 */
r = FLD_MOD(r, 0, 2, 2); /* BTA_SHORT_EN */
r = FLD_MOD(r, 0, 3, 3); /* BTA_LONG_EN */
r = FLD_MOD(r, 0, 4, 4); /* MODE, 0 = command */
r = FLD_MOD(r, 1, 7, 7); /* CS_TX_EN */
r = FLD_MOD(r, 1, 8, 8); /* ECC_TX_EN */
r = FLD_MOD(r, 0, 9, 9); /* MODE_SPEED, high speed on/off */
if (dsi->data->quirks & DSI_QUIRK_VC_OCP_WIDTH)
r = FLD_MOD(r, 3, 11, 10); /* OCP_WIDTH = 32 bit */
r = FLD_MOD(r, 4, 29, 27); /* DMA_RX_REQ_NB = no dma */
r = FLD_MOD(r, 4, 23, 21); /* DMA_TX_REQ_NB = no dma */
dsi_write_reg(dsi, DSI_VC_CTRL(vc), r);
dsi->vc[vc].source = DSI_VC_SOURCE_L4;
}
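/*
 * Switching a VC between LP and HS transmission requires the VC and the
 * whole DSI interface to be briefly disabled while MODE_SPEED is changed;
 * afterwards TX stop mode is forced so the lanes return to the stop state.
 */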
static void dsi_vc_enable_hs(struct omap_dss_device *dssdev, int vc,
bool enable)
{
struct dsi_data *dsi = to_dsi_data(dssdev);
DSSDBG("dsi_vc_enable_hs(%d, %d)\n", vc, enable);
if (REG_GET(dsi, DSI_VC_CTRL(vc), 9, 9) == enable)
return;
WARN_ON(!dsi_bus_is_locked(dsi));
dsi_vc_enable(dsi, vc, 0);
dsi_if_enable(dsi, 0);
REG_FLD_MOD(dsi, DSI_VC_CTRL(vc), enable, 9, 9);
dsi_vc_enable(dsi, vc, 1);
dsi_if_enable(dsi, 1);
dsi_force_tx_stop_mode_io(dsi);
}
static void dsi_vc_flush_long_data(struct dsi_data *dsi, int vc)
{
while (REG_GET(dsi, DSI_VC_CTRL(vc), 20, 20)) {
u32 val;
val = dsi_read_reg(dsi, DSI_VC_SHORT_PACKET_HEADER(vc));
DSSDBG("\t\tb1 %#02x b2 %#02x b3 %#02x b4 %#02x\n",
(val >> 0) & 0xff,
(val >> 8) & 0xff,
(val >> 16) & 0xff,
(val >> 24) & 0xff);
}
}
static void dsi_show_rx_ack_with_err(u16 err)
{
DSSERR("\tACK with ERROR (%#x):\n", err);
if (err & (1 << 0))
DSSERR("\t\tSoT Error\n");
if (err & (1 << 1))
DSSERR("\t\tSoT Sync Error\n");
if (err & (1 << 2))
DSSERR("\t\tEoT Sync Error\n");
if (err & (1 << 3))
DSSERR("\t\tEscape Mode Entry Command Error\n");
if (err & (1 << 4))
DSSERR("\t\tLP Transmit Sync Error\n");
if (err & (1 << 5))
DSSERR("\t\tHS Receive Timeout Error\n");
if (err & (1 << 6))
DSSERR("\t\tFalse Control Error\n");
if (err & (1 << 7))
DSSERR("\t\t(reserved7)\n");
if (err & (1 << 8))
DSSERR("\t\tECC Error, single-bit (corrected)\n");
if (err & (1 << 9))
DSSERR("\t\tECC Error, multi-bit (not corrected)\n");
if (err & (1 << 10))
DSSERR("\t\tChecksum Error\n");
if (err & (1 << 11))
DSSERR("\t\tData type not recognized\n");
if (err & (1 << 12))
DSSERR("\t\tInvalid VC ID\n");
if (err & (1 << 13))
DSSERR("\t\tInvalid Transmission Length\n");
if (err & (1 << 14))
DSSERR("\t\t(reserved14)\n");
if (err & (1 << 15))
DSSERR("\t\tDSI Protocol Violation\n");
}
static u16 dsi_vc_flush_receive_data(struct dsi_data *dsi, int vc)
{
/* RX_FIFO_NOT_EMPTY */
while (REG_GET(dsi, DSI_VC_CTRL(vc), 20, 20)) {
u32 val;
u8 dt;
val = dsi_read_reg(dsi, DSI_VC_SHORT_PACKET_HEADER(vc));
DSSERR("\trawval %#08x\n", val);
dt = FLD_GET(val, 5, 0);
if (dt == MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT) {
u16 err = FLD_GET(val, 23, 8);
dsi_show_rx_ack_with_err(err);
} else if (dt == MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE) {
DSSERR("\tDCS short response, 1 byte: %#x\n",
FLD_GET(val, 23, 8));
} else if (dt == MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_2BYTE) {
DSSERR("\tDCS short response, 2 byte: %#x\n",
FLD_GET(val, 23, 8));
} else if (dt == MIPI_DSI_RX_DCS_LONG_READ_RESPONSE) {
DSSERR("\tDCS long response, len %d\n",
FLD_GET(val, 23, 8));
dsi_vc_flush_long_data(dsi, vc);
} else {
DSSERR("\tunknown datatype 0x%02x\n", dt);
}
}
return 0;
}
static int dsi_vc_send_bta(struct dsi_data *dsi, int vc)
{
if (dsi->debug_write || dsi->debug_read)
DSSDBG("dsi_vc_send_bta %d\n", vc);
WARN_ON(!dsi_bus_is_locked(dsi));
/* RX_FIFO_NOT_EMPTY */
if (REG_GET(dsi, DSI_VC_CTRL(vc), 20, 20)) {
DSSERR("rx fifo not empty when sending BTA, dumping data:\n");
dsi_vc_flush_receive_data(dsi, vc);
}
REG_FLD_MOD(dsi, DSI_VC_CTRL(vc), 1, 6, 6); /* BTA_EN */
/* flush posted write */
dsi_read_reg(dsi, DSI_VC_CTRL(vc));
return 0;
}
static int dsi_vc_send_bta_sync(struct omap_dss_device *dssdev, int vc)
{
struct dsi_data *dsi = to_dsi_data(dssdev);
DECLARE_COMPLETION_ONSTACK(completion);
int r = 0;
u32 err;
r = dsi_register_isr_vc(dsi, vc, dsi_completion_handler,
&completion, DSI_VC_IRQ_BTA);
if (r)
goto err0;
r = dsi_register_isr(dsi, dsi_completion_handler, &completion,
DSI_IRQ_ERROR_MASK);
if (r)
goto err1;
r = dsi_vc_send_bta(dsi, vc);
if (r)
goto err2;
if (wait_for_completion_timeout(&completion,
msecs_to_jiffies(500)) == 0) {
DSSERR("Failed to receive BTA\n");
r = -EIO;
goto err2;
}
err = dsi_get_errors(dsi);
if (err) {
DSSERR("Error while sending BTA: %x\n", err);
r = -EIO;
goto err2;
}
err2:
dsi_unregister_isr(dsi, dsi_completion_handler, &completion,
DSI_IRQ_ERROR_MASK);
err1:
dsi_unregister_isr_vc(dsi, vc, dsi_completion_handler,
&completion, DSI_VC_IRQ_BTA);
err0:
return r;
}
static inline void dsi_vc_write_long_header(struct dsi_data *dsi, int vc,
int channel, u8 data_type, u16 len,
u8 ecc)
{
u32 val;
u8 data_id;
WARN_ON(!dsi_bus_is_locked(dsi));
data_id = data_type | channel << 6;
val = FLD_VAL(data_id, 7, 0) | FLD_VAL(len, 23, 8) |
FLD_VAL(ecc, 31, 24);
dsi_write_reg(dsi, DSI_VC_LONG_PACKET_HEADER(vc), val);
}
static inline void dsi_vc_write_long_payload(struct dsi_data *dsi, int vc,
u8 b1, u8 b2, u8 b3, u8 b4)
{
u32 val;
val = b4 << 24 | b3 << 16 | b2 << 8 | b1 << 0;
/* DSSDBG("\twriting %02x, %02x, %02x, %02x (%#010x)\n",
b1, b2, b3, b4, val); */
dsi_write_reg(dsi, DSI_VC_LONG_PACKET_PAYLOAD(vc), val);
}
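/*
 * Send a long packet: write the 4-byte header (data ID, 16-bit word count,
 * ECC) to the long packet header register, then push the payload four bytes
 * at a time, zero-padding the final partial word.
 */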
static int dsi_vc_send_long(struct dsi_data *dsi, int vc,
const struct mipi_dsi_msg *msg)
{
/*u32 val; */
int i;
const u8 *p;
int r = 0;
u8 b1, b2, b3, b4;
if (dsi->debug_write)
DSSDBG("dsi_vc_send_long, %zu bytes\n", msg->tx_len);
/* len + header */
if (dsi->vc[vc].tx_fifo_size * 32 * 4 < msg->tx_len + 4) {
DSSERR("unable to send long packet: packet too long.\n");
return -EINVAL;
}
dsi_vc_write_long_header(dsi, vc, msg->channel, msg->type, msg->tx_len, 0);
p = msg->tx_buf;
for (i = 0; i < msg->tx_len >> 2; i++) {
if (dsi->debug_write)
DSSDBG("\tsending full packet %d\n", i);
b1 = *p++;
b2 = *p++;
b3 = *p++;
b4 = *p++;
dsi_vc_write_long_payload(dsi, vc, b1, b2, b3, b4);
}
i = msg->tx_len % 4;
if (i) {
b1 = 0; b2 = 0; b3 = 0;
if (dsi->debug_write)
DSSDBG("\tsending remainder bytes %d\n", i);
switch (i) {
case 3:
b1 = *p++;
b2 = *p++;
b3 = *p++;
break;
case 2:
b1 = *p++;
b2 = *p++;
break;
case 1:
b1 = *p++;
break;
}
dsi_vc_write_long_payload(dsi, vc, b1, b2, b3, 0);
}
return r;
}
static int dsi_vc_send_short(struct dsi_data *dsi, int vc,
const struct mipi_dsi_msg *msg)
{
struct mipi_dsi_packet pkt;
int ret;
u32 r;
ret = mipi_dsi_create_packet(&pkt, msg);
if (ret < 0)
return ret;
WARN_ON(!dsi_bus_is_locked(dsi));
if (dsi->debug_write)
DSSDBG("dsi_vc_send_short(vc%d, dt %#x, b1 %#x, b2 %#x)\n",
vc, msg->type, pkt.header[1], pkt.header[2]);
if (FLD_GET(dsi_read_reg(dsi, DSI_VC_CTRL(vc)), 16, 16)) {
DSSERR("ERROR FIFO FULL, aborting transfer\n");
return -EINVAL;
}
r = pkt.header[3] << 24 | pkt.header[2] << 16 | pkt.header[1] << 8 |
pkt.header[0];
dsi_write_reg(dsi, DSI_VC_SHORT_PACKET_HEADER(vc), r);
return 0;
}
static int dsi_vc_send_null(struct dsi_data *dsi, int vc, int channel)
{
const struct mipi_dsi_msg msg = {
.channel = channel,
.type = MIPI_DSI_NULL_PACKET,
};
return dsi_vc_send_long(dsi, vc, &msg);
}
static int dsi_vc_write_common(struct omap_dss_device *dssdev, int vc,
const struct mipi_dsi_msg *msg)
{
struct dsi_data *dsi = to_dsi_data(dssdev);
int r;
if (mipi_dsi_packet_format_is_short(msg->type))
r = dsi_vc_send_short(dsi, vc, msg);
else
r = dsi_vc_send_long(dsi, vc, msg);
if (r < 0)
return r;
/*
* TODO: we do not always have to do the BTA sync, for example
* we can improve performance by setting the update window
* information without sending BTA sync between the commands.
* In that case we can return early.
*/
r = dsi_vc_send_bta_sync(dssdev, vc);
if (r) {
DSSERR("bta sync failed\n");
return r;
}
/* RX_FIFO_NOT_EMPTY */
if (REG_GET(dsi, DSI_VC_CTRL(vc), 20, 20)) {
DSSERR("rx fifo not empty after write, dumping data:\n");
dsi_vc_flush_receive_data(dsi, vc);
return -EIO;
}
return 0;
}
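/*
 * Drain one response from the RX FIFO: the first word is the packet header,
 * whose data type selects between an error report, a 1- or 2-byte short
 * read response and a long read response. Long responses are read out one
 * 32-bit word at a time and the trailing 2-byte checksum is discarded.
 */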
static int dsi_vc_read_rx_fifo(struct dsi_data *dsi, int vc, u8 *buf,
int buflen, enum dss_dsi_content_type type)
{
u32 val;
u8 dt;
int r;
/* RX_FIFO_NOT_EMPTY */
if (REG_GET(dsi, DSI_VC_CTRL(vc), 20, 20) == 0) {
DSSERR("RX fifo empty when trying to read.\n");
r = -EIO;
goto err;
}
val = dsi_read_reg(dsi, DSI_VC_SHORT_PACKET_HEADER(vc));
if (dsi->debug_read)
DSSDBG("\theader: %08x\n", val);
dt = FLD_GET(val, 5, 0);
if (dt == MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT) {
u16 err = FLD_GET(val, 23, 8);
dsi_show_rx_ack_with_err(err);
r = -EIO;
goto err;
} else if (dt == (type == DSS_DSI_CONTENT_GENERIC ?
MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_1BYTE :
MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE)) {
u8 data = FLD_GET(val, 15, 8);
if (dsi->debug_read)
DSSDBG("\t%s short response, 1 byte: %02x\n",
type == DSS_DSI_CONTENT_GENERIC ? "GENERIC" :
"DCS", data);
if (buflen < 1) {
r = -EIO;
goto err;
}
buf[0] = data;
return 1;
} else if (dt == (type == DSS_DSI_CONTENT_GENERIC ?
MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_2BYTE :
MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_2BYTE)) {
u16 data = FLD_GET(val, 23, 8);
if (dsi->debug_read)
DSSDBG("\t%s short response, 2 byte: %04x\n",
type == DSS_DSI_CONTENT_GENERIC ? "GENERIC" :
"DCS", data);
if (buflen < 2) {
r = -EIO;
goto err;
}
buf[0] = data & 0xff;
buf[1] = (data >> 8) & 0xff;
return 2;
} else if (dt == (type == DSS_DSI_CONTENT_GENERIC ?
MIPI_DSI_RX_GENERIC_LONG_READ_RESPONSE :
MIPI_DSI_RX_DCS_LONG_READ_RESPONSE)) {
int w;
int len = FLD_GET(val, 23, 8);
if (dsi->debug_read)
DSSDBG("\t%s long response, len %d\n",
type == DSS_DSI_CONTENT_GENERIC ? "GENERIC" :
"DCS", len);
if (len > buflen) {
r = -EIO;
goto err;
}
/* two byte checksum ends the packet, not included in len */
for (w = 0; w < len + 2;) {
int b;
val = dsi_read_reg(dsi,
DSI_VC_SHORT_PACKET_HEADER(vc));
if (dsi->debug_read)
DSSDBG("\t\t%02x %02x %02x %02x\n",
(val >> 0) & 0xff,
(val >> 8) & 0xff,
(val >> 16) & 0xff,
(val >> 24) & 0xff);
for (b = 0; b < 4; ++b) {
if (w < len)
buf[w] = (val >> (b * 8)) & 0xff;
/* we discard the 2 byte checksum */
++w;
}
}
return len;
} else {
DSSERR("\tunknown datatype 0x%02x\n", dt);
r = -EIO;
goto err;
}
err:
DSSERR("dsi_vc_read_rx_fifo(vc %d type %s) failed\n", vc,
type == DSS_DSI_CONTENT_GENERIC ? "GENERIC" : "DCS");
return r;
}
static int dsi_vc_dcs_read(struct omap_dss_device *dssdev, int vc,
const struct mipi_dsi_msg *msg)
{
struct dsi_data *dsi = to_dsi_data(dssdev);
u8 cmd = ((u8 *)msg->tx_buf)[0];
int r;
if (dsi->debug_read)
DSSDBG("%s(vc %d, cmd %x)\n", __func__, vc, cmd);
r = dsi_vc_send_short(dsi, vc, msg);
if (r)
goto err;
r = dsi_vc_send_bta_sync(dssdev, vc);
if (r)
goto err;
r = dsi_vc_read_rx_fifo(dsi, vc, msg->rx_buf, msg->rx_len,
DSS_DSI_CONTENT_DCS);
if (r < 0)
goto err;
if (r != msg->rx_len) {
r = -EIO;
goto err;
}
return 0;
err:
DSSERR("%s(vc %d, cmd 0x%02x) failed\n", __func__, vc, cmd);
return r;
}
static int dsi_vc_generic_read(struct omap_dss_device *dssdev, int vc,
const struct mipi_dsi_msg *msg)
{
struct dsi_data *dsi = to_dsi_data(dssdev);
int r;
r = dsi_vc_send_short(dsi, vc, msg);
if (r)
goto err;
r = dsi_vc_send_bta_sync(dssdev, vc);
if (r)
goto err;
r = dsi_vc_read_rx_fifo(dsi, vc, msg->rx_buf, msg->rx_len,
DSS_DSI_CONTENT_GENERIC);
if (r < 0)
goto err;
if (r != msg->rx_len) {
r = -EIO;
goto err;
}
return 0;
err:
DSSERR("%s(vc %d, reqlen %zu) failed\n", __func__, vc, msg->tx_len);
return r;
}
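/*
 * The helpers below program the LP RX, turn-around, stop-state and HS TX
 * timeout counters. Each counter is 13 bits wide, can be scaled by the
 * x4/x8/x16 prescalers, and counts either DSI functional clock cycles or
 * TxByteClkHS cycles as noted in each function.
 */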
static void dsi_set_lp_rx_timeout(struct dsi_data *dsi, unsigned int ticks,
bool x4, bool x16)
{
unsigned long fck;
unsigned long total_ticks;
u32 r;
BUG_ON(ticks > 0x1fff);
/* ticks in DSI_FCK */
fck = dsi_fclk_rate(dsi);
r = dsi_read_reg(dsi, DSI_TIMING2);
r = FLD_MOD(r, 1, 15, 15); /* LP_RX_TO */
r = FLD_MOD(r, x16 ? 1 : 0, 14, 14); /* LP_RX_TO_X16 */
r = FLD_MOD(r, x4 ? 1 : 0, 13, 13); /* LP_RX_TO_X4 */
r = FLD_MOD(r, ticks, 12, 0); /* LP_RX_COUNTER */
dsi_write_reg(dsi, DSI_TIMING2, r);
total_ticks = ticks * (x16 ? 16 : 1) * (x4 ? 4 : 1);
DSSDBG("LP_RX_TO %lu ticks (%#x%s%s) = %lu ns\n",
total_ticks,
ticks, x4 ? " x4" : "", x16 ? " x16" : "",
(total_ticks * 1000) / (fck / 1000 / 1000));
}
static void dsi_set_ta_timeout(struct dsi_data *dsi, unsigned int ticks,
bool x8, bool x16)
{
unsigned long fck;
unsigned long total_ticks;
u32 r;
BUG_ON(ticks > 0x1fff);
/* ticks in DSI_FCK */
fck = dsi_fclk_rate(dsi);
r = dsi_read_reg(dsi, DSI_TIMING1);
r = FLD_MOD(r, 1, 31, 31); /* TA_TO */
r = FLD_MOD(r, x16 ? 1 : 0, 30, 30); /* TA_TO_X16 */
r = FLD_MOD(r, x8 ? 1 : 0, 29, 29); /* TA_TO_X8 */
r = FLD_MOD(r, ticks, 28, 16); /* TA_TO_COUNTER */
dsi_write_reg(dsi, DSI_TIMING1, r);
total_ticks = ticks * (x16 ? 16 : 1) * (x8 ? 8 : 1);
DSSDBG("TA_TO %lu ticks (%#x%s%s) = %lu ns\n",
total_ticks,
ticks, x8 ? " x8" : "", x16 ? " x16" : "",
(total_ticks * 1000) / (fck / 1000 / 1000));
}
static void dsi_set_stop_state_counter(struct dsi_data *dsi, unsigned int ticks,
bool x4, bool x16)
{
unsigned long fck;
unsigned long total_ticks;
u32 r;
BUG_ON(ticks > 0x1fff);
/* ticks in DSI_FCK */
fck = dsi_fclk_rate(dsi);
r = dsi_read_reg(dsi, DSI_TIMING1);
r = FLD_MOD(r, 1, 15, 15); /* FORCE_TX_STOP_MODE_IO */
r = FLD_MOD(r, x16 ? 1 : 0, 14, 14); /* STOP_STATE_X16_IO */
r = FLD_MOD(r, x4 ? 1 : 0, 13, 13); /* STOP_STATE_X4_IO */
r = FLD_MOD(r, ticks, 12, 0); /* STOP_STATE_COUNTER_IO */
dsi_write_reg(dsi, DSI_TIMING1, r);
total_ticks = ticks * (x16 ? 16 : 1) * (x4 ? 4 : 1);
DSSDBG("STOP_STATE_COUNTER %lu ticks (%#x%s%s) = %lu ns\n",
total_ticks,
ticks, x4 ? " x4" : "", x16 ? " x16" : "",
(total_ticks * 1000) / (fck / 1000 / 1000));
}
static void dsi_set_hs_tx_timeout(struct dsi_data *dsi, unsigned int ticks,
bool x4, bool x16)
{
unsigned long fck;
unsigned long total_ticks;
u32 r;
BUG_ON(ticks > 0x1fff);
/* ticks in TxByteClkHS */
fck = dsi_get_txbyteclkhs(dsi);
r = dsi_read_reg(dsi, DSI_TIMING2);
r = FLD_MOD(r, 1, 31, 31); /* HS_TX_TO */
r = FLD_MOD(r, x16 ? 1 : 0, 30, 30); /* HS_TX_TO_X16 */
r = FLD_MOD(r, x4 ? 1 : 0, 29, 29); /* HS_TX_TO_X8 (4 really) */
r = FLD_MOD(r, ticks, 28, 16); /* HS_TX_TO_COUNTER */
dsi_write_reg(dsi, DSI_TIMING2, r);
total_ticks = ticks * (x16 ? 16 : 1) * (x4 ? 4 : 1);
DSSDBG("HS_TX_TO %lu ticks (%#x%s%s) = %lu ns\n",
total_ticks,
ticks, x4 ? " x4" : "", x16 ? " x16" : "",
(total_ticks * 1000) / (fck / 1000 / 1000));
}
static void dsi_config_vp_num_line_buffers(struct dsi_data *dsi)
{
int num_line_buffers;
if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) {
int bpp = mipi_dsi_pixel_format_to_bpp(dsi->pix_fmt);
const struct videomode *vm = &dsi->vm;
/*
* Don't use line buffers if width is greater than the video
* port's line buffer size
*/
if (dsi->line_buffer_size <= vm->hactive * bpp / 8)
num_line_buffers = 0;
else
num_line_buffers = 2;
} else {
/* Use maximum number of line buffers in command mode */
num_line_buffers = 2;
}
/* LINE_BUFFER */
REG_FLD_MOD(dsi, DSI_CTRL, num_line_buffers, 13, 12);
}
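/*
 * For illustration (hypothetical sizes): with hactive = 1024 and a 24 bpp
 * pixel format a line takes 1024 * 24 / 8 = 3072 bytes. If the video
 * port's line buffer were only 1536 bytes the check above selects 0 line
 * buffers; with a 4096-byte buffer it selects 2.
 */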
static void dsi_config_vp_sync_events(struct dsi_data *dsi)
{
bool sync_end;
u32 r;
if (dsi->vm_timings.trans_mode == OMAP_DSS_DSI_PULSE_MODE)
sync_end = true;
else
sync_end = false;
r = dsi_read_reg(dsi, DSI_CTRL);
r = FLD_MOD(r, 1, 9, 9); /* VP_DE_POL */
r = FLD_MOD(r, 1, 10, 10); /* VP_HSYNC_POL */
r = FLD_MOD(r, 1, 11, 11); /* VP_VSYNC_POL */
r = FLD_MOD(r, 1, 15, 15); /* VP_VSYNC_START */
r = FLD_MOD(r, sync_end, 16, 16); /* VP_VSYNC_END */
r = FLD_MOD(r, 1, 17, 17); /* VP_HSYNC_START */
r = FLD_MOD(r, sync_end, 18, 18); /* VP_HSYNC_END */
dsi_write_reg(dsi, DSI_CTRL, r);
}
static void dsi_config_blanking_modes(struct dsi_data *dsi)
{
int blanking_mode = dsi->vm_timings.blanking_mode;
int hfp_blanking_mode = dsi->vm_timings.hfp_blanking_mode;
int hbp_blanking_mode = dsi->vm_timings.hbp_blanking_mode;
int hsa_blanking_mode = dsi->vm_timings.hsa_blanking_mode;
u32 r;
/*
* 0 = TX FIFO packets sent or LPS in corresponding blanking periods
* 1 = Long blanking packets are sent in corresponding blanking periods
*/
r = dsi_read_reg(dsi, DSI_CTRL);
r = FLD_MOD(r, blanking_mode, 20, 20); /* BLANKING_MODE */
r = FLD_MOD(r, hfp_blanking_mode, 21, 21); /* HFP_BLANKING */
r = FLD_MOD(r, hbp_blanking_mode, 22, 22); /* HBP_BLANKING */
r = FLD_MOD(r, hsa_blanking_mode, 23, 23); /* HSA_BLANKING */
dsi_write_reg(dsi, DSI_CTRL, r);
}
/*
* According to section 'HS Command Mode Interleaving' in OMAP TRM, Scenario 3
* results in maximum transition time for data and clock lanes to enter and
* exit HS mode. Hence, this is the scenario where the least amount of command
* mode data can be interleaved. We program the minimum amount of TXBYTECLKHS
* clock cycles that can be used to interleave command mode data in HS so that
* all scenarios are satisfied.
*/
static int dsi_compute_interleave_hs(int blank, bool ddr_alwon, int enter_hs,
int exit_hs, int exiths_clk, int ddr_pre, int ddr_post)
{
int transition;
/*
* If DDR_CLK_ALWAYS_ON is set, we need to consider HS mode transition
* time of data lanes only, if it isn't set, we need to consider HS
* transition time of both data and clock lanes. HS transition time
* of Scenario 3 is considered.
*/
if (ddr_alwon) {
transition = enter_hs + exit_hs + max(enter_hs, 2) + 1;
} else {
int trans1, trans2;
trans1 = ddr_pre + enter_hs + exit_hs + max(enter_hs, 2) + 1;
trans2 = ddr_pre + enter_hs + exiths_clk + ddr_post + ddr_pre +
enter_hs + 1;
transition = max(trans1, trans2);
}
return blank > transition ? blank - transition : 0;
}
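/*
 * Worked example for dsi_compute_interleave_hs() (hypothetical register
 * contents, all in TXBYTECLKHS): with blank = 48, ddr_alwon = true,
 * enter_hs = 7 and exit_hs = 4, transition = 7 + 4 + max(7, 2) + 1 = 19,
 * so 48 - 19 = 29 byte clocks remain for interleaved command mode data.
 */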
/*
* According to section 'LP Command Mode Interleaving' in OMAP TRM, Scenario 1
* results in maximum transition time for data lanes to enter and exit LP mode.
* Hence, this is the scenario where the least amount of command mode data can
* be interleaved. We program the minimum amount of bytes that can be
* interleaved in LP so that all scenarios are satisfied.
*/
static int dsi_compute_interleave_lp(int blank, int enter_hs, int exit_hs,
int lp_clk_div, int tdsi_fclk)
{
int trans_lp; /* time required for a LP transition, in TXBYTECLKHS */
int tlp_avail; /* time left for interleaving commands, in CLKIN4DDR */
int ttxclkesc; /* period of LP transmit escape clock, in CLKIN4DDR */
int thsbyte_clk = 16; /* Period of TXBYTECLKHS clock, in CLKIN4DDR */
int lp_inter; /* cmd mode data that can be interleaved, in bytes */
/* maximum LP transition time according to Scenario 1 */
trans_lp = exit_hs + max(enter_hs, 2) + 1;
/* CLKIN4DDR = 16 * TXBYTECLKHS */
tlp_avail = thsbyte_clk * (blank - trans_lp);
ttxclkesc = tdsi_fclk * lp_clk_div;
lp_inter = ((tlp_avail - 8 * thsbyte_clk - 5 * tdsi_fclk) / ttxclkesc -
26) / 16;
return max(lp_inter, 0);
}
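/*
 * Worked example for dsi_compute_interleave_lp() (hypothetical values):
 * with blank = 500, enter_hs = 7, exit_hs = 4, lp_clk_div = 6 and
 * tdsi_fclk = 8, trans_lp = 4 + max(7, 2) + 1 = 12, tlp_avail =
 * 16 * (500 - 12) = 7808, ttxclkesc = 8 * 6 = 48 and lp_inter =
 * ((7808 - 128 - 40) / 48 - 26) / 16 = (159 - 26) / 16 = 8 bytes of
 * command mode data per blanking period.
 */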
static void dsi_config_cmd_mode_interleaving(struct dsi_data *dsi)
{
int blanking_mode;
int hfp_blanking_mode, hbp_blanking_mode, hsa_blanking_mode;
int hsa, hfp, hbp, width_bytes, bllp, lp_clk_div;
int ddr_clk_pre, ddr_clk_post, enter_hs_mode_lat, exit_hs_mode_lat;
int tclk_trail, ths_exit, exiths_clk;
bool ddr_alwon;
const struct videomode *vm = &dsi->vm;
int bpp = mipi_dsi_pixel_format_to_bpp(dsi->pix_fmt);
int ndl = dsi->num_lanes_used - 1;
int dsi_fclk_hsdiv = dsi->user_dsi_cinfo.mX[HSDIV_DSI] + 1;
int hsa_interleave_hs = 0, hsa_interleave_lp = 0;
int hfp_interleave_hs = 0, hfp_interleave_lp = 0;
int hbp_interleave_hs = 0, hbp_interleave_lp = 0;
int bl_interleave_hs = 0, bl_interleave_lp = 0;
u32 r;
r = dsi_read_reg(dsi, DSI_CTRL);
blanking_mode = FLD_GET(r, 20, 20);
hfp_blanking_mode = FLD_GET(r, 21, 21);
hbp_blanking_mode = FLD_GET(r, 22, 22);
hsa_blanking_mode = FLD_GET(r, 23, 23);
r = dsi_read_reg(dsi, DSI_VM_TIMING1);
hbp = FLD_GET(r, 11, 0);
hfp = FLD_GET(r, 23, 12);
hsa = FLD_GET(r, 31, 24);
r = dsi_read_reg(dsi, DSI_CLK_TIMING);
ddr_clk_post = FLD_GET(r, 7, 0);
ddr_clk_pre = FLD_GET(r, 15, 8);
r = dsi_read_reg(dsi, DSI_VM_TIMING7);
exit_hs_mode_lat = FLD_GET(r, 15, 0);
enter_hs_mode_lat = FLD_GET(r, 31, 16);
r = dsi_read_reg(dsi, DSI_CLK_CTRL);
lp_clk_div = FLD_GET(r, 12, 0);
ddr_alwon = FLD_GET(r, 13, 13);
r = dsi_read_reg(dsi, DSI_DSIPHY_CFG0);
ths_exit = FLD_GET(r, 7, 0);
r = dsi_read_reg(dsi, DSI_DSIPHY_CFG1);
tclk_trail = FLD_GET(r, 15, 8);
exiths_clk = ths_exit + tclk_trail;
width_bytes = DIV_ROUND_UP(vm->hactive * bpp, 8);
bllp = hbp + hfp + hsa + DIV_ROUND_UP(width_bytes + 6, ndl);
if (!hsa_blanking_mode) {
hsa_interleave_hs = dsi_compute_interleave_hs(hsa, ddr_alwon,
enter_hs_mode_lat, exit_hs_mode_lat,
exiths_clk, ddr_clk_pre, ddr_clk_post);
hsa_interleave_lp = dsi_compute_interleave_lp(hsa,
enter_hs_mode_lat, exit_hs_mode_lat,
lp_clk_div, dsi_fclk_hsdiv);
}
if (!hfp_blanking_mode) {
hfp_interleave_hs = dsi_compute_interleave_hs(hfp, ddr_alwon,
enter_hs_mode_lat, exit_hs_mode_lat,
exiths_clk, ddr_clk_pre, ddr_clk_post);
hfp_interleave_lp = dsi_compute_interleave_lp(hfp,
enter_hs_mode_lat, exit_hs_mode_lat,
lp_clk_div, dsi_fclk_hsdiv);
}
if (!hbp_blanking_mode) {
hbp_interleave_hs = dsi_compute_interleave_hs(hbp, ddr_alwon,
enter_hs_mode_lat, exit_hs_mode_lat,
exiths_clk, ddr_clk_pre, ddr_clk_post);
hbp_interleave_lp = dsi_compute_interleave_lp(hbp,
enter_hs_mode_lat, exit_hs_mode_lat,
lp_clk_div, dsi_fclk_hsdiv);
}
if (!blanking_mode) {
bl_interleave_hs = dsi_compute_interleave_hs(bllp, ddr_alwon,
enter_hs_mode_lat, exit_hs_mode_lat,
exiths_clk, ddr_clk_pre, ddr_clk_post);
bl_interleave_lp = dsi_compute_interleave_lp(bllp,
enter_hs_mode_lat, exit_hs_mode_lat,
lp_clk_div, dsi_fclk_hsdiv);
}
DSSDBG("DSI HS interleaving(TXBYTECLKHS) HSA %d, HFP %d, HBP %d, BLLP %d\n",
hsa_interleave_hs, hfp_interleave_hs, hbp_interleave_hs,
bl_interleave_hs);
DSSDBG("DSI LP interleaving(bytes) HSA %d, HFP %d, HBP %d, BLLP %d\n",
hsa_interleave_lp, hfp_interleave_lp, hbp_interleave_lp,
bl_interleave_lp);
r = dsi_read_reg(dsi, DSI_VM_TIMING4);
r = FLD_MOD(r, hsa_interleave_hs, 23, 16);
r = FLD_MOD(r, hfp_interleave_hs, 15, 8);
r = FLD_MOD(r, hbp_interleave_hs, 7, 0);
dsi_write_reg(dsi, DSI_VM_TIMING4, r);
r = dsi_read_reg(dsi, DSI_VM_TIMING5);
r = FLD_MOD(r, hsa_interleave_lp, 23, 16);
r = FLD_MOD(r, hfp_interleave_lp, 15, 8);
r = FLD_MOD(r, hbp_interleave_lp, 7, 0);
dsi_write_reg(dsi, DSI_VM_TIMING5, r);
r = dsi_read_reg(dsi, DSI_VM_TIMING6);
r = FLD_MOD(r, bl_interleave_hs, 31, 15);
r = FLD_MOD(r, bl_interleave_lp, 16, 0);
dsi_write_reg(dsi, DSI_VM_TIMING6, r);
}
static int dsi_proto_config(struct dsi_data *dsi)
{
u32 r;
int buswidth = 0;
dsi_config_tx_fifo(dsi, DSI_FIFO_SIZE_32,
DSI_FIFO_SIZE_32,
DSI_FIFO_SIZE_32,
DSI_FIFO_SIZE_32);
dsi_config_rx_fifo(dsi, DSI_FIFO_SIZE_32,
DSI_FIFO_SIZE_32,
DSI_FIFO_SIZE_32,
DSI_FIFO_SIZE_32);
/* XXX what values for the timeouts? */
dsi_set_stop_state_counter(dsi, 0x1000, false, false);
dsi_set_ta_timeout(dsi, 0x1fff, true, true);
dsi_set_lp_rx_timeout(dsi, 0x1fff, true, true);
dsi_set_hs_tx_timeout(dsi, 0x1fff, true, true);
switch (mipi_dsi_pixel_format_to_bpp(dsi->pix_fmt)) {
case 16:
buswidth = 0;
break;
case 18:
buswidth = 1;
break;
case 24:
buswidth = 2;
break;
default:
BUG();
return -EINVAL;
}
r = dsi_read_reg(dsi, DSI_CTRL);
r = FLD_MOD(r, 1, 1, 1); /* CS_RX_EN */
r = FLD_MOD(r, 1, 2, 2); /* ECC_RX_EN */
r = FLD_MOD(r, 1, 3, 3); /* TX_FIFO_ARBITRATION */
r = FLD_MOD(r, 1, 4, 4); /* VP_CLK_RATIO, always 1, see errata */
r = FLD_MOD(r, buswidth, 7, 6); /* VP_DATA_BUS_WIDTH */
r = FLD_MOD(r, 0, 8, 8); /* VP_CLK_POL */
r = FLD_MOD(r, 1, 14, 14); /* TRIGGER_RESET_MODE */
r = FLD_MOD(r, 1, 19, 19); /* EOT_ENABLE */
if (!(dsi->data->quirks & DSI_QUIRK_DCS_CMD_CONFIG_VC)) {
r = FLD_MOD(r, 1, 24, 24); /* DCS_CMD_ENABLE */
/* DCS_CMD_CODE, 1=start, 0=continue */
r = FLD_MOD(r, 0, 25, 25);
}
dsi_write_reg(dsi, DSI_CTRL, r);
dsi_config_vp_num_line_buffers(dsi);
if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) {
dsi_config_vp_sync_events(dsi);
dsi_config_blanking_modes(dsi);
dsi_config_cmd_mode_interleaving(dsi);
}
dsi_vc_initial_config(dsi, 0);
dsi_vc_initial_config(dsi, 1);
dsi_vc_initial_config(dsi, 2);
dsi_vc_initial_config(dsi, 3);
return 0;
}
static void dsi_proto_timings(struct dsi_data *dsi)
{
unsigned int tlpx, tclk_zero, tclk_prepare;
unsigned int tclk_pre, tclk_post;
unsigned int ths_prepare, ths_prepare_ths_zero, ths_zero;
unsigned int ths_trail, ths_exit;
unsigned int ddr_clk_pre, ddr_clk_post;
unsigned int enter_hs_mode_lat, exit_hs_mode_lat;
unsigned int ths_eot;
int ndl = dsi->num_lanes_used - 1;
u32 r;
r = dsi_read_reg(dsi, DSI_DSIPHY_CFG0);
ths_prepare = FLD_GET(r, 31, 24);
ths_prepare_ths_zero = FLD_GET(r, 23, 16);
ths_zero = ths_prepare_ths_zero - ths_prepare;
ths_trail = FLD_GET(r, 15, 8);
ths_exit = FLD_GET(r, 7, 0);
r = dsi_read_reg(dsi, DSI_DSIPHY_CFG1);
tlpx = FLD_GET(r, 20, 16) * 2;
tclk_zero = FLD_GET(r, 7, 0);
r = dsi_read_reg(dsi, DSI_DSIPHY_CFG2);
tclk_prepare = FLD_GET(r, 7, 0);
/* min 8*UI */
tclk_pre = 20;
/* min 60ns + 52*UI */
tclk_post = ns2ddr(dsi, 60) + 26;
ths_eot = DIV_ROUND_UP(4, ndl);
ddr_clk_pre = DIV_ROUND_UP(tclk_pre + tlpx + tclk_zero + tclk_prepare,
4);
ddr_clk_post = DIV_ROUND_UP(tclk_post + ths_trail, 4) + ths_eot;
BUG_ON(ddr_clk_pre == 0 || ddr_clk_pre > 255);
BUG_ON(ddr_clk_post == 0 || ddr_clk_post > 255);
r = dsi_read_reg(dsi, DSI_CLK_TIMING);
r = FLD_MOD(r, ddr_clk_pre, 15, 8);
r = FLD_MOD(r, ddr_clk_post, 7, 0);
dsi_write_reg(dsi, DSI_CLK_TIMING, r);
DSSDBG("ddr_clk_pre %u, ddr_clk_post %u\n",
ddr_clk_pre,
ddr_clk_post);
enter_hs_mode_lat = 1 + DIV_ROUND_UP(tlpx, 4) +
DIV_ROUND_UP(ths_prepare, 4) +
DIV_ROUND_UP(ths_zero + 3, 4);
exit_hs_mode_lat = DIV_ROUND_UP(ths_trail + ths_exit, 4) + 1 + ths_eot;
r = FLD_VAL(enter_hs_mode_lat, 31, 16) |
FLD_VAL(exit_hs_mode_lat, 15, 0);
dsi_write_reg(dsi, DSI_VM_TIMING7, r);
DSSDBG("enter_hs_mode_lat %u, exit_hs_mode_lat %u\n",
enter_hs_mode_lat, exit_hs_mode_lat);
if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) {
/* TODO: Implement a video mode check_timings function */
int hsa = dsi->vm_timings.hsa;
int hfp = dsi->vm_timings.hfp;
int hbp = dsi->vm_timings.hbp;
int vsa = dsi->vm_timings.vsa;
int vfp = dsi->vm_timings.vfp;
int vbp = dsi->vm_timings.vbp;
int window_sync = dsi->vm_timings.window_sync;
bool hsync_end;
const struct videomode *vm = &dsi->vm;
int bpp = mipi_dsi_pixel_format_to_bpp(dsi->pix_fmt);
int tl, t_he, width_bytes;
hsync_end = dsi->vm_timings.trans_mode == OMAP_DSS_DSI_PULSE_MODE;
t_he = hsync_end ?
((hsa == 0 && ndl == 3) ? 1 : DIV_ROUND_UP(4, ndl)) : 0;
width_bytes = DIV_ROUND_UP(vm->hactive * bpp, 8);
/* TL = t_HS + HSA + t_HE + HFP + ceil((WC + 6) / NDL) + HBP */
tl = DIV_ROUND_UP(4, ndl) + (hsync_end ? hsa : 0) + t_he + hfp +
DIV_ROUND_UP(width_bytes + 6, ndl) + hbp;
DSSDBG("HBP: %d, HFP: %d, HSA: %d, TL: %d TXBYTECLKHS\n", hbp,
hfp, hsync_end ? hsa : 0, tl);
DSSDBG("VBP: %d, VFP: %d, VSA: %d, VACT: %d lines\n", vbp, vfp,
vsa, vm->vactive);
r = dsi_read_reg(dsi, DSI_VM_TIMING1);
r = FLD_MOD(r, hbp, 11, 0); /* HBP */
r = FLD_MOD(r, hfp, 23, 12); /* HFP */
r = FLD_MOD(r, hsync_end ? hsa : 0, 31, 24); /* HSA */
dsi_write_reg(dsi, DSI_VM_TIMING1, r);
r = dsi_read_reg(dsi, DSI_VM_TIMING2);
r = FLD_MOD(r, vbp, 7, 0); /* VBP */
r = FLD_MOD(r, vfp, 15, 8); /* VFP */
r = FLD_MOD(r, vsa, 23, 16); /* VSA */
r = FLD_MOD(r, window_sync, 27, 24); /* WINDOW_SYNC */
dsi_write_reg(dsi, DSI_VM_TIMING2, r);
r = dsi_read_reg(dsi, DSI_VM_TIMING3);
r = FLD_MOD(r, vm->vactive, 14, 0); /* VACT */
r = FLD_MOD(r, tl, 31, 16); /* TL */
dsi_write_reg(dsi, DSI_VM_TIMING3, r);
}
}
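/*
 * Worked example for the TL computation above (hypothetical timings):
 * with ndl = 2 data lanes, hactive = 800 at 24 bpp (width_bytes = 2400),
 * pulse mode (hsync_end = true), hsa = 10, hfp = 48 and hbp = 32:
 * t_HS = DIV_ROUND_UP(4, 2) = 2, t_HE = 2, and
 * TL = 2 + 10 + 2 + 48 + DIV_ROUND_UP(2406, 2) + 32 = 1297 TXBYTECLKHS
 * per line.
 */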
static int dsi_configure_pins(struct dsi_data *dsi,
int num_pins, const u32 *pins)
{
struct dsi_lane_config lanes[DSI_MAX_NR_LANES];
int num_lanes;
int i;
static const enum dsi_lane_function functions[] = {
DSI_LANE_CLK,
DSI_LANE_DATA1,
DSI_LANE_DATA2,
DSI_LANE_DATA3,
DSI_LANE_DATA4,
};
if (num_pins < 4 || num_pins > dsi->num_lanes_supported * 2
|| num_pins % 2 != 0)
return -EINVAL;
for (i = 0; i < DSI_MAX_NR_LANES; ++i)
lanes[i].function = DSI_LANE_UNUSED;
num_lanes = 0;
for (i = 0; i < num_pins; i += 2) {
u8 lane, pol;
u32 dx, dy;
dx = pins[i];
dy = pins[i + 1];
if (dx >= dsi->num_lanes_supported * 2)
return -EINVAL;
if (dy >= dsi->num_lanes_supported * 2)
return -EINVAL;
if (dx & 1) {
if (dy != dx - 1)
return -EINVAL;
pol = 1;
} else {
if (dy != dx + 1)
return -EINVAL;
pol = 0;
}
lane = dx / 2;
lanes[lane].function = functions[i / 2];
lanes[lane].polarity = pol;
num_lanes++;
}
memcpy(dsi->lanes, lanes, sizeof(dsi->lanes));
dsi->num_lanes_used = num_lanes;
return 0;
}
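/*
 * Illustrative use of dsi_configure_pins() (hypothetical pin list): each
 * (dx, dy) pair describes the positive/negative pads of one lane; an even
 * dx means normal polarity, an odd dx means the pads are swapped. For
 * example
 *
 *	static const u32 pins[] = { 0, 1, 2, 3, 4, 5 };
 *	dsi_configure_pins(dsi, ARRAY_SIZE(pins), pins);
 *
 * configures lane 0 as the clock lane and lanes 1-2 as DATA1/DATA2, all
 * with normal polarity, while a leading pair of { 1, 0 } would mark the
 * clock lane as polarity-swapped.
 */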
static int dsi_enable_video_mode(struct dsi_data *dsi, int vc)
{
int bpp = mipi_dsi_pixel_format_to_bpp(dsi->pix_fmt);
u8 data_type;
u16 word_count;
switch (dsi->pix_fmt) {
case MIPI_DSI_FMT_RGB888:
data_type = MIPI_DSI_PACKED_PIXEL_STREAM_24;
break;
case MIPI_DSI_FMT_RGB666:
data_type = MIPI_DSI_PIXEL_STREAM_3BYTE_18;
break;
case MIPI_DSI_FMT_RGB666_PACKED:
data_type = MIPI_DSI_PACKED_PIXEL_STREAM_18;
break;
case MIPI_DSI_FMT_RGB565:
data_type = MIPI_DSI_PACKED_PIXEL_STREAM_16;
break;
default:
return -EINVAL;
}
dsi_if_enable(dsi, false);
dsi_vc_enable(dsi, vc, false);
/* MODE, 1 = video mode */
REG_FLD_MOD(dsi, DSI_VC_CTRL(vc), 1, 4, 4);
word_count = DIV_ROUND_UP(dsi->vm.hactive * bpp, 8);
dsi_vc_write_long_header(dsi, vc, dsi->dsidev->channel, data_type,
word_count, 0);
dsi_vc_enable(dsi, vc, true);
dsi_if_enable(dsi, true);
return 0;
}
static void dsi_disable_video_mode(struct dsi_data *dsi, int vc)
{
dsi_if_enable(dsi, false);
dsi_vc_enable(dsi, vc, false);
/* MODE, 0 = command mode */
REG_FLD_MOD(dsi, DSI_VC_CTRL(vc), 0, 4, 4);
dsi_vc_enable(dsi, vc, true);
dsi_if_enable(dsi, true);
}
static void dsi_enable_video_output(struct omap_dss_device *dssdev, int vc)
{
struct dsi_data *dsi = to_dsi_data(dssdev);
int r;
r = dsi_init_dispc(dsi);
if (r) {
dev_err(dsi->dev, "failed to init dispc!\n");
return;
}
if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) {
r = dsi_enable_video_mode(dsi, vc);
if (r)
goto err_video_mode;
}
r = dss_mgr_enable(&dsi->output);
if (r)
goto err_mgr_enable;
return;
err_mgr_enable:
if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) {
dsi_if_enable(dsi, false);
dsi_vc_enable(dsi, vc, false);
}
err_video_mode:
dsi_uninit_dispc(dsi);
dev_err(dsi->dev, "failed to enable DSI encoder!\n");
return;
}
static void dsi_disable_video_output(struct omap_dss_device *dssdev, int vc)
{
struct dsi_data *dsi = to_dsi_data(dssdev);
if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE)
dsi_disable_video_mode(dsi, vc);
dss_mgr_disable(&dsi->output);
dsi_uninit_dispc(dsi);
}
static void dsi_update_screen_dispc(struct dsi_data *dsi)
{
unsigned int bytespp;
unsigned int bytespl;
unsigned int bytespf;
unsigned int total_len;
unsigned int packet_payload;
unsigned int packet_len;
u32 l;
int r;
const unsigned vc = dsi->update_vc;
const unsigned int line_buf_size = dsi->line_buffer_size;
u16 w = dsi->vm.hactive;
u16 h = dsi->vm.vactive;
DSSDBG("dsi_update_screen_dispc(%dx%d)\n", w, h);
bytespp = mipi_dsi_pixel_format_to_bpp(dsi->pix_fmt) / 8;
bytespl = w * bytespp;
bytespf = bytespl * h;
/* NOTE: packet_payload has to be equal to N * bytespl, where N is
* number of lines in a packet. See errata about VP_CLK_RATIO */
if (bytespf < line_buf_size)
packet_payload = bytespf;
else
packet_payload = line_buf_size / bytespl * bytespl;
packet_len = packet_payload + 1; /* 1 byte for DCS cmd */
total_len = (bytespf / packet_payload) * packet_len;
if (bytespf % packet_payload)
total_len += (bytespf % packet_payload) + 1;
l = FLD_VAL(total_len, 23, 0); /* TE_SIZE */
dsi_write_reg(dsi, DSI_VC_TE(vc), l);
dsi_vc_write_long_header(dsi, vc, dsi->dsidev->channel, MIPI_DSI_DCS_LONG_WRITE,
packet_len, 0);
if (dsi->te_enabled)
l = FLD_MOD(l, 1, 30, 30); /* TE_EN */
else
l = FLD_MOD(l, 1, 31, 31); /* TE_START */
dsi_write_reg(dsi, DSI_VC_TE(vc), l);
/* We put SIDLEMODE to no-idle for the duration of the transfer,
* because DSS interrupts are not capable of waking up the CPU and the
* framedone interrupt could be delayed for quite a long time. I think
* the same goes for any DSS interrupts, but for some reason I have not
* seen the problem anywhere else than here.
*/
dispc_disable_sidle(dsi->dss->dispc);
dsi_perf_mark_start(dsi);
r = schedule_delayed_work(&dsi->framedone_timeout_work,
msecs_to_jiffies(250));
BUG_ON(r == 0);
dss_mgr_start_update(&dsi->output);
if (dsi->te_enabled) {
/* disable LP_RX_TO, so that we can receive TE. Time to wait
* for TE is longer than the timer allows */
REG_FLD_MOD(dsi, DSI_TIMING2, 0, 15, 15); /* LP_RX_TO */
dsi_vc_send_bta(dsi, vc);
#ifdef DSI_CATCH_MISSING_TE
mod_timer(&dsi->te_timer, jiffies + msecs_to_jiffies(250));
#endif
}
}
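/*
 * Worked example for the packet split above (hypothetical panel and line
 * buffer): a 480x800 RGB888 update has bytespl = 1440 and bytespf =
 * 1152000. With a 4096-byte line buffer, packet_payload =
 * 4096 / 1440 * 1440 = 2880 (two full lines), packet_len = 2881 including
 * the DCS command byte, and total_len = 1152000 / 2880 * 2881 = 1152400
 * bytes programmed into TE_SIZE.
 */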
#ifdef DSI_CATCH_MISSING_TE
static void dsi_te_timeout(struct timer_list *unused)
{
DSSERR("TE not received for 250ms!\n");
}
#endif
static void dsi_handle_framedone(struct dsi_data *dsi, int error)
{
/* SIDLEMODE back to smart-idle */
dispc_enable_sidle(dsi->dss->dispc);
if (dsi->te_enabled) {
/* enable LP_RX_TO again after the TE */
REG_FLD_MOD(dsi, DSI_TIMING2, 1, 15, 15); /* LP_RX_TO */
}
dsi_bus_unlock(dsi);
if (!error)
dsi_perf_show(dsi, "DISPC");
}
static void dsi_framedone_timeout_work_callback(struct work_struct *work)
{
struct dsi_data *dsi = container_of(work, struct dsi_data,
framedone_timeout_work.work);
/* XXX While extremely unlikely, we could get FRAMEDONE interrupt after
* 250ms which would conflict with this timeout work. What should be
* done is first cancel the transfer on the HW, and then cancel the
* possibly scheduled framedone work. However, cancelling the transfer
* on the HW is buggy, and would probably require resetting the whole
* DSI */
DSSERR("Framedone not received for 250ms!\n");
dsi_handle_framedone(dsi, -ETIMEDOUT);
}
static void dsi_framedone_irq_callback(void *data)
{
struct dsi_data *dsi = data;
/* Note: We get FRAMEDONE when DISPC has finished sending pixels and
* turns itself off. However, DSI still has the pixels in its buffers,
* and is sending the data.
*/
cancel_delayed_work(&dsi->framedone_timeout_work);
DSSDBG("Framedone received!\n");
dsi_handle_framedone(dsi, 0);
}
static int _dsi_update(struct dsi_data *dsi)
{
dsi_perf_mark_setup(dsi);
#ifdef DSI_PERF_MEASURE
dsi->update_bytes = dsi->vm.hactive * dsi->vm.vactive *
mipi_dsi_pixel_format_to_bpp(dsi->pix_fmt) / 8;
#endif
dsi_update_screen_dispc(dsi);
return 0;
}
static int _dsi_send_nop(struct dsi_data *dsi, int vc, int channel)
{
const u8 payload[] = { MIPI_DCS_NOP };
const struct mipi_dsi_msg msg = {
.channel = channel,
.type = MIPI_DSI_DCS_SHORT_WRITE,
.tx_len = 1,
.tx_buf = payload,
};
WARN_ON(!dsi_bus_is_locked(dsi));
return _omap_dsi_host_transfer(dsi, vc, &msg);
}
static int dsi_update_channel(struct omap_dss_device *dssdev, int vc)
{
struct dsi_data *dsi = to_dsi_data(dssdev);
int r;
dsi_bus_lock(dsi);
if (!dsi->video_enabled) {
r = -EIO;
goto err;
}
if (dsi->vm.hactive == 0 || dsi->vm.vactive == 0) {
r = -EINVAL;
goto err;
}
DSSDBG("dsi_update_channel: %d\n", vc);
/*
* Send NOP between the frames. If we don't send something here, the
* updates stop working. This is probably related to DSI spec stating
* that the DSI host should transition to LP at least once per frame.
*/
r = _dsi_send_nop(dsi, VC_CMD, dsi->dsidev->channel);
if (r < 0) {
DSSWARN("failed to send nop between frames: %d\n", r);
goto err;
}
dsi->update_vc = vc;
if (dsi->te_enabled && dsi->te_gpio) {
schedule_delayed_work(&dsi->te_timeout_work,
msecs_to_jiffies(250));
atomic_set(&dsi->do_ext_te_update, 1);
} else {
_dsi_update(dsi);
}
return 0;
err:
dsi_bus_unlock(dsi);
return r;
}
static int dsi_update_all(struct omap_dss_device *dssdev)
{
return dsi_update_channel(dssdev, VC_VIDEO);
}
/* Display funcs */
static int dsi_configure_dispc_clocks(struct dsi_data *dsi)
{
struct dispc_clock_info dispc_cinfo;
int r;
unsigned long fck;
fck = dsi_get_pll_hsdiv_dispc_rate(dsi);
dispc_cinfo.lck_div = dsi->user_dispc_cinfo.lck_div;
dispc_cinfo.pck_div = dsi->user_dispc_cinfo.pck_div;
r = dispc_calc_clock_rates(dsi->dss->dispc, fck, &dispc_cinfo);
if (r) {
DSSERR("Failed to calc dispc clocks\n");
return r;
}
dsi->mgr_config.clock_info = dispc_cinfo;
return 0;
}
static int dsi_init_dispc(struct dsi_data *dsi)
{
enum omap_channel dispc_channel = dsi->output.dispc_channel;
int r;
dss_select_lcd_clk_source(dsi->dss, dispc_channel, dsi->module_id == 0 ?
DSS_CLK_SRC_PLL1_1 :
DSS_CLK_SRC_PLL2_1);
if (dsi->mode == OMAP_DSS_DSI_CMD_MODE) {
r = dss_mgr_register_framedone_handler(&dsi->output,
dsi_framedone_irq_callback, dsi);
if (r) {
DSSERR("can't register FRAMEDONE handler\n");
goto err;
}
dsi->mgr_config.stallmode = true;
dsi->mgr_config.fifohandcheck = true;
} else {
dsi->mgr_config.stallmode = false;
dsi->mgr_config.fifohandcheck = false;
}
r = dsi_configure_dispc_clocks(dsi);
if (r)
goto err1;
dsi->mgr_config.io_pad_mode = DSS_IO_PAD_MODE_BYPASS;
dsi->mgr_config.video_port_width =
mipi_dsi_pixel_format_to_bpp(dsi->pix_fmt);
dsi->mgr_config.lcden_sig_polarity = 0;
dss_mgr_set_lcd_config(&dsi->output, &dsi->mgr_config);
return 0;
err1:
if (dsi->mode == OMAP_DSS_DSI_CMD_MODE)
dss_mgr_unregister_framedone_handler(&dsi->output,
dsi_framedone_irq_callback, dsi);
err:
dss_select_lcd_clk_source(dsi->dss, dispc_channel, DSS_CLK_SRC_FCK);
return r;
}
static void dsi_uninit_dispc(struct dsi_data *dsi)
{
enum omap_channel dispc_channel = dsi->output.dispc_channel;
if (dsi->mode == OMAP_DSS_DSI_CMD_MODE)
dss_mgr_unregister_framedone_handler(&dsi->output,
dsi_framedone_irq_callback, dsi);
dss_select_lcd_clk_source(dsi->dss, dispc_channel, DSS_CLK_SRC_FCK);
}
static int dsi_configure_dsi_clocks(struct dsi_data *dsi)
{
struct dss_pll_clock_info cinfo;
int r;
cinfo = dsi->user_dsi_cinfo;
r = dss_pll_set_config(&dsi->pll, &cinfo);
if (r) {
DSSERR("Failed to set dsi clocks\n");
return r;
}
return 0;
}
static void dsi_setup_dsi_vcs(struct dsi_data *dsi)
{
/* Setup VC_CMD for LP and cpu transfers */
REG_FLD_MOD(dsi, DSI_VC_CTRL(VC_CMD), 0, 9, 9); /* LP */
REG_FLD_MOD(dsi, DSI_VC_CTRL(VC_CMD), 0, 1, 1); /* SOURCE_L4 */
dsi->vc[VC_CMD].source = DSI_VC_SOURCE_L4;
/* Setup VC_VIDEO for HS and dispc transfers */
REG_FLD_MOD(dsi, DSI_VC_CTRL(VC_VIDEO), 1, 9, 9); /* HS */
REG_FLD_MOD(dsi, DSI_VC_CTRL(VC_VIDEO), 1, 1, 1); /* SOURCE_VP */
dsi->vc[VC_VIDEO].source = DSI_VC_SOURCE_VP;
if ((dsi->data->quirks & DSI_QUIRK_DCS_CMD_CONFIG_VC) &&
!(dsi->dsidev->mode_flags & MIPI_DSI_MODE_VIDEO))
REG_FLD_MOD(dsi, DSI_VC_CTRL(VC_VIDEO), 1, 30, 30); /* DCS_CMD_ENABLE */
dsi_vc_enable(dsi, VC_CMD, 1);
dsi_vc_enable(dsi, VC_VIDEO, 1);
dsi_if_enable(dsi, 1);
dsi_force_tx_stop_mode_io(dsi);
/* start the DDR clock by sending a NULL packet */
if (!(dsi->dsidev->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS))
dsi_vc_send_null(dsi, VC_CMD, dsi->dsidev->channel);
}
static int dsi_init_dsi(struct dsi_data *dsi)
{
int r;
r = dss_pll_enable(&dsi->pll);
if (r)
return r;
r = dsi_configure_dsi_clocks(dsi);
if (r)
goto err0;
dss_select_dsi_clk_source(dsi->dss, dsi->module_id,
dsi->module_id == 0 ?
DSS_CLK_SRC_PLL1_2 : DSS_CLK_SRC_PLL2_2);
DSSDBG("PLL OK\n");
if (!dsi->vdds_dsi_enabled) {
r = regulator_enable(dsi->vdds_dsi_reg);
if (r)
goto err1;
dsi->vdds_dsi_enabled = true;
}
r = dsi_cio_init(dsi);
if (r)
goto err2;
_dsi_print_reset_status(dsi);
dsi_proto_timings(dsi);
dsi_set_lp_clk_divisor(dsi);
if (1)
_dsi_print_reset_status(dsi);
r = dsi_proto_config(dsi);
if (r)
goto err3;
dsi_setup_dsi_vcs(dsi);
return 0;
err3:
dsi_cio_uninit(dsi);
err2:
regulator_disable(dsi->vdds_dsi_reg);
dsi->vdds_dsi_enabled = false;
err1:
dss_select_dsi_clk_source(dsi->dss, dsi->module_id, DSS_CLK_SRC_FCK);
err0:
dss_pll_disable(&dsi->pll);
return r;
}
static void dsi_uninit_dsi(struct dsi_data *dsi)
{
/* disable interface */
dsi_if_enable(dsi, 0);
dsi_vc_enable(dsi, 0, 0);
dsi_vc_enable(dsi, 1, 0);
dsi_vc_enable(dsi, 2, 0);
dsi_vc_enable(dsi, 3, 0);
dss_select_dsi_clk_source(dsi->dss, dsi->module_id, DSS_CLK_SRC_FCK);
dsi_cio_uninit(dsi);
dss_pll_disable(&dsi->pll);
regulator_disable(dsi->vdds_dsi_reg);
dsi->vdds_dsi_enabled = false;
}
static void dsi_enable(struct dsi_data *dsi)
{
int r;
WARN_ON(!dsi_bus_is_locked(dsi));
if (WARN_ON(dsi->iface_enabled))
return;
mutex_lock(&dsi->lock);
r = dsi_runtime_get(dsi);
if (r)
goto err_get_dsi;
_dsi_initialize_irq(dsi);
r = dsi_init_dsi(dsi);
if (r)
goto err_init_dsi;
dsi->iface_enabled = true;
mutex_unlock(&dsi->lock);
return;
err_init_dsi:
dsi_runtime_put(dsi);
err_get_dsi:
mutex_unlock(&dsi->lock);
DSSDBG("dsi_enable FAILED\n");
}
static void dsi_disable(struct dsi_data *dsi)
{
WARN_ON(!dsi_bus_is_locked(dsi));
if (WARN_ON(!dsi->iface_enabled))
return;
mutex_lock(&dsi->lock);
dsi_sync_vc(dsi, 0);
dsi_sync_vc(dsi, 1);
dsi_sync_vc(dsi, 2);
dsi_sync_vc(dsi, 3);
dsi_uninit_dsi(dsi);
dsi_runtime_put(dsi);
dsi->iface_enabled = false;
mutex_unlock(&dsi->lock);
}
static int dsi_enable_te(struct dsi_data *dsi, bool enable)
{
dsi->te_enabled = enable;
if (dsi->te_gpio) {
if (enable)
enable_irq(dsi->te_irq);
else
disable_irq(dsi->te_irq);
}
return 0;
}
#ifdef PRINT_VERBOSE_VM_TIMINGS
static void print_dsi_vm(const char *str,
const struct omap_dss_dsi_videomode_timings *t)
{
unsigned long byteclk = t->hsclk / 4;
int bl, wc, pps, tot;
wc = DIV_ROUND_UP(t->hact * t->bitspp, 8);
pps = DIV_ROUND_UP(wc + 6, t->ndl); /* pixel packet size */
bl = t->hss + t->hsa + t->hse + t->hbp + t->hfp;
tot = bl + pps;
#define TO_DSI_T(x) ((u32)div64_u64((u64)x * 1000000000llu, byteclk))
pr_debug("%s bck %lu, %u/%u/%u/%u/%u/%u = %u+%u = %u, "
"%u/%u/%u/%u/%u/%u = %u + %u = %u\n",
str,
byteclk,
t->hss, t->hsa, t->hse, t->hbp, pps, t->hfp,
bl, pps, tot,
TO_DSI_T(t->hss),
TO_DSI_T(t->hsa),
TO_DSI_T(t->hse),
TO_DSI_T(t->hbp),
TO_DSI_T(pps),
TO_DSI_T(t->hfp),
TO_DSI_T(bl),
TO_DSI_T(pps),
TO_DSI_T(tot));
#undef TO_DSI_T
}
static void print_dispc_vm(const char *str, const struct videomode *vm)
{
unsigned long pck = vm->pixelclock;
int hact, bl, tot;
hact = vm->hactive;
bl = vm->hsync_len + vm->hback_porch + vm->hfront_porch;
tot = hact + bl;
#define TO_DISPC_T(x) ((u32)div64_u64((u64)x * 1000000000llu, pck))
pr_debug("%s pck %lu, %u/%u/%u/%u = %u+%u = %u, "
"%u/%u/%u/%u = %u + %u = %u\n",
str,
pck,
vm->hsync_len, vm->hback_porch, hact, vm->hfront_porch,
bl, hact, tot,
TO_DISPC_T(vm->hsync_len),
TO_DISPC_T(vm->hback_porch),
TO_DISPC_T(hact),
TO_DISPC_T(vm->hfront_porch),
TO_DISPC_T(bl),
TO_DISPC_T(hact),
TO_DISPC_T(tot));
#undef TO_DISPC_T
}
/* note: this is not quite accurate */
static void print_dsi_dispc_vm(const char *str,
const struct omap_dss_dsi_videomode_timings *t)
{
struct videomode vm = { 0 };
unsigned long byteclk = t->hsclk / 4;
unsigned long pck;
u64 dsi_tput;
int dsi_hact, dsi_htot;
dsi_tput = (u64)byteclk * t->ndl * 8;
pck = (u32)div64_u64(dsi_tput, t->bitspp);
dsi_hact = DIV_ROUND_UP(DIV_ROUND_UP(t->hact * t->bitspp, 8) + 6, t->ndl);
dsi_htot = t->hss + t->hsa + t->hse + t->hbp + dsi_hact + t->hfp;
vm.pixelclock = pck;
vm.hsync_len = div64_u64((u64)(t->hsa + t->hse) * pck, byteclk);
vm.hback_porch = div64_u64((u64)t->hbp * pck, byteclk);
vm.hfront_porch = div64_u64((u64)t->hfp * pck, byteclk);
vm.hactive = t->hact;
print_dispc_vm(str, &vm);
}
#endif /* PRINT_VERBOSE_VM_TIMINGS */
static bool dsi_cm_calc_dispc_cb(int lckd, int pckd, unsigned long lck,
unsigned long pck, void *data)
{
struct dsi_clk_calc_ctx *ctx = data;
struct videomode *vm = &ctx->vm;
ctx->dispc_cinfo.lck_div = lckd;
ctx->dispc_cinfo.pck_div = pckd;
ctx->dispc_cinfo.lck = lck;
ctx->dispc_cinfo.pck = pck;
*vm = *ctx->config->vm;
vm->pixelclock = pck;
vm->hactive = ctx->config->vm->hactive;
vm->vactive = ctx->config->vm->vactive;
vm->hsync_len = vm->hfront_porch = vm->hback_porch = vm->vsync_len = 1;
vm->vfront_porch = vm->vback_porch = 0;
return true;
}
static bool dsi_cm_calc_hsdiv_cb(int m_dispc, unsigned long dispc,
void *data)
{
struct dsi_clk_calc_ctx *ctx = data;
ctx->dsi_cinfo.mX[HSDIV_DISPC] = m_dispc;
ctx->dsi_cinfo.clkout[HSDIV_DISPC] = dispc;
return dispc_div_calc(ctx->dsi->dss->dispc, dispc,
ctx->req_pck_min, ctx->req_pck_max,
dsi_cm_calc_dispc_cb, ctx);
}
static bool dsi_cm_calc_pll_cb(int n, int m, unsigned long fint,
unsigned long clkdco, void *data)
{
struct dsi_clk_calc_ctx *ctx = data;
struct dsi_data *dsi = ctx->dsi;
ctx->dsi_cinfo.n = n;
ctx->dsi_cinfo.m = m;
ctx->dsi_cinfo.fint = fint;
ctx->dsi_cinfo.clkdco = clkdco;
return dss_pll_hsdiv_calc_a(ctx->pll, clkdco, ctx->req_pck_min,
dsi->data->max_fck_freq,
dsi_cm_calc_hsdiv_cb, ctx);
}
static bool dsi_cm_calc(struct dsi_data *dsi,
const struct omap_dss_dsi_config *cfg,
struct dsi_clk_calc_ctx *ctx)
{
unsigned long clkin;
int bitspp, ndl;
unsigned long pll_min, pll_max;
unsigned long pck, txbyteclk;
clkin = clk_get_rate(dsi->pll.clkin);
bitspp = mipi_dsi_pixel_format_to_bpp(cfg->pixel_format);
ndl = dsi->num_lanes_used - 1;
/*
* Here we should calculate minimum txbyteclk to be able to send the
* frame in time, and also to handle TE. That's not very simple, though,
* especially as we go to LP between each pixel packet due to HW
* "feature". So let's just estimate very roughly and multiply by 1.5.
*/
pck = cfg->vm->pixelclock;
pck = pck * 3 / 2;
txbyteclk = pck * bitspp / 8 / ndl;
memset(ctx, 0, sizeof(*ctx));
ctx->dsi = dsi;
ctx->pll = &dsi->pll;
ctx->config = cfg;
ctx->req_pck_min = pck;
ctx->req_pck_nom = pck;
ctx->req_pck_max = pck * 3 / 2;
pll_min = max(cfg->hs_clk_min * 4, txbyteclk * 4 * 4);
pll_max = cfg->hs_clk_max * 4;
return dss_pll_calc_a(ctx->pll, clkin,
pll_min, pll_max,
dsi_cm_calc_pll_cb, ctx);
}
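/*
 * Worked example for the rough command mode clock estimate above
 * (hypothetical panel): with a 25 MHz pixel clock, 24 bpp and ndl = 2
 * data lanes, pck becomes 37.5 MHz and txbyteclk = 37.5 MHz * 24 / 8 / 2 =
 * 56.25 MHz, so pll_min is at least txbyteclk * 4 * 4 = 900 MHz unless
 * the panel's hs_clk_min * 4 is higher.
 */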
static bool dsi_vm_calc_blanking(struct dsi_clk_calc_ctx *ctx)
{
struct dsi_data *dsi = ctx->dsi;
const struct omap_dss_dsi_config *cfg = ctx->config;
int bitspp = mipi_dsi_pixel_format_to_bpp(cfg->pixel_format);
int ndl = dsi->num_lanes_used - 1;
unsigned long hsclk = ctx->dsi_cinfo.clkdco / 4;
unsigned long byteclk = hsclk / 4;
unsigned long dispc_pck, req_pck_min, req_pck_nom, req_pck_max;
int xres;
int panel_htot, panel_hbl; /* pixels */
int dispc_htot, dispc_hbl; /* pixels */
int dsi_htot, dsi_hact, dsi_hbl, hss, hse; /* byteclks */
int hfp, hsa, hbp;
const struct videomode *req_vm;
struct videomode *dispc_vm;
struct omap_dss_dsi_videomode_timings *dsi_vm;
u64 dsi_tput, dispc_tput;
dsi_tput = (u64)byteclk * ndl * 8;
req_vm = cfg->vm;
req_pck_min = ctx->req_pck_min;
req_pck_max = ctx->req_pck_max;
req_pck_nom = ctx->req_pck_nom;
dispc_pck = ctx->dispc_cinfo.pck;
dispc_tput = (u64)dispc_pck * bitspp;
xres = req_vm->hactive;
panel_hbl = req_vm->hfront_porch + req_vm->hback_porch +
req_vm->hsync_len;
panel_htot = xres + panel_hbl;
dsi_hact = DIV_ROUND_UP(DIV_ROUND_UP(xres * bitspp, 8) + 6, ndl);
/*
* When there are no line buffers, DISPC and DSI must have the
* same tput. Otherwise DISPC tput needs to be higher than DSI's.
*/
if (dsi->line_buffer_size < xres * bitspp / 8) {
if (dispc_tput != dsi_tput)
return false;
} else {
if (dispc_tput < dsi_tput)
return false;
}
/* DSI tput must be over the min requirement */
if (dsi_tput < (u64)bitspp * req_pck_min)
return false;
/* When non-burst mode, DSI tput must be below max requirement. */
if (cfg->trans_mode != OMAP_DSS_DSI_BURST_MODE) {
if (dsi_tput > (u64)bitspp * req_pck_max)
return false;
}
hss = DIV_ROUND_UP(4, ndl);
if (cfg->trans_mode == OMAP_DSS_DSI_PULSE_MODE) {
if (ndl == 3 && req_vm->hsync_len == 0)
hse = 1;
else
hse = DIV_ROUND_UP(4, ndl);
} else {
hse = 0;
}
/* DSI htot to match the panel's nominal pck */
dsi_htot = div64_u64((u64)panel_htot * byteclk, req_pck_nom);
/* fail if there would be no time for blanking */
if (dsi_htot < hss + hse + dsi_hact)
return false;
/* total DSI blanking needed to achieve panel's TL */
dsi_hbl = dsi_htot - dsi_hact;
/* DISPC htot to match the DSI TL */
dispc_htot = div64_u64((u64)dsi_htot * dispc_pck, byteclk);
/* verify that the DSI and DISPC TLs are the same */
if ((u64)dsi_htot * dispc_pck != (u64)dispc_htot * byteclk)
return false;
dispc_hbl = dispc_htot - xres;
/* setup DSI videomode */
dsi_vm = &ctx->dsi_vm;
memset(dsi_vm, 0, sizeof(*dsi_vm));
dsi_vm->hsclk = hsclk;
dsi_vm->ndl = ndl;
dsi_vm->bitspp = bitspp;
if (cfg->trans_mode != OMAP_DSS_DSI_PULSE_MODE) {
hsa = 0;
} else if (ndl == 3 && req_vm->hsync_len == 0) {
hsa = 0;
} else {
hsa = div64_u64((u64)req_vm->hsync_len * byteclk, req_pck_nom);
hsa = max(hsa - hse, 1);
}
hbp = div64_u64((u64)req_vm->hback_porch * byteclk, req_pck_nom);
hbp = max(hbp, 1);
hfp = dsi_hbl - (hss + hsa + hse + hbp);
if (hfp < 1) {
int t;
/* we need to take cycles from hbp */
t = 1 - hfp;
hbp = max(hbp - t, 1);
hfp = dsi_hbl - (hss + hsa + hse + hbp);
if (hfp < 1 && hsa > 0) {
/* we need to take cycles from hsa */
t = 1 - hfp;
hsa = max(hsa - t, 1);
hfp = dsi_hbl - (hss + hsa + hse + hbp);
}
}
if (hfp < 1)
return false;
dsi_vm->hss = hss;
dsi_vm->hsa = hsa;
dsi_vm->hse = hse;
dsi_vm->hbp = hbp;
dsi_vm->hact = xres;
dsi_vm->hfp = hfp;
dsi_vm->vsa = req_vm->vsync_len;
dsi_vm->vbp = req_vm->vback_porch;
dsi_vm->vact = req_vm->vactive;
dsi_vm->vfp = req_vm->vfront_porch;
dsi_vm->trans_mode = cfg->trans_mode;
dsi_vm->blanking_mode = 0;
dsi_vm->hsa_blanking_mode = 1;
dsi_vm->hfp_blanking_mode = 1;
dsi_vm->hbp_blanking_mode = 1;
dsi_vm->window_sync = 4;
/* setup DISPC videomode */
dispc_vm = &ctx->vm;
*dispc_vm = *req_vm;
dispc_vm->pixelclock = dispc_pck;
if (cfg->trans_mode == OMAP_DSS_DSI_PULSE_MODE) {
hsa = div64_u64((u64)req_vm->hsync_len * dispc_pck,
req_pck_nom);
hsa = max(hsa, 1);
} else {
hsa = 1;
}
hbp = div64_u64((u64)req_vm->hback_porch * dispc_pck, req_pck_nom);
hbp = max(hbp, 1);
hfp = dispc_hbl - hsa - hbp;
if (hfp < 1) {
int t;
/* we need to take cycles from hbp */
t = 1 - hfp;
hbp = max(hbp - t, 1);
hfp = dispc_hbl - hsa - hbp;
if (hfp < 1) {
/* we need to take cycles from hsa */
t = 1 - hfp;
hsa = max(hsa - t, 1);
hfp = dispc_hbl - hsa - hbp;
}
}
if (hfp < 1)
return false;
dispc_vm->hfront_porch = hfp;
dispc_vm->hsync_len = hsa;
dispc_vm->hback_porch = hbp;
return true;
}
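/*
 * Throughput check example for dsi_vm_calc_blanking() (hypothetical PLL
 * settings): with clkdco = 960 MHz, hsclk = 240 MHz and byteclk = 60 MHz,
 * three data lanes give dsi_tput = 60 MHz * 3 * 8 = 1.44 Gbit/s, which at
 * 24 bpp only satisfies the minimum-throughput check while req_pck_min
 * stays at or below 60 MHz.
 */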
static bool dsi_vm_calc_dispc_cb(int lckd, int pckd, unsigned long lck,
unsigned long pck, void *data)
{
struct dsi_clk_calc_ctx *ctx = data;
ctx->dispc_cinfo.lck_div = lckd;
ctx->dispc_cinfo.pck_div = pckd;
ctx->dispc_cinfo.lck = lck;
ctx->dispc_cinfo.pck = pck;
if (dsi_vm_calc_blanking(ctx) == false)
return false;
#ifdef PRINT_VERBOSE_VM_TIMINGS
print_dispc_vm("dispc", &ctx->vm);
print_dsi_vm("dsi ", &ctx->dsi_vm);
print_dispc_vm("req ", ctx->config->vm);
print_dsi_dispc_vm("act ", &ctx->dsi_vm);
#endif
return true;
}
static bool dsi_vm_calc_hsdiv_cb(int m_dispc, unsigned long dispc,
void *data)
{
struct dsi_clk_calc_ctx *ctx = data;
unsigned long pck_max;
ctx->dsi_cinfo.mX[HSDIV_DISPC] = m_dispc;
ctx->dsi_cinfo.clkout[HSDIV_DISPC] = dispc;
/*
* In burst mode we can let the dispc pck be arbitrarily high, but it
* limits our scaling abilities. So for now, don't aim too high.
*/
if (ctx->config->trans_mode == OMAP_DSS_DSI_BURST_MODE)
pck_max = ctx->req_pck_max + 10000000;
else
pck_max = ctx->req_pck_max;
return dispc_div_calc(ctx->dsi->dss->dispc, dispc,
ctx->req_pck_min, pck_max,
dsi_vm_calc_dispc_cb, ctx);
}
static bool dsi_vm_calc_pll_cb(int n, int m, unsigned long fint,
unsigned long clkdco, void *data)
{
struct dsi_clk_calc_ctx *ctx = data;
struct dsi_data *dsi = ctx->dsi;
ctx->dsi_cinfo.n = n;
ctx->dsi_cinfo.m = m;
ctx->dsi_cinfo.fint = fint;
ctx->dsi_cinfo.clkdco = clkdco;
return dss_pll_hsdiv_calc_a(ctx->pll, clkdco, ctx->req_pck_min,
dsi->data->max_fck_freq,
dsi_vm_calc_hsdiv_cb, ctx);
}
static bool dsi_vm_calc(struct dsi_data *dsi,
const struct omap_dss_dsi_config *cfg,
struct dsi_clk_calc_ctx *ctx)
{
const struct videomode *vm = cfg->vm;
unsigned long clkin;
unsigned long pll_min;
unsigned long pll_max;
int ndl = dsi->num_lanes_used - 1;
int bitspp = mipi_dsi_pixel_format_to_bpp(cfg->pixel_format);
unsigned long byteclk_min;
clkin = clk_get_rate(dsi->pll.clkin);
memset(ctx, 0, sizeof(*ctx));
ctx->dsi = dsi;
ctx->pll = &dsi->pll;
ctx->config = cfg;
/* these limits should come from the panel driver */
ctx->req_pck_min = vm->pixelclock - 1000;
ctx->req_pck_nom = vm->pixelclock;
ctx->req_pck_max = vm->pixelclock + 1000;
byteclk_min = div64_u64((u64)ctx->req_pck_min * bitspp, ndl * 8);
pll_min = max(cfg->hs_clk_min * 4, byteclk_min * 4 * 4);
if (cfg->trans_mode == OMAP_DSS_DSI_BURST_MODE) {
pll_max = cfg->hs_clk_max * 4;
} else {
unsigned long byteclk_max;
byteclk_max = div64_u64((u64)ctx->req_pck_max * bitspp,
ndl * 8);
pll_max = byteclk_max * 4 * 4;
}
return dss_pll_calc_a(ctx->pll, clkin,
pll_min, pll_max,
dsi_vm_calc_pll_cb, ctx);
}
static bool dsi_is_video_mode(struct omap_dss_device *dssdev)
{
struct dsi_data *dsi = to_dsi_data(dssdev);
return dsi->mode == OMAP_DSS_DSI_VIDEO_MODE;
}
static int __dsi_calc_config(struct dsi_data *dsi,
const struct drm_display_mode *mode,
struct dsi_clk_calc_ctx *ctx)
{
struct omap_dss_dsi_config cfg = dsi->config;
struct videomode vm;
bool ok;
int r;
drm_display_mode_to_videomode(mode, &vm);
cfg.vm = &vm;
cfg.mode = dsi->mode;
cfg.pixel_format = dsi->pix_fmt;
if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE)
ok = dsi_vm_calc(dsi, &cfg, ctx);
else
ok = dsi_cm_calc(dsi, &cfg, ctx);
if (!ok)
return -EINVAL;
dsi_pll_calc_dsi_fck(dsi, &ctx->dsi_cinfo);
r = dsi_lp_clock_calc(ctx->dsi_cinfo.clkout[HSDIV_DSI],
cfg.lp_clk_min, cfg.lp_clk_max, &ctx->lp_cinfo);
if (r)
return r;
return 0;
}
static int dsi_set_config(struct omap_dss_device *dssdev,
const struct drm_display_mode *mode)
{
struct dsi_data *dsi = to_dsi_data(dssdev);
struct dsi_clk_calc_ctx ctx;
int r;
mutex_lock(&dsi->lock);
r = __dsi_calc_config(dsi, mode, &ctx);
if (r) {
DSSERR("failed to find suitable DSI clock settings\n");
goto err;
}
dsi->user_lp_cinfo = ctx.lp_cinfo;
dsi->user_dsi_cinfo = ctx.dsi_cinfo;
dsi->user_dispc_cinfo = ctx.dispc_cinfo;
dsi->vm = ctx.vm;
/*
* override interlace, logic level and edge related parameters in
* videomode with default values
*/
dsi->vm.flags &= ~DISPLAY_FLAGS_INTERLACED;
dsi->vm.flags &= ~DISPLAY_FLAGS_HSYNC_LOW;
dsi->vm.flags |= DISPLAY_FLAGS_HSYNC_HIGH;
dsi->vm.flags &= ~DISPLAY_FLAGS_VSYNC_LOW;
dsi->vm.flags |= DISPLAY_FLAGS_VSYNC_HIGH;
/*
* HACK: These flags should be handled through the omap_dss_device bus
* flags, but this will only be possible when the DSI encoder will be
* converted to the omapdrm-managed encoder model.
*/
dsi->vm.flags &= ~DISPLAY_FLAGS_PIXDATA_NEGEDGE;
dsi->vm.flags |= DISPLAY_FLAGS_PIXDATA_POSEDGE;
dsi->vm.flags &= ~DISPLAY_FLAGS_DE_LOW;
dsi->vm.flags |= DISPLAY_FLAGS_DE_HIGH;
dsi->vm.flags &= ~DISPLAY_FLAGS_SYNC_POSEDGE;
dsi->vm.flags |= DISPLAY_FLAGS_SYNC_NEGEDGE;
dss_mgr_set_timings(&dsi->output, &dsi->vm);
dsi->vm_timings = ctx.dsi_vm;
mutex_unlock(&dsi->lock);
return 0;
err:
mutex_unlock(&dsi->lock);
return r;
}
/*
* Return a hardcoded dispc channel for the DSI output. This should work for
* current use cases, but this can be later expanded to either resolve
* the channel in some more dynamic manner, or get the channel as a user
* parameter.
*/
static enum omap_channel dsi_get_dispc_channel(struct dsi_data *dsi)
{
switch (dsi->data->model) {
case DSI_MODEL_OMAP3:
return OMAP_DSS_CHANNEL_LCD;
case DSI_MODEL_OMAP4:
switch (dsi->module_id) {
case 0:
return OMAP_DSS_CHANNEL_LCD;
case 1:
return OMAP_DSS_CHANNEL_LCD2;
default:
DSSWARN("unsupported module id\n");
return OMAP_DSS_CHANNEL_LCD;
}
case DSI_MODEL_OMAP5:
switch (dsi->module_id) {
case 0:
return OMAP_DSS_CHANNEL_LCD;
case 1:
return OMAP_DSS_CHANNEL_LCD3;
default:
DSSWARN("unsupported module id\n");
return OMAP_DSS_CHANNEL_LCD;
}
default:
DSSWARN("unsupported DSS version\n");
return OMAP_DSS_CHANNEL_LCD;
}
}
static ssize_t _omap_dsi_host_transfer(struct dsi_data *dsi, int vc,
const struct mipi_dsi_msg *msg)
{
struct omap_dss_device *dssdev = &dsi->output;
int r;
dsi_vc_enable_hs(dssdev, vc, !(msg->flags & MIPI_DSI_MSG_USE_LPM));
switch (msg->type) {
case MIPI_DSI_GENERIC_SHORT_WRITE_0_PARAM:
case MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM:
case MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM:
case MIPI_DSI_GENERIC_LONG_WRITE:
case MIPI_DSI_DCS_SHORT_WRITE:
case MIPI_DSI_DCS_SHORT_WRITE_PARAM:
case MIPI_DSI_DCS_LONG_WRITE:
case MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE:
case MIPI_DSI_NULL_PACKET:
r = dsi_vc_write_common(dssdev, vc, msg);
break;
case MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM:
case MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM:
case MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM:
r = dsi_vc_generic_read(dssdev, vc, msg);
break;
case MIPI_DSI_DCS_READ:
r = dsi_vc_dcs_read(dssdev, vc, msg);
break;
default:
r = -EINVAL;
break;
}
if (r < 0)
return r;
if (msg->type == MIPI_DSI_DCS_SHORT_WRITE ||
msg->type == MIPI_DSI_DCS_SHORT_WRITE_PARAM) {
u8 cmd = ((u8 *)msg->tx_buf)[0];
if (cmd == MIPI_DCS_SET_TEAR_OFF)
dsi_enable_te(dsi, false);
else if (cmd == MIPI_DCS_SET_TEAR_ON)
dsi_enable_te(dsi, true);
}
return 0;
}
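/*
 * For reference, the msg->type values handled above come from the DSI core
 * helpers used by panel drivers: a panel calling mipi_dsi_dcs_write_buffer()
 * with a two-byte payload should arrive here as
 * MIPI_DSI_DCS_SHORT_WRITE_PARAM and be routed through dsi_vc_write_common(),
 * while mipi_dsi_dcs_read() arrives as MIPI_DSI_DCS_READ and goes through
 * dsi_vc_dcs_read().
 */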
static ssize_t omap_dsi_host_transfer(struct mipi_dsi_host *host,
const struct mipi_dsi_msg *msg)
{
struct dsi_data *dsi = host_to_omap(host);
int r;
int vc = VC_CMD;
dsi_bus_lock(dsi);
if (!dsi->iface_enabled) {
dsi_enable(dsi);
schedule_delayed_work(&dsi->dsi_disable_work, msecs_to_jiffies(2000));
}
r = _omap_dsi_host_transfer(dsi, vc, msg);
dsi_bus_unlock(dsi);
return r;
}
static int dsi_get_clocks(struct dsi_data *dsi)
{
struct clk *clk;
clk = devm_clk_get(dsi->dev, "fck");
if (IS_ERR(clk)) {
DSSERR("can't get fck\n");
return PTR_ERR(clk);
}
dsi->dss_clk = clk;
return 0;
}
static const struct omapdss_dsi_ops dsi_ops = {
.update = dsi_update_all,
.is_video_mode = dsi_is_video_mode,
};
static irqreturn_t omap_dsi_te_irq_handler(int irq, void *dev_id)
{
struct dsi_data *dsi = (struct dsi_data *)dev_id;
int old;
old = atomic_cmpxchg(&dsi->do_ext_te_update, 1, 0);
if (old) {
cancel_delayed_work(&dsi->te_timeout_work);
_dsi_update(dsi);
}
return IRQ_HANDLED;
}
static void omap_dsi_te_timeout_work_callback(struct work_struct *work)
{
struct dsi_data *dsi =
container_of(work, struct dsi_data, te_timeout_work.work);
int old;
old = atomic_cmpxchg(&dsi->do_ext_te_update, 1, 0);
if (old) {
dev_err(dsi->dev, "TE not received for 250ms!\n");
_dsi_update(dsi);
}
}
static int omap_dsi_register_te_irq(struct dsi_data *dsi,
struct mipi_dsi_device *client)
{
int err;
int te_irq;
dsi->te_gpio = gpiod_get(&client->dev, "te-gpios", GPIOD_IN);
if (IS_ERR(dsi->te_gpio)) {
err = PTR_ERR(dsi->te_gpio);
if (err == -ENOENT) {
dsi->te_gpio = NULL;
return 0;
}
dev_err(dsi->dev, "Could not get TE gpio: %d\n", err);
return err;
}
te_irq = gpiod_to_irq(dsi->te_gpio);
if (te_irq < 0) {
gpiod_put(dsi->te_gpio);
dsi->te_gpio = NULL;
return -EINVAL;
}
dsi->te_irq = te_irq;
irq_set_status_flags(te_irq, IRQ_NOAUTOEN);
err = request_threaded_irq(te_irq, NULL, omap_dsi_te_irq_handler,
IRQF_TRIGGER_RISING | IRQF_ONESHOT,
"TE", dsi);
if (err) {
dev_err(dsi->dev, "request irq failed with %d\n", err);
gpiod_put(dsi->te_gpio);
dsi->te_gpio = NULL;
return err;
}
INIT_DEFERRABLE_WORK(&dsi->te_timeout_work,
omap_dsi_te_timeout_work_callback);
dev_dbg(dsi->dev, "Using GPIO TE\n");
return 0;
}
static void omap_dsi_unregister_te_irq(struct dsi_data *dsi)
{
if (dsi->te_gpio) {
free_irq(dsi->te_irq, dsi);
cancel_delayed_work(&dsi->te_timeout_work);
gpiod_put(dsi->te_gpio);
dsi->te_gpio = NULL;
}
}
static int omap_dsi_host_attach(struct mipi_dsi_host *host,
struct mipi_dsi_device *client)
{
struct dsi_data *dsi = host_to_omap(host);
int r;
if (dsi->dsidev) {
DSSERR("dsi client already attached\n");
return -EBUSY;
}
if (mipi_dsi_pixel_format_to_bpp(client->format) < 0) {
DSSERR("invalid pixel format\n");
return -EINVAL;
}
atomic_set(&dsi->do_ext_te_update, 0);
if (client->mode_flags & MIPI_DSI_MODE_VIDEO) {
dsi->mode = OMAP_DSS_DSI_VIDEO_MODE;
} else {
r = omap_dsi_register_te_irq(dsi, client);
if (r)
return r;
dsi->mode = OMAP_DSS_DSI_CMD_MODE;
}
dsi->dsidev = client;
dsi->pix_fmt = client->format;
dsi->config.hs_clk_min = 150000000; // TODO: get from client?
dsi->config.hs_clk_max = client->hs_rate;
dsi->config.lp_clk_min = 7000000; // TODO: get from client?
dsi->config.lp_clk_max = client->lp_rate;
if (client->mode_flags & MIPI_DSI_MODE_VIDEO_BURST)
dsi->config.trans_mode = OMAP_DSS_DSI_BURST_MODE;
else if (client->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
dsi->config.trans_mode = OMAP_DSS_DSI_PULSE_MODE;
else
dsi->config.trans_mode = OMAP_DSS_DSI_EVENT_MODE;
return 0;
}
static int omap_dsi_host_detach(struct mipi_dsi_host *host,
struct mipi_dsi_device *client)
{
struct dsi_data *dsi = host_to_omap(host);
if (WARN_ON(dsi->dsidev != client))
return -EINVAL;
cancel_delayed_work_sync(&dsi->dsi_disable_work);
dsi_bus_lock(dsi);
if (dsi->iface_enabled)
dsi_disable(dsi);
dsi_bus_unlock(dsi);
omap_dsi_unregister_te_irq(dsi);
dsi->dsidev = NULL;
return 0;
}
static const struct mipi_dsi_host_ops omap_dsi_host_ops = {
.attach = omap_dsi_host_attach,
.detach = omap_dsi_host_detach,
.transfer = omap_dsi_host_transfer,
};
/* -----------------------------------------------------------------------------
* PLL
*/
static const struct dss_pll_ops dsi_pll_ops = {
.enable = dsi_pll_enable,
.disable = dsi_pll_disable,
.set_config = dss_pll_write_config_type_a,
};
static const struct dss_pll_hw dss_omap3_dsi_pll_hw = {
.type = DSS_PLL_TYPE_A,
.n_max = (1 << 7) - 1,
.m_max = (1 << 11) - 1,
.mX_max = (1 << 4) - 1,
.fint_min = 750000,
.fint_max = 2100000,
.clkdco_low = 1000000000,
.clkdco_max = 1800000000,
.n_msb = 7,
.n_lsb = 1,
.m_msb = 18,
.m_lsb = 8,
.mX_msb[0] = 22,
.mX_lsb[0] = 19,
.mX_msb[1] = 26,
.mX_lsb[1] = 23,
.has_stopmode = true,
.has_freqsel = true,
.has_selfreqdco = false,
.has_refsel = false,
};
static const struct dss_pll_hw dss_omap4_dsi_pll_hw = {
.type = DSS_PLL_TYPE_A,
.n_max = (1 << 8) - 1,
.m_max = (1 << 12) - 1,
.mX_max = (1 << 5) - 1,
.fint_min = 500000,
.fint_max = 2500000,
.clkdco_low = 1000000000,
.clkdco_max = 1800000000,
.n_msb = 8,
.n_lsb = 1,
.m_msb = 20,
.m_lsb = 9,
.mX_msb[0] = 25,
.mX_lsb[0] = 21,
.mX_msb[1] = 30,
.mX_lsb[1] = 26,
.has_stopmode = true,
.has_freqsel = false,
.has_selfreqdco = false,
.has_refsel = false,
};
static const struct dss_pll_hw dss_omap5_dsi_pll_hw = {
.type = DSS_PLL_TYPE_A,
.n_max = (1 << 8) - 1,
.m_max = (1 << 12) - 1,
.mX_max = (1 << 5) - 1,
.fint_min = 150000,
.fint_max = 52000000,
.clkdco_low = 1000000000,
.clkdco_max = 1800000000,
.n_msb = 8,
.n_lsb = 1,
.m_msb = 20,
.m_lsb = 9,
.mX_msb[0] = 25,
.mX_lsb[0] = 21,
.mX_msb[1] = 30,
.mX_lsb[1] = 26,
.has_stopmode = true,
.has_freqsel = false,
.has_selfreqdco = true,
.has_refsel = true,
};
static int dsi_init_pll_data(struct dss_device *dss, struct dsi_data *dsi)
{
struct dss_pll *pll = &dsi->pll;
struct clk *clk;
int r;
clk = devm_clk_get(dsi->dev, "sys_clk");
if (IS_ERR(clk)) {
DSSERR("can't get sys_clk\n");
return PTR_ERR(clk);
}
pll->name = dsi->module_id == 0 ? "dsi0" : "dsi1";
pll->id = dsi->module_id == 0 ? DSS_PLL_DSI1 : DSS_PLL_DSI2;
pll->clkin = clk;
pll->base = dsi->pll_base;
pll->hw = dsi->data->pll_hw;
pll->ops = &dsi_pll_ops;
r = dss_pll_register(dss, pll);
if (r)
return r;
return 0;
}
/* -----------------------------------------------------------------------------
* Component Bind & Unbind
*/
static int dsi_bind(struct device *dev, struct device *master, void *data)
{
struct dss_device *dss = dss_get_device(master);
struct dsi_data *dsi = dev_get_drvdata(dev);
char name[10];
u32 rev;
int r;
dsi->dss = dss;
dsi_init_pll_data(dss, dsi);
r = dsi_runtime_get(dsi);
if (r)
return r;
rev = dsi_read_reg(dsi, DSI_REVISION);
dev_dbg(dev, "OMAP DSI rev %d.%d\n",
FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0));
dsi->line_buffer_size = dsi_get_line_buf_size(dsi);
dsi_runtime_put(dsi);
snprintf(name, sizeof(name), "dsi%u_regs", dsi->module_id + 1);
dsi->debugfs.regs = dss_debugfs_create_file(dss, name,
dsi_dump_dsi_regs, dsi);
#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
snprintf(name, sizeof(name), "dsi%u_irqs", dsi->module_id + 1);
dsi->debugfs.irqs = dss_debugfs_create_file(dss, name,
dsi_dump_dsi_irqs, dsi);
#endif
snprintf(name, sizeof(name), "dsi%u_clks", dsi->module_id + 1);
dsi->debugfs.clks = dss_debugfs_create_file(dss, name,
dsi_dump_dsi_clocks, dsi);
return 0;
}
static void dsi_unbind(struct device *dev, struct device *master, void *data)
{
struct dsi_data *dsi = dev_get_drvdata(dev);
dss_debugfs_remove_file(dsi->debugfs.clks);
dss_debugfs_remove_file(dsi->debugfs.irqs);
dss_debugfs_remove_file(dsi->debugfs.regs);
WARN_ON(dsi->scp_clk_refcount > 0);
dss_pll_unregister(&dsi->pll);
}
static const struct component_ops dsi_component_ops = {
.bind = dsi_bind,
.unbind = dsi_unbind,
};
/* -----------------------------------------------------------------------------
* DRM Bridge Operations
*/
static int dsi_bridge_attach(struct drm_bridge *bridge,
enum drm_bridge_attach_flags flags)
{
struct dsi_data *dsi = drm_bridge_to_dsi(bridge);
if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
return -EINVAL;
return drm_bridge_attach(bridge->encoder, dsi->output.next_bridge,
bridge, flags);
}
static enum drm_mode_status
dsi_bridge_mode_valid(struct drm_bridge *bridge,
const struct drm_display_info *info,
const struct drm_display_mode *mode)
{
struct dsi_data *dsi = drm_bridge_to_dsi(bridge);
struct dsi_clk_calc_ctx ctx;
int r;
mutex_lock(&dsi->lock);
r = __dsi_calc_config(dsi, mode, &ctx);
mutex_unlock(&dsi->lock);
return r ? MODE_CLOCK_RANGE : MODE_OK;
}
static void dsi_bridge_mode_set(struct drm_bridge *bridge,
const struct drm_display_mode *mode,
const struct drm_display_mode *adjusted_mode)
{
struct dsi_data *dsi = drm_bridge_to_dsi(bridge);
dsi_set_config(&dsi->output, adjusted_mode);
}
static void dsi_bridge_enable(struct drm_bridge *bridge)
{
struct dsi_data *dsi = drm_bridge_to_dsi(bridge);
struct omap_dss_device *dssdev = &dsi->output;
cancel_delayed_work_sync(&dsi->dsi_disable_work);
dsi_bus_lock(dsi);
if (!dsi->iface_enabled)
dsi_enable(dsi);
dsi_enable_video_output(dssdev, VC_VIDEO);
dsi->video_enabled = true;
dsi_bus_unlock(dsi);
}
static void dsi_bridge_disable(struct drm_bridge *bridge)
{
struct dsi_data *dsi = drm_bridge_to_dsi(bridge);
struct omap_dss_device *dssdev = &dsi->output;
cancel_delayed_work_sync(&dsi->dsi_disable_work);
dsi_bus_lock(dsi);
dsi->video_enabled = false;
dsi_disable_video_output(dssdev, VC_VIDEO);
dsi_disable(dsi);
dsi_bus_unlock(dsi);
}
static const struct drm_bridge_funcs dsi_bridge_funcs = {
.attach = dsi_bridge_attach,
.mode_valid = dsi_bridge_mode_valid,
.mode_set = dsi_bridge_mode_set,
.enable = dsi_bridge_enable,
.disable = dsi_bridge_disable,
};
static void dsi_bridge_init(struct dsi_data *dsi)
{
dsi->bridge.funcs = &dsi_bridge_funcs;
dsi->bridge.of_node = dsi->host.dev->of_node;
dsi->bridge.type = DRM_MODE_CONNECTOR_DSI;
drm_bridge_add(&dsi->bridge);
}
static void dsi_bridge_cleanup(struct dsi_data *dsi)
{
drm_bridge_remove(&dsi->bridge);
}
/* -----------------------------------------------------------------------------
* Probe & Remove, Suspend & Resume
*/
static int dsi_init_output(struct dsi_data *dsi)
{
struct omap_dss_device *out = &dsi->output;
int r;
dsi_bridge_init(dsi);
out->dev = dsi->dev;
out->id = dsi->module_id == 0 ?
OMAP_DSS_OUTPUT_DSI1 : OMAP_DSS_OUTPUT_DSI2;
out->type = OMAP_DISPLAY_TYPE_DSI;
out->name = dsi->module_id == 0 ? "dsi.0" : "dsi.1";
out->dispc_channel = dsi_get_dispc_channel(dsi);
out->dsi_ops = &dsi_ops;
out->of_port = 0;
out->bus_flags = DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE
| DRM_BUS_FLAG_DE_HIGH
| DRM_BUS_FLAG_SYNC_DRIVE_NEGEDGE;
r = omapdss_device_init_output(out, &dsi->bridge);
if (r < 0) {
dsi_bridge_cleanup(dsi);
return r;
}
omapdss_device_register(out);
return 0;
}
static void dsi_uninit_output(struct dsi_data *dsi)
{
struct omap_dss_device *out = &dsi->output;
omapdss_device_unregister(out);
omapdss_device_cleanup_output(out);
dsi_bridge_cleanup(dsi);
}
static int dsi_probe_of(struct dsi_data *dsi)
{
struct device_node *node = dsi->dev->of_node;
struct property *prop;
u32 lane_arr[10];
int len, num_pins;
int r;
struct device_node *ep;
ep = of_graph_get_endpoint_by_regs(node, 0, 0);
if (!ep)
return 0;
prop = of_find_property(ep, "lanes", &len);
if (prop == NULL) {
dev_err(dsi->dev, "failed to find lane data\n");
r = -EINVAL;
goto err;
}
num_pins = len / sizeof(u32);
if (num_pins < 4 || num_pins % 2 != 0 ||
num_pins > dsi->num_lanes_supported * 2) {
dev_err(dsi->dev, "bad number of lanes\n");
r = -EINVAL;
goto err;
}
r = of_property_read_u32_array(ep, "lanes", lane_arr, num_pins);
if (r) {
dev_err(dsi->dev, "failed to read lane data\n");
goto err;
}
r = dsi_configure_pins(dsi, num_pins, lane_arr);
if (r) {
dev_err(dsi->dev, "failed to configure pins\n");
goto err;
}
of_node_put(ep);
return 0;
err:
of_node_put(ep);
return r;
}
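/*
 * The "lanes" property read above follows the omapdss DSI endpoint binding;
 * an illustrative (not board-specific) fragment could look like:
 *
 *	dsi1_out_ep: endpoint {
 *		remote-endpoint = <&lcd0_dsi_in>;
 *		lanes = <0 1 2 3 4 5>;
 *	};
 *
 * which maps to a clock lane plus two data lanes, all with default
 * polarity, as parsed by dsi_configure_pins().
 */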
static const struct dsi_of_data dsi_of_data_omap34xx = {
.model = DSI_MODEL_OMAP3,
.pll_hw = &dss_omap3_dsi_pll_hw,
.modules = (const struct dsi_module_id_data[]) {
{ .address = 0x4804fc00, .id = 0, },
{ },
},
.max_fck_freq = 173000000,
.max_pll_lpdiv = (1 << 13) - 1,
.quirks = DSI_QUIRK_REVERSE_TXCLKESC,
};
static const struct dsi_of_data dsi_of_data_omap36xx = {
.model = DSI_MODEL_OMAP3,
.pll_hw = &dss_omap3_dsi_pll_hw,
.modules = (const struct dsi_module_id_data[]) {
{ .address = 0x4804fc00, .id = 0, },
{ },
},
.max_fck_freq = 173000000,
.max_pll_lpdiv = (1 << 13) - 1,
.quirks = DSI_QUIRK_PLL_PWR_BUG,
};
static const struct dsi_of_data dsi_of_data_omap4 = {
.model = DSI_MODEL_OMAP4,
.pll_hw = &dss_omap4_dsi_pll_hw,
.modules = (const struct dsi_module_id_data[]) {
{ .address = 0x58004000, .id = 0, },
{ .address = 0x58005000, .id = 1, },
{ },
},
.max_fck_freq = 170000000,
.max_pll_lpdiv = (1 << 13) - 1,
.quirks = DSI_QUIRK_DCS_CMD_CONFIG_VC | DSI_QUIRK_VC_OCP_WIDTH
| DSI_QUIRK_GNQ,
};
static const struct dsi_of_data dsi_of_data_omap5 = {
.model = DSI_MODEL_OMAP5,
.pll_hw = &dss_omap5_dsi_pll_hw,
.modules = (const struct dsi_module_id_data[]) {
{ .address = 0x58004000, .id = 0, },
{ .address = 0x58009000, .id = 1, },
{ },
},
.max_fck_freq = 209250000,
.max_pll_lpdiv = (1 << 13) - 1,
.quirks = DSI_QUIRK_DCS_CMD_CONFIG_VC | DSI_QUIRK_VC_OCP_WIDTH
| DSI_QUIRK_GNQ | DSI_QUIRK_PHY_DCC,
};
static const struct of_device_id dsi_of_match[] = {
{ .compatible = "ti,omap3-dsi", .data = &dsi_of_data_omap36xx, },
{ .compatible = "ti,omap4-dsi", .data = &dsi_of_data_omap4, },
{ .compatible = "ti,omap5-dsi", .data = &dsi_of_data_omap5, },
{},
};
static const struct soc_device_attribute dsi_soc_devices[] = {
{ .machine = "OMAP3[45]*", .data = &dsi_of_data_omap34xx },
{ .machine = "AM35*", .data = &dsi_of_data_omap34xx },
{ /* sentinel */ }
};
static void omap_dsi_disable_work_callback(struct work_struct *work)
{
struct dsi_data *dsi = container_of(work, struct dsi_data, dsi_disable_work.work);
dsi_bus_lock(dsi);
if (dsi->iface_enabled && !dsi->video_enabled)
dsi_disable(dsi);
dsi_bus_unlock(dsi);
}
static int dsi_probe(struct platform_device *pdev)
{
const struct soc_device_attribute *soc;
const struct dsi_module_id_data *d;
struct device *dev = &pdev->dev;
struct dsi_data *dsi;
struct resource *dsi_mem;
unsigned int i;
int r;
dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL);
if (!dsi)
return -ENOMEM;
dsi->dev = dev;
dev_set_drvdata(dev, dsi);
spin_lock_init(&dsi->irq_lock);
spin_lock_init(&dsi->errors_lock);
dsi->errors = 0;
#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
spin_lock_init(&dsi->irq_stats_lock);
dsi->irq_stats.last_reset = jiffies;
#endif
mutex_init(&dsi->lock);
sema_init(&dsi->bus_lock, 1);
INIT_DEFERRABLE_WORK(&dsi->framedone_timeout_work,
dsi_framedone_timeout_work_callback);
INIT_DEFERRABLE_WORK(&dsi->dsi_disable_work, omap_dsi_disable_work_callback);
#ifdef DSI_CATCH_MISSING_TE
timer_setup(&dsi->te_timer, dsi_te_timeout, 0);
#endif
dsi_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "proto");
dsi->proto_base = devm_ioremap_resource(dev, dsi_mem);
if (IS_ERR(dsi->proto_base))
return PTR_ERR(dsi->proto_base);
dsi->phy_base = devm_platform_ioremap_resource_byname(pdev, "phy");
if (IS_ERR(dsi->phy_base))
return PTR_ERR(dsi->phy_base);
dsi->pll_base = devm_platform_ioremap_resource_byname(pdev, "pll");
if (IS_ERR(dsi->pll_base))
return PTR_ERR(dsi->pll_base);
dsi->irq = platform_get_irq(pdev, 0);
if (dsi->irq < 0) {
DSSERR("platform_get_irq failed\n");
return -ENODEV;
}
r = devm_request_irq(dev, dsi->irq, omap_dsi_irq_handler,
IRQF_SHARED, dev_name(dev), dsi);
if (r < 0) {
DSSERR("request_irq failed\n");
return r;
}
dsi->vdds_dsi_reg = devm_regulator_get(dev, "vdd");
if (IS_ERR(dsi->vdds_dsi_reg)) {
if (PTR_ERR(dsi->vdds_dsi_reg) != -EPROBE_DEFER)
DSSERR("can't get DSI VDD regulator\n");
return PTR_ERR(dsi->vdds_dsi_reg);
}
soc = soc_device_match(dsi_soc_devices);
if (soc)
dsi->data = soc->data;
else
dsi->data = of_match_node(dsi_of_match, dev->of_node)->data;
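/* Find the DSI module instance by matching its register base address. */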
d = dsi->data->modules;
while (d->address != 0 && d->address != dsi_mem->start)
d++;
if (d->address == 0) {
DSSERR("unsupported DSI module\n");
return -ENODEV;
}
dsi->module_id = d->id;
if (dsi->data->model == DSI_MODEL_OMAP4 ||
dsi->data->model == DSI_MODEL_OMAP5) {
struct device_node *np;
/*
* The OMAP4/5 display DT bindings don't reference the padconf
* syscon. Our only option to retrieve it is to find it by name.
*/
np = of_find_node_by_name(NULL,
dsi->data->model == DSI_MODEL_OMAP4 ?
"omap4_padconf_global" : "omap5_padconf_global");
if (!np)
return -ENODEV;
dsi->syscon = syscon_node_to_regmap(np);
of_node_put(np);
}
/* DSI VCs initialization */
for (i = 0; i < ARRAY_SIZE(dsi->vc); i++)
dsi->vc[i].source = DSI_VC_SOURCE_L4;
r = dsi_get_clocks(dsi);
if (r)
return r;
pm_runtime_enable(dev);
/* DSI on OMAP3 doesn't have the DSI_GNQ register; assume 3 data
* lanes by default */
if (dsi->data->quirks & DSI_QUIRK_GNQ) {
dsi_runtime_get(dsi);
/* NB_DATA_LANES */
dsi->num_lanes_supported = 1 + REG_GET(dsi, DSI_GNQ, 11, 9);
dsi_runtime_put(dsi);
} else {
dsi->num_lanes_supported = 3;
}
dsi->host.ops = &omap_dsi_host_ops;
dsi->host.dev = &pdev->dev;
r = dsi_probe_of(dsi);
if (r) {
DSSERR("Invalid DSI DT data\n");
goto err_pm_disable;
}
r = mipi_dsi_host_register(&dsi->host);
if (r < 0) {
dev_err(&pdev->dev, "failed to register DSI host: %d\n", r);
goto err_pm_disable;
}
r = dsi_init_output(dsi);
if (r)
goto err_dsi_host_unregister;
r = component_add(&pdev->dev, &dsi_component_ops);
if (r)
goto err_uninit_output;
return 0;
err_uninit_output:
dsi_uninit_output(dsi);
err_dsi_host_unregister:
mipi_dsi_host_unregister(&dsi->host);
err_pm_disable:
pm_runtime_disable(dev);
return r;
}
static void dsi_remove(struct platform_device *pdev)
{
struct dsi_data *dsi = platform_get_drvdata(pdev);
component_del(&pdev->dev, &dsi_component_ops);
dsi_uninit_output(dsi);
mipi_dsi_host_unregister(&dsi->host);
pm_runtime_disable(&pdev->dev);
if (dsi->vdds_dsi_reg != NULL && dsi->vdds_dsi_enabled) {
regulator_disable(dsi->vdds_dsi_reg);
dsi->vdds_dsi_enabled = false;
}
}
static __maybe_unused int dsi_runtime_suspend(struct device *dev)
{
struct dsi_data *dsi = dev_get_drvdata(dev);
dsi->is_enabled = false;
/* ensure the irq handler sees the is_enabled value */
smp_wmb();
/* wait for current handler to finish before turning the DSI off */
synchronize_irq(dsi->irq);
return 0;
}
static __maybe_unused int dsi_runtime_resume(struct device *dev)
{
struct dsi_data *dsi = dev_get_drvdata(dev);
dsi->is_enabled = true;
/* ensure the irq handler sees the is_enabled value */
smp_wmb();
return 0;
}
static const struct dev_pm_ops dsi_pm_ops = {
SET_RUNTIME_PM_OPS(dsi_runtime_suspend, dsi_runtime_resume, NULL)
SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
};
struct platform_driver omap_dsihw_driver = {
.probe = dsi_probe,
.remove_new = dsi_remove,
.driver = {
.name = "omapdss_dsi",
.pm = &dsi_pm_ops,
.of_match_table = dsi_of_match,
.suppress_bind_attrs = true,
},
};
| linux-master | drivers/gpu/drm/omapdrm/dss/dsi.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2009 Nokia Corporation
* Author: Tomi Valkeinen <[email protected]>
*
* VENC settings from TI's DSS driver
*/
#define DSS_SUBSYS_NAME "VENC"
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/seq_file.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/of_graph.h>
#include <linux/component.h>
#include <linux/sys_soc.h>
#include <drm/drm_bridge.h>
#include "omapdss.h"
#include "dss.h"
/* Venc registers */
#define VENC_REV_ID 0x00
#define VENC_STATUS 0x04
#define VENC_F_CONTROL 0x08
#define VENC_VIDOUT_CTRL 0x10
#define VENC_SYNC_CTRL 0x14
#define VENC_LLEN 0x1C
#define VENC_FLENS 0x20
#define VENC_HFLTR_CTRL 0x24
#define VENC_CC_CARR_WSS_CARR 0x28
#define VENC_C_PHASE 0x2C
#define VENC_GAIN_U 0x30
#define VENC_GAIN_V 0x34
#define VENC_GAIN_Y 0x38
#define VENC_BLACK_LEVEL 0x3C
#define VENC_BLANK_LEVEL 0x40
#define VENC_X_COLOR 0x44
#define VENC_M_CONTROL 0x48
#define VENC_BSTAMP_WSS_DATA 0x4C
#define VENC_S_CARR 0x50
#define VENC_LINE21 0x54
#define VENC_LN_SEL 0x58
#define VENC_L21__WC_CTL 0x5C
#define VENC_HTRIGGER_VTRIGGER 0x60
#define VENC_SAVID__EAVID 0x64
#define VENC_FLEN__FAL 0x68
#define VENC_LAL__PHASE_RESET 0x6C
#define VENC_HS_INT_START_STOP_X 0x70
#define VENC_HS_EXT_START_STOP_X 0x74
#define VENC_VS_INT_START_X 0x78
#define VENC_VS_INT_STOP_X__VS_INT_START_Y 0x7C
#define VENC_VS_INT_STOP_Y__VS_EXT_START_X 0x80
#define VENC_VS_EXT_STOP_X__VS_EXT_START_Y 0x84
#define VENC_VS_EXT_STOP_Y 0x88
#define VENC_AVID_START_STOP_X 0x90
#define VENC_AVID_START_STOP_Y 0x94
#define VENC_FID_INT_START_X__FID_INT_START_Y 0xA0
#define VENC_FID_INT_OFFSET_Y__FID_EXT_START_X 0xA4
#define VENC_FID_EXT_START_Y__FID_EXT_OFFSET_Y 0xA8
#define VENC_TVDETGP_INT_START_STOP_X 0xB0
#define VENC_TVDETGP_INT_START_STOP_Y 0xB4
#define VENC_GEN_CTRL 0xB8
#define VENC_OUTPUT_CONTROL 0xC4
#define VENC_OUTPUT_TEST 0xC8
#define VENC_DAC_B__DAC_C 0xC8
struct venc_config {
u32 f_control;
u32 vidout_ctrl;
u32 sync_ctrl;
u32 llen;
u32 flens;
u32 hfltr_ctrl;
u32 cc_carr_wss_carr;
u32 c_phase;
u32 gain_u;
u32 gain_v;
u32 gain_y;
u32 black_level;
u32 blank_level;
u32 x_color;
u32 m_control;
u32 bstamp_wss_data;
u32 s_carr;
u32 line21;
u32 ln_sel;
u32 l21__wc_ctl;
u32 htrigger_vtrigger;
u32 savid__eavid;
u32 flen__fal;
u32 lal__phase_reset;
u32 hs_int_start_stop_x;
u32 hs_ext_start_stop_x;
u32 vs_int_start_x;
u32 vs_int_stop_x__vs_int_start_y;
u32 vs_int_stop_y__vs_ext_start_x;
u32 vs_ext_stop_x__vs_ext_start_y;
u32 vs_ext_stop_y;
u32 avid_start_stop_x;
u32 avid_start_stop_y;
u32 fid_int_start_x__fid_int_start_y;
u32 fid_int_offset_y__fid_ext_start_x;
u32 fid_ext_start_y__fid_ext_offset_y;
u32 tvdetgp_int_start_stop_x;
u32 tvdetgp_int_start_stop_y;
u32 gen_ctrl;
};
/* from TRM */
static const struct venc_config venc_config_pal_trm = {
.f_control = 0,
.vidout_ctrl = 1,
.sync_ctrl = 0x40,
.llen = 0x35F, /* 863 */
.flens = 0x270, /* 624 */
.hfltr_ctrl = 0,
.cc_carr_wss_carr = 0x2F7225ED,
.c_phase = 0,
.gain_u = 0x111,
.gain_v = 0x181,
.gain_y = 0x140,
.black_level = 0x3B,
.blank_level = 0x3B,
.x_color = 0x7,
.m_control = 0x2,
.bstamp_wss_data = 0x3F,
.s_carr = 0x2A098ACB,
.line21 = 0,
.ln_sel = 0x01290015,
.l21__wc_ctl = 0x0000F603,
.htrigger_vtrigger = 0,
.savid__eavid = 0x06A70108,
.flen__fal = 0x00180270,
.lal__phase_reset = 0x00040135,
.hs_int_start_stop_x = 0x00880358,
.hs_ext_start_stop_x = 0x000F035F,
.vs_int_start_x = 0x01A70000,
.vs_int_stop_x__vs_int_start_y = 0x000001A7,
.vs_int_stop_y__vs_ext_start_x = 0x01AF0000,
.vs_ext_stop_x__vs_ext_start_y = 0x000101AF,
.vs_ext_stop_y = 0x00000025,
.avid_start_stop_x = 0x03530083,
.avid_start_stop_y = 0x026C002E,
.fid_int_start_x__fid_int_start_y = 0x0001008A,
.fid_int_offset_y__fid_ext_start_x = 0x002E0138,
.fid_ext_start_y__fid_ext_offset_y = 0x01380001,
.tvdetgp_int_start_stop_x = 0x00140001,
.tvdetgp_int_start_stop_y = 0x00010001,
.gen_ctrl = 0x00FF0000,
};
/* from TRM */
static const struct venc_config venc_config_ntsc_trm = {
.f_control = 0,
.vidout_ctrl = 1,
.sync_ctrl = 0x8040,
.llen = 0x359,
.flens = 0x20C,
.hfltr_ctrl = 0,
.cc_carr_wss_carr = 0x043F2631,
.c_phase = 0,
.gain_u = 0x102,
.gain_v = 0x16C,
.gain_y = 0x12F,
.black_level = 0x43,
.blank_level = 0x38,
.x_color = 0x7,
.m_control = 0x1,
.bstamp_wss_data = 0x38,
.s_carr = 0x21F07C1F,
.line21 = 0,
.ln_sel = 0x01310011,
.l21__wc_ctl = 0x0000F003,
.htrigger_vtrigger = 0,
.savid__eavid = 0x069300F4,
.flen__fal = 0x0016020C,
.lal__phase_reset = 0x00060107,
.hs_int_start_stop_x = 0x008E0350,
.hs_ext_start_stop_x = 0x000F0359,
.vs_int_start_x = 0x01A00000,
.vs_int_stop_x__vs_int_start_y = 0x020701A0,
.vs_int_stop_y__vs_ext_start_x = 0x01AC0024,
.vs_ext_stop_x__vs_ext_start_y = 0x020D01AC,
.vs_ext_stop_y = 0x00000006,
.avid_start_stop_x = 0x03480078,
.avid_start_stop_y = 0x02060024,
.fid_int_start_x__fid_int_start_y = 0x0001008A,
.fid_int_offset_y__fid_ext_start_x = 0x01AC0106,
.fid_ext_start_y__fid_ext_offset_y = 0x01060006,
.tvdetgp_int_start_stop_x = 0x00140001,
.tvdetgp_int_start_stop_y = 0x00010001,
.gen_ctrl = 0x00F90000,
};
enum venc_videomode {
VENC_MODE_UNKNOWN,
VENC_MODE_PAL,
VENC_MODE_NTSC,
};
static const struct drm_display_mode omap_dss_pal_mode = {
.hdisplay = 720,
.hsync_start = 732,
.hsync_end = 796,
.htotal = 864,
.vdisplay = 574,
.vsync_start = 579,
.vsync_end = 584,
.vtotal = 625,
.clock = 13500,
.flags = DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_NHSYNC |
DRM_MODE_FLAG_NVSYNC,
};
static const struct drm_display_mode omap_dss_ntsc_mode = {
.hdisplay = 720,
.hsync_start = 736,
.hsync_end = 800,
.htotal = 858,
.vdisplay = 482,
.vsync_start = 488,
.vsync_end = 494,
.vtotal = 525,
.clock = 13500,
.flags = DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_NHSYNC |
DRM_MODE_FLAG_NVSYNC,
};
struct venc_device {
struct platform_device *pdev;
void __iomem *base;
struct regulator *vdda_dac_reg;
struct dss_device *dss;
struct dss_debugfs_entry *debugfs;
struct clk *tv_dac_clk;
const struct venc_config *config;
enum omap_dss_venc_type type;
bool invert_polarity;
bool requires_tv_dac_clk;
struct omap_dss_device output;
struct drm_bridge bridge;
};
#define drm_bridge_to_venc(b) container_of(b, struct venc_device, bridge)
static inline void venc_write_reg(struct venc_device *venc, int idx, u32 val)
{
__raw_writel(val, venc->base + idx);
}
static inline u32 venc_read_reg(struct venc_device *venc, int idx)
{
u32 l = __raw_readl(venc->base + idx);
return l;
}
static void venc_write_config(struct venc_device *venc,
const struct venc_config *config)
{
DSSDBG("write venc conf\n");
venc_write_reg(venc, VENC_LLEN, config->llen);
venc_write_reg(venc, VENC_FLENS, config->flens);
venc_write_reg(venc, VENC_CC_CARR_WSS_CARR, config->cc_carr_wss_carr);
venc_write_reg(venc, VENC_C_PHASE, config->c_phase);
venc_write_reg(venc, VENC_GAIN_U, config->gain_u);
venc_write_reg(venc, VENC_GAIN_V, config->gain_v);
venc_write_reg(venc, VENC_GAIN_Y, config->gain_y);
venc_write_reg(venc, VENC_BLACK_LEVEL, config->black_level);
venc_write_reg(venc, VENC_BLANK_LEVEL, config->blank_level);
venc_write_reg(venc, VENC_M_CONTROL, config->m_control);
venc_write_reg(venc, VENC_BSTAMP_WSS_DATA, config->bstamp_wss_data);
venc_write_reg(venc, VENC_S_CARR, config->s_carr);
venc_write_reg(venc, VENC_L21__WC_CTL, config->l21__wc_ctl);
venc_write_reg(venc, VENC_SAVID__EAVID, config->savid__eavid);
venc_write_reg(venc, VENC_FLEN__FAL, config->flen__fal);
venc_write_reg(venc, VENC_LAL__PHASE_RESET, config->lal__phase_reset);
venc_write_reg(venc, VENC_HS_INT_START_STOP_X,
config->hs_int_start_stop_x);
venc_write_reg(venc, VENC_HS_EXT_START_STOP_X,
config->hs_ext_start_stop_x);
venc_write_reg(venc, VENC_VS_INT_START_X, config->vs_int_start_x);
venc_write_reg(venc, VENC_VS_INT_STOP_X__VS_INT_START_Y,
config->vs_int_stop_x__vs_int_start_y);
venc_write_reg(venc, VENC_VS_INT_STOP_Y__VS_EXT_START_X,
config->vs_int_stop_y__vs_ext_start_x);
venc_write_reg(venc, VENC_VS_EXT_STOP_X__VS_EXT_START_Y,
config->vs_ext_stop_x__vs_ext_start_y);
venc_write_reg(venc, VENC_VS_EXT_STOP_Y, config->vs_ext_stop_y);
venc_write_reg(venc, VENC_AVID_START_STOP_X, config->avid_start_stop_x);
venc_write_reg(venc, VENC_AVID_START_STOP_Y, config->avid_start_stop_y);
venc_write_reg(venc, VENC_FID_INT_START_X__FID_INT_START_Y,
config->fid_int_start_x__fid_int_start_y);
venc_write_reg(venc, VENC_FID_INT_OFFSET_Y__FID_EXT_START_X,
config->fid_int_offset_y__fid_ext_start_x);
venc_write_reg(venc, VENC_FID_EXT_START_Y__FID_EXT_OFFSET_Y,
config->fid_ext_start_y__fid_ext_offset_y);
venc_write_reg(venc, VENC_DAC_B__DAC_C,
venc_read_reg(venc, VENC_DAC_B__DAC_C));
venc_write_reg(venc, VENC_VIDOUT_CTRL, config->vidout_ctrl);
venc_write_reg(venc, VENC_HFLTR_CTRL, config->hfltr_ctrl);
venc_write_reg(venc, VENC_X_COLOR, config->x_color);
venc_write_reg(venc, VENC_LINE21, config->line21);
venc_write_reg(venc, VENC_LN_SEL, config->ln_sel);
venc_write_reg(venc, VENC_HTRIGGER_VTRIGGER, config->htrigger_vtrigger);
venc_write_reg(venc, VENC_TVDETGP_INT_START_STOP_X,
config->tvdetgp_int_start_stop_x);
venc_write_reg(venc, VENC_TVDETGP_INT_START_STOP_Y,
config->tvdetgp_int_start_stop_y);
venc_write_reg(venc, VENC_GEN_CTRL, config->gen_ctrl);
venc_write_reg(venc, VENC_F_CONTROL, config->f_control);
venc_write_reg(venc, VENC_SYNC_CTRL, config->sync_ctrl);
}
static void venc_reset(struct venc_device *venc)
{
int t = 1000;
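/* Request a reset and poll until the hardware clears the reset bit. */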
venc_write_reg(venc, VENC_F_CONTROL, 1<<8);
while (venc_read_reg(venc, VENC_F_CONTROL) & (1<<8)) {
if (--t == 0) {
DSSERR("Failed to reset venc\n");
return;
}
}
#ifdef CONFIG_OMAP2_DSS_SLEEP_AFTER_VENC_RESET
/* the magical sleep that makes things work */
/* XXX more info? Which bug does this circumvent? */
msleep(20);
#endif
}
static int venc_runtime_get(struct venc_device *venc)
{
int r;
DSSDBG("venc_runtime_get\n");
r = pm_runtime_get_sync(&venc->pdev->dev);
if (WARN_ON(r < 0)) {
pm_runtime_put_noidle(&venc->pdev->dev);
return r;
}
return 0;
}
static void venc_runtime_put(struct venc_device *venc)
{
int r;
DSSDBG("venc_runtime_put\n");
r = pm_runtime_put_sync(&venc->pdev->dev);
WARN_ON(r < 0 && r != -ENOSYS);
}
static int venc_power_on(struct venc_device *venc)
{
u32 l;
int r;
r = venc_runtime_get(venc);
if (r)
goto err0;
venc_reset(venc);
venc_write_config(venc, venc->config);
dss_set_venc_output(venc->dss, venc->type);
dss_set_dac_pwrdn_bgz(venc->dss, 1);
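/*
* Program VENC_OUTPUT_CONTROL: enable the outputs for composite or
* S-Video and set the polarity bit.
*/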
l = 0;
if (venc->type == OMAP_DSS_VENC_TYPE_COMPOSITE)
l |= 1 << 1;
else /* S-Video */
l |= (1 << 0) | (1 << 2);
if (venc->invert_polarity == false)
l |= 1 << 3;
venc_write_reg(venc, VENC_OUTPUT_CONTROL, l);
r = regulator_enable(venc->vdda_dac_reg);
if (r)
goto err1;
r = dss_mgr_enable(&venc->output);
if (r)
goto err2;
return 0;
err2:
regulator_disable(venc->vdda_dac_reg);
err1:
venc_write_reg(venc, VENC_OUTPUT_CONTROL, 0);
dss_set_dac_pwrdn_bgz(venc->dss, 0);
venc_runtime_put(venc);
err0:
return r;
}
static void venc_power_off(struct venc_device *venc)
{
venc_write_reg(venc, VENC_OUTPUT_CONTROL, 0);
dss_set_dac_pwrdn_bgz(venc->dss, 0);
dss_mgr_disable(&venc->output);
regulator_disable(venc->vdda_dac_reg);
venc_runtime_put(venc);
}
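/*
* Classify a mode as PAL or NTSC by comparing it against the reference
* timings; only interlaced modes are accepted.
*/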
static enum venc_videomode venc_get_videomode(const struct drm_display_mode *mode)
{
if (!(mode->flags & DRM_MODE_FLAG_INTERLACE))
return VENC_MODE_UNKNOWN;
if (mode->clock == omap_dss_pal_mode.clock &&
mode->hdisplay == omap_dss_pal_mode.hdisplay &&
mode->vdisplay == omap_dss_pal_mode.vdisplay)
return VENC_MODE_PAL;
if (mode->clock == omap_dss_ntsc_mode.clock &&
mode->hdisplay == omap_dss_ntsc_mode.hdisplay &&
mode->vdisplay == omap_dss_ntsc_mode.vdisplay)
return VENC_MODE_NTSC;
return VENC_MODE_UNKNOWN;
}
static int venc_dump_regs(struct seq_file *s, void *p)
{
struct venc_device *venc = s->private;
#define DUMPREG(venc, r) \
seq_printf(s, "%-35s %08x\n", #r, venc_read_reg(venc, r))
if (venc_runtime_get(venc))
return 0;
DUMPREG(venc, VENC_F_CONTROL);
DUMPREG(venc, VENC_VIDOUT_CTRL);
DUMPREG(venc, VENC_SYNC_CTRL);
DUMPREG(venc, VENC_LLEN);
DUMPREG(venc, VENC_FLENS);
DUMPREG(venc, VENC_HFLTR_CTRL);
DUMPREG(venc, VENC_CC_CARR_WSS_CARR);
DUMPREG(venc, VENC_C_PHASE);
DUMPREG(venc, VENC_GAIN_U);
DUMPREG(venc, VENC_GAIN_V);
DUMPREG(venc, VENC_GAIN_Y);
DUMPREG(venc, VENC_BLACK_LEVEL);
DUMPREG(venc, VENC_BLANK_LEVEL);
DUMPREG(venc, VENC_X_COLOR);
DUMPREG(venc, VENC_M_CONTROL);
DUMPREG(venc, VENC_BSTAMP_WSS_DATA);
DUMPREG(venc, VENC_S_CARR);
DUMPREG(venc, VENC_LINE21);
DUMPREG(venc, VENC_LN_SEL);
DUMPREG(venc, VENC_L21__WC_CTL);
DUMPREG(venc, VENC_HTRIGGER_VTRIGGER);
DUMPREG(venc, VENC_SAVID__EAVID);
DUMPREG(venc, VENC_FLEN__FAL);
DUMPREG(venc, VENC_LAL__PHASE_RESET);
DUMPREG(venc, VENC_HS_INT_START_STOP_X);
DUMPREG(venc, VENC_HS_EXT_START_STOP_X);
DUMPREG(venc, VENC_VS_INT_START_X);
DUMPREG(venc, VENC_VS_INT_STOP_X__VS_INT_START_Y);
DUMPREG(venc, VENC_VS_INT_STOP_Y__VS_EXT_START_X);
DUMPREG(venc, VENC_VS_EXT_STOP_X__VS_EXT_START_Y);
DUMPREG(venc, VENC_VS_EXT_STOP_Y);
DUMPREG(venc, VENC_AVID_START_STOP_X);
DUMPREG(venc, VENC_AVID_START_STOP_Y);
DUMPREG(venc, VENC_FID_INT_START_X__FID_INT_START_Y);
DUMPREG(venc, VENC_FID_INT_OFFSET_Y__FID_EXT_START_X);
DUMPREG(venc, VENC_FID_EXT_START_Y__FID_EXT_OFFSET_Y);
DUMPREG(venc, VENC_TVDETGP_INT_START_STOP_X);
DUMPREG(venc, VENC_TVDETGP_INT_START_STOP_Y);
DUMPREG(venc, VENC_GEN_CTRL);
DUMPREG(venc, VENC_OUTPUT_CONTROL);
DUMPREG(venc, VENC_OUTPUT_TEST);
venc_runtime_put(venc);
#undef DUMPREG
return 0;
}
static int venc_get_clocks(struct venc_device *venc)
{
struct clk *clk;
if (venc->requires_tv_dac_clk) {
clk = devm_clk_get(&venc->pdev->dev, "tv_dac_clk");
if (IS_ERR(clk)) {
DSSERR("can't get tv_dac_clk\n");
return PTR_ERR(clk);
}
} else {
clk = NULL;
}
venc->tv_dac_clk = clk;
return 0;
}
/* -----------------------------------------------------------------------------
* DRM Bridge Operations
*/
static int venc_bridge_attach(struct drm_bridge *bridge,
enum drm_bridge_attach_flags flags)
{
struct venc_device *venc = drm_bridge_to_venc(bridge);
if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
return -EINVAL;
return drm_bridge_attach(bridge->encoder, venc->output.next_bridge,
bridge, flags);
}
static enum drm_mode_status
venc_bridge_mode_valid(struct drm_bridge *bridge,
const struct drm_display_info *info,
const struct drm_display_mode *mode)
{
switch (venc_get_videomode(mode)) {
case VENC_MODE_PAL:
case VENC_MODE_NTSC:
return MODE_OK;
default:
return MODE_BAD;
}
}
static bool venc_bridge_mode_fixup(struct drm_bridge *bridge,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
const struct drm_display_mode *venc_mode;
switch (venc_get_videomode(adjusted_mode)) {
case VENC_MODE_PAL:
venc_mode = &omap_dss_pal_mode;
break;
case VENC_MODE_NTSC:
venc_mode = &omap_dss_ntsc_mode;
break;
default:
return false;
}
drm_mode_copy(adjusted_mode, venc_mode);
drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
drm_mode_set_name(adjusted_mode);
return true;
}
static void venc_bridge_mode_set(struct drm_bridge *bridge,
const struct drm_display_mode *mode,
const struct drm_display_mode *adjusted_mode)
{
struct venc_device *venc = drm_bridge_to_venc(bridge);
enum venc_videomode venc_mode = venc_get_videomode(adjusted_mode);
switch (venc_mode) {
default:
WARN_ON_ONCE(1);
fallthrough;
case VENC_MODE_PAL:
venc->config = &venc_config_pal_trm;
break;
case VENC_MODE_NTSC:
venc->config = &venc_config_ntsc_trm;
break;
}
dispc_set_tv_pclk(venc->dss->dispc, 13500000);
}
static void venc_bridge_enable(struct drm_bridge *bridge)
{
struct venc_device *venc = drm_bridge_to_venc(bridge);
venc_power_on(venc);
}
static void venc_bridge_disable(struct drm_bridge *bridge)
{
struct venc_device *venc = drm_bridge_to_venc(bridge);
venc_power_off(venc);
}
static int venc_bridge_get_modes(struct drm_bridge *bridge,
struct drm_connector *connector)
{
static const struct drm_display_mode *modes[] = {
&omap_dss_pal_mode,
&omap_dss_ntsc_mode,
};
unsigned int i;
for (i = 0; i < ARRAY_SIZE(modes); ++i) {
struct drm_display_mode *mode;
mode = drm_mode_duplicate(connector->dev, modes[i]);
if (!mode)
return i;
mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
drm_mode_set_name(mode);
drm_mode_probed_add(connector, mode);
}
return ARRAY_SIZE(modes);
}
static const struct drm_bridge_funcs venc_bridge_funcs = {
.attach = venc_bridge_attach,
.mode_valid = venc_bridge_mode_valid,
.mode_fixup = venc_bridge_mode_fixup,
.mode_set = venc_bridge_mode_set,
.enable = venc_bridge_enable,
.disable = venc_bridge_disable,
.get_modes = venc_bridge_get_modes,
};
static void venc_bridge_init(struct venc_device *venc)
{
venc->bridge.funcs = &venc_bridge_funcs;
venc->bridge.of_node = venc->pdev->dev.of_node;
venc->bridge.ops = DRM_BRIDGE_OP_MODES;
venc->bridge.type = DRM_MODE_CONNECTOR_SVIDEO;
venc->bridge.interlace_allowed = true;
drm_bridge_add(&venc->bridge);
}
static void venc_bridge_cleanup(struct venc_device *venc)
{
drm_bridge_remove(&venc->bridge);
}
/* -----------------------------------------------------------------------------
* Component Bind & Unbind
*/
static int venc_bind(struct device *dev, struct device *master, void *data)
{
struct dss_device *dss = dss_get_device(master);
struct venc_device *venc = dev_get_drvdata(dev);
u8 rev_id;
int r;
venc->dss = dss;
r = venc_runtime_get(venc);
if (r)
return r;
rev_id = (u8)(venc_read_reg(venc, VENC_REV_ID) & 0xff);
dev_dbg(dev, "OMAP VENC rev %d\n", rev_id);
venc_runtime_put(venc);
venc->debugfs = dss_debugfs_create_file(dss, "venc", venc_dump_regs,
venc);
return 0;
}
static void venc_unbind(struct device *dev, struct device *master, void *data)
{
struct venc_device *venc = dev_get_drvdata(dev);
dss_debugfs_remove_file(venc->debugfs);
}
static const struct component_ops venc_component_ops = {
.bind = venc_bind,
.unbind = venc_unbind,
};
/* -----------------------------------------------------------------------------
* Probe & Remove, Suspend & Resume
*/
static int venc_init_output(struct venc_device *venc)
{
struct omap_dss_device *out = &venc->output;
int r;
venc_bridge_init(venc);
out->dev = &venc->pdev->dev;
out->id = OMAP_DSS_OUTPUT_VENC;
out->type = OMAP_DISPLAY_TYPE_VENC;
out->name = "venc.0";
out->dispc_channel = OMAP_DSS_CHANNEL_DIGIT;
out->of_port = 0;
r = omapdss_device_init_output(out, &venc->bridge);
if (r < 0) {
venc_bridge_cleanup(venc);
return r;
}
omapdss_device_register(out);
return 0;
}
static void venc_uninit_output(struct venc_device *venc)
{
omapdss_device_unregister(&venc->output);
omapdss_device_cleanup_output(&venc->output);
venc_bridge_cleanup(venc);
}
static int venc_probe_of(struct venc_device *venc)
{
struct device_node *node = venc->pdev->dev.of_node;
struct device_node *ep;
u32 channels;
int r;
ep = of_graph_get_endpoint_by_regs(node, 0, 0);
if (!ep)
return 0;
venc->invert_polarity = of_property_read_bool(ep, "ti,invert-polarity");
r = of_property_read_u32(ep, "ti,channels", &channels);
if (r) {
dev_err(&venc->pdev->dev,
"failed to read property 'ti,channels': %d\n", r);
goto err;
}
switch (channels) {
case 1:
venc->type = OMAP_DSS_VENC_TYPE_COMPOSITE;
break;
case 2:
venc->type = OMAP_DSS_VENC_TYPE_SVIDEO;
break;
default:
dev_err(&venc->pdev->dev, "bad channel property '%d'\n",
channels);
r = -EINVAL;
goto err;
}
of_node_put(ep);
return 0;
err:
of_node_put(ep);
return r;
}
static const struct soc_device_attribute venc_soc_devices[] = {
{ .machine = "OMAP3[45]*" },
{ .machine = "AM35*" },
{ /* sentinel */ }
};
static int venc_probe(struct platform_device *pdev)
{
struct venc_device *venc;
int r;
venc = kzalloc(sizeof(*venc), GFP_KERNEL);
if (!venc)
return -ENOMEM;
venc->pdev = pdev;
platform_set_drvdata(pdev, venc);
/* The OMAP34xx, OMAP35xx and AM35xx VENC require the TV DAC clock. */
if (soc_device_match(venc_soc_devices))
venc->requires_tv_dac_clk = true;
venc->config = &venc_config_pal_trm;
venc->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(venc->base)) {
r = PTR_ERR(venc->base);
goto err_free;
}
venc->vdda_dac_reg = devm_regulator_get(&pdev->dev, "vdda");
if (IS_ERR(venc->vdda_dac_reg)) {
r = PTR_ERR(venc->vdda_dac_reg);
if (r != -EPROBE_DEFER)
DSSERR("can't get VDDA_DAC regulator\n");
goto err_free;
}
r = venc_get_clocks(venc);
if (r)
goto err_free;
r = venc_probe_of(venc);
if (r)
goto err_free;
pm_runtime_enable(&pdev->dev);
r = venc_init_output(venc);
if (r)
goto err_pm_disable;
r = component_add(&pdev->dev, &venc_component_ops);
if (r)
goto err_uninit_output;
return 0;
err_uninit_output:
venc_uninit_output(venc);
err_pm_disable:
pm_runtime_disable(&pdev->dev);
err_free:
kfree(venc);
return r;
}
static void venc_remove(struct platform_device *pdev)
{
struct venc_device *venc = platform_get_drvdata(pdev);
component_del(&pdev->dev, &venc_component_ops);
venc_uninit_output(venc);
pm_runtime_disable(&pdev->dev);
kfree(venc);
}
static __maybe_unused int venc_runtime_suspend(struct device *dev)
{
struct venc_device *venc = dev_get_drvdata(dev);
if (venc->tv_dac_clk)
clk_disable_unprepare(venc->tv_dac_clk);
return 0;
}
static __maybe_unused int venc_runtime_resume(struct device *dev)
{
struct venc_device *venc = dev_get_drvdata(dev);
if (venc->tv_dac_clk)
clk_prepare_enable(venc->tv_dac_clk);
return 0;
}
static const struct dev_pm_ops venc_pm_ops = {
SET_RUNTIME_PM_OPS(venc_runtime_suspend, venc_runtime_resume, NULL)
SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
};
static const struct of_device_id venc_of_match[] = {
{ .compatible = "ti,omap2-venc", },
{ .compatible = "ti,omap3-venc", },
{ .compatible = "ti,omap4-venc", },
{},
};
struct platform_driver omap_venchw_driver = {
.probe = venc_probe,
.remove_new = venc_remove,
.driver = {
.name = "omapdss_venc",
.pm = &venc_pm_ops,
.of_match_table = venc_of_match,
.suppress_bind_attrs = true,
},
};
| linux-master | drivers/gpu/drm/omapdrm/dss/venc.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2012 Texas Instruments Incorporated - https://www.ti.com/
* Author: Archit Taneja <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_graph.h>
#include <drm/drm_bridge.h>
#include <drm/drm_panel.h>
#include "dss.h"
#include "omapdss.h"
int omapdss_device_init_output(struct omap_dss_device *out,
struct drm_bridge *local_bridge)
{
struct device_node *remote_node;
int ret;
remote_node = of_graph_get_remote_node(out->dev->of_node,
out->of_port, 0);
if (!remote_node) {
dev_dbg(out->dev, "failed to find video sink\n");
return 0;
}
out->bridge = of_drm_find_bridge(remote_node);
out->panel = of_drm_find_panel(remote_node);
if (IS_ERR(out->panel))
out->panel = NULL;
of_node_put(remote_node);
if (out->panel) {
struct drm_bridge *bridge;
bridge = drm_panel_bridge_add(out->panel);
if (IS_ERR(bridge)) {
dev_err(out->dev,
"unable to create panel bridge (%ld)\n",
PTR_ERR(bridge));
ret = PTR_ERR(bridge);
goto error;
}
out->bridge = bridge;
}
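/*
* If the encoder has an internal bridge, put it first in the chain and
* keep the external (or panel) bridge as the next one.
*/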
if (local_bridge) {
if (!out->bridge) {
ret = -EPROBE_DEFER;
goto error;
}
out->next_bridge = out->bridge;
out->bridge = local_bridge;
}
if (!out->bridge) {
ret = -EPROBE_DEFER;
goto error;
}
return 0;
error:
omapdss_device_cleanup_output(out);
return ret;
}
void omapdss_device_cleanup_output(struct omap_dss_device *out)
{
if (out->bridge && out->panel)
drm_panel_bridge_remove(out->next_bridge ?
out->next_bridge : out->bridge);
}
void dss_mgr_set_timings(struct omap_dss_device *dssdev,
const struct videomode *vm)
{
omap_crtc_dss_set_timings(dssdev->dss->mgr_ops_priv,
dssdev->dispc_channel, vm);
}
void dss_mgr_set_lcd_config(struct omap_dss_device *dssdev,
const struct dss_lcd_mgr_config *config)
{
omap_crtc_dss_set_lcd_config(dssdev->dss->mgr_ops_priv,
dssdev->dispc_channel, config);
}
int dss_mgr_enable(struct omap_dss_device *dssdev)
{
return omap_crtc_dss_enable(dssdev->dss->mgr_ops_priv,
dssdev->dispc_channel);
}
void dss_mgr_disable(struct omap_dss_device *dssdev)
{
omap_crtc_dss_disable(dssdev->dss->mgr_ops_priv,
dssdev->dispc_channel);
}
void dss_mgr_start_update(struct omap_dss_device *dssdev)
{
omap_crtc_dss_start_update(dssdev->dss->mgr_ops_priv,
dssdev->dispc_channel);
}
int dss_mgr_register_framedone_handler(struct omap_dss_device *dssdev,
void (*handler)(void *), void *data)
{
struct dss_device *dss = dssdev->dss;
return omap_crtc_dss_register_framedone(dss->mgr_ops_priv,
dssdev->dispc_channel,
handler, data);
}
void dss_mgr_unregister_framedone_handler(struct omap_dss_device *dssdev,
void (*handler)(void *), void *data)
{
struct dss_device *dss = dssdev->dss;
omap_crtc_dss_unregister_framedone(dss->mgr_ops_priv,
dssdev->dispc_channel,
handler, data);
}
| linux-master | drivers/gpu/drm/omapdrm/dss/output.c |
// SPDX-License-Identifier: GPL-2.0
#define DSS_SUBSYS_NAME "HDMI"
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/of.h>
#include "omapdss.h"
#include "hdmi.h"
int hdmi_parse_lanes_of(struct platform_device *pdev, struct device_node *ep,
struct hdmi_phy_data *phy)
{
struct property *prop;
int r, len;
prop = of_find_property(ep, "lanes", &len);
if (prop) {
u32 lanes[8];
if (len / sizeof(u32) != ARRAY_SIZE(lanes)) {
dev_err(&pdev->dev, "bad number of lanes\n");
return -EINVAL;
}
r = of_property_read_u32_array(ep, "lanes", lanes,
ARRAY_SIZE(lanes));
if (r) {
dev_err(&pdev->dev, "failed to read lane data\n");
return r;
}
r = hdmi_phy_parse_lanes(phy, lanes);
if (r) {
dev_err(&pdev->dev, "failed to parse lane data\n");
return r;
}
} else {
static const u32 default_lanes[] = { 0, 1, 2, 3, 4, 5, 6, 7 };
r = hdmi_phy_parse_lanes(phy, default_lanes);
if (WARN_ON(r)) {
dev_err(&pdev->dev, "failed to parse lane data\n");
return r;
}
}
return 0;
}
int hdmi_compute_acr(u32 pclk, u32 sample_freq, u32 *n, u32 *cts)
{
u32 deep_color;
bool deep_color_correct = false;
if (n == NULL || cts == NULL)
return -EINVAL;
/* TODO: When implemented, query deep color mode here. */
deep_color = 100;
/*
* When using deep color, the default N value (as in the HDMI
* specification) yields a non-integer CTS. Hence, we
* modify it while keeping the restrictions described in
* section 7.2.1 of the HDMI 1.4a specification.
*/
switch (sample_freq) {
case 32000:
case 48000:
case 96000:
case 192000:
if (deep_color == 125)
if (pclk == 27027000 || pclk == 74250000)
deep_color_correct = true;
if (deep_color == 150)
if (pclk == 27027000)
deep_color_correct = true;
break;
case 44100:
case 88200:
case 176400:
if (deep_color == 125)
if (pclk == 27027000)
deep_color_correct = true;
break;
default:
return -EINVAL;
}
if (deep_color_correct) {
switch (sample_freq) {
case 32000:
*n = 8192;
break;
case 44100:
*n = 12544;
break;
case 48000:
*n = 8192;
break;
case 88200:
*n = 25088;
break;
case 96000:
*n = 16384;
break;
case 176400:
*n = 50176;
break;
case 192000:
*n = 32768;
break;
default:
return -EINVAL;
}
} else {
switch (sample_freq) {
case 32000:
*n = 4096;
break;
case 44100:
*n = 6272;
break;
case 48000:
*n = 6144;
break;
case 88200:
*n = 12544;
break;
case 96000:
*n = 12288;
break;
case 176400:
*n = 25088;
break;
case 192000:
*n = 24576;
break;
default:
return -EINVAL;
}
}
/* Calculate CTS. See HDMI 1.3a or 1.4a specifications */
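/* CTS = (pclk * N) / (128 * fs), with deep_color given as a percentage. */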
*cts = (pclk/1000) * (*n / 128) * deep_color / (sample_freq / 10);
return 0;
}
| linux-master | drivers/gpu/drm/omapdrm/dss/hdmi_common.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2011 Texas Instruments Incorporated - https://www.ti.com/
* Author: Chandrabhanu Mahapatra <[email protected]>
*/
#include <linux/kernel.h>
#include "omapdss.h"
#include "dispc.h"
static const struct dispc_coef coef3_M8[8] = {
{ 0, 0, 128, 0, 0 },
{ 0, -4, 123, 9, 0 },
{ 0, -4, 108, 24, 0 },
{ 0, -2, 87, 43, 0 },
{ 0, 64, 64, 0, 0 },
{ 0, 43, 87, -2, 0 },
{ 0, 24, 108, -4, 0 },
{ 0, 9, 123, -4, 0 },
};
static const struct dispc_coef coef3_M9[8] = {
{ 0, 6, 116, 6, 0 },
{ 0, 0, 112, 16, 0 },
{ 0, -2, 100, 30, 0 },
{ 0, -2, 83, 47, 0 },
{ 0, 64, 64, 0, 0 },
{ 0, 47, 83, -2, 0 },
{ 0, 30, 100, -2, 0 },
{ 0, 16, 112, 0, 0 },
};
static const struct dispc_coef coef3_M10[8] = {
{ 0, 10, 108, 10, 0 },
{ 0, 3, 104, 21, 0 },
{ 0, 0, 94, 34, 0 },
{ 0, -1, 80, 49, 0 },
{ 0, 64, 64, 0, 0 },
{ 0, 49, 80, -1, 0 },
{ 0, 34, 94, 0, 0 },
{ 0, 21, 104, 3, 0 },
};
static const struct dispc_coef coef3_M11[8] = {
{ 0, 14, 100, 14, 0 },
{ 0, 6, 98, 24, 0 },
{ 0, 2, 90, 36, 0 },
{ 0, 0, 78, 50, 0 },
{ 0, 64, 64, 0, 0 },
{ 0, 50, 78, 0, 0 },
{ 0, 36, 90, 2, 0 },
{ 0, 24, 98, 6, 0 },
};
static const struct dispc_coef coef3_M12[8] = {
{ 0, 16, 96, 16, 0 },
{ 0, 9, 93, 26, 0 },
{ 0, 4, 86, 38, 0 },
{ 0, 1, 76, 51, 0 },
{ 0, 64, 64, 0, 0 },
{ 0, 51, 76, 1, 0 },
{ 0, 38, 86, 4, 0 },
{ 0, 26, 93, 9, 0 },
};
static const struct dispc_coef coef3_M13[8] = {
{ 0, 18, 92, 18, 0 },
{ 0, 10, 90, 28, 0 },
{ 0, 5, 83, 40, 0 },
{ 0, 1, 75, 52, 0 },
{ 0, 64, 64, 0, 0 },
{ 0, 52, 75, 1, 0 },
{ 0, 40, 83, 5, 0 },
{ 0, 28, 90, 10, 0 },
};
static const struct dispc_coef coef3_M14[8] = {
{ 0, 20, 88, 20, 0 },
{ 0, 12, 86, 30, 0 },
{ 0, 6, 81, 41, 0 },
{ 0, 2, 74, 52, 0 },
{ 0, 64, 64, 0, 0 },
{ 0, 52, 74, 2, 0 },
{ 0, 41, 81, 6, 0 },
{ 0, 30, 86, 12, 0 },
};
static const struct dispc_coef coef3_M16[8] = {
{ 0, 22, 84, 22, 0 },
{ 0, 14, 82, 32, 0 },
{ 0, 8, 78, 42, 0 },
{ 0, 3, 72, 53, 0 },
{ 0, 64, 64, 0, 0 },
{ 0, 53, 72, 3, 0 },
{ 0, 42, 78, 8, 0 },
{ 0, 32, 82, 14, 0 },
};
static const struct dispc_coef coef3_M19[8] = {
{ 0, 24, 80, 24, 0 },
{ 0, 16, 79, 33, 0 },
{ 0, 9, 76, 43, 0 },
{ 0, 4, 70, 54, 0 },
{ 0, 64, 64, 0, 0 },
{ 0, 54, 70, 4, 0 },
{ 0, 43, 76, 9, 0 },
{ 0, 33, 79, 16, 0 },
};
static const struct dispc_coef coef3_M22[8] = {
{ 0, 25, 78, 25, 0 },
{ 0, 17, 77, 34, 0 },
{ 0, 10, 74, 44, 0 },
{ 0, 5, 69, 54, 0 },
{ 0, 64, 64, 0, 0 },
{ 0, 54, 69, 5, 0 },
{ 0, 44, 74, 10, 0 },
{ 0, 34, 77, 17, 0 },
};
static const struct dispc_coef coef3_M26[8] = {
{ 0, 26, 76, 26, 0 },
{ 0, 19, 74, 35, 0 },
{ 0, 11, 72, 45, 0 },
{ 0, 5, 69, 54, 0 },
{ 0, 64, 64, 0, 0 },
{ 0, 54, 69, 5, 0 },
{ 0, 45, 72, 11, 0 },
{ 0, 35, 74, 19, 0 },
};
static const struct dispc_coef coef3_M32[8] = {
{ 0, 27, 74, 27, 0 },
{ 0, 19, 73, 36, 0 },
{ 0, 12, 71, 45, 0 },
{ 0, 6, 68, 54, 0 },
{ 0, 64, 64, 0, 0 },
{ 0, 54, 68, 6, 0 },
{ 0, 45, 71, 12, 0 },
{ 0, 36, 73, 19, 0 },
};
static const struct dispc_coef coef5_M8[8] = {
{ 0, 0, 128, 0, 0 },
{ -2, 14, 125, -10, 1 },
{ -6, 33, 114, -15, 2 },
{ -10, 55, 98, -16, 1 },
{ 0, -14, 78, 78, -14 },
{ 1, -16, 98, 55, -10 },
{ 2, -15, 114, 33, -6 },
{ 1, -10, 125, 14, -2 },
};
static const struct dispc_coef coef5_M9[8] = {
{ -3, 10, 114, 10, -3 },
{ -6, 24, 111, 0, -1 },
{ -8, 40, 103, -7, 0 },
{ -11, 58, 91, -11, 1 },
{ 0, -12, 76, 76, -12 },
{ 1, -11, 91, 58, -11 },
{ 0, -7, 103, 40, -8 },
{ -1, 0, 111, 24, -6 },
};
static const struct dispc_coef coef5_M10[8] = {
{ -4, 18, 100, 18, -4 },
{ -6, 30, 99, 8, -3 },
{ -8, 44, 93, 0, -1 },
{ -9, 58, 84, -5, 0 },
{ 0, -8, 72, 72, -8 },
{ 0, -5, 84, 58, -9 },
{ -1, 0, 93, 44, -8 },
{ -3, 8, 99, 30, -6 },
};
static const struct dispc_coef coef5_M11[8] = {
{ -5, 23, 92, 23, -5 },
{ -6, 34, 90, 13, -3 },
{ -6, 45, 85, 6, -2 },
{ -6, 57, 78, 0, -1 },
{ 0, -4, 68, 68, -4 },
{ -1, 0, 78, 57, -6 },
{ -2, 6, 85, 45, -6 },
{ -3, 13, 90, 34, -6 },
};
static const struct dispc_coef coef5_M12[8] = {
{ -4, 26, 84, 26, -4 },
{ -5, 36, 82, 18, -3 },
{ -4, 46, 78, 10, -2 },
{ -3, 55, 72, 5, -1 },
{ 0, 0, 64, 64, 0 },
{ -1, 5, 72, 55, -3 },
{ -2, 10, 78, 46, -4 },
{ -3, 18, 82, 36, -5 },
};
static const struct dispc_coef coef5_M13[8] = {
{ -3, 28, 78, 28, -3 },
{ -3, 37, 76, 21, -3 },
{ -2, 45, 73, 14, -2 },
{ 0, 53, 68, 8, -1 },
{ 0, 3, 61, 61, 3 },
{ -1, 8, 68, 53, 0 },
{ -2, 14, 73, 45, -2 },
{ -3, 21, 76, 37, -3 },
};
static const struct dispc_coef coef5_M14[8] = {
{ -2, 30, 72, 30, -2 },
{ -1, 37, 71, 23, -2 },
{ 0, 45, 69, 16, -2 },
{ 3, 52, 64, 10, -1 },
{ 0, 6, 58, 58, 6 },
{ -1, 10, 64, 52, 3 },
{ -2, 16, 69, 45, 0 },
{ -2, 23, 71, 37, -1 },
};
static const struct dispc_coef coef5_M16[8] = {
{ 0, 31, 66, 31, 0 },
{ 1, 38, 65, 25, -1 },
{ 3, 44, 62, 20, -1 },
{ 6, 49, 59, 14, 0 },
{ 0, 10, 54, 54, 10 },
{ 0, 14, 59, 49, 6 },
{ -1, 20, 62, 44, 3 },
{ -1, 25, 65, 38, 1 },
};
static const struct dispc_coef coef5_M19[8] = {
{ 3, 32, 58, 32, 3 },
{ 4, 38, 58, 27, 1 },
{ 7, 42, 55, 23, 1 },
{ 10, 46, 54, 18, 0 },
{ 0, 14, 50, 50, 14 },
{ 0, 18, 54, 46, 10 },
{ 1, 23, 55, 42, 7 },
{ 1, 27, 58, 38, 4 },
};
static const struct dispc_coef coef5_M22[8] = {
{ 4, 33, 54, 33, 4 },
{ 6, 37, 54, 28, 3 },
{ 9, 41, 53, 24, 1 },
{ 12, 45, 51, 20, 0 },
{ 0, 16, 48, 48, 16 },
{ 0, 20, 51, 45, 12 },
{ 1, 24, 53, 41, 9 },
{ 3, 28, 54, 37, 6 },
};
static const struct dispc_coef coef5_M26[8] = {
{ 6, 33, 50, 33, 6 },
{ 8, 36, 51, 29, 4 },
{ 11, 40, 50, 25, 2 },
{ 14, 43, 48, 22, 1 },
{ 0, 18, 46, 46, 18 },
{ 1, 22, 48, 43, 14 },
{ 2, 25, 50, 40, 11 },
{ 4, 29, 51, 36, 8 },
};
static const struct dispc_coef coef5_M32[8] = {
{ 7, 33, 48, 33, 7 },
{ 10, 36, 48, 29, 5 },
{ 13, 39, 47, 26, 3 },
{ 16, 42, 46, 23, 1 },
{ 0, 19, 45, 45, 19 },
{ 1, 23, 46, 42, 16 },
{ 3, 26, 47, 39, 13 },
{ 5, 29, 48, 36, 10 },
};
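/*
* Select the 3-tap or 5-tap coefficient table whose decimation range
* covers the scaling increment; inc / 128 gives the decimation factor M.
*/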
const struct dispc_coef *dispc_ovl_get_scale_coef(int inc, int five_taps)
{
int i;
static const struct {
int Mmin;
int Mmax;
const struct dispc_coef *coef_3;
const struct dispc_coef *coef_5;
} coefs[] = {
{ 27, 32, coef3_M32, coef5_M32 },
{ 23, 26, coef3_M26, coef5_M26 },
{ 20, 22, coef3_M22, coef5_M22 },
{ 17, 19, coef3_M19, coef5_M19 },
{ 15, 16, coef3_M16, coef5_M16 },
{ 14, 14, coef3_M14, coef5_M14 },
{ 13, 13, coef3_M13, coef5_M13 },
{ 12, 12, coef3_M12, coef5_M12 },
{ 11, 11, coef3_M11, coef5_M11 },
{ 10, 10, coef3_M10, coef5_M10 },
{ 9, 9, coef3_M9, coef5_M9 },
{ 4, 8, coef3_M8, coef5_M8 },
/*
* When upscaling more than two times, blockiness and outlines
* around the image are observed when M8 tables are used. M11,
* M16 and M19 tables are used to prevent this.
*/
{ 3, 3, coef3_M11, coef5_M11 },
{ 2, 2, coef3_M16, coef5_M16 },
{ 0, 1, coef3_M19, coef5_M19 },
};
inc /= 128;
for (i = 0; i < ARRAY_SIZE(coefs); ++i)
if (inc >= coefs[i].Mmin && inc <= coefs[i].Mmax)
return five_taps ? coefs[i].coef_5 : coefs[i].coef_3;
return NULL;
}
| linux-master | drivers/gpu/drm/omapdrm/dss/dispc_coefs.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2014 Texas Instruments Incorporated - https://www.ti.com/
*/
#define DSS_SUBSYS_NAME "PLL"
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/regulator/consumer.h>
#include <linux/sched.h>
#include "omapdss.h"
#include "dss.h"
#define PLL_CONTROL 0x0000
#define PLL_STATUS 0x0004
#define PLL_GO 0x0008
#define PLL_CONFIGURATION1 0x000C
#define PLL_CONFIGURATION2 0x0010
#define PLL_CONFIGURATION3 0x0014
#define PLL_SSC_CONFIGURATION1 0x0018
#define PLL_SSC_CONFIGURATION2 0x001C
#define PLL_CONFIGURATION4 0x0020
int dss_pll_register(struct dss_device *dss, struct dss_pll *pll)
{
int i;
for (i = 0; i < ARRAY_SIZE(dss->plls); ++i) {
if (!dss->plls[i]) {
dss->plls[i] = pll;
pll->dss = dss;
return 0;
}
}
return -EBUSY;
}
void dss_pll_unregister(struct dss_pll *pll)
{
struct dss_device *dss = pll->dss;
int i;
for (i = 0; i < ARRAY_SIZE(dss->plls); ++i) {
if (dss->plls[i] == pll) {
dss->plls[i] = NULL;
pll->dss = NULL;
return;
}
}
}
struct dss_pll *dss_pll_find(struct dss_device *dss, const char *name)
{
int i;
for (i = 0; i < ARRAY_SIZE(dss->plls); ++i) {
if (dss->plls[i] && strcmp(dss->plls[i]->name, name) == 0)
return dss->plls[i];
}
return NULL;
}
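/*
* The same clock-source IDs are used for both DSI and video PLLs, so
* look the PLL up under either name.
*/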
struct dss_pll *dss_pll_find_by_src(struct dss_device *dss,
enum dss_clk_source src)
{
struct dss_pll *pll;
switch (src) {
default:
case DSS_CLK_SRC_FCK:
return NULL;
case DSS_CLK_SRC_HDMI_PLL:
return dss_pll_find(dss, "hdmi");
case DSS_CLK_SRC_PLL1_1:
case DSS_CLK_SRC_PLL1_2:
case DSS_CLK_SRC_PLL1_3:
pll = dss_pll_find(dss, "dsi0");
if (!pll)
pll = dss_pll_find(dss, "video0");
return pll;
case DSS_CLK_SRC_PLL2_1:
case DSS_CLK_SRC_PLL2_2:
case DSS_CLK_SRC_PLL2_3:
pll = dss_pll_find(dss, "dsi1");
if (!pll)
pll = dss_pll_find(dss, "video1");
return pll;
}
}
unsigned int dss_pll_get_clkout_idx_for_src(enum dss_clk_source src)
{
switch (src) {
case DSS_CLK_SRC_HDMI_PLL:
return 0;
case DSS_CLK_SRC_PLL1_1:
case DSS_CLK_SRC_PLL2_1:
return 0;
case DSS_CLK_SRC_PLL1_2:
case DSS_CLK_SRC_PLL2_2:
return 1;
case DSS_CLK_SRC_PLL1_3:
case DSS_CLK_SRC_PLL2_3:
return 2;
default:
return 0;
}
}
int dss_pll_enable(struct dss_pll *pll)
{
int r;
r = clk_prepare_enable(pll->clkin);
if (r)
return r;
if (pll->regulator) {
r = regulator_enable(pll->regulator);
if (r)
goto err_reg;
}
r = pll->ops->enable(pll);
if (r)
goto err_enable;
return 0;
err_enable:
if (pll->regulator)
regulator_disable(pll->regulator);
err_reg:
clk_disable_unprepare(pll->clkin);
return r;
}
void dss_pll_disable(struct dss_pll *pll)
{
pll->ops->disable(pll);
if (pll->regulator)
regulator_disable(pll->regulator);
clk_disable_unprepare(pll->clkin);
memset(&pll->cinfo, 0, sizeof(pll->cinfo));
}
int dss_pll_set_config(struct dss_pll *pll, const struct dss_pll_clock_info *cinfo)
{
int r;
r = pll->ops->set_config(pll, cinfo);
if (r)
return r;
pll->cinfo = *cinfo;
return 0;
}
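/*
* Walk the valid HSDIV divider values and pass each candidate output
* rate to func() until it accepts one.
*/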
bool dss_pll_hsdiv_calc_a(const struct dss_pll *pll, unsigned long clkdco,
unsigned long out_min, unsigned long out_max,
dss_hsdiv_calc_func func, void *data)
{
const struct dss_pll_hw *hw = pll->hw;
int m, m_start, m_stop;
unsigned long out;
out_min = out_min ? out_min : 1;
out_max = out_max ? out_max : ULONG_MAX;
m_start = max(DIV_ROUND_UP(clkdco, out_max), 1ul);
m_stop = min((unsigned)(clkdco / out_min), hw->mX_max);
for (m = m_start; m <= m_stop; ++m) {
out = clkdco / m;
if (func(m, out, data))
return true;
}
return false;
}
/*
* clkdco = clkin / n * m * 2
* clkoutX = clkdco / mX
*/
bool dss_pll_calc_a(const struct dss_pll *pll, unsigned long clkin,
unsigned long pll_min, unsigned long pll_max,
dss_pll_calc_func func, void *data)
{
const struct dss_pll_hw *hw = pll->hw;
int n, n_start, n_stop, n_inc;
int m, m_start, m_stop, m_inc;
unsigned long fint, clkdco;
unsigned long pll_hw_max;
unsigned long fint_hw_min, fint_hw_max;
pll_hw_max = hw->clkdco_max;
fint_hw_min = hw->fint_min;
fint_hw_max = hw->fint_max;
n_start = max(DIV_ROUND_UP(clkin, fint_hw_max), 1ul);
n_stop = min((unsigned)(clkin / fint_hw_min), hw->n_max);
n_inc = 1;
if (n_start > n_stop)
return false;
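/* Erratum i886: walk the divider range from high to low. */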
if (hw->errata_i886) {
swap(n_start, n_stop);
n_inc = -1;
}
pll_max = pll_max ? pll_max : ULONG_MAX;
for (n = n_start; n != n_stop; n += n_inc) {
fint = clkin / n;
m_start = max(DIV_ROUND_UP(DIV_ROUND_UP(pll_min, fint), 2),
1ul);
m_stop = min3((unsigned)(pll_max / fint / 2),
(unsigned)(pll_hw_max / fint / 2),
hw->m_max);
m_inc = 1;
if (m_start > m_stop)
continue;
if (hw->errata_i886) {
swap(m_start, m_stop);
m_inc = -1;
}
for (m = m_start; m != m_stop; m += m_inc) {
clkdco = 2 * m * fint;
if (func(n, m, fint, clkdco, data))
return true;
}
}
return false;
}
/*
* This calculates a PLL config that will provide the target_clkout rate
* for clkout. Additionally clkdco rate will be the same as clkout rate
* when clkout rate is >= min_clkdco.
*
* clkdco = clkin / n * m + clkin / n * mf / 262144
* clkout = clkdco / m2
*/
bool dss_pll_calc_b(const struct dss_pll *pll, unsigned long clkin,
unsigned long target_clkout, struct dss_pll_clock_info *cinfo)
{
unsigned long fint, clkdco, clkout;
unsigned long target_clkdco;
unsigned long min_dco;
unsigned int n, m, mf, m2, sd;
const struct dss_pll_hw *hw = pll->hw;
DSSDBG("clkin %lu, target clkout %lu\n", clkin, target_clkout);
/* Fint */
n = DIV_ROUND_UP(clkin, hw->fint_max);
fint = clkin / n;
/* adjust m2 so that the clkdco will be high enough */
min_dco = roundup(hw->clkdco_min, fint);
m2 = DIV_ROUND_UP(min_dco, target_clkout);
if (m2 == 0)
m2 = 1;
target_clkdco = target_clkout * m2;
m = target_clkdco / fint;
clkdco = fint * m;
/* adjust clkdco with fractional mf */
if (WARN_ON(target_clkdco - clkdco > fint))
mf = 0;
else
mf = (u32)div_u64(262144ull * (target_clkdco - clkdco), fint);
if (mf > 0)
clkdco += (u32)div_u64((u64)mf * fint, 262144);
clkout = clkdco / m2;
/* sigma-delta */
sd = DIV_ROUND_UP(fint * m, 250000000);
DSSDBG("N = %u, M = %u, M.f = %u, M2 = %u, SD = %u\n",
n, m, mf, m2, sd);
DSSDBG("Fint %lu, clkdco %lu, clkout %lu\n", fint, clkdco, clkout);
cinfo->n = n;
cinfo->m = m;
cinfo->mf = mf;
cinfo->mX[0] = m2;
cinfo->sd = sd;
cinfo->fint = fint;
cinfo->clkdco = clkdco;
cinfo->clkout[0] = clkout;
return true;
}
static int wait_for_bit_change(void __iomem *reg, int bitnum, int value)
{
unsigned long timeout;
ktime_t wait;
int t;
/* first busyloop to see if the bit changes right away */
t = 100;
while (t-- > 0) {
if (FLD_GET(readl_relaxed(reg), bitnum, bitnum) == value)
return value;
}
/* then loop for 500ms, sleeping for 1ms in between */
timeout = jiffies + msecs_to_jiffies(500);
while (time_before(jiffies, timeout)) {
if (FLD_GET(readl_relaxed(reg), bitnum, bitnum) == value)
return value;
wait = ns_to_ktime(1000 * 1000);
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_hrtimeout(&wait, HRTIMER_MODE_REL);
}
return !value;
}
int dss_pll_wait_reset_done(struct dss_pll *pll)
{
void __iomem *base = pll->base;
if (wait_for_bit_change(base + PLL_STATUS, 0, 1) != 1)
return -ETIMEDOUT;
else
return 0;
}
static int dss_wait_hsdiv_ack(struct dss_pll *pll, u32 hsdiv_ack_mask)
{
int t = 100;
while (t-- > 0) {
u32 v = readl_relaxed(pll->base + PLL_STATUS);
v &= hsdiv_ack_mask;
if (v == hsdiv_ack_mask)
return 0;
}
return -ETIMEDOUT;
}
static bool pll_is_locked(u32 stat)
{
/*
* Required value for each bitfield listed below
*
* PLL_STATUS[6] = 0 PLL_BYPASS
* PLL_STATUS[5] = 0 PLL_HIGHJITTER
*
* PLL_STATUS[3] = 0 PLL_LOSSREF
* PLL_STATUS[2] = 0 PLL_RECAL
* PLL_STATUS[1] = 1 PLL_LOCK
* PLL_STATUS[0] = 1 PLL_CTRL_RESET_DONE
*/
return ((stat & 0x6f) == 0x3);
}
int dss_pll_write_config_type_a(struct dss_pll *pll,
const struct dss_pll_clock_info *cinfo)
{
const struct dss_pll_hw *hw = pll->hw;
void __iomem *base = pll->base;
int r = 0;
u32 l;
l = 0;
if (hw->has_stopmode)
l = FLD_MOD(l, 1, 0, 0); /* PLL_STOPMODE */
l = FLD_MOD(l, cinfo->n - 1, hw->n_msb, hw->n_lsb); /* PLL_REGN */
l = FLD_MOD(l, cinfo->m, hw->m_msb, hw->m_lsb); /* PLL_REGM */
/* M4 */
l = FLD_MOD(l, cinfo->mX[0] ? cinfo->mX[0] - 1 : 0,
hw->mX_msb[0], hw->mX_lsb[0]);
/* M5 */
l = FLD_MOD(l, cinfo->mX[1] ? cinfo->mX[1] - 1 : 0,
hw->mX_msb[1], hw->mX_lsb[1]);
writel_relaxed(l, base + PLL_CONFIGURATION1);
l = 0;
/* M6 */
l = FLD_MOD(l, cinfo->mX[2] ? cinfo->mX[2] - 1 : 0,
hw->mX_msb[2], hw->mX_lsb[2]);
/* M7 */
l = FLD_MOD(l, cinfo->mX[3] ? cinfo->mX[3] - 1 : 0,
hw->mX_msb[3], hw->mX_lsb[3]);
writel_relaxed(l, base + PLL_CONFIGURATION3);
l = readl_relaxed(base + PLL_CONFIGURATION2);
if (hw->has_freqsel) {
u32 f = cinfo->fint < 1000000 ? 0x3 :
cinfo->fint < 1250000 ? 0x4 :
cinfo->fint < 1500000 ? 0x5 :
cinfo->fint < 1750000 ? 0x6 :
0x7;
l = FLD_MOD(l, f, 4, 1); /* PLL_FREQSEL */
} else if (hw->has_selfreqdco) {
u32 f = cinfo->clkdco < hw->clkdco_low ? 0x2 : 0x4;
l = FLD_MOD(l, f, 3, 1); /* PLL_SELFREQDCO */
}
l = FLD_MOD(l, 1, 13, 13); /* PLL_REFEN */
l = FLD_MOD(l, 0, 14, 14); /* PHY_CLKINEN */
l = FLD_MOD(l, 0, 16, 16); /* M4_CLOCK_EN */
l = FLD_MOD(l, 0, 18, 18); /* M5_CLOCK_EN */
l = FLD_MOD(l, 1, 20, 20); /* HSDIVBYPASS */
if (hw->has_refsel)
l = FLD_MOD(l, 3, 22, 21); /* REFSEL = sysclk */
l = FLD_MOD(l, 0, 23, 23); /* M6_CLOCK_EN */
l = FLD_MOD(l, 0, 25, 25); /* M7_CLOCK_EN */
writel_relaxed(l, base + PLL_CONFIGURATION2);
if (hw->errata_i932) {
int cnt = 0;
u32 sleep_time;
const u32 max_lock_retries = 20;
/*
* Calculate the wait time for PLL lock:
* 1000 REFCLK cycles, expressed in microseconds.
*/
sleep_time = DIV_ROUND_UP(1000*1000*1000, cinfo->fint);
for (cnt = 0; cnt < max_lock_retries; cnt++) {
writel_relaxed(1, base + PLL_GO); /* PLL_GO */
/*
* read the register back to ensure the write is
* flushed
*/
readl_relaxed(base + PLL_GO);
usleep_range(sleep_time, sleep_time + 5);
l = readl_relaxed(base + PLL_STATUS);
if (pll_is_locked(l) &&
!(readl_relaxed(base + PLL_GO) & 0x1))
break;
}
if (cnt == max_lock_retries) {
DSSERR("cannot lock PLL\n");
r = -EIO;
goto err;
}
} else {
writel_relaxed(1, base + PLL_GO); /* PLL_GO */
if (wait_for_bit_change(base + PLL_GO, 0, 0) != 0) {
DSSERR("DSS DPLL GO bit not going down.\n");
r = -EIO;
goto err;
}
if (wait_for_bit_change(base + PLL_STATUS, 1, 1) != 1) {
DSSERR("cannot lock DSS DPLL\n");
r = -EIO;
goto err;
}
}
l = readl_relaxed(base + PLL_CONFIGURATION2);
l = FLD_MOD(l, 1, 14, 14); /* PHY_CLKINEN */
l = FLD_MOD(l, cinfo->mX[0] ? 1 : 0, 16, 16); /* M4_CLOCK_EN */
l = FLD_MOD(l, cinfo->mX[1] ? 1 : 0, 18, 18); /* M5_CLOCK_EN */
l = FLD_MOD(l, 0, 20, 20); /* HSDIVBYPASS */
l = FLD_MOD(l, cinfo->mX[2] ? 1 : 0, 23, 23); /* M6_CLOCK_EN */
l = FLD_MOD(l, cinfo->mX[3] ? 1 : 0, 25, 25); /* M7_CLOCK_EN */
writel_relaxed(l, base + PLL_CONFIGURATION2);
r = dss_wait_hsdiv_ack(pll,
(cinfo->mX[0] ? BIT(7) : 0) |
(cinfo->mX[1] ? BIT(8) : 0) |
(cinfo->mX[2] ? BIT(10) : 0) |
(cinfo->mX[3] ? BIT(11) : 0));
if (r) {
DSSERR("failed to enable HSDIV clocks\n");
goto err;
}
err:
return r;
}
int dss_pll_write_config_type_b(struct dss_pll *pll,
const struct dss_pll_clock_info *cinfo)
{
const struct dss_pll_hw *hw = pll->hw;
void __iomem *base = pll->base;
u32 l;
l = 0;
l = FLD_MOD(l, cinfo->m, 20, 9); /* PLL_REGM */
l = FLD_MOD(l, cinfo->n - 1, 8, 1); /* PLL_REGN */
writel_relaxed(l, base + PLL_CONFIGURATION1);
l = readl_relaxed(base + PLL_CONFIGURATION2);
l = FLD_MOD(l, 0x0, 12, 12); /* PLL_HIGHFREQ divide by 2 */
l = FLD_MOD(l, 0x1, 13, 13); /* PLL_REFEN */
l = FLD_MOD(l, 0x0, 14, 14); /* PHY_CLKINEN */
if (hw->has_refsel)
l = FLD_MOD(l, 0x3, 22, 21); /* REFSEL = SYSCLK */
/* PLL_SELFREQDCO */
if (cinfo->clkdco > hw->clkdco_low)
l = FLD_MOD(l, 0x4, 3, 1);
else
l = FLD_MOD(l, 0x2, 3, 1);
writel_relaxed(l, base + PLL_CONFIGURATION2);
l = readl_relaxed(base + PLL_CONFIGURATION3);
l = FLD_MOD(l, cinfo->sd, 17, 10); /* PLL_REGSD */
writel_relaxed(l, base + PLL_CONFIGURATION3);
l = readl_relaxed(base + PLL_CONFIGURATION4);
l = FLD_MOD(l, cinfo->mX[0], 24, 18); /* PLL_REGM2 */
l = FLD_MOD(l, cinfo->mf, 17, 0); /* PLL_REGM_F */
writel_relaxed(l, base + PLL_CONFIGURATION4);
writel_relaxed(1, base + PLL_GO); /* PLL_GO */
if (wait_for_bit_change(base + PLL_GO, 0, 0) != 0) {
DSSERR("DSS DPLL GO bit not going down.\n");
return -EIO;
}
if (wait_for_bit_change(base + PLL_STATUS, 1, 1) != 1) {
DSSERR("cannot lock DSS DPLL\n");
return -ETIMEDOUT;
}
return 0;
}
| linux-master | drivers/gpu/drm/omapdrm/dss/pll.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright 2019 Linaro, Ltd, Rob Herring <[email protected]> */
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <drm/panfrost_drm.h>
#include "panfrost_device.h"
#include "panfrost_gem.h"
#include "panfrost_mmu.h"
/* Called by DRM core on the last userspace/kernel unreference of the
* BO.
*/
static void panfrost_gem_free_object(struct drm_gem_object *obj)
{
struct panfrost_gem_object *bo = to_panfrost_bo(obj);
struct panfrost_device *pfdev = obj->dev->dev_private;
/*
* Make sure the BO is no longer inserted in the shrinker list before
* taking care of the destruction itself. If we don't do that we have a
* race condition between this function and what's done in
* panfrost_gem_shrinker_scan().
*/
mutex_lock(&pfdev->shrinker_lock);
list_del_init(&bo->base.madv_list);
mutex_unlock(&pfdev->shrinker_lock);
/*
* If we still have mappings attached to the BO, there's a problem in
* our refcounting.
*/
WARN_ON_ONCE(!list_empty(&bo->mappings.list));
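/*
* sgts, when present, holds one sg_table per 2MB chunk (used for
* growable heap BOs); unmap and free the chunks that were populated.
*/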
if (bo->sgts) {
int i;
int n_sgt = bo->base.base.size / SZ_2M;
for (i = 0; i < n_sgt; i++) {
if (bo->sgts[i].sgl) {
dma_unmap_sgtable(pfdev->dev, &bo->sgts[i],
DMA_BIDIRECTIONAL, 0);
sg_free_table(&bo->sgts[i]);
}
}
kvfree(bo->sgts);
}
drm_gem_shmem_free(&bo->base);
}
struct panfrost_gem_mapping *
panfrost_gem_mapping_get(struct panfrost_gem_object *bo,
struct panfrost_file_priv *priv)
{
struct panfrost_gem_mapping *iter, *mapping = NULL;
mutex_lock(&bo->mappings.lock);
list_for_each_entry(iter, &bo->mappings.list, node) {
if (iter->mmu == priv->mmu) {
kref_get(&iter->refcount);
mapping = iter;
break;
}
}
mutex_unlock(&bo->mappings.lock);
return mapping;
}
static void
panfrost_gem_teardown_mapping(struct panfrost_gem_mapping *mapping)
{
if (mapping->active)
panfrost_mmu_unmap(mapping);
spin_lock(&mapping->mmu->mm_lock);
if (drm_mm_node_allocated(&mapping->mmnode))
drm_mm_remove_node(&mapping->mmnode);
spin_unlock(&mapping->mmu->mm_lock);
}
static void panfrost_gem_mapping_release(struct kref *kref)
{
struct panfrost_gem_mapping *mapping;
mapping = container_of(kref, struct panfrost_gem_mapping, refcount);
panfrost_gem_teardown_mapping(mapping);
drm_gem_object_put(&mapping->obj->base.base);
panfrost_mmu_ctx_put(mapping->mmu);
kfree(mapping);
}
void panfrost_gem_mapping_put(struct panfrost_gem_mapping *mapping)
{
if (!mapping)
return;
kref_put(&mapping->refcount, panfrost_gem_mapping_release);
}
void panfrost_gem_teardown_mappings_locked(struct panfrost_gem_object *bo)
{
struct panfrost_gem_mapping *mapping;
list_for_each_entry(mapping, &bo->mappings.list, node)
panfrost_gem_teardown_mapping(mapping);
}
int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv)
{
int ret;
size_t size = obj->size;
u64 align;
struct panfrost_gem_object *bo = to_panfrost_bo(obj);
unsigned long color = bo->noexec ? PANFROST_BO_NOEXEC : 0;
struct panfrost_file_priv *priv = file_priv->driver_priv;
struct panfrost_gem_mapping *mapping;
mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
if (!mapping)
return -ENOMEM;
INIT_LIST_HEAD(&mapping->node);
kref_init(&mapping->refcount);
drm_gem_object_get(obj);
mapping->obj = bo;
/*
* Executable buffers cannot cross a 16MB boundary as the program
* counter is 24-bits. We assume executable buffers will be less than
* 16MB and aligning executable buffers to their size will avoid
* crossing a 16MB boundary.
*/
if (!bo->noexec)
align = size >> PAGE_SHIFT;
else
align = size >= SZ_2M ? SZ_2M >> PAGE_SHIFT : 0;
mapping->mmu = panfrost_mmu_ctx_get(priv->mmu);
spin_lock(&mapping->mmu->mm_lock);
ret = drm_mm_insert_node_generic(&mapping->mmu->mm, &mapping->mmnode,
size >> PAGE_SHIFT, align, color, 0);
spin_unlock(&mapping->mmu->mm_lock);
if (ret)
goto err;
if (!bo->is_heap) {
ret = panfrost_mmu_map(mapping);
if (ret)
goto err;
}
mutex_lock(&bo->mappings.lock);
WARN_ON(bo->base.madv != PANFROST_MADV_WILLNEED);
list_add_tail(&mapping->node, &bo->mappings.list);
mutex_unlock(&bo->mappings.lock);
err:
if (ret)
panfrost_gem_mapping_put(mapping);
return ret;
}
void panfrost_gem_close(struct drm_gem_object *obj, struct drm_file *file_priv)
{
struct panfrost_file_priv *priv = file_priv->driver_priv;
struct panfrost_gem_object *bo = to_panfrost_bo(obj);
struct panfrost_gem_mapping *mapping = NULL, *iter;
mutex_lock(&bo->mappings.lock);
list_for_each_entry(iter, &bo->mappings.list, node) {
if (iter->mmu == priv->mmu) {
mapping = iter;
list_del(&iter->node);
break;
}
}
mutex_unlock(&bo->mappings.lock);
panfrost_gem_mapping_put(mapping);
}
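/* Heap BOs are populated on demand and cannot be pinned. */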
static int panfrost_gem_pin(struct drm_gem_object *obj)
{
struct panfrost_gem_object *bo = to_panfrost_bo(obj);
if (bo->is_heap)
return -EINVAL;
return drm_gem_shmem_pin(&bo->base);
}
static const struct drm_gem_object_funcs panfrost_gem_funcs = {
.free = panfrost_gem_free_object,
.open = panfrost_gem_open,
.close = panfrost_gem_close,
.print_info = drm_gem_shmem_object_print_info,
.pin = panfrost_gem_pin,
.unpin = drm_gem_shmem_object_unpin,
.get_sg_table = drm_gem_shmem_object_get_sg_table,
.vmap = drm_gem_shmem_object_vmap,
.vunmap = drm_gem_shmem_object_vunmap,
.mmap = drm_gem_shmem_object_mmap,
.vm_ops = &drm_gem_shmem_vm_ops,
};
/**
* panfrost_gem_create_object - Implementation of driver->gem_create_object.
* @dev: DRM device
* @size: Size in bytes of the memory the object will reference
*
* This lets the GEM helpers allocate object structs for us, and keep
* our BO stats correct.
*/
struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t size)
{
struct panfrost_device *pfdev = dev->dev_private;
struct panfrost_gem_object *obj;
obj = kzalloc(sizeof(*obj), GFP_KERNEL);
if (!obj)
return ERR_PTR(-ENOMEM);
INIT_LIST_HEAD(&obj->mappings.list);
mutex_init(&obj->mappings.lock);
obj->base.base.funcs = &panfrost_gem_funcs;
obj->base.map_wc = !pfdev->coherent;
return &obj->base.base;
}
struct panfrost_gem_object *
panfrost_gem_create(struct drm_device *dev, size_t size, u32 flags)
{
struct drm_gem_shmem_object *shmem;
struct panfrost_gem_object *bo;
/* Round up heap allocations to 2MB to keep fault handling simple */
if (flags & PANFROST_BO_HEAP)
size = roundup(size, SZ_2M);
shmem = drm_gem_shmem_create(dev, size);
if (IS_ERR(shmem))
return ERR_CAST(shmem);
bo = to_panfrost_bo(&shmem->base);
bo->noexec = !!(flags & PANFROST_BO_NOEXEC);
bo->is_heap = !!(flags & PANFROST_BO_HEAP);
return bo;
}
struct drm_gem_object *
panfrost_gem_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach,
struct sg_table *sgt)
{
struct drm_gem_object *obj;
struct panfrost_gem_object *bo;
obj = drm_gem_shmem_prime_import_sg_table(dev, attach, sgt);
if (IS_ERR(obj))
return ERR_CAST(obj);
bo = to_panfrost_bo(obj);
bo->noexec = true;
return obj;
}
| linux-master | drivers/gpu/drm/panfrost/panfrost_gem.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright 2018 Marty E. Plummer <[email protected]> */
/* Copyright 2019 Linaro, Ltd., Rob Herring <[email protected]> */
/* Copyright 2019 Collabora ltd. */
#include <linux/module.h>
#include <linux/of.h>
#include <linux/pagemap.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <drm/panfrost_drm.h>
#include <drm/drm_drv.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_syncobj.h>
#include <drm/drm_utils.h>
#include "panfrost_device.h"
#include "panfrost_gem.h"
#include "panfrost_mmu.h"
#include "panfrost_job.h"
#include "panfrost_gpu.h"
#include "panfrost_perfcnt.h"
static bool unstable_ioctls;
module_param_unsafe(unstable_ioctls, bool, 0600);
static int panfrost_ioctl_get_param(struct drm_device *ddev, void *data, struct drm_file *file)
{
struct drm_panfrost_get_param *param = data;
struct panfrost_device *pfdev = ddev->dev_private;
if (param->pad != 0)
return -EINVAL;
#define PANFROST_FEATURE(name, member) \
case DRM_PANFROST_PARAM_ ## name: \
param->value = pfdev->features.member; \
break
#define PANFROST_FEATURE_ARRAY(name, member, max) \
case DRM_PANFROST_PARAM_ ## name ## 0 ... \
DRM_PANFROST_PARAM_ ## name ## max: \
param->value = pfdev->features.member[param->param - \
DRM_PANFROST_PARAM_ ## name ## 0]; \
break
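/*
 * For example, PANFROST_FEATURE(GPU_PROD_ID, id) below expands (with the
 * trailing semicolon from the use site) to:
 *
 *	case DRM_PANFROST_PARAM_GPU_PROD_ID:
 *		param->value = pfdev->features.id;
 *		break;
 */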
switch (param->param) {
PANFROST_FEATURE(GPU_PROD_ID, id);
PANFROST_FEATURE(GPU_REVISION, revision);
PANFROST_FEATURE(SHADER_PRESENT, shader_present);
PANFROST_FEATURE(TILER_PRESENT, tiler_present);
PANFROST_FEATURE(L2_PRESENT, l2_present);
PANFROST_FEATURE(STACK_PRESENT, stack_present);
PANFROST_FEATURE(AS_PRESENT, as_present);
PANFROST_FEATURE(JS_PRESENT, js_present);
PANFROST_FEATURE(L2_FEATURES, l2_features);
PANFROST_FEATURE(CORE_FEATURES, core_features);
PANFROST_FEATURE(TILER_FEATURES, tiler_features);
PANFROST_FEATURE(MEM_FEATURES, mem_features);
PANFROST_FEATURE(MMU_FEATURES, mmu_features);
PANFROST_FEATURE(THREAD_FEATURES, thread_features);
PANFROST_FEATURE(MAX_THREADS, max_threads);
PANFROST_FEATURE(THREAD_MAX_WORKGROUP_SZ,
thread_max_workgroup_sz);
PANFROST_FEATURE(THREAD_MAX_BARRIER_SZ,
thread_max_barrier_sz);
PANFROST_FEATURE(COHERENCY_FEATURES, coherency_features);
PANFROST_FEATURE(AFBC_FEATURES, afbc_features);
PANFROST_FEATURE_ARRAY(TEXTURE_FEATURES, texture_features, 3);
PANFROST_FEATURE_ARRAY(JS_FEATURES, js_features, 15);
PANFROST_FEATURE(NR_CORE_GROUPS, nr_core_groups);
PANFROST_FEATURE(THREAD_TLS_ALLOC, thread_tls_alloc);
default:
return -EINVAL;
}
return 0;
}
static int panfrost_ioctl_create_bo(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct panfrost_file_priv *priv = file->driver_priv;
struct panfrost_gem_object *bo;
struct drm_panfrost_create_bo *args = data;
struct panfrost_gem_mapping *mapping;
int ret;
if (!args->size || args->pad ||
(args->flags & ~(PANFROST_BO_NOEXEC | PANFROST_BO_HEAP)))
return -EINVAL;
/* Heaps should never be executable */
if ((args->flags & PANFROST_BO_HEAP) &&
!(args->flags & PANFROST_BO_NOEXEC))
return -EINVAL;
bo = panfrost_gem_create(dev, args->size, args->flags);
if (IS_ERR(bo))
return PTR_ERR(bo);
ret = drm_gem_handle_create(file, &bo->base.base, &args->handle);
if (ret)
goto out;
mapping = panfrost_gem_mapping_get(bo, priv);
if (mapping) {
args->offset = mapping->mmnode.start << PAGE_SHIFT;
panfrost_gem_mapping_put(mapping);
} else {
/* This can only happen if the handle from
* drm_gem_handle_create() has already been guessed and freed
* by user space
*/
ret = -EINVAL;
}
out:
drm_gem_object_put(&bo->base.base);
return ret;
}
/**
* panfrost_lookup_bos() - Sets up job->bo[] with the GEM objects
* referenced by the job.
* @dev: DRM device
* @file_priv: DRM file for this fd
* @args: IOCTL args
* @job: job being set up
*
* Resolve handles from userspace to BOs and attach them to job.
*
* Note that this function doesn't need to unreference the BOs on
* failure, because that will happen at panfrost_job_cleanup() time.
*/
static int
panfrost_lookup_bos(struct drm_device *dev,
struct drm_file *file_priv,
struct drm_panfrost_submit *args,
struct panfrost_job *job)
{
struct panfrost_file_priv *priv = file_priv->driver_priv;
struct panfrost_gem_object *bo;
unsigned int i;
int ret;
job->bo_count = args->bo_handle_count;
if (!job->bo_count)
return 0;
ret = drm_gem_objects_lookup(file_priv,
(void __user *)(uintptr_t)args->bo_handles,
job->bo_count, &job->bos);
if (ret)
return ret;
job->mappings = kvmalloc_array(job->bo_count,
sizeof(struct panfrost_gem_mapping *),
GFP_KERNEL | __GFP_ZERO);
if (!job->mappings)
return -ENOMEM;
for (i = 0; i < job->bo_count; i++) {
struct panfrost_gem_mapping *mapping;
bo = to_panfrost_bo(job->bos[i]);
mapping = panfrost_gem_mapping_get(bo, priv);
if (!mapping) {
ret = -EINVAL;
break;
}
atomic_inc(&bo->gpu_usecount);
job->mappings[i] = mapping;
}
return ret;
}
/**
* panfrost_copy_in_sync() - Sets up job->deps with the sync objects
* referenced by the job.
* @dev: DRM device
* @file_priv: DRM file for this fd
* @args: IOCTL args
* @job: job being set up
*
* Resolve syncobjs from userspace to fences and attach them to job.
*
* Note that this function doesn't need to unreference the fences on
* failure, because that will happen at panfrost_job_cleanup() time.
*/
static int
panfrost_copy_in_sync(struct drm_device *dev,
struct drm_file *file_priv,
struct drm_panfrost_submit *args,
struct panfrost_job *job)
{
u32 *handles;
int ret = 0;
int i, in_fence_count;
in_fence_count = args->in_sync_count;
if (!in_fence_count)
return 0;
handles = kvmalloc_array(in_fence_count, sizeof(u32), GFP_KERNEL);
if (!handles) {
ret = -ENOMEM;
DRM_DEBUG("Failed to allocate incoming syncobj handles\n");
goto fail;
}
if (copy_from_user(handles,
(void __user *)(uintptr_t)args->in_syncs,
in_fence_count * sizeof(u32))) {
ret = -EFAULT;
DRM_DEBUG("Failed to copy in syncobj handles\n");
goto fail;
}
for (i = 0; i < in_fence_count; i++) {
ret = drm_sched_job_add_syncobj_dependency(&job->base, file_priv,
handles[i], 0);
if (ret)
goto fail;
}
fail:
kvfree(handles);
return ret;
}
static int panfrost_ioctl_submit(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct panfrost_device *pfdev = dev->dev_private;
struct panfrost_file_priv *file_priv = file->driver_priv;
struct drm_panfrost_submit *args = data;
struct drm_syncobj *sync_out = NULL;
struct panfrost_job *job;
int ret = 0, slot;
if (!args->jc)
return -EINVAL;
if (args->requirements && args->requirements != PANFROST_JD_REQ_FS)
return -EINVAL;
if (args->out_sync > 0) {
sync_out = drm_syncobj_find(file, args->out_sync);
if (!sync_out)
return -ENODEV;
}
job = kzalloc(sizeof(*job), GFP_KERNEL);
if (!job) {
ret = -ENOMEM;
goto out_put_syncout;
}
kref_init(&job->refcount);
job->pfdev = pfdev;
job->jc = args->jc;
job->requirements = args->requirements;
job->flush_id = panfrost_gpu_get_latest_flush_id(pfdev);
job->mmu = file_priv->mmu;
slot = panfrost_job_get_slot(job);
ret = drm_sched_job_init(&job->base,
&file_priv->sched_entity[slot],
NULL);
if (ret)
goto out_put_job;
ret = panfrost_copy_in_sync(dev, file, args, job);
if (ret)
goto out_cleanup_job;
ret = panfrost_lookup_bos(dev, file, args, job);
if (ret)
goto out_cleanup_job;
ret = panfrost_job_push(job);
if (ret)
goto out_cleanup_job;
/* Update the return sync object for the job */
if (sync_out)
drm_syncobj_replace_fence(sync_out, job->render_done_fence);
out_cleanup_job:
if (ret)
drm_sched_job_cleanup(&job->base);
out_put_job:
panfrost_job_put(job);
out_put_syncout:
if (sync_out)
drm_syncobj_put(sync_out);
return ret;
}
static int
panfrost_ioctl_wait_bo(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
long ret;
struct drm_panfrost_wait_bo *args = data;
struct drm_gem_object *gem_obj;
unsigned long timeout = drm_timeout_abs_to_jiffies(args->timeout_ns);
if (args->pad)
return -EINVAL;
gem_obj = drm_gem_object_lookup(file_priv, args->handle);
if (!gem_obj)
return -ENOENT;
ret = dma_resv_wait_timeout(gem_obj->resv, DMA_RESV_USAGE_READ,
true, timeout);
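/*
 * A zero return here means the fences are still busy: report -ETIMEDOUT when
 * an actual timeout was requested, or -EBUSY for a pure (timeout == 0) poll.
 */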
if (!ret)
ret = timeout ? -ETIMEDOUT : -EBUSY;
drm_gem_object_put(gem_obj);
return ret;
}
static int panfrost_ioctl_mmap_bo(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_panfrost_mmap_bo *args = data;
struct drm_gem_object *gem_obj;
int ret;
if (args->flags != 0) {
DRM_INFO("unknown mmap_bo flags: %d\n", args->flags);
return -EINVAL;
}
gem_obj = drm_gem_object_lookup(file_priv, args->handle);
if (!gem_obj) {
DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
return -ENOENT;
}
/* Don't allow mmapping of heap objects as pages are not pinned. */
if (to_panfrost_bo(gem_obj)->is_heap) {
ret = -EINVAL;
goto out;
}
ret = drm_gem_create_mmap_offset(gem_obj);
if (ret == 0)
args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node);
out:
drm_gem_object_put(gem_obj);
return ret;
}
static int panfrost_ioctl_get_bo_offset(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct panfrost_file_priv *priv = file_priv->driver_priv;
struct drm_panfrost_get_bo_offset *args = data;
struct panfrost_gem_mapping *mapping;
struct drm_gem_object *gem_obj;
struct panfrost_gem_object *bo;
gem_obj = drm_gem_object_lookup(file_priv, args->handle);
if (!gem_obj) {
DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
return -ENOENT;
}
bo = to_panfrost_bo(gem_obj);
mapping = panfrost_gem_mapping_get(bo, priv);
drm_gem_object_put(gem_obj);
if (!mapping)
return -EINVAL;
args->offset = mapping->mmnode.start << PAGE_SHIFT;
panfrost_gem_mapping_put(mapping);
return 0;
}
static int panfrost_ioctl_madvise(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct panfrost_file_priv *priv = file_priv->driver_priv;
struct drm_panfrost_madvise *args = data;
struct panfrost_device *pfdev = dev->dev_private;
struct drm_gem_object *gem_obj;
struct panfrost_gem_object *bo;
int ret = 0;
gem_obj = drm_gem_object_lookup(file_priv, args->handle);
if (!gem_obj) {
DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
return -ENOENT;
}
bo = to_panfrost_bo(gem_obj);
ret = dma_resv_lock_interruptible(bo->base.base.resv, NULL);
if (ret)
goto out_put_object;
mutex_lock(&pfdev->shrinker_lock);
mutex_lock(&bo->mappings.lock);
if (args->madv == PANFROST_MADV_DONTNEED) {
struct panfrost_gem_mapping *first;
first = list_first_entry(&bo->mappings.list,
struct panfrost_gem_mapping,
node);
/*
* If we want to mark the BO purgeable, there must be only one
* user: the caller FD.
* We could do something smarter and mark the BO purgeable only
* when all its users have marked it purgeable, but globally
* visible/shared BOs are likely to never be marked purgeable
* anyway, so let's not bother.
*/
if (!list_is_singular(&bo->mappings.list) ||
WARN_ON_ONCE(first->mmu != priv->mmu)) {
ret = -EINVAL;
goto out_unlock_mappings;
}
}
args->retained = drm_gem_shmem_madvise(&bo->base, args->madv);
if (args->retained) {
if (args->madv == PANFROST_MADV_DONTNEED)
list_move_tail(&bo->base.madv_list,
&pfdev->shrinker_list);
else if (args->madv == PANFROST_MADV_WILLNEED)
list_del_init(&bo->base.madv_list);
}
out_unlock_mappings:
mutex_unlock(&bo->mappings.lock);
mutex_unlock(&pfdev->shrinker_lock);
dma_resv_unlock(bo->base.base.resv);
out_put_object:
drm_gem_object_put(gem_obj);
return ret;
}
int panfrost_unstable_ioctl_check(void)
{
if (!unstable_ioctls)
return -ENOSYS;
return 0;
}
static int
panfrost_open(struct drm_device *dev, struct drm_file *file)
{
int ret;
struct panfrost_device *pfdev = dev->dev_private;
struct panfrost_file_priv *panfrost_priv;
panfrost_priv = kzalloc(sizeof(*panfrost_priv), GFP_KERNEL);
if (!panfrost_priv)
return -ENOMEM;
panfrost_priv->pfdev = pfdev;
file->driver_priv = panfrost_priv;
panfrost_priv->mmu = panfrost_mmu_ctx_create(pfdev);
if (IS_ERR(panfrost_priv->mmu)) {
ret = PTR_ERR(panfrost_priv->mmu);
goto err_free;
}
ret = panfrost_job_open(panfrost_priv);
if (ret)
goto err_job;
return 0;
err_job:
panfrost_mmu_ctx_put(panfrost_priv->mmu);
err_free:
kfree(panfrost_priv);
return ret;
}
static void
panfrost_postclose(struct drm_device *dev, struct drm_file *file)
{
struct panfrost_file_priv *panfrost_priv = file->driver_priv;
panfrost_perfcnt_close(file);
panfrost_job_close(panfrost_priv);
panfrost_mmu_ctx_put(panfrost_priv->mmu);
kfree(panfrost_priv);
}
static const struct drm_ioctl_desc panfrost_drm_driver_ioctls[] = {
#define PANFROST_IOCTL(n, func, flags) \
DRM_IOCTL_DEF_DRV(PANFROST_##n, panfrost_ioctl_##func, flags)
PANFROST_IOCTL(SUBMIT, submit, DRM_RENDER_ALLOW),
PANFROST_IOCTL(WAIT_BO, wait_bo, DRM_RENDER_ALLOW),
PANFROST_IOCTL(CREATE_BO, create_bo, DRM_RENDER_ALLOW),
PANFROST_IOCTL(MMAP_BO, mmap_bo, DRM_RENDER_ALLOW),
PANFROST_IOCTL(GET_PARAM, get_param, DRM_RENDER_ALLOW),
PANFROST_IOCTL(GET_BO_OFFSET, get_bo_offset, DRM_RENDER_ALLOW),
PANFROST_IOCTL(PERFCNT_ENABLE, perfcnt_enable, DRM_RENDER_ALLOW),
PANFROST_IOCTL(PERFCNT_DUMP, perfcnt_dump, DRM_RENDER_ALLOW),
PANFROST_IOCTL(MADVISE, madvise, DRM_RENDER_ALLOW),
};
DEFINE_DRM_GEM_FOPS(panfrost_drm_driver_fops);
/*
* Panfrost driver version:
* - 1.0 - initial interface
* - 1.1 - adds HEAP and NOEXEC flags for CREATE_BO
* - 1.2 - adds AFBC_FEATURES query
*/
static const struct drm_driver panfrost_drm_driver = {
.driver_features = DRIVER_RENDER | DRIVER_GEM | DRIVER_SYNCOBJ,
.open = panfrost_open,
.postclose = panfrost_postclose,
.ioctls = panfrost_drm_driver_ioctls,
.num_ioctls = ARRAY_SIZE(panfrost_drm_driver_ioctls),
.fops = &panfrost_drm_driver_fops,
.name = "panfrost",
.desc = "panfrost DRM",
.date = "20180908",
.major = 1,
.minor = 2,
.gem_create_object = panfrost_gem_create_object,
.gem_prime_import_sg_table = panfrost_gem_prime_import_sg_table,
};
static int panfrost_probe(struct platform_device *pdev)
{
struct panfrost_device *pfdev;
struct drm_device *ddev;
int err;
pfdev = devm_kzalloc(&pdev->dev, sizeof(*pfdev), GFP_KERNEL);
if (!pfdev)
return -ENOMEM;
pfdev->pdev = pdev;
pfdev->dev = &pdev->dev;
platform_set_drvdata(pdev, pfdev);
pfdev->comp = of_device_get_match_data(&pdev->dev);
if (!pfdev->comp)
return -ENODEV;
pfdev->coherent = device_get_dma_attr(&pdev->dev) == DEV_DMA_COHERENT;
/* Allocate and initialize the DRM device. */
ddev = drm_dev_alloc(&panfrost_drm_driver, &pdev->dev);
if (IS_ERR(ddev))
return PTR_ERR(ddev);
ddev->dev_private = pfdev;
pfdev->ddev = ddev;
mutex_init(&pfdev->shrinker_lock);
INIT_LIST_HEAD(&pfdev->shrinker_list);
err = panfrost_device_init(pfdev);
if (err) {
if (err != -EPROBE_DEFER)
dev_err(&pdev->dev, "Fatal error during GPU init\n");
goto err_out0;
}
pm_runtime_set_active(pfdev->dev);
pm_runtime_mark_last_busy(pfdev->dev);
pm_runtime_enable(pfdev->dev);
pm_runtime_set_autosuspend_delay(pfdev->dev, 50); /* ~3 frames */
pm_runtime_use_autosuspend(pfdev->dev);
/*
* Register the DRM device with the core and the connectors with
* sysfs
*/
err = drm_dev_register(ddev, 0);
if (err < 0)
goto err_out1;
panfrost_gem_shrinker_init(ddev);
return 0;
err_out1:
pm_runtime_disable(pfdev->dev);
panfrost_device_fini(pfdev);
pm_runtime_set_suspended(pfdev->dev);
err_out0:
drm_dev_put(ddev);
return err;
}
static void panfrost_remove(struct platform_device *pdev)
{
struct panfrost_device *pfdev = platform_get_drvdata(pdev);
struct drm_device *ddev = pfdev->ddev;
drm_dev_unregister(ddev);
panfrost_gem_shrinker_cleanup(ddev);
pm_runtime_get_sync(pfdev->dev);
pm_runtime_disable(pfdev->dev);
panfrost_device_fini(pfdev);
pm_runtime_set_suspended(pfdev->dev);
drm_dev_put(ddev);
}
/*
* The OPP core wants the supply names to be NULL terminated, but we need the
 * correct num_supplies value for the regulator core. Hence, we NULL-terminate here
* and then initialize num_supplies with ARRAY_SIZE - 1.
*/
static const char * const default_supplies[] = { "mali", NULL };
static const struct panfrost_compatible default_data = {
.num_supplies = ARRAY_SIZE(default_supplies) - 1,
.supply_names = default_supplies,
.num_pm_domains = 1, /* optional */
.pm_domain_names = NULL,
};
static const struct panfrost_compatible amlogic_data = {
.num_supplies = ARRAY_SIZE(default_supplies) - 1,
.supply_names = default_supplies,
.vendor_quirk = panfrost_gpu_amlogic_quirk,
};
/*
 * The old MT8183 data with two power supplies is here only to keep
 * backward compatibility with older devicetrees, as DVFS will not
 * work with it.
 *
 * On new devicetrees please use the _b variant instead, which expects a
 * single supply with the regulators coupled.
*/
static const char * const mediatek_mt8183_supplies[] = { "mali", "sram", NULL };
static const char * const mediatek_mt8183_pm_domains[] = { "core0", "core1", "core2" };
static const struct panfrost_compatible mediatek_mt8183_data = {
.num_supplies = ARRAY_SIZE(mediatek_mt8183_supplies) - 1,
.supply_names = mediatek_mt8183_supplies,
.num_pm_domains = ARRAY_SIZE(mediatek_mt8183_pm_domains),
.pm_domain_names = mediatek_mt8183_pm_domains,
};
static const char * const mediatek_mt8183_b_supplies[] = { "mali", NULL };
static const struct panfrost_compatible mediatek_mt8183_b_data = {
.num_supplies = ARRAY_SIZE(mediatek_mt8183_b_supplies) - 1,
.supply_names = mediatek_mt8183_b_supplies,
.num_pm_domains = ARRAY_SIZE(mediatek_mt8183_pm_domains),
.pm_domain_names = mediatek_mt8183_pm_domains,
};
static const char * const mediatek_mt8186_pm_domains[] = { "core0", "core1" };
static const struct panfrost_compatible mediatek_mt8186_data = {
.num_supplies = ARRAY_SIZE(mediatek_mt8183_b_supplies) - 1,
.supply_names = mediatek_mt8183_b_supplies,
.num_pm_domains = ARRAY_SIZE(mediatek_mt8186_pm_domains),
.pm_domain_names = mediatek_mt8186_pm_domains,
};
static const char * const mediatek_mt8192_supplies[] = { "mali", NULL };
static const char * const mediatek_mt8192_pm_domains[] = { "core0", "core1", "core2",
"core3", "core4" };
static const struct panfrost_compatible mediatek_mt8192_data = {
.num_supplies = ARRAY_SIZE(mediatek_mt8192_supplies) - 1,
.supply_names = mediatek_mt8192_supplies,
.num_pm_domains = ARRAY_SIZE(mediatek_mt8192_pm_domains),
.pm_domain_names = mediatek_mt8192_pm_domains,
};
static const struct of_device_id dt_match[] = {
/* Set first to probe before the generic compatibles */
{ .compatible = "amlogic,meson-gxm-mali",
.data = &amlogic_data, },
{ .compatible = "amlogic,meson-g12a-mali",
.data = &amlogic_data, },
{ .compatible = "arm,mali-t604", .data = &default_data, },
{ .compatible = "arm,mali-t624", .data = &default_data, },
{ .compatible = "arm,mali-t628", .data = &default_data, },
{ .compatible = "arm,mali-t720", .data = &default_data, },
{ .compatible = "arm,mali-t760", .data = &default_data, },
{ .compatible = "arm,mali-t820", .data = &default_data, },
{ .compatible = "arm,mali-t830", .data = &default_data, },
{ .compatible = "arm,mali-t860", .data = &default_data, },
{ .compatible = "arm,mali-t880", .data = &default_data, },
{ .compatible = "arm,mali-bifrost", .data = &default_data, },
{ .compatible = "arm,mali-valhall-jm", .data = &default_data, },
{ .compatible = "mediatek,mt8183-mali", .data = &mediatek_mt8183_data },
{ .compatible = "mediatek,mt8183b-mali", .data = &mediatek_mt8183_b_data },
{ .compatible = "mediatek,mt8186-mali", .data = &mediatek_mt8186_data },
{ .compatible = "mediatek,mt8192-mali", .data = &mediatek_mt8192_data },
{}
};
MODULE_DEVICE_TABLE(of, dt_match);
static struct platform_driver panfrost_driver = {
.probe = panfrost_probe,
.remove_new = panfrost_remove,
.driver = {
.name = "panfrost",
.pm = pm_ptr(&panfrost_pm_ops),
.of_match_table = dt_match,
},
};
module_platform_driver(panfrost_driver);
MODULE_AUTHOR("Panfrost Project Developers");
MODULE_DESCRIPTION("Panfrost DRM Driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/gpu/drm/panfrost/panfrost_drv.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright 2019 Linaro, Ltd, Rob Herring <[email protected]> */
#include <drm/panfrost_drm.h>
#include <linux/atomic.h>
#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/io-pgtable.h>
#include <linux/iommu.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/shmem_fs.h>
#include <linux/sizes.h>
#include "panfrost_device.h"
#include "panfrost_mmu.h"
#include "panfrost_gem.h"
#include "panfrost_features.h"
#include "panfrost_regs.h"
#define mmu_write(dev, reg, data) writel(data, dev->iomem + reg)
#define mmu_read(dev, reg) readl(dev->iomem + reg)
static int wait_ready(struct panfrost_device *pfdev, u32 as_nr)
{
int ret;
u32 val;
/* Wait for the MMU status to indicate there is no active command, in
* case one is pending. */
ret = readl_relaxed_poll_timeout_atomic(pfdev->iomem + AS_STATUS(as_nr),
val, !(val & AS_STATUS_AS_ACTIVE), 10, 100000);
if (ret) {
/* The GPU hung, let's trigger a reset */
panfrost_device_schedule_reset(pfdev);
dev_err(pfdev->dev, "AS_ACTIVE bit stuck\n");
}
return ret;
}
static int write_cmd(struct panfrost_device *pfdev, u32 as_nr, u32 cmd)
{
int status;
/* write AS_COMMAND when MMU is ready to accept another command */
status = wait_ready(pfdev, as_nr);
if (!status)
mmu_write(pfdev, AS_COMMAND(as_nr), cmd);
return status;
}
static void lock_region(struct panfrost_device *pfdev, u32 as_nr,
u64 region_start, u64 size)
{
u8 region_width;
u64 region;
u64 region_end = region_start + size;
if (!size)
return;
/*
* The locked region is a naturally aligned power of 2 block encoded as
 * the log2 of its size, minus 1.
* Calculate the desired start/end and look for the highest bit which
* differs. The smallest naturally aligned block must include this bit
* change, the desired region starts with this bit (and subsequent bits)
* zeroed and ends with the bit (and subsequent bits) set to one.
*/
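/*
 * Illustrative example (assuming the XOR term dominates
 * AS_LOCK_REGION_MIN_SIZE): region_start = 0x100000 and size = 0x100000 give
 * region_end - 1 = 0x1fffff, so region_start ^ (region_end - 1) = 0xfffff and
 * fls64() returns 20, i.e. region_width = 19. The low bits of region_start
 * are already clear, so the locked block is the naturally aligned 1 MiB
 * (2^(19 + 1) bytes) region starting at 0x100000.
 */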
region_width = max(fls64(region_start ^ (region_end - 1)),
const_ilog2(AS_LOCK_REGION_MIN_SIZE)) - 1;
/*
* Mask off the low bits of region_start (which would be ignored by
* the hardware anyway)
*/
region_start &= GENMASK_ULL(63, region_width);
region = region_width | region_start;
/* Lock the region that needs to be updated */
mmu_write(pfdev, AS_LOCKADDR_LO(as_nr), lower_32_bits(region));
mmu_write(pfdev, AS_LOCKADDR_HI(as_nr), upper_32_bits(region));
write_cmd(pfdev, as_nr, AS_COMMAND_LOCK);
}
static int mmu_hw_do_operation_locked(struct panfrost_device *pfdev, int as_nr,
u64 iova, u64 size, u32 op)
{
if (as_nr < 0)
return 0;
if (op != AS_COMMAND_UNLOCK)
lock_region(pfdev, as_nr, iova, size);
/* Run the MMU operation */
write_cmd(pfdev, as_nr, op);
/* Wait for the flush to complete */
return wait_ready(pfdev, as_nr);
}
static int mmu_hw_do_operation(struct panfrost_device *pfdev,
struct panfrost_mmu *mmu,
u64 iova, u64 size, u32 op)
{
int ret;
spin_lock(&pfdev->as_lock);
ret = mmu_hw_do_operation_locked(pfdev, mmu->as, iova, size, op);
spin_unlock(&pfdev->as_lock);
return ret;
}
static void panfrost_mmu_enable(struct panfrost_device *pfdev, struct panfrost_mmu *mmu)
{
int as_nr = mmu->as;
struct io_pgtable_cfg *cfg = &mmu->pgtbl_cfg;
u64 transtab = cfg->arm_mali_lpae_cfg.transtab;
u64 memattr = cfg->arm_mali_lpae_cfg.memattr;
mmu_hw_do_operation_locked(pfdev, as_nr, 0, ~0ULL, AS_COMMAND_FLUSH_MEM);
mmu_write(pfdev, AS_TRANSTAB_LO(as_nr), lower_32_bits(transtab));
mmu_write(pfdev, AS_TRANSTAB_HI(as_nr), upper_32_bits(transtab));
/* Need to revisit memory attributes: non-cacheable (NC) is the
 * default here, while the vendor Mali driver uses inner
 * write-through (WT).
 */
mmu_write(pfdev, AS_MEMATTR_LO(as_nr), lower_32_bits(memattr));
mmu_write(pfdev, AS_MEMATTR_HI(as_nr), upper_32_bits(memattr));
write_cmd(pfdev, as_nr, AS_COMMAND_UPDATE);
}
static void panfrost_mmu_disable(struct panfrost_device *pfdev, u32 as_nr)
{
mmu_hw_do_operation_locked(pfdev, as_nr, 0, ~0ULL, AS_COMMAND_FLUSH_MEM);
mmu_write(pfdev, AS_TRANSTAB_LO(as_nr), 0);
mmu_write(pfdev, AS_TRANSTAB_HI(as_nr), 0);
mmu_write(pfdev, AS_MEMATTR_LO(as_nr), 0);
mmu_write(pfdev, AS_MEMATTR_HI(as_nr), 0);
write_cmd(pfdev, as_nr, AS_COMMAND_UPDATE);
}
u32 panfrost_mmu_as_get(struct panfrost_device *pfdev, struct panfrost_mmu *mmu)
{
int as;
spin_lock(&pfdev->as_lock);
as = mmu->as;
if (as >= 0) {
int en = atomic_inc_return(&mmu->as_count);
u32 mask = BIT(as) | BIT(16 + as);
/*
* AS can be retained by active jobs or a perfcnt context,
* hence the '+ 1' here.
*/
WARN_ON(en >= (NUM_JOB_SLOTS + 1));
list_move(&mmu->list, &pfdev->as_lru_list);
if (pfdev->as_faulty_mask & mask) {
/* Unhandled pagefault on this AS, the MMU was
* disabled. We need to re-enable the MMU after
* clearing+unmasking the AS interrupts.
*/
mmu_write(pfdev, MMU_INT_CLEAR, mask);
mmu_write(pfdev, MMU_INT_MASK, ~pfdev->as_faulty_mask);
pfdev->as_faulty_mask &= ~mask;
panfrost_mmu_enable(pfdev, mmu);
}
goto out;
}
/* Check for a free AS */
as = ffz(pfdev->as_alloc_mask);
if (!(BIT(as) & pfdev->features.as_present)) {
struct panfrost_mmu *lru_mmu;
list_for_each_entry_reverse(lru_mmu, &pfdev->as_lru_list, list) {
if (!atomic_read(&lru_mmu->as_count))
break;
}
WARN_ON(&lru_mmu->list == &pfdev->as_lru_list);
list_del_init(&lru_mmu->list);
as = lru_mmu->as;
WARN_ON(as < 0);
lru_mmu->as = -1;
}
/* Assign the free or reclaimed AS to the FD */
mmu->as = as;
set_bit(as, &pfdev->as_alloc_mask);
atomic_set(&mmu->as_count, 1);
list_add(&mmu->list, &pfdev->as_lru_list);
dev_dbg(pfdev->dev, "Assigned AS%d to mmu %p, alloc_mask=%lx", as, mmu, pfdev->as_alloc_mask);
panfrost_mmu_enable(pfdev, mmu);
out:
spin_unlock(&pfdev->as_lock);
return as;
}
void panfrost_mmu_as_put(struct panfrost_device *pfdev, struct panfrost_mmu *mmu)
{
atomic_dec(&mmu->as_count);
WARN_ON(atomic_read(&mmu->as_count) < 0);
}
void panfrost_mmu_reset(struct panfrost_device *pfdev)
{
struct panfrost_mmu *mmu, *mmu_tmp;
spin_lock(&pfdev->as_lock);
pfdev->as_alloc_mask = 0;
pfdev->as_faulty_mask = 0;
list_for_each_entry_safe(mmu, mmu_tmp, &pfdev->as_lru_list, list) {
mmu->as = -1;
atomic_set(&mmu->as_count, 0);
list_del_init(&mmu->list);
}
spin_unlock(&pfdev->as_lock);
mmu_write(pfdev, MMU_INT_CLEAR, ~0);
mmu_write(pfdev, MMU_INT_MASK, ~0);
}
static size_t get_pgsize(u64 addr, size_t size, size_t *count)
{
/*
* io-pgtable only operates on multiple pages within a single table
* entry, so we need to split at boundaries of the table size, i.e.
* the next block size up. The distance from address A to the next
* boundary of block size B is logically B - A % B, but in unsigned
* two's complement where B is a power of two we get the equivalence
* B - A % B == (B - A) % B == (n * B - A) % B, and choose n = 0 :)
*/
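/*
 * Illustrative example: for addr = 0x1ff000 and size = 0x600000, successive
 * calls (with addr/size advanced by the previous result) return SZ_4K with
 * *count = 1 (one page up to the 2 MiB boundary), then SZ_2M with *count = 2,
 * then SZ_4K with *count = 511 for the remainder.
 */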
size_t blk_offset = -addr % SZ_2M;
if (blk_offset || size < SZ_2M) {
*count = min_not_zero(blk_offset, size) / SZ_4K;
return SZ_4K;
}
blk_offset = -addr % SZ_1G ?: SZ_1G;
*count = min(blk_offset, size) / SZ_2M;
return SZ_2M;
}
static void panfrost_mmu_flush_range(struct panfrost_device *pfdev,
struct panfrost_mmu *mmu,
u64 iova, u64 size)
{
if (mmu->as < 0)
return;
pm_runtime_get_noresume(pfdev->dev);
/* Flush the PTs only if we're already awake */
if (pm_runtime_active(pfdev->dev))
mmu_hw_do_operation(pfdev, mmu, iova, size, AS_COMMAND_FLUSH_PT);
pm_runtime_put_autosuspend(pfdev->dev);
}
static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu,
u64 iova, int prot, struct sg_table *sgt)
{
unsigned int count;
struct scatterlist *sgl;
struct io_pgtable_ops *ops = mmu->pgtbl_ops;
u64 start_iova = iova;
for_each_sgtable_dma_sg(sgt, sgl, count) {
unsigned long paddr = sg_dma_address(sgl);
size_t len = sg_dma_len(sgl);
dev_dbg(pfdev->dev, "map: as=%d, iova=%llx, paddr=%lx, len=%zx", mmu->as, iova, paddr, len);
while (len) {
size_t pgcount, mapped = 0;
size_t pgsize = get_pgsize(iova | paddr, len, &pgcount);
ops->map_pages(ops, iova, paddr, pgsize, pgcount, prot,
GFP_KERNEL, &mapped);
/* Don't get stuck if things have gone wrong */
mapped = max(mapped, pgsize);
iova += mapped;
paddr += mapped;
len -= mapped;
}
}
panfrost_mmu_flush_range(pfdev, mmu, start_iova, iova - start_iova);
return 0;
}
int panfrost_mmu_map(struct panfrost_gem_mapping *mapping)
{
struct panfrost_gem_object *bo = mapping->obj;
struct drm_gem_shmem_object *shmem = &bo->base;
struct drm_gem_object *obj = &shmem->base;
struct panfrost_device *pfdev = to_panfrost_device(obj->dev);
struct sg_table *sgt;
int prot = IOMMU_READ | IOMMU_WRITE;
if (WARN_ON(mapping->active))
return 0;
if (bo->noexec)
prot |= IOMMU_NOEXEC;
sgt = drm_gem_shmem_get_pages_sgt(shmem);
if (WARN_ON(IS_ERR(sgt)))
return PTR_ERR(sgt);
mmu_map_sg(pfdev, mapping->mmu, mapping->mmnode.start << PAGE_SHIFT,
prot, sgt);
mapping->active = true;
return 0;
}
void panfrost_mmu_unmap(struct panfrost_gem_mapping *mapping)
{
struct panfrost_gem_object *bo = mapping->obj;
struct drm_gem_object *obj = &bo->base.base;
struct panfrost_device *pfdev = to_panfrost_device(obj->dev);
struct io_pgtable_ops *ops = mapping->mmu->pgtbl_ops;
u64 iova = mapping->mmnode.start << PAGE_SHIFT;
size_t len = mapping->mmnode.size << PAGE_SHIFT;
size_t unmapped_len = 0;
if (WARN_ON(!mapping->active))
return;
dev_dbg(pfdev->dev, "unmap: as=%d, iova=%llx, len=%zx",
mapping->mmu->as, iova, len);
while (unmapped_len < len) {
size_t unmapped_page, pgcount;
size_t pgsize = get_pgsize(iova, len - unmapped_len, &pgcount);
if (bo->is_heap)
pgcount = 1;
if (!bo->is_heap || ops->iova_to_phys(ops, iova)) {
unmapped_page = ops->unmap_pages(ops, iova, pgsize, pgcount, NULL);
WARN_ON(unmapped_page != pgsize * pgcount);
}
iova += pgsize * pgcount;
unmapped_len += pgsize * pgcount;
}
panfrost_mmu_flush_range(pfdev, mapping->mmu,
mapping->mmnode.start << PAGE_SHIFT, len);
mapping->active = false;
}
static void mmu_tlb_inv_context_s1(void *cookie)
{}
static void mmu_tlb_sync_context(void *cookie)
{
//struct panfrost_mmu *mmu = cookie;
// TODO: Wait 1000 GPU cycles for HW_ISSUE_6367/T60X
}
static void mmu_tlb_flush_walk(unsigned long iova, size_t size, size_t granule,
void *cookie)
{
mmu_tlb_sync_context(cookie);
}
static const struct iommu_flush_ops mmu_tlb_ops = {
.tlb_flush_all = mmu_tlb_inv_context_s1,
.tlb_flush_walk = mmu_tlb_flush_walk,
};
static struct panfrost_gem_mapping *
addr_to_mapping(struct panfrost_device *pfdev, int as, u64 addr)
{
struct panfrost_gem_mapping *mapping = NULL;
struct drm_mm_node *node;
u64 offset = addr >> PAGE_SHIFT;
struct panfrost_mmu *mmu;
spin_lock(&pfdev->as_lock);
list_for_each_entry(mmu, &pfdev->as_lru_list, list) {
if (as == mmu->as)
goto found_mmu;
}
goto out;
found_mmu:
spin_lock(&mmu->mm_lock);
drm_mm_for_each_node(node, &mmu->mm) {
if (offset >= node->start &&
offset < (node->start + node->size)) {
mapping = drm_mm_node_to_panfrost_mapping(node);
kref_get(&mapping->refcount);
break;
}
}
spin_unlock(&mmu->mm_lock);
out:
spin_unlock(&pfdev->as_lock);
return mapping;
}
#define NUM_FAULT_PAGES (SZ_2M / PAGE_SIZE)
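/* With 4 KiB pages this is 512 pages, i.e. heap BOs grow in 2 MiB chunks. */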
static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
u64 addr)
{
int ret, i;
struct panfrost_gem_mapping *bomapping;
struct panfrost_gem_object *bo;
struct address_space *mapping;
struct drm_gem_object *obj;
pgoff_t page_offset;
struct sg_table *sgt;
struct page **pages;
bomapping = addr_to_mapping(pfdev, as, addr);
if (!bomapping)
return -ENOENT;
bo = bomapping->obj;
if (!bo->is_heap) {
dev_WARN(pfdev->dev, "matching BO is not heap type (GPU VA = %llx)",
bomapping->mmnode.start << PAGE_SHIFT);
ret = -EINVAL;
goto err_bo;
}
WARN_ON(bomapping->mmu->as != as);
/* Assume 2MB alignment and size multiple */
addr &= ~((u64)SZ_2M - 1);
page_offset = addr >> PAGE_SHIFT;
page_offset -= bomapping->mmnode.start;
obj = &bo->base.base;
dma_resv_lock(obj->resv, NULL);
if (!bo->base.pages) {
bo->sgts = kvmalloc_array(bo->base.base.size / SZ_2M,
sizeof(struct sg_table), GFP_KERNEL | __GFP_ZERO);
if (!bo->sgts) {
ret = -ENOMEM;
goto err_unlock;
}
pages = kvmalloc_array(bo->base.base.size >> PAGE_SHIFT,
sizeof(struct page *), GFP_KERNEL | __GFP_ZERO);
if (!pages) {
kvfree(bo->sgts);
bo->sgts = NULL;
ret = -ENOMEM;
goto err_unlock;
}
bo->base.pages = pages;
bo->base.pages_use_count = 1;
} else {
pages = bo->base.pages;
if (pages[page_offset]) {
/* Pages are already mapped, bail out. */
goto out;
}
}
mapping = bo->base.base.filp->f_mapping;
mapping_set_unevictable(mapping);
for (i = page_offset; i < page_offset + NUM_FAULT_PAGES; i++) {
pages[i] = shmem_read_mapping_page(mapping, i);
if (IS_ERR(pages[i])) {
ret = PTR_ERR(pages[i]);
pages[i] = NULL;
goto err_pages;
}
}
sgt = &bo->sgts[page_offset / (SZ_2M / PAGE_SIZE)];
ret = sg_alloc_table_from_pages(sgt, pages + page_offset,
NUM_FAULT_PAGES, 0, SZ_2M, GFP_KERNEL);
if (ret)
goto err_pages;
ret = dma_map_sgtable(pfdev->dev, sgt, DMA_BIDIRECTIONAL, 0);
if (ret)
goto err_map;
mmu_map_sg(pfdev, bomapping->mmu, addr,
IOMMU_WRITE | IOMMU_READ | IOMMU_NOEXEC, sgt);
bomapping->active = true;
dev_dbg(pfdev->dev, "mapped page fault @ AS%d %llx", as, addr);
out:
dma_resv_unlock(obj->resv);
panfrost_gem_mapping_put(bomapping);
return 0;
err_map:
sg_free_table(sgt);
err_pages:
drm_gem_shmem_put_pages(&bo->base);
err_unlock:
dma_resv_unlock(obj->resv);
err_bo:
panfrost_gem_mapping_put(bomapping);
return ret;
}
static void panfrost_mmu_release_ctx(struct kref *kref)
{
struct panfrost_mmu *mmu = container_of(kref, struct panfrost_mmu,
refcount);
struct panfrost_device *pfdev = mmu->pfdev;
spin_lock(&pfdev->as_lock);
if (mmu->as >= 0) {
pm_runtime_get_noresume(pfdev->dev);
if (pm_runtime_active(pfdev->dev))
panfrost_mmu_disable(pfdev, mmu->as);
pm_runtime_put_autosuspend(pfdev->dev);
clear_bit(mmu->as, &pfdev->as_alloc_mask);
clear_bit(mmu->as, &pfdev->as_in_use_mask);
list_del(&mmu->list);
}
spin_unlock(&pfdev->as_lock);
free_io_pgtable_ops(mmu->pgtbl_ops);
drm_mm_takedown(&mmu->mm);
kfree(mmu);
}
void panfrost_mmu_ctx_put(struct panfrost_mmu *mmu)
{
kref_put(&mmu->refcount, panfrost_mmu_release_ctx);
}
struct panfrost_mmu *panfrost_mmu_ctx_get(struct panfrost_mmu *mmu)
{
kref_get(&mmu->refcount);
return mmu;
}
#define PFN_4G (SZ_4G >> PAGE_SHIFT)
#define PFN_4G_MASK (PFN_4G - 1)
#define PFN_16M (SZ_16M >> PAGE_SHIFT)
static void panfrost_drm_mm_color_adjust(const struct drm_mm_node *node,
unsigned long color,
u64 *start, u64 *end)
{
/* Executable buffers can't start or end on a 4GB boundary */
if (!(color & PANFROST_BO_NOEXEC)) {
u64 next_seg;
if ((*start & PFN_4G_MASK) == 0)
(*start)++;
if ((*end & PFN_4G_MASK) == 0)
(*end)--;
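/*
 * If less than 16 MiB worth of pages remain before the next 4 GiB
 * boundary, start past that boundary instead: an executable BO (assumed
 * smaller than 16 MiB, see panfrost_gem_open()) placed here could
 * otherwise cross it.
 */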
next_seg = ALIGN(*start, PFN_4G);
if (next_seg - *start <= PFN_16M)
*start = next_seg + 1;
*end = min(*end, ALIGN(*start, PFN_4G) - 1);
}
}
struct panfrost_mmu *panfrost_mmu_ctx_create(struct panfrost_device *pfdev)
{
struct panfrost_mmu *mmu;
mmu = kzalloc(sizeof(*mmu), GFP_KERNEL);
if (!mmu)
return ERR_PTR(-ENOMEM);
mmu->pfdev = pfdev;
spin_lock_init(&mmu->mm_lock);
/* 4G is enough for now; this could be extended to 48-bit later. */
drm_mm_init(&mmu->mm, SZ_32M >> PAGE_SHIFT, (SZ_4G - SZ_32M) >> PAGE_SHIFT);
mmu->mm.color_adjust = panfrost_drm_mm_color_adjust;
INIT_LIST_HEAD(&mmu->list);
mmu->as = -1;
mmu->pgtbl_cfg = (struct io_pgtable_cfg) {
.pgsize_bitmap = SZ_4K | SZ_2M,
.ias = FIELD_GET(0xff, pfdev->features.mmu_features),
.oas = FIELD_GET(0xff00, pfdev->features.mmu_features),
.coherent_walk = pfdev->coherent,
.tlb = &mmu_tlb_ops,
.iommu_dev = pfdev->dev,
};
mmu->pgtbl_ops = alloc_io_pgtable_ops(ARM_MALI_LPAE, &mmu->pgtbl_cfg,
mmu);
if (!mmu->pgtbl_ops) {
kfree(mmu);
return ERR_PTR(-EINVAL);
}
kref_init(&mmu->refcount);
return mmu;
}
static const char *access_type_name(struct panfrost_device *pfdev,
u32 fault_status)
{
switch (fault_status & AS_FAULTSTATUS_ACCESS_TYPE_MASK) {
case AS_FAULTSTATUS_ACCESS_TYPE_ATOMIC:
if (panfrost_has_hw_feature(pfdev, HW_FEATURE_AARCH64_MMU))
return "ATOMIC";
else
return "UNKNOWN";
case AS_FAULTSTATUS_ACCESS_TYPE_READ:
return "READ";
case AS_FAULTSTATUS_ACCESS_TYPE_WRITE:
return "WRITE";
case AS_FAULTSTATUS_ACCESS_TYPE_EX:
return "EXECUTE";
default:
WARN_ON(1);
return NULL;
}
}
static irqreturn_t panfrost_mmu_irq_handler(int irq, void *data)
{
struct panfrost_device *pfdev = data;
if (!mmu_read(pfdev, MMU_INT_STAT))
return IRQ_NONE;
mmu_write(pfdev, MMU_INT_MASK, 0);
return IRQ_WAKE_THREAD;
}
static irqreturn_t panfrost_mmu_irq_handler_thread(int irq, void *data)
{
struct panfrost_device *pfdev = data;
u32 status = mmu_read(pfdev, MMU_INT_RAWSTAT);
int ret;
while (status) {
u32 as = ffs(status | (status >> 16)) - 1;
u32 mask = BIT(as) | BIT(as + 16);
u64 addr;
u32 fault_status;
u32 exception_type;
u32 access_type;
u32 source_id;
fault_status = mmu_read(pfdev, AS_FAULTSTATUS(as));
addr = mmu_read(pfdev, AS_FAULTADDRESS_LO(as));
addr |= (u64)mmu_read(pfdev, AS_FAULTADDRESS_HI(as)) << 32;
/* decode the fault status */
exception_type = fault_status & 0xFF;
access_type = (fault_status >> 8) & 0x3;
source_id = (fault_status >> 16);
mmu_write(pfdev, MMU_INT_CLEAR, mask);
/* Page fault only */
ret = -1;
if ((status & mask) == BIT(as) && (exception_type & 0xF8) == 0xC0)
ret = panfrost_mmu_map_fault_addr(pfdev, as, addr);
if (ret) {
/* terminal fault, print info about the fault */
dev_err(pfdev->dev,
"Unhandled Page fault in AS%d at VA 0x%016llX\n"
"Reason: %s\n"
"raw fault status: 0x%X\n"
"decoded fault status: %s\n"
"exception type 0x%X: %s\n"
"access type 0x%X: %s\n"
"source id 0x%X\n",
as, addr,
"TODO",
fault_status,
(fault_status & (1 << 10) ? "DECODER FAULT" : "SLAVE FAULT"),
exception_type, panfrost_exception_name(exception_type),
access_type, access_type_name(pfdev, fault_status),
source_id);
spin_lock(&pfdev->as_lock);
/* Ignore MMU interrupts on this AS until it's been
* re-enabled.
*/
pfdev->as_faulty_mask |= mask;
/* Disable the MMU to kill jobs on this AS. */
panfrost_mmu_disable(pfdev, as);
spin_unlock(&pfdev->as_lock);
}
status &= ~mask;
/* If we received new MMU interrupts, process them before returning. */
if (!status)
status = mmu_read(pfdev, MMU_INT_RAWSTAT) & ~pfdev->as_faulty_mask;
}
spin_lock(&pfdev->as_lock);
mmu_write(pfdev, MMU_INT_MASK, ~pfdev->as_faulty_mask);
spin_unlock(&pfdev->as_lock);
return IRQ_HANDLED;
}
int panfrost_mmu_init(struct panfrost_device *pfdev)
{
int err, irq;
irq = platform_get_irq_byname(to_platform_device(pfdev->dev), "mmu");
if (irq <= 0)
return -ENODEV;
err = devm_request_threaded_irq(pfdev->dev, irq,
panfrost_mmu_irq_handler,
panfrost_mmu_irq_handler_thread,
IRQF_SHARED, KBUILD_MODNAME "-mmu",
pfdev);
if (err) {
dev_err(pfdev->dev, "failed to request mmu irq");
return err;
}
return 0;
}
void panfrost_mmu_fini(struct panfrost_device *pfdev)
{
mmu_write(pfdev, MMU_INT_MASK, 0);
}
| linux-master | drivers/gpu/drm/panfrost/panfrost_mmu.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright 2019 Linaro, Ltd, Rob Herring <[email protected]> */
/* Copyright 2019 Collabora ltd. */
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/dma-resv.h>
#include <drm/gpu_scheduler.h>
#include <drm/panfrost_drm.h>
#include "panfrost_device.h"
#include "panfrost_devfreq.h"
#include "panfrost_job.h"
#include "panfrost_features.h"
#include "panfrost_issues.h"
#include "panfrost_gem.h"
#include "panfrost_regs.h"
#include "panfrost_gpu.h"
#include "panfrost_mmu.h"
#include "panfrost_dump.h"
#define JOB_TIMEOUT_MS 500
#define job_write(dev, reg, data) writel(data, dev->iomem + (reg))
#define job_read(dev, reg) readl(dev->iomem + (reg))
struct panfrost_queue_state {
struct drm_gpu_scheduler sched;
u64 fence_context;
u64 emit_seqno;
};
struct panfrost_job_slot {
struct panfrost_queue_state queue[NUM_JOB_SLOTS];
spinlock_t job_lock;
int irq;
};
static struct panfrost_job *
to_panfrost_job(struct drm_sched_job *sched_job)
{
return container_of(sched_job, struct panfrost_job, base);
}
struct panfrost_fence {
struct dma_fence base;
struct drm_device *dev;
/* panfrost seqno for signaled() test */
u64 seqno;
int queue;
};
static inline struct panfrost_fence *
to_panfrost_fence(struct dma_fence *fence)
{
return (struct panfrost_fence *)fence;
}
static const char *panfrost_fence_get_driver_name(struct dma_fence *fence)
{
return "panfrost";
}
static const char *panfrost_fence_get_timeline_name(struct dma_fence *fence)
{
struct panfrost_fence *f = to_panfrost_fence(fence);
switch (f->queue) {
case 0:
return "panfrost-js-0";
case 1:
return "panfrost-js-1";
case 2:
return "panfrost-js-2";
default:
return NULL;
}
}
static const struct dma_fence_ops panfrost_fence_ops = {
.get_driver_name = panfrost_fence_get_driver_name,
.get_timeline_name = panfrost_fence_get_timeline_name,
};
static struct dma_fence *panfrost_fence_create(struct panfrost_device *pfdev, int js_num)
{
struct panfrost_fence *fence;
struct panfrost_job_slot *js = pfdev->js;
fence = kzalloc(sizeof(*fence), GFP_KERNEL);
if (!fence)
return ERR_PTR(-ENOMEM);
fence->dev = pfdev->ddev;
fence->queue = js_num;
fence->seqno = ++js->queue[js_num].emit_seqno;
dma_fence_init(&fence->base, &panfrost_fence_ops, &js->job_lock,
js->queue[js_num].fence_context, fence->seqno);
return &fence->base;
}
int panfrost_job_get_slot(struct panfrost_job *job)
{
/* JS0: fragment jobs.
* JS1: vertex/tiler jobs
* JS2: compute jobs
*/
if (job->requirements & PANFROST_JD_REQ_FS)
return 0;
/* Not exposed to userspace yet */
#if 0
if (job->requirements & PANFROST_JD_REQ_ONLY_COMPUTE) {
if ((job->requirements & PANFROST_JD_REQ_CORE_GRP_MASK) &&
(job->pfdev->features.nr_core_groups == 2))
return 2;
if (panfrost_has_hw_issue(job->pfdev, HW_ISSUE_8987))
return 2;
}
#endif
return 1;
}
static void panfrost_job_write_affinity(struct panfrost_device *pfdev,
u32 requirements,
int js)
{
u64 affinity;
/*
* Use all cores for now.
* Eventually we may need to support tiler only jobs and h/w with
* multiple (2) coherent core groups
*/
affinity = pfdev->features.shader_present;
job_write(pfdev, JS_AFFINITY_NEXT_LO(js), lower_32_bits(affinity));
job_write(pfdev, JS_AFFINITY_NEXT_HI(js), upper_32_bits(affinity));
}
static u32
panfrost_get_job_chain_flag(const struct panfrost_job *job)
{
struct panfrost_fence *f = to_panfrost_fence(job->done_fence);
if (!panfrost_has_hw_feature(job->pfdev, HW_FEATURE_JOBCHAIN_DISAMBIGUATION))
return 0;
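/*
 * The flag simply alternates with the fence seqno, so consecutive jobs
 * queued on the same slot carry different chain flags and can be told
 * apart by the hardware and by the close/reset paths.
 */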
return (f->seqno & 1) ? JS_CONFIG_JOB_CHAIN_FLAG : 0;
}
static struct panfrost_job *
panfrost_dequeue_job(struct panfrost_device *pfdev, int slot)
{
struct panfrost_job *job = pfdev->jobs[slot][0];
WARN_ON(!job);
pfdev->jobs[slot][0] = pfdev->jobs[slot][1];
pfdev->jobs[slot][1] = NULL;
return job;
}
static unsigned int
panfrost_enqueue_job(struct panfrost_device *pfdev, int slot,
struct panfrost_job *job)
{
if (WARN_ON(!job))
return 0;
if (!pfdev->jobs[slot][0]) {
pfdev->jobs[slot][0] = job;
return 0;
}
WARN_ON(pfdev->jobs[slot][1]);
pfdev->jobs[slot][1] = job;
WARN_ON(panfrost_get_job_chain_flag(job) ==
panfrost_get_job_chain_flag(pfdev->jobs[slot][0]));
return 1;
}
static void panfrost_job_hw_submit(struct panfrost_job *job, int js)
{
struct panfrost_device *pfdev = job->pfdev;
unsigned int subslot;
u32 cfg;
u64 jc_head = job->jc;
int ret;
panfrost_devfreq_record_busy(&pfdev->pfdevfreq);
ret = pm_runtime_get_sync(pfdev->dev);
if (ret < 0)
return;
if (WARN_ON(job_read(pfdev, JS_COMMAND_NEXT(js)))) {
return;
}
cfg = panfrost_mmu_as_get(pfdev, job->mmu);
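/*
 * The address space number returned by panfrost_mmu_as_get() ends up in
 * the low bits of JS_CONFIG (see the "AS %d", cfg & 0xf debug print below).
 */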
job_write(pfdev, JS_HEAD_NEXT_LO(js), lower_32_bits(jc_head));
job_write(pfdev, JS_HEAD_NEXT_HI(js), upper_32_bits(jc_head));
panfrost_job_write_affinity(pfdev, job->requirements, js);
/* start MMU, medium priority, cache clean/flush on end, clean/flush on
* start */
cfg |= JS_CONFIG_THREAD_PRI(8) |
JS_CONFIG_START_FLUSH_CLEAN_INVALIDATE |
JS_CONFIG_END_FLUSH_CLEAN_INVALIDATE |
panfrost_get_job_chain_flag(job);
if (panfrost_has_hw_feature(pfdev, HW_FEATURE_FLUSH_REDUCTION))
cfg |= JS_CONFIG_ENABLE_FLUSH_REDUCTION;
if (panfrost_has_hw_issue(pfdev, HW_ISSUE_10649))
cfg |= JS_CONFIG_START_MMU;
job_write(pfdev, JS_CONFIG_NEXT(js), cfg);
if (panfrost_has_hw_feature(pfdev, HW_FEATURE_FLUSH_REDUCTION))
job_write(pfdev, JS_FLUSH_ID_NEXT(js), job->flush_id);
/* GO ! */
spin_lock(&pfdev->js->job_lock);
subslot = panfrost_enqueue_job(pfdev, js, job);
/* Don't queue the job if a reset is in progress */
if (!atomic_read(&pfdev->reset.pending)) {
job_write(pfdev, JS_COMMAND_NEXT(js), JS_COMMAND_START);
dev_dbg(pfdev->dev,
"JS: Submitting atom %p to js[%d][%d] with head=0x%llx AS %d",
job, js, subslot, jc_head, cfg & 0xf);
}
spin_unlock(&pfdev->js->job_lock);
}
static int panfrost_acquire_object_fences(struct drm_gem_object **bos,
int bo_count,
struct drm_sched_job *job)
{
int i, ret;
for (i = 0; i < bo_count; i++) {
ret = dma_resv_reserve_fences(bos[i]->resv, 1);
if (ret)
return ret;
/* panfrost always uses write mode in its current uapi */
ret = drm_sched_job_add_implicit_dependencies(job, bos[i],
true);
if (ret)
return ret;
}
return 0;
}
static void panfrost_attach_object_fences(struct drm_gem_object **bos,
int bo_count,
struct dma_fence *fence)
{
int i;
for (i = 0; i < bo_count; i++)
dma_resv_add_fence(bos[i]->resv, fence, DMA_RESV_USAGE_WRITE);
}
int panfrost_job_push(struct panfrost_job *job)
{
struct panfrost_device *pfdev = job->pfdev;
struct ww_acquire_ctx acquire_ctx;
int ret = 0;
ret = drm_gem_lock_reservations(job->bos, job->bo_count,
&acquire_ctx);
if (ret)
return ret;
mutex_lock(&pfdev->sched_lock);
drm_sched_job_arm(&job->base);
job->render_done_fence = dma_fence_get(&job->base.s_fence->finished);
ret = panfrost_acquire_object_fences(job->bos, job->bo_count,
&job->base);
if (ret) {
mutex_unlock(&pfdev->sched_lock);
goto unlock;
}
kref_get(&job->refcount); /* put by scheduler job completion */
drm_sched_entity_push_job(&job->base);
mutex_unlock(&pfdev->sched_lock);
panfrost_attach_object_fences(job->bos, job->bo_count,
job->render_done_fence);
unlock:
drm_gem_unlock_reservations(job->bos, job->bo_count, &acquire_ctx);
return ret;
}
static void panfrost_job_cleanup(struct kref *ref)
{
struct panfrost_job *job = container_of(ref, struct panfrost_job,
refcount);
unsigned int i;
dma_fence_put(job->done_fence);
dma_fence_put(job->render_done_fence);
if (job->mappings) {
for (i = 0; i < job->bo_count; i++) {
if (!job->mappings[i])
break;
atomic_dec(&job->mappings[i]->obj->gpu_usecount);
panfrost_gem_mapping_put(job->mappings[i]);
}
kvfree(job->mappings);
}
if (job->bos) {
for (i = 0; i < job->bo_count; i++)
drm_gem_object_put(job->bos[i]);
kvfree(job->bos);
}
kfree(job);
}
void panfrost_job_put(struct panfrost_job *job)
{
kref_put(&job->refcount, panfrost_job_cleanup);
}
static void panfrost_job_free(struct drm_sched_job *sched_job)
{
struct panfrost_job *job = to_panfrost_job(sched_job);
drm_sched_job_cleanup(sched_job);
panfrost_job_put(job);
}
static struct dma_fence *panfrost_job_run(struct drm_sched_job *sched_job)
{
struct panfrost_job *job = to_panfrost_job(sched_job);
struct panfrost_device *pfdev = job->pfdev;
int slot = panfrost_job_get_slot(job);
struct dma_fence *fence = NULL;
if (unlikely(job->base.s_fence->finished.error))
return NULL;
/* Nothing to execute: can happen if the job has finished while
* we were resetting the GPU.
*/
if (!job->jc)
return NULL;
fence = panfrost_fence_create(pfdev, slot);
if (IS_ERR(fence))
return fence;
if (job->done_fence)
dma_fence_put(job->done_fence);
job->done_fence = dma_fence_get(fence);
panfrost_job_hw_submit(job, slot);
return fence;
}
void panfrost_job_enable_interrupts(struct panfrost_device *pfdev)
{
int j;
u32 irq_mask = 0;
for (j = 0; j < NUM_JOB_SLOTS; j++) {
irq_mask |= MK_JS_MASK(j);
}
job_write(pfdev, JOB_INT_CLEAR, irq_mask);
job_write(pfdev, JOB_INT_MASK, irq_mask);
}
static void panfrost_job_handle_err(struct panfrost_device *pfdev,
struct panfrost_job *job,
unsigned int js)
{
u32 js_status = job_read(pfdev, JS_STATUS(js));
const char *exception_name = panfrost_exception_name(js_status);
bool signal_fence = true;
if (!panfrost_exception_is_fault(js_status)) {
dev_dbg(pfdev->dev, "js event, js=%d, status=%s, head=0x%x, tail=0x%x",
js, exception_name,
job_read(pfdev, JS_HEAD_LO(js)),
job_read(pfdev, JS_TAIL_LO(js)));
} else {
dev_err(pfdev->dev, "js fault, js=%d, status=%s, head=0x%x, tail=0x%x",
js, exception_name,
job_read(pfdev, JS_HEAD_LO(js)),
job_read(pfdev, JS_TAIL_LO(js)));
}
if (js_status == DRM_PANFROST_EXCEPTION_STOPPED) {
/* Update the job head so we can resume */
job->jc = job_read(pfdev, JS_TAIL_LO(js)) |
((u64)job_read(pfdev, JS_TAIL_HI(js)) << 32);
/* The job will be resumed, don't signal the fence */
signal_fence = false;
} else if (js_status == DRM_PANFROST_EXCEPTION_TERMINATED) {
/* Job has been hard-stopped, flag it as canceled */
dma_fence_set_error(job->done_fence, -ECANCELED);
job->jc = 0;
} else if (panfrost_exception_is_fault(js_status)) {
/* We might want to provide finer-grained error code based on
* the exception type, but unconditionally setting to EINVAL
* is good enough for now.
*/
dma_fence_set_error(job->done_fence, -EINVAL);
job->jc = 0;
}
panfrost_mmu_as_put(pfdev, job->mmu);
panfrost_devfreq_record_idle(&pfdev->pfdevfreq);
if (signal_fence)
dma_fence_signal_locked(job->done_fence);
pm_runtime_put_autosuspend(pfdev->dev);
if (panfrost_exception_needs_reset(pfdev, js_status)) {
atomic_set(&pfdev->reset.pending, 1);
drm_sched_fault(&pfdev->js->queue[js].sched);
}
}
static void panfrost_job_handle_done(struct panfrost_device *pfdev,
struct panfrost_job *job)
{
/* Set ->jc to 0 to avoid re-submitting an already finished job (can
* happen when we receive the DONE interrupt while doing a GPU reset).
*/
job->jc = 0;
panfrost_mmu_as_put(pfdev, job->mmu);
panfrost_devfreq_record_idle(&pfdev->pfdevfreq);
dma_fence_signal_locked(job->done_fence);
pm_runtime_put_autosuspend(pfdev->dev);
}
static void panfrost_job_handle_irq(struct panfrost_device *pfdev, u32 status)
{
struct panfrost_job *done[NUM_JOB_SLOTS][2] = {};
struct panfrost_job *failed[NUM_JOB_SLOTS] = {};
u32 js_state = 0, js_events = 0;
unsigned int i, j;
/* First we collect all failed/done jobs. */
while (status) {
u32 js_state_mask = 0;
for (j = 0; j < NUM_JOB_SLOTS; j++) {
if (status & MK_JS_MASK(j))
js_state_mask |= MK_JS_MASK(j);
if (status & JOB_INT_MASK_DONE(j)) {
if (done[j][0])
done[j][1] = panfrost_dequeue_job(pfdev, j);
else
done[j][0] = panfrost_dequeue_job(pfdev, j);
}
if (status & JOB_INT_MASK_ERR(j)) {
/* Cancel the next submission. Will be submitted
* after we're done handling this failure if
* there's no reset pending.
*/
job_write(pfdev, JS_COMMAND_NEXT(j), JS_COMMAND_NOP);
failed[j] = panfrost_dequeue_job(pfdev, j);
}
}
/* JS_STATE is sampled when JOB_INT_CLEAR is written.
* For each BIT(slot) or BIT(slot + 16) bit written to
* JOB_INT_CLEAR, the corresponding bits in JS_STATE
* (BIT(slot) and BIT(slot + 16)) are updated, but this
* is racy. If we only have one job done at the time we
* read JOB_INT_RAWSTAT but the second job fails before we
* clear the status, we end up with a status containing
* only the DONE bit and consider both jobs as DONE since
* JS_STATE reports both NEXT and CURRENT as inactive.
* To prevent that, let's repeat this clear+read steps
* until status is 0.
*/
job_write(pfdev, JOB_INT_CLEAR, status);
js_state &= ~js_state_mask;
js_state |= job_read(pfdev, JOB_INT_JS_STATE) & js_state_mask;
js_events |= status;
status = job_read(pfdev, JOB_INT_RAWSTAT);
}
/* Then we handle the dequeued jobs. */
for (j = 0; j < NUM_JOB_SLOTS; j++) {
if (!(js_events & MK_JS_MASK(j)))
continue;
if (failed[j]) {
panfrost_job_handle_err(pfdev, failed[j], j);
} else if (pfdev->jobs[j][0] && !(js_state & MK_JS_MASK(j))) {
/* When the current job doesn't fail, the JM dequeues
* the next job without waiting for an ACK, this means
* we can have 2 jobs dequeued and only catch the
* interrupt when the second one is done. If both slots
* are inactive, but one job remains in pfdev->jobs[j],
* consider it done. Of course that doesn't apply if a
* failure happened since we cancelled execution of the
* job in _NEXT (see above).
*/
if (WARN_ON(!done[j][0]))
done[j][0] = panfrost_dequeue_job(pfdev, j);
else
done[j][1] = panfrost_dequeue_job(pfdev, j);
}
for (i = 0; i < ARRAY_SIZE(done[0]) && done[j][i]; i++)
panfrost_job_handle_done(pfdev, done[j][i]);
}
/* And finally we requeue jobs that were waiting in the second slot
* and have been stopped if we detected a failure on the first slot.
*/
for (j = 0; j < NUM_JOB_SLOTS; j++) {
if (!(js_events & MK_JS_MASK(j)))
continue;
if (!failed[j] || !pfdev->jobs[j][0])
continue;
if (pfdev->jobs[j][0]->jc == 0) {
/* The job was cancelled, signal the fence now */
struct panfrost_job *canceled = panfrost_dequeue_job(pfdev, j);
dma_fence_set_error(canceled->done_fence, -ECANCELED);
panfrost_job_handle_done(pfdev, canceled);
} else if (!atomic_read(&pfdev->reset.pending)) {
/* Requeue the job we removed if no reset is pending */
job_write(pfdev, JS_COMMAND_NEXT(j), JS_COMMAND_START);
}
}
}
static void panfrost_job_handle_irqs(struct panfrost_device *pfdev)
{
u32 status = job_read(pfdev, JOB_INT_RAWSTAT);
while (status) {
pm_runtime_mark_last_busy(pfdev->dev);
spin_lock(&pfdev->js->job_lock);
panfrost_job_handle_irq(pfdev, status);
spin_unlock(&pfdev->js->job_lock);
status = job_read(pfdev, JOB_INT_RAWSTAT);
}
}
static u32 panfrost_active_slots(struct panfrost_device *pfdev,
u32 *js_state_mask, u32 js_state)
{
u32 rawstat;
if (!(js_state & *js_state_mask))
return 0;
rawstat = job_read(pfdev, JOB_INT_RAWSTAT);
if (rawstat) {
unsigned int i;
for (i = 0; i < NUM_JOB_SLOTS; i++) {
if (rawstat & MK_JS_MASK(i))
*js_state_mask &= ~MK_JS_MASK(i);
}
}
return js_state & *js_state_mask;
}
static void
panfrost_reset(struct panfrost_device *pfdev,
struct drm_sched_job *bad)
{
u32 js_state, js_state_mask = 0xffffffff;
unsigned int i, j;
bool cookie;
int ret;
if (!atomic_read(&pfdev->reset.pending))
return;
/* Stop the schedulers.
*
* FIXME: We temporarily get out of the dma_fence_signalling section
 * because the cleanup path generates lockdep splats when taking locks
* to release job resources. We should rework the code to follow this
* pattern:
*
* try_lock
* if (locked)
* release
* else
* schedule_work_to_release_later
*/
for (i = 0; i < NUM_JOB_SLOTS; i++)
drm_sched_stop(&pfdev->js->queue[i].sched, bad);
cookie = dma_fence_begin_signalling();
if (bad)
drm_sched_increase_karma(bad);
/* Mask job interrupts and synchronize to make sure we won't be
* interrupted during our reset.
*/
job_write(pfdev, JOB_INT_MASK, 0);
synchronize_irq(pfdev->js->irq);
for (i = 0; i < NUM_JOB_SLOTS; i++) {
/* Cancel the next job and soft-stop the running job. */
job_write(pfdev, JS_COMMAND_NEXT(i), JS_COMMAND_NOP);
job_write(pfdev, JS_COMMAND(i), JS_COMMAND_SOFT_STOP);
}
/* Wait at most 10ms for soft-stops to complete */
ret = readl_poll_timeout(pfdev->iomem + JOB_INT_JS_STATE, js_state,
!panfrost_active_slots(pfdev, &js_state_mask, js_state),
10, 10000);
if (ret)
dev_err(pfdev->dev, "Soft-stop failed\n");
/* Handle the remaining interrupts before we reset. */
panfrost_job_handle_irqs(pfdev);
/* Remaining interrupts have been handled, but we might still have
* stuck jobs. Let's make sure the PM counters stay balanced by
* manually calling pm_runtime_put_noidle() and
* panfrost_devfreq_record_idle() for each stuck job.
*/
spin_lock(&pfdev->js->job_lock);
for (i = 0; i < NUM_JOB_SLOTS; i++) {
for (j = 0; j < ARRAY_SIZE(pfdev->jobs[0]) && pfdev->jobs[i][j]; j++) {
pm_runtime_put_noidle(pfdev->dev);
panfrost_devfreq_record_idle(&pfdev->pfdevfreq);
}
}
memset(pfdev->jobs, 0, sizeof(pfdev->jobs));
spin_unlock(&pfdev->js->job_lock);
/* Proceed with reset now. */
panfrost_device_reset(pfdev);
/* panfrost_device_reset() unmasks job interrupts, but we want to
* keep them masked a bit longer.
*/
job_write(pfdev, JOB_INT_MASK, 0);
/* GPU has been reset, we can clear the reset pending bit. */
atomic_set(&pfdev->reset.pending, 0);
/* Now resubmit jobs that were previously queued but didn't have a
* chance to finish.
* FIXME: We temporarily get out of the DMA fence signalling section
* while resubmitting jobs because the job submission logic will
* allocate memory with the GFP_KERNEL flag which can trigger memory
* reclaim and exposes a lock ordering issue.
*/
dma_fence_end_signalling(cookie);
for (i = 0; i < NUM_JOB_SLOTS; i++)
drm_sched_resubmit_jobs(&pfdev->js->queue[i].sched);
cookie = dma_fence_begin_signalling();
/* Restart the schedulers */
for (i = 0; i < NUM_JOB_SLOTS; i++)
drm_sched_start(&pfdev->js->queue[i].sched, true);
/* Re-enable job interrupts now that everything has been restarted. */
job_write(pfdev, JOB_INT_MASK,
GENMASK(16 + NUM_JOB_SLOTS - 1, 16) |
GENMASK(NUM_JOB_SLOTS - 1, 0));
dma_fence_end_signalling(cookie);
}
static enum drm_gpu_sched_stat panfrost_job_timedout(struct drm_sched_job
*sched_job)
{
struct panfrost_job *job = to_panfrost_job(sched_job);
struct panfrost_device *pfdev = job->pfdev;
int js = panfrost_job_get_slot(job);
/*
 * If the GPU managed to complete this job's fence, the timeout is
* spurious. Bail out.
*/
if (dma_fence_is_signaled(job->done_fence))
return DRM_GPU_SCHED_STAT_NOMINAL;
/*
* Panfrost IRQ handler may take a long time to process an interrupt
* if there is another IRQ handler hogging the processing.
* For example, the HDMI encoder driver might be stuck in the IRQ
* handler for a significant time in the case of a bad cable connection.
* In order to catch such cases and not report spurious Panfrost
* job timeouts, synchronize the IRQ handler and re-check the fence
* status.
*/
synchronize_irq(pfdev->js->irq);
if (dma_fence_is_signaled(job->done_fence)) {
dev_warn(pfdev->dev, "unexpectedly high interrupt latency\n");
return DRM_GPU_SCHED_STAT_NOMINAL;
}
dev_err(pfdev->dev, "gpu sched timeout, js=%d, config=0x%x, status=0x%x, head=0x%x, tail=0x%x, sched_job=%p",
js,
job_read(pfdev, JS_CONFIG(js)),
job_read(pfdev, JS_STATUS(js)),
job_read(pfdev, JS_HEAD_LO(js)),
job_read(pfdev, JS_TAIL_LO(js)),
sched_job);
panfrost_core_dump(job);
atomic_set(&pfdev->reset.pending, 1);
panfrost_reset(pfdev, sched_job);
return DRM_GPU_SCHED_STAT_NOMINAL;
}
static void panfrost_reset_work(struct work_struct *work)
{
struct panfrost_device *pfdev;
pfdev = container_of(work, struct panfrost_device, reset.work);
panfrost_reset(pfdev, NULL);
}
static const struct drm_sched_backend_ops panfrost_sched_ops = {
.run_job = panfrost_job_run,
.timedout_job = panfrost_job_timedout,
.free_job = panfrost_job_free
};
static irqreturn_t panfrost_job_irq_handler_thread(int irq, void *data)
{
struct panfrost_device *pfdev = data;
panfrost_job_handle_irqs(pfdev);
job_write(pfdev, JOB_INT_MASK,
GENMASK(16 + NUM_JOB_SLOTS - 1, 16) |
GENMASK(NUM_JOB_SLOTS - 1, 0));
return IRQ_HANDLED;
}
static irqreturn_t panfrost_job_irq_handler(int irq, void *data)
{
struct panfrost_device *pfdev = data;
u32 status = job_read(pfdev, JOB_INT_STAT);
if (!status)
return IRQ_NONE;
job_write(pfdev, JOB_INT_MASK, 0);
return IRQ_WAKE_THREAD;
}
int panfrost_job_init(struct panfrost_device *pfdev)
{
struct panfrost_job_slot *js;
unsigned int nentries = 2;
int ret, j;
/* All GPUs have two entries per queue, but without jobchain
* disambiguation, stopping the right job in the close path is tricky,
* so let's just advertise one entry in that case.
*/
if (!panfrost_has_hw_feature(pfdev, HW_FEATURE_JOBCHAIN_DISAMBIGUATION))
nentries = 1;
pfdev->js = js = devm_kzalloc(pfdev->dev, sizeof(*js), GFP_KERNEL);
if (!js)
return -ENOMEM;
INIT_WORK(&pfdev->reset.work, panfrost_reset_work);
spin_lock_init(&js->job_lock);
js->irq = platform_get_irq_byname(to_platform_device(pfdev->dev), "job");
if (js->irq <= 0)
return -ENODEV;
ret = devm_request_threaded_irq(pfdev->dev, js->irq,
panfrost_job_irq_handler,
panfrost_job_irq_handler_thread,
IRQF_SHARED, KBUILD_MODNAME "-job",
pfdev);
if (ret) {
dev_err(pfdev->dev, "failed to request job irq");
return ret;
}
pfdev->reset.wq = alloc_ordered_workqueue("panfrost-reset", 0);
if (!pfdev->reset.wq)
return -ENOMEM;
for (j = 0; j < NUM_JOB_SLOTS; j++) {
js->queue[j].fence_context = dma_fence_context_alloc(1);
ret = drm_sched_init(&js->queue[j].sched,
&panfrost_sched_ops,
nentries, 0,
msecs_to_jiffies(JOB_TIMEOUT_MS),
pfdev->reset.wq,
NULL, "pan_js", pfdev->dev);
if (ret) {
dev_err(pfdev->dev, "Failed to create scheduler: %d.", ret);
goto err_sched;
}
}
panfrost_job_enable_interrupts(pfdev);
return 0;
err_sched:
for (j--; j >= 0; j--)
drm_sched_fini(&js->queue[j].sched);
destroy_workqueue(pfdev->reset.wq);
return ret;
}
void panfrost_job_fini(struct panfrost_device *pfdev)
{
struct panfrost_job_slot *js = pfdev->js;
int j;
job_write(pfdev, JOB_INT_MASK, 0);
for (j = 0; j < NUM_JOB_SLOTS; j++) {
drm_sched_fini(&js->queue[j].sched);
}
cancel_work_sync(&pfdev->reset.work);
destroy_workqueue(pfdev->reset.wq);
}
int panfrost_job_open(struct panfrost_file_priv *panfrost_priv)
{
struct panfrost_device *pfdev = panfrost_priv->pfdev;
struct panfrost_job_slot *js = pfdev->js;
struct drm_gpu_scheduler *sched;
int ret, i;
for (i = 0; i < NUM_JOB_SLOTS; i++) {
sched = &js->queue[i].sched;
ret = drm_sched_entity_init(&panfrost_priv->sched_entity[i],
DRM_SCHED_PRIORITY_NORMAL, &sched,
1, NULL);
if (WARN_ON(ret))
return ret;
}
return 0;
}
void panfrost_job_close(struct panfrost_file_priv *panfrost_priv)
{
struct panfrost_device *pfdev = panfrost_priv->pfdev;
int i;
for (i = 0; i < NUM_JOB_SLOTS; i++)
drm_sched_entity_destroy(&panfrost_priv->sched_entity[i]);
/* Kill in-flight jobs */
spin_lock(&pfdev->js->job_lock);
for (i = 0; i < NUM_JOB_SLOTS; i++) {
struct drm_sched_entity *entity = &panfrost_priv->sched_entity[i];
int j;
for (j = ARRAY_SIZE(pfdev->jobs[0]) - 1; j >= 0; j--) {
struct panfrost_job *job = pfdev->jobs[i][j];
u32 cmd;
if (!job || job->base.entity != entity)
continue;
if (j == 1) {
/* Try to cancel the job before it starts */
job_write(pfdev, JS_COMMAND_NEXT(i), JS_COMMAND_NOP);
/* Reset the job head so it doesn't get restarted if
* the job in the first slot failed.
*/
job->jc = 0;
}
if (panfrost_has_hw_feature(pfdev, HW_FEATURE_JOBCHAIN_DISAMBIGUATION)) {
cmd = panfrost_get_job_chain_flag(job) ?
JS_COMMAND_HARD_STOP_1 :
JS_COMMAND_HARD_STOP_0;
} else {
cmd = JS_COMMAND_HARD_STOP;
}
job_write(pfdev, JS_COMMAND(i), cmd);
}
}
spin_unlock(&pfdev->js->job_lock);
}
int panfrost_job_is_idle(struct panfrost_device *pfdev)
{
struct panfrost_job_slot *js = pfdev->js;
int i;
for (i = 0; i < NUM_JOB_SLOTS; i++) {
/* If there are any jobs in the HW queue, we're not idle */
if (atomic_read(&js->queue[i].sched.hw_rq_count))
return false;
}
return true;
}
| linux-master | drivers/gpu/drm/panfrost/panfrost_job.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2019 Arm Ltd.
*
* Based on msm_gem_freedreno.c:
* Copyright (C) 2016 Red Hat
* Author: Rob Clark <[email protected]>
*/
#include <linux/list.h>
#include <drm/drm_device.h>
#include <drm/drm_gem_shmem_helper.h>
#include "panfrost_device.h"
#include "panfrost_gem.h"
#include "panfrost_mmu.h"
static unsigned long
panfrost_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
{
struct panfrost_device *pfdev =
container_of(shrinker, struct panfrost_device, shrinker);
struct drm_gem_shmem_object *shmem;
unsigned long count = 0;
if (!mutex_trylock(&pfdev->shrinker_lock))
return 0;
list_for_each_entry(shmem, &pfdev->shrinker_list, madv_list) {
if (drm_gem_shmem_is_purgeable(shmem))
count += shmem->base.size >> PAGE_SHIFT;
}
mutex_unlock(&pfdev->shrinker_lock);
return count;
}
static bool panfrost_gem_purge(struct drm_gem_object *obj)
{
struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
struct panfrost_gem_object *bo = to_panfrost_bo(obj);
bool ret = false;
if (atomic_read(&bo->gpu_usecount))
return false;
if (!mutex_trylock(&bo->mappings.lock))
return false;
if (!dma_resv_trylock(shmem->base.resv))
goto unlock_mappings;
panfrost_gem_teardown_mappings_locked(bo);
drm_gem_shmem_purge(&bo->base);
ret = true;
dma_resv_unlock(shmem->base.resv);
unlock_mappings:
mutex_unlock(&bo->mappings.lock);
return ret;
}
static unsigned long
panfrost_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
{
struct panfrost_device *pfdev =
container_of(shrinker, struct panfrost_device, shrinker);
struct drm_gem_shmem_object *shmem, *tmp;
unsigned long freed = 0;
if (!mutex_trylock(&pfdev->shrinker_lock))
return SHRINK_STOP;
list_for_each_entry_safe(shmem, tmp, &pfdev->shrinker_list, madv_list) {
if (freed >= sc->nr_to_scan)
break;
if (drm_gem_shmem_is_purgeable(shmem) &&
panfrost_gem_purge(&shmem->base)) {
freed += shmem->base.size >> PAGE_SHIFT;
list_del_init(&shmem->madv_list);
}
}
mutex_unlock(&pfdev->shrinker_lock);
if (freed > 0)
pr_info_ratelimited("Purging %lu bytes\n", freed << PAGE_SHIFT);
return freed;
}
/**
* panfrost_gem_shrinker_init - Initialize panfrost shrinker
* @dev: DRM device
*
* This function registers and sets up the panfrost shrinker.
*/
void panfrost_gem_shrinker_init(struct drm_device *dev)
{
struct panfrost_device *pfdev = dev->dev_private;
pfdev->shrinker.count_objects = panfrost_gem_shrinker_count;
pfdev->shrinker.scan_objects = panfrost_gem_shrinker_scan;
pfdev->shrinker.seeks = DEFAULT_SEEKS;
WARN_ON(register_shrinker(&pfdev->shrinker, "drm-panfrost"));
}
/**
* panfrost_gem_shrinker_cleanup - Clean up panfrost shrinker
* @dev: DRM device
*
* This function unregisters the panfrost shrinker.
*/
void panfrost_gem_shrinker_cleanup(struct drm_device *dev)
{
struct panfrost_device *pfdev = dev->dev_private;
if (pfdev->shrinker.nr_deferred) {
unregister_shrinker(&pfdev->shrinker);
}
}
| linux-master | drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright 2019 Collabora Ltd */
#include <linux/completion.h>
#include <linux/iopoll.h>
#include <linux/iosys-map.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <drm/drm_file.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/panfrost_drm.h>
#include "panfrost_device.h"
#include "panfrost_features.h"
#include "panfrost_gem.h"
#include "panfrost_issues.h"
#include "panfrost_job.h"
#include "panfrost_mmu.h"
#include "panfrost_perfcnt.h"
#include "panfrost_regs.h"
#define COUNTERS_PER_BLOCK 64
#define BYTES_PER_COUNTER 4
#define BLOCKS_PER_COREGROUP 8
#define V4_SHADERS_PER_COREGROUP 4
struct panfrost_perfcnt {
struct panfrost_gem_mapping *mapping;
size_t bosize;
void *buf;
struct panfrost_file_priv *user;
struct mutex lock;
struct completion dump_comp;
};
void panfrost_perfcnt_clean_cache_done(struct panfrost_device *pfdev)
{
complete(&pfdev->perfcnt->dump_comp);
}
void panfrost_perfcnt_sample_done(struct panfrost_device *pfdev)
{
gpu_write(pfdev, GPU_CMD, GPU_CMD_CLEAN_CACHES);
}
static int panfrost_perfcnt_dump_locked(struct panfrost_device *pfdev)
{
u64 gpuva;
int ret;
reinit_completion(&pfdev->perfcnt->dump_comp);
gpuva = pfdev->perfcnt->mapping->mmnode.start << PAGE_SHIFT;
gpu_write(pfdev, GPU_PERFCNT_BASE_LO, lower_32_bits(gpuva));
gpu_write(pfdev, GPU_PERFCNT_BASE_HI, upper_32_bits(gpuva));
gpu_write(pfdev, GPU_INT_CLEAR,
GPU_IRQ_CLEAN_CACHES_COMPLETED |
GPU_IRQ_PERFCNT_SAMPLE_COMPLETED);
gpu_write(pfdev, GPU_CMD, GPU_CMD_PERFCNT_SAMPLE);
ret = wait_for_completion_interruptible_timeout(&pfdev->perfcnt->dump_comp,
msecs_to_jiffies(1000));
if (!ret)
ret = -ETIMEDOUT;
else if (ret > 0)
ret = 0;
return ret;
}
static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev,
struct drm_file *file_priv,
unsigned int counterset)
{
struct panfrost_file_priv *user = file_priv->driver_priv;
struct panfrost_perfcnt *perfcnt = pfdev->perfcnt;
struct iosys_map map;
struct drm_gem_shmem_object *bo;
u32 cfg, as;
int ret;
if (user == perfcnt->user)
return 0;
else if (perfcnt->user)
return -EBUSY;
ret = pm_runtime_get_sync(pfdev->dev);
if (ret < 0)
goto err_put_pm;
bo = drm_gem_shmem_create(pfdev->ddev, perfcnt->bosize);
if (IS_ERR(bo)) {
ret = PTR_ERR(bo);
goto err_put_pm;
}
/* Map the perfcnt buf in the address space attached to file_priv. */
ret = panfrost_gem_open(&bo->base, file_priv);
if (ret)
goto err_put_bo;
perfcnt->mapping = panfrost_gem_mapping_get(to_panfrost_bo(&bo->base),
user);
if (!perfcnt->mapping) {
ret = -EINVAL;
goto err_close_bo;
}
ret = drm_gem_vmap_unlocked(&bo->base, &map);
if (ret)
goto err_put_mapping;
perfcnt->buf = map.vaddr;
/*
* Invalidate the cache and clear the counters to start from a fresh
* state.
*/
reinit_completion(&pfdev->perfcnt->dump_comp);
gpu_write(pfdev, GPU_INT_CLEAR,
GPU_IRQ_CLEAN_CACHES_COMPLETED |
GPU_IRQ_PERFCNT_SAMPLE_COMPLETED);
gpu_write(pfdev, GPU_CMD, GPU_CMD_PERFCNT_CLEAR);
gpu_write(pfdev, GPU_CMD, GPU_CMD_CLEAN_INV_CACHES);
ret = wait_for_completion_timeout(&pfdev->perfcnt->dump_comp,
msecs_to_jiffies(1000));
if (!ret) {
ret = -ETIMEDOUT;
goto err_vunmap;
}
perfcnt->user = user;
as = panfrost_mmu_as_get(pfdev, perfcnt->mapping->mmu);
cfg = GPU_PERFCNT_CFG_AS(as) |
GPU_PERFCNT_CFG_MODE(GPU_PERFCNT_CFG_MODE_MANUAL);
/*
* Bifrost GPUs have 2 sets of counters, but we're only interested in
* the first one for now.
*/
if (panfrost_model_is_bifrost(pfdev))
cfg |= GPU_PERFCNT_CFG_SETSEL(counterset);
gpu_write(pfdev, GPU_PRFCNT_JM_EN, 0xffffffff);
gpu_write(pfdev, GPU_PRFCNT_SHADER_EN, 0xffffffff);
gpu_write(pfdev, GPU_PRFCNT_MMU_L2_EN, 0xffffffff);
/*
* Due to PRLAM-8186 we need to disable the Tiler before we enable HW
* counters.
*/
if (panfrost_has_hw_issue(pfdev, HW_ISSUE_8186))
gpu_write(pfdev, GPU_PRFCNT_TILER_EN, 0);
else
gpu_write(pfdev, GPU_PRFCNT_TILER_EN, 0xffffffff);
gpu_write(pfdev, GPU_PERFCNT_CFG, cfg);
if (panfrost_has_hw_issue(pfdev, HW_ISSUE_8186))
gpu_write(pfdev, GPU_PRFCNT_TILER_EN, 0xffffffff);
/* The BO ref is retained by the mapping. */
drm_gem_object_put(&bo->base);
return 0;
err_vunmap:
drm_gem_vunmap_unlocked(&bo->base, &map);
err_put_mapping:
panfrost_gem_mapping_put(perfcnt->mapping);
err_close_bo:
panfrost_gem_close(&bo->base, file_priv);
err_put_bo:
drm_gem_object_put(&bo->base);
err_put_pm:
pm_runtime_put(pfdev->dev);
return ret;
}
static int panfrost_perfcnt_disable_locked(struct panfrost_device *pfdev,
struct drm_file *file_priv)
{
struct panfrost_file_priv *user = file_priv->driver_priv;
struct panfrost_perfcnt *perfcnt = pfdev->perfcnt;
struct iosys_map map = IOSYS_MAP_INIT_VADDR(perfcnt->buf);
if (user != perfcnt->user)
return -EINVAL;
gpu_write(pfdev, GPU_PRFCNT_JM_EN, 0x0);
gpu_write(pfdev, GPU_PRFCNT_SHADER_EN, 0x0);
gpu_write(pfdev, GPU_PRFCNT_MMU_L2_EN, 0x0);
gpu_write(pfdev, GPU_PRFCNT_TILER_EN, 0);
gpu_write(pfdev, GPU_PERFCNT_CFG,
GPU_PERFCNT_CFG_MODE(GPU_PERFCNT_CFG_MODE_OFF));
perfcnt->user = NULL;
drm_gem_vunmap_unlocked(&perfcnt->mapping->obj->base.base, &map);
perfcnt->buf = NULL;
panfrost_gem_close(&perfcnt->mapping->obj->base.base, file_priv);
panfrost_mmu_as_put(pfdev, perfcnt->mapping->mmu);
panfrost_gem_mapping_put(perfcnt->mapping);
perfcnt->mapping = NULL;
pm_runtime_mark_last_busy(pfdev->dev);
pm_runtime_put_autosuspend(pfdev->dev);
return 0;
}
int panfrost_ioctl_perfcnt_enable(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct panfrost_device *pfdev = dev->dev_private;
struct panfrost_perfcnt *perfcnt = pfdev->perfcnt;
struct drm_panfrost_perfcnt_enable *req = data;
int ret;
ret = panfrost_unstable_ioctl_check();
if (ret)
return ret;
/* Only Bifrost GPUs have 2 sets of counters. */
if (req->counterset > (panfrost_model_is_bifrost(pfdev) ? 1 : 0))
return -EINVAL;
mutex_lock(&perfcnt->lock);
if (req->enable)
ret = panfrost_perfcnt_enable_locked(pfdev, file_priv,
req->counterset);
else
ret = panfrost_perfcnt_disable_locked(pfdev, file_priv);
mutex_unlock(&perfcnt->lock);
return ret;
}
int panfrost_ioctl_perfcnt_dump(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct panfrost_device *pfdev = dev->dev_private;
struct panfrost_perfcnt *perfcnt = pfdev->perfcnt;
struct drm_panfrost_perfcnt_dump *req = data;
void __user *user_ptr = (void __user *)(uintptr_t)req->buf_ptr;
int ret;
ret = panfrost_unstable_ioctl_check();
if (ret)
return ret;
mutex_lock(&perfcnt->lock);
if (perfcnt->user != file_priv->driver_priv) {
ret = -EINVAL;
goto out;
}
ret = panfrost_perfcnt_dump_locked(pfdev);
if (ret)
goto out;
if (copy_to_user(user_ptr, perfcnt->buf, perfcnt->bosize))
ret = -EFAULT;
out:
mutex_unlock(&perfcnt->lock);
return ret;
}
void panfrost_perfcnt_close(struct drm_file *file_priv)
{
struct panfrost_file_priv *pfile = file_priv->driver_priv;
struct panfrost_device *pfdev = pfile->pfdev;
struct panfrost_perfcnt *perfcnt = pfdev->perfcnt;
pm_runtime_get_sync(pfdev->dev);
mutex_lock(&perfcnt->lock);
if (perfcnt->user == pfile)
panfrost_perfcnt_disable_locked(pfdev, file_priv);
mutex_unlock(&perfcnt->lock);
pm_runtime_mark_last_busy(pfdev->dev);
pm_runtime_put_autosuspend(pfdev->dev);
}
int panfrost_perfcnt_init(struct panfrost_device *pfdev)
{
struct panfrost_perfcnt *perfcnt;
size_t size;
if (panfrost_has_hw_feature(pfdev, HW_FEATURE_V4)) {
unsigned int ncoregroups;
ncoregroups = hweight64(pfdev->features.l2_present);
size = ncoregroups * BLOCKS_PER_COREGROUP *
COUNTERS_PER_BLOCK * BYTES_PER_COUNTER;
} else {
unsigned int nl2c, ncores;
/*
* TODO: define a macro to extract the number of l2 caches from
* mem_features.
*/
nl2c = ((pfdev->features.mem_features >> 8) & GENMASK(3, 0)) + 1;
/*
* shader_present might be sparse, but the counters layout
* forces us to dump unused regions too, hence the fls64() call
* instead of hweight64().
*/
ncores = fls64(pfdev->features.shader_present);
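/* For instance, a sparse shader_present of 0b1011 has hweight64() == 3
 * cores but fls64() == 4, so four core slots are reserved in the dump
 * layout, including the unused one at bit 2.
 */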
/*
* There's always one JM and one Tiler block, hence the '+ 2'
* here.
*/
size = (nl2c + ncores + 2) *
COUNTERS_PER_BLOCK * BYTES_PER_COUNTER;
}
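/* Rough sizing example for the non-v4 branch above: with one L2 cache and
 * shader_present == 0xf (fls64() == 4), the buffer spans
 * (1 + 4 + 2) * 64 * 4 == 1792 bytes.
 */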
perfcnt = devm_kzalloc(pfdev->dev, sizeof(*perfcnt), GFP_KERNEL);
if (!perfcnt)
return -ENOMEM;
perfcnt->bosize = size;
/* Start with everything disabled. */
gpu_write(pfdev, GPU_PERFCNT_CFG,
GPU_PERFCNT_CFG_MODE(GPU_PERFCNT_CFG_MODE_OFF));
gpu_write(pfdev, GPU_PRFCNT_JM_EN, 0);
gpu_write(pfdev, GPU_PRFCNT_SHADER_EN, 0);
gpu_write(pfdev, GPU_PRFCNT_MMU_L2_EN, 0);
gpu_write(pfdev, GPU_PRFCNT_TILER_EN, 0);
init_completion(&perfcnt->dump_comp);
mutex_init(&perfcnt->lock);
pfdev->perfcnt = perfcnt;
return 0;
}
void panfrost_perfcnt_fini(struct panfrost_device *pfdev)
{
/* Disable everything before leaving. */
gpu_write(pfdev, GPU_PERFCNT_CFG,
GPU_PERFCNT_CFG_MODE(GPU_PERFCNT_CFG_MODE_OFF));
gpu_write(pfdev, GPU_PRFCNT_JM_EN, 0);
gpu_write(pfdev, GPU_PRFCNT_SHADER_EN, 0);
gpu_write(pfdev, GPU_PRFCNT_MMU_L2_EN, 0);
gpu_write(pfdev, GPU_PRFCNT_TILER_EN, 0);
}
| linux-master | drivers/gpu/drm/panfrost/panfrost_perfcnt.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright 2021 Collabora ltd. */
#include <linux/err.h>
#include <linux/device.h>
#include <linux/devcoredump.h>
#include <linux/moduleparam.h>
#include <linux/iosys-map.h>
#include <drm/panfrost_drm.h>
#include <drm/drm_device.h>
#include "panfrost_job.h"
#include "panfrost_gem.h"
#include "panfrost_regs.h"
#include "panfrost_dump.h"
#include "panfrost_device.h"
static bool panfrost_dump_core = true;
module_param_named(dump_core, panfrost_dump_core, bool, 0600);
struct panfrost_dump_iterator {
void *start;
struct panfrost_dump_object_header *hdr;
void *data;
};
static const unsigned short panfrost_dump_registers[] = {
SHADER_READY_LO,
SHADER_READY_HI,
TILER_READY_LO,
TILER_READY_HI,
L2_READY_LO,
L2_READY_HI,
JOB_INT_MASK,
JOB_INT_STAT,
JS_HEAD_LO(0),
JS_HEAD_HI(0),
JS_TAIL_LO(0),
JS_TAIL_HI(0),
JS_AFFINITY_LO(0),
JS_AFFINITY_HI(0),
JS_CONFIG(0),
JS_STATUS(0),
JS_HEAD_NEXT_LO(0),
JS_HEAD_NEXT_HI(0),
JS_AFFINITY_NEXT_LO(0),
JS_AFFINITY_NEXT_HI(0),
JS_CONFIG_NEXT(0),
MMU_INT_MASK,
MMU_INT_STAT,
AS_TRANSTAB_LO(0),
AS_TRANSTAB_HI(0),
AS_MEMATTR_LO(0),
AS_MEMATTR_HI(0),
AS_FAULTSTATUS(0),
AS_FAULTADDRESS_LO(0),
AS_FAULTADDRESS_HI(0),
AS_STATUS(0),
};
static void panfrost_core_dump_header(struct panfrost_dump_iterator *iter,
u32 type, void *data_end)
{
struct panfrost_dump_object_header *hdr = iter->hdr;
hdr->magic = PANFROSTDUMP_MAGIC;
hdr->type = type;
hdr->file_offset = iter->data - iter->start;
hdr->file_size = data_end - iter->data;
iter->hdr++;
iter->data += hdr->file_size;
}
static void
panfrost_core_dump_registers(struct panfrost_dump_iterator *iter,
struct panfrost_device *pfdev,
u32 as_nr, int slot)
{
struct panfrost_dump_registers *dumpreg = iter->data;
unsigned int i;
for (i = 0; i < ARRAY_SIZE(panfrost_dump_registers); i++, dumpreg++) {
unsigned int js_as_offset = 0;
unsigned int reg;
if (panfrost_dump_registers[i] >= JS_BASE &&
panfrost_dump_registers[i] <= JS_BASE + JS_SLOT_STRIDE)
js_as_offset = slot * JS_SLOT_STRIDE;
else if (panfrost_dump_registers[i] >= MMU_BASE &&
panfrost_dump_registers[i] <= MMU_BASE + MMU_AS_STRIDE)
js_as_offset = (as_nr << MMU_AS_SHIFT);
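/* The panfrost_dump_registers[] table lists slot-0/AS-0 offsets; the
 * rebasing above shifts JS_* registers to the faulting job slot and AS_*
 * registers to the address space the job was running in.
 */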
reg = panfrost_dump_registers[i] + js_as_offset;
dumpreg->reg = reg;
dumpreg->value = gpu_read(pfdev, reg);
}
panfrost_core_dump_header(iter, PANFROSTDUMP_BUF_REG, dumpreg);
}
void panfrost_core_dump(struct panfrost_job *job)
{
struct panfrost_device *pfdev = job->pfdev;
struct panfrost_dump_iterator iter;
struct drm_gem_object *dbo;
unsigned int n_obj, n_bomap_pages;
u64 *bomap, *bomap_start;
size_t file_size;
u32 as_nr;
int slot;
int ret, i;
as_nr = job->mmu->as;
slot = panfrost_job_get_slot(job);
/* Only catch the first event, or when manually re-armed */
if (!panfrost_dump_core)
return;
panfrost_dump_core = false;
/* At a minimum, we dump the registers and an end marker */
n_obj = 2;
n_bomap_pages = 0;
file_size = ARRAY_SIZE(panfrost_dump_registers) *
sizeof(struct panfrost_dump_registers);
/* Add in the active buffer objects */
for (i = 0; i < job->bo_count; i++) {
/*
* Even though the CPU could be configured to use 16K or 64K pages, this
* is a very unusual situation for most kernel setups on SoCs that have
* a Panfrost device. Also many places across the driver make the somewhat
* arbitrary assumption that Panfrost's MMU page size is the same as the CPU's,
* so let's have a sanity check to ensure that's always the case
*/
dbo = job->bos[i];
WARN_ON(!IS_ALIGNED(dbo->size, PAGE_SIZE));
file_size += dbo->size;
n_bomap_pages += dbo->size >> PAGE_SHIFT;
n_obj++;
}
/* If we have any buffer objects, add a bomap object */
if (n_bomap_pages) {
file_size += n_bomap_pages * sizeof(*bomap);
n_obj++;
}
/* Add the size of the headers */
file_size += sizeof(*iter.hdr) * n_obj;
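/* At this point file_size covers, in dump-file order: one header per
 * object, the register dump, one u64 physical-address slot per BO page
 * (the bomap), and the raw contents of every BO; the trailer object
 * carries no payload.
 */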
/*
* Allocate the file in vmalloc memory; it's likely to be big.
* The reason behind these GFP flags is that we don't want to trigger the
* OOM killer in the event that not enough memory could be found for our
* dump file. We also don't want the allocator to do any error reporting,
* as the right behaviour is failing gracefully if a big enough buffer
* could not be allocated.
*/
iter.start = __vmalloc(file_size, GFP_KERNEL | __GFP_NOWARN |
__GFP_NORETRY);
if (!iter.start) {
dev_warn(pfdev->dev, "failed to allocate devcoredump file\n");
return;
}
/* Point the data member after the headers */
iter.hdr = iter.start;
iter.data = &iter.hdr[n_obj];
memset(iter.hdr, 0, iter.data - iter.start);
/*
* For now, we write the job identifier in the register dump header,
* so that we can decode the entire dump later with pandecode
*/
iter.hdr->reghdr.jc = job->jc;
iter.hdr->reghdr.major = PANFROSTDUMP_MAJOR;
iter.hdr->reghdr.minor = PANFROSTDUMP_MINOR;
iter.hdr->reghdr.gpu_id = pfdev->features.id;
iter.hdr->reghdr.nbos = job->bo_count;
panfrost_core_dump_registers(&iter, pfdev, as_nr, slot);
/* Reserve space for the bomap */
if (job->bo_count) {
bomap_start = bomap = iter.data;
memset(bomap, 0, sizeof(*bomap) * n_bomap_pages);
panfrost_core_dump_header(&iter, PANFROSTDUMP_BUF_BOMAP,
bomap + n_bomap_pages);
}
for (i = 0; i < job->bo_count; i++) {
struct iosys_map map;
struct panfrost_gem_mapping *mapping;
struct panfrost_gem_object *bo;
struct sg_page_iter page_iter;
void *vaddr;
bo = to_panfrost_bo(job->bos[i]);
mapping = job->mappings[i];
if (!bo->base.sgt) {
dev_err(pfdev->dev, "Panfrost Dump: BO has no sgt, cannot dump\n");
iter.hdr->bomap.valid = 0;
goto dump_header;
}
ret = drm_gem_vmap_unlocked(&bo->base.base, &map);
if (ret) {
dev_err(pfdev->dev, "Panfrost Dump: couldn't map Buffer Object\n");
iter.hdr->bomap.valid = 0;
goto dump_header;
}
WARN_ON(!mapping->active);
iter.hdr->bomap.data[0] = bomap - bomap_start;
for_each_sgtable_page(bo->base.sgt, &page_iter, 0) {
struct page *page = sg_page_iter_page(&page_iter);
if (!IS_ERR(page)) {
*bomap++ = page_to_phys(page);
} else {
dev_err(pfdev->dev, "Panfrost Dump: wrong page\n");
*bomap++ = 0;
}
}
iter.hdr->bomap.iova = mapping->mmnode.start << PAGE_SHIFT;
vaddr = map.vaddr;
memcpy(iter.data, vaddr, bo->base.base.size);
drm_gem_vunmap_unlocked(&bo->base.base, &map);
iter.hdr->bomap.valid = 1;
dump_header: panfrost_core_dump_header(&iter, PANFROSTDUMP_BUF_BO, iter.data +
bo->base.base.size);
}
panfrost_core_dump_header(&iter, PANFROSTDUMP_BUF_TRAILER, iter.data);
dev_coredumpv(pfdev->dev, iter.start, iter.data - iter.start, GFP_KERNEL);
}
| linux-master | drivers/gpu/drm/panfrost/panfrost_dump.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright 2018 Marty E. Plummer <[email protected]> */
/* Copyright 2019 Linaro, Ltd., Rob Herring <[email protected]> */
/* Copyright 2019 Collabora ltd. */
#include <linux/bitfield.h>
#include <linux/bitmap.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include "panfrost_device.h"
#include "panfrost_features.h"
#include "panfrost_issues.h"
#include "panfrost_gpu.h"
#include "panfrost_perfcnt.h"
#include "panfrost_regs.h"
static irqreturn_t panfrost_gpu_irq_handler(int irq, void *data)
{
struct panfrost_device *pfdev = data;
u32 state = gpu_read(pfdev, GPU_INT_STAT);
u32 fault_status = gpu_read(pfdev, GPU_FAULT_STATUS);
if (!state)
return IRQ_NONE;
if (state & GPU_IRQ_MASK_ERROR) {
u64 address = (u64) gpu_read(pfdev, GPU_FAULT_ADDRESS_HI) << 32;
address |= gpu_read(pfdev, GPU_FAULT_ADDRESS_LO);
dev_warn(pfdev->dev, "GPU Fault 0x%08x (%s) at 0x%016llx\n",
fault_status, panfrost_exception_name(fault_status & 0xFF),
address);
if (state & GPU_IRQ_MULTIPLE_FAULT)
dev_warn(pfdev->dev, "There were multiple GPU faults - some have not been reported\n");
gpu_write(pfdev, GPU_INT_MASK, 0);
}
if (state & GPU_IRQ_PERFCNT_SAMPLE_COMPLETED)
panfrost_perfcnt_sample_done(pfdev);
if (state & GPU_IRQ_CLEAN_CACHES_COMPLETED)
panfrost_perfcnt_clean_cache_done(pfdev);
gpu_write(pfdev, GPU_INT_CLEAR, state);
return IRQ_HANDLED;
}
int panfrost_gpu_soft_reset(struct panfrost_device *pfdev)
{
int ret;
u32 val;
gpu_write(pfdev, GPU_INT_MASK, 0);
gpu_write(pfdev, GPU_INT_CLEAR, GPU_IRQ_RESET_COMPLETED);
gpu_write(pfdev, GPU_CMD, GPU_CMD_SOFT_RESET);
ret = readl_relaxed_poll_timeout(pfdev->iomem + GPU_INT_RAWSTAT,
val, val & GPU_IRQ_RESET_COMPLETED, 100, 10000);
if (ret) {
dev_err(pfdev->dev, "gpu soft reset timed out\n");
return ret;
}
gpu_write(pfdev, GPU_INT_CLEAR, GPU_IRQ_MASK_ALL);
gpu_write(pfdev, GPU_INT_MASK, GPU_IRQ_MASK_ALL);
return 0;
}
void panfrost_gpu_amlogic_quirk(struct panfrost_device *pfdev)
{
/*
* The Amlogic integrated Mali-T820, Mali-G31 & Mali-G52 need
* these undocumented bits in GPU_PWR_OVERRIDE1 to be set in order
* to operate correctly.
*/
gpu_write(pfdev, GPU_PWR_KEY, GPU_PWR_KEY_UNLOCK);
gpu_write(pfdev, GPU_PWR_OVERRIDE1, 0xfff | (0x20 << 16));
}
static void panfrost_gpu_init_quirks(struct panfrost_device *pfdev)
{
u32 quirks = 0;
if (panfrost_has_hw_issue(pfdev, HW_ISSUE_8443) ||
panfrost_has_hw_issue(pfdev, HW_ISSUE_11035))
quirks |= SC_LS_PAUSEBUFFER_DISABLE;
if (panfrost_has_hw_issue(pfdev, HW_ISSUE_10327))
quirks |= SC_SDC_DISABLE_OQ_DISCARD;
if (panfrost_has_hw_issue(pfdev, HW_ISSUE_10797))
quirks |= SC_ENABLE_TEXGRD_FLAGS;
if (!panfrost_has_hw_issue(pfdev, GPUCORE_1619)) {
if (panfrost_model_cmp(pfdev, 0x750) < 0) /* T60x, T62x, T72x */
quirks |= SC_LS_ATTR_CHECK_DISABLE;
else if (panfrost_model_cmp(pfdev, 0x880) <= 0) /* T76x, T8xx */
quirks |= SC_LS_ALLOW_ATTR_TYPES;
}
if (panfrost_has_hw_issue(pfdev, HW_ISSUE_TTRX_2968_TTRX_3162))
quirks |= SC_VAR_ALGORITHM;
if (panfrost_has_hw_feature(pfdev, HW_FEATURE_TLS_HASHING))
quirks |= SC_TLS_HASH_ENABLE;
if (quirks)
gpu_write(pfdev, GPU_SHADER_CONFIG, quirks);
quirks = gpu_read(pfdev, GPU_TILER_CONFIG);
/* Set tiler clock gate override if required */
if (panfrost_has_hw_issue(pfdev, HW_ISSUE_T76X_3953))
quirks |= TC_CLOCK_GATE_OVERRIDE;
gpu_write(pfdev, GPU_TILER_CONFIG, quirks);
quirks = 0;
if ((panfrost_model_eq(pfdev, 0x860) || panfrost_model_eq(pfdev, 0x880)) &&
pfdev->features.revision >= 0x2000)
quirks |= JM_MAX_JOB_THROTTLE_LIMIT << JM_JOB_THROTTLE_LIMIT_SHIFT;
else if (panfrost_model_eq(pfdev, 0x6000) &&
pfdev->features.coherency_features == COHERENCY_ACE)
quirks |= (COHERENCY_ACE_LITE | COHERENCY_ACE) <<
JM_FORCE_COHERENCY_FEATURES_SHIFT;
if (panfrost_has_hw_feature(pfdev, HW_FEATURE_IDVS_GROUP_SIZE))
quirks |= JM_DEFAULT_IDVS_GROUP_SIZE << JM_IDVS_GROUP_SIZE_SHIFT;
if (quirks)
gpu_write(pfdev, GPU_JM_CONFIG, quirks);
/* Platform-specific quirks go here */
if (pfdev->comp->vendor_quirk)
pfdev->comp->vendor_quirk(pfdev);
}
#define MAX_HW_REVS 6
struct panfrost_model {
const char *name;
u32 id;
u32 id_mask;
u64 features;
u64 issues;
struct {
u32 revision;
u64 issues;
} revs[MAX_HW_REVS];
};
#define GPU_MODEL(_name, _id, ...) \
{\
.name = __stringify(_name), \
.id = _id, \
.features = hw_features_##_name, \
.issues = hw_issues_##_name, \
.revs = { __VA_ARGS__ }, \
}
#define GPU_REV_EXT(name, _rev, _p, _s, stat) \
{\
.revision = (_rev) << 12 | (_p) << 4 | (_s), \
.issues = hw_issues_##name##_r##_rev##p##_p##stat, \
}
#define GPU_REV(name, r, p) GPU_REV_EXT(name, r, p, 0, )
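/* Example of the revision packing: GPU_REV(t620, 0, 1) expands to
 * .revision = (0 << 12 | 1 << 4 | 0) == 0x0010, i.e. r0p1 status 0, and
 * selects the hw_issues_t620_r0p1 bitmap.
 */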
static const struct panfrost_model gpu_models[] = {
/* T60x has an oddball version */
GPU_MODEL(t600, 0x600,
GPU_REV_EXT(t600, 0, 0, 1, _15dev0)),
GPU_MODEL(t620, 0x620,
GPU_REV(t620, 0, 1), GPU_REV(t620, 1, 0)),
GPU_MODEL(t720, 0x720),
GPU_MODEL(t760, 0x750,
GPU_REV(t760, 0, 0), GPU_REV(t760, 0, 1),
GPU_REV_EXT(t760, 0, 1, 0, _50rel0),
GPU_REV(t760, 0, 2), GPU_REV(t760, 0, 3)),
GPU_MODEL(t820, 0x820),
GPU_MODEL(t830, 0x830),
GPU_MODEL(t860, 0x860),
GPU_MODEL(t880, 0x880),
GPU_MODEL(g71, 0x6000,
GPU_REV_EXT(g71, 0, 0, 1, _05dev0)),
GPU_MODEL(g72, 0x6001),
GPU_MODEL(g51, 0x7000),
GPU_MODEL(g76, 0x7001),
GPU_MODEL(g52, 0x7002),
GPU_MODEL(g31, 0x7003,
GPU_REV(g31, 1, 0)),
GPU_MODEL(g57, 0x9001,
GPU_REV(g57, 0, 0)),
/* MediaTek MT8192 has a Mali-G57 with a different GPU ID from the
* standard. Arm's driver does not appear to handle this model.
* ChromeOS has a hack downstream for it. Treat it as equivalent to
* standard Mali-G57 for now.
*/
GPU_MODEL(g57, 0x9003,
GPU_REV(g57, 0, 0)),
};
static void panfrost_gpu_init_features(struct panfrost_device *pfdev)
{
u32 gpu_id, num_js, major, minor, status, rev;
const char *name = "unknown";
u64 hw_feat = 0;
u64 hw_issues = hw_issues_all;
const struct panfrost_model *model;
int i;
pfdev->features.l2_features = gpu_read(pfdev, GPU_L2_FEATURES);
pfdev->features.core_features = gpu_read(pfdev, GPU_CORE_FEATURES);
pfdev->features.tiler_features = gpu_read(pfdev, GPU_TILER_FEATURES);
pfdev->features.mem_features = gpu_read(pfdev, GPU_MEM_FEATURES);
pfdev->features.mmu_features = gpu_read(pfdev, GPU_MMU_FEATURES);
pfdev->features.thread_features = gpu_read(pfdev, GPU_THREAD_FEATURES);
pfdev->features.max_threads = gpu_read(pfdev, GPU_THREAD_MAX_THREADS);
pfdev->features.thread_max_workgroup_sz = gpu_read(pfdev, GPU_THREAD_MAX_WORKGROUP_SIZE);
pfdev->features.thread_max_barrier_sz = gpu_read(pfdev, GPU_THREAD_MAX_BARRIER_SIZE);
pfdev->features.coherency_features = gpu_read(pfdev, GPU_COHERENCY_FEATURES);
pfdev->features.afbc_features = gpu_read(pfdev, GPU_AFBC_FEATURES);
for (i = 0; i < 4; i++)
pfdev->features.texture_features[i] = gpu_read(pfdev, GPU_TEXTURE_FEATURES(i));
pfdev->features.as_present = gpu_read(pfdev, GPU_AS_PRESENT);
pfdev->features.js_present = gpu_read(pfdev, GPU_JS_PRESENT);
num_js = hweight32(pfdev->features.js_present);
for (i = 0; i < num_js; i++)
pfdev->features.js_features[i] = gpu_read(pfdev, GPU_JS_FEATURES(i));
pfdev->features.shader_present = gpu_read(pfdev, GPU_SHADER_PRESENT_LO);
pfdev->features.shader_present |= (u64)gpu_read(pfdev, GPU_SHADER_PRESENT_HI) << 32;
pfdev->features.tiler_present = gpu_read(pfdev, GPU_TILER_PRESENT_LO);
pfdev->features.tiler_present |= (u64)gpu_read(pfdev, GPU_TILER_PRESENT_HI) << 32;
pfdev->features.l2_present = gpu_read(pfdev, GPU_L2_PRESENT_LO);
pfdev->features.l2_present |= (u64)gpu_read(pfdev, GPU_L2_PRESENT_HI) << 32;
pfdev->features.nr_core_groups = hweight64(pfdev->features.l2_present);
pfdev->features.stack_present = gpu_read(pfdev, GPU_STACK_PRESENT_LO);
pfdev->features.stack_present |= (u64)gpu_read(pfdev, GPU_STACK_PRESENT_HI) << 32;
pfdev->features.thread_tls_alloc = gpu_read(pfdev, GPU_THREAD_TLS_ALLOC);
gpu_id = gpu_read(pfdev, GPU_ID);
pfdev->features.revision = gpu_id & 0xffff;
pfdev->features.id = gpu_id >> 16;
/* The T60x has an oddball ID value. Fix it up to the standard Midgard
* format so we (and userspace) don't have to special case it.
*/
if (pfdev->features.id == 0x6956)
pfdev->features.id = 0x0600;
major = (pfdev->features.revision >> 12) & 0xf;
minor = (pfdev->features.revision >> 4) & 0xff;
status = pfdev->features.revision & 0xf;
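/* The revision word mirrors the GPU_REV() packing used in gpu_models[]:
 * e.g. a revision of 0x1010 decodes to major (r) 1, minor (p) 1 and
 * status 0.
 */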
rev = pfdev->features.revision;
gpu_id = pfdev->features.id;
for (model = gpu_models; model->name; model++) {
int best = -1;
if (!panfrost_model_eq(pfdev, model->id))
continue;
name = model->name;
hw_feat = model->features;
hw_issues |= model->issues;
for (i = 0; i < MAX_HW_REVS; i++) {
if (model->revs[i].revision == rev) {
best = i;
break;
} else if (model->revs[i].revision == (rev & ~0xf))
best = i;
}
if (best >= 0)
hw_issues |= model->revs[best].issues;
break;
}
bitmap_from_u64(pfdev->features.hw_features, hw_feat);
bitmap_from_u64(pfdev->features.hw_issues, hw_issues);
dev_info(pfdev->dev, "mali-%s id 0x%x major 0x%x minor 0x%x status 0x%x",
name, gpu_id, major, minor, status);
dev_info(pfdev->dev, "features: %64pb, issues: %64pb",
pfdev->features.hw_features,
pfdev->features.hw_issues);
dev_info(pfdev->dev, "Features: L2:0x%08x Shader:0x%08x Tiler:0x%08x Mem:0x%0x MMU:0x%08x AS:0x%x JS:0x%x",
pfdev->features.l2_features,
pfdev->features.core_features,
pfdev->features.tiler_features,
pfdev->features.mem_features,
pfdev->features.mmu_features,
pfdev->features.as_present,
pfdev->features.js_present);
dev_info(pfdev->dev, "shader_present=0x%0llx l2_present=0x%0llx",
pfdev->features.shader_present, pfdev->features.l2_present);
}
void panfrost_gpu_power_on(struct panfrost_device *pfdev)
{
int ret;
u32 val;
u64 core_mask = U64_MAX;
panfrost_gpu_init_quirks(pfdev);
if (pfdev->features.l2_present != 1) {
/*
* Only support one core group now.
* ~(l2_present - 1) unsets all bits in l2_present except
* the bottom bit. (l2_present - 2) has all the bits in
* the first core group set. AND them together to generate
* a mask of cores in the first core group.
*/
core_mask = ~(pfdev->features.l2_present - 1) &
(pfdev->features.l2_present - 2);
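/* Worked example: with two core groups and l2_present == 0x11 (L2 slices
 * at bits 0 and 4), ~(0x11 - 1) == ~0x10 and (0x11 - 2) == 0x0f, so
 * core_mask == 0x0f and only shader cores 0-3 get powered on.
 */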
dev_info_once(pfdev->dev, "using only 1st core group (%lu cores from %lu)\n",
hweight64(core_mask),
hweight64(pfdev->features.shader_present));
}
gpu_write(pfdev, L2_PWRON_LO, pfdev->features.l2_present & core_mask);
ret = readl_relaxed_poll_timeout(pfdev->iomem + L2_READY_LO,
val, val == (pfdev->features.l2_present & core_mask),
100, 20000);
if (ret)
dev_err(pfdev->dev, "error powering up gpu L2");
gpu_write(pfdev, SHADER_PWRON_LO,
pfdev->features.shader_present & core_mask);
ret = readl_relaxed_poll_timeout(pfdev->iomem + SHADER_READY_LO,
val, val == (pfdev->features.shader_present & core_mask),
100, 20000);
if (ret)
dev_err(pfdev->dev, "error powering up gpu shader");
gpu_write(pfdev, TILER_PWRON_LO, pfdev->features.tiler_present);
ret = readl_relaxed_poll_timeout(pfdev->iomem + TILER_READY_LO,
val, val == pfdev->features.tiler_present, 100, 1000);
if (ret)
dev_err(pfdev->dev, "error powering up gpu tiler");
}
void panfrost_gpu_power_off(struct panfrost_device *pfdev)
{
gpu_write(pfdev, TILER_PWROFF_LO, 0);
gpu_write(pfdev, SHADER_PWROFF_LO, 0);
gpu_write(pfdev, L2_PWROFF_LO, 0);
}
int panfrost_gpu_init(struct panfrost_device *pfdev)
{
int err, irq;
err = panfrost_gpu_soft_reset(pfdev);
if (err)
return err;
panfrost_gpu_init_features(pfdev);
err = dma_set_mask_and_coherent(pfdev->dev,
DMA_BIT_MASK(FIELD_GET(0xff00, pfdev->features.mmu_features)));
if (err)
return err;
dma_set_max_seg_size(pfdev->dev, UINT_MAX);
irq = platform_get_irq_byname(to_platform_device(pfdev->dev), "gpu");
if (irq <= 0)
return -ENODEV;
err = devm_request_irq(pfdev->dev, irq, panfrost_gpu_irq_handler,
IRQF_SHARED, KBUILD_MODNAME "-gpu", pfdev);
if (err) {
dev_err(pfdev->dev, "failed to request gpu irq");
return err;
}
panfrost_gpu_power_on(pfdev);
return 0;
}
void panfrost_gpu_fini(struct panfrost_device *pfdev)
{
panfrost_gpu_power_off(pfdev);
}
u32 panfrost_gpu_get_latest_flush_id(struct panfrost_device *pfdev)
{
u32 flush_id;
if (panfrost_has_hw_feature(pfdev, HW_FEATURE_FLUSH_REDUCTION)) {
/* Flush reduction only makes sense when the GPU is kept powered on between jobs */
if (pm_runtime_get_if_in_use(pfdev->dev)) {
flush_id = gpu_read(pfdev, GPU_LATEST_FLUSH_ID);
pm_runtime_put(pfdev->dev);
return flush_id;
}
}
return 0;
}
| linux-master | drivers/gpu/drm/panfrost/panfrost_gpu.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright 2019 Collabora ltd. */
#include <linux/clk.h>
#include <linux/devfreq.h>
#include <linux/devfreq_cooling.h>
#include <linux/nvmem-consumer.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include "panfrost_device.h"
#include "panfrost_devfreq.h"
static void panfrost_devfreq_update_utilization(struct panfrost_devfreq *pfdevfreq)
{
ktime_t now, last;
now = ktime_get();
last = pfdevfreq->time_last_update;
if (pfdevfreq->busy_count > 0)
pfdevfreq->busy_time += ktime_sub(now, last);
else
pfdevfreq->idle_time += ktime_sub(now, last);
pfdevfreq->time_last_update = now;
}
static int panfrost_devfreq_target(struct device *dev, unsigned long *freq,
u32 flags)
{
struct dev_pm_opp *opp;
opp = devfreq_recommended_opp(dev, freq, flags);
if (IS_ERR(opp))
return PTR_ERR(opp);
dev_pm_opp_put(opp);
return dev_pm_opp_set_rate(dev, *freq);
}
static void panfrost_devfreq_reset(struct panfrost_devfreq *pfdevfreq)
{
pfdevfreq->busy_time = 0;
pfdevfreq->idle_time = 0;
pfdevfreq->time_last_update = ktime_get();
}
static int panfrost_devfreq_get_dev_status(struct device *dev,
struct devfreq_dev_status *status)
{
struct panfrost_device *pfdev = dev_get_drvdata(dev);
struct panfrost_devfreq *pfdevfreq = &pfdev->pfdevfreq;
unsigned long irqflags;
status->current_frequency = clk_get_rate(pfdev->clock);
spin_lock_irqsave(&pfdevfreq->lock, irqflags);
panfrost_devfreq_update_utilization(pfdevfreq);
status->total_time = ktime_to_ns(ktime_add(pfdevfreq->busy_time,
pfdevfreq->idle_time));
status->busy_time = ktime_to_ns(pfdevfreq->busy_time);
panfrost_devfreq_reset(pfdevfreq);
spin_unlock_irqrestore(&pfdevfreq->lock, irqflags);
dev_dbg(pfdev->dev, "busy %lu total %lu %lu %% freq %lu MHz\n",
status->busy_time, status->total_time,
status->busy_time / (status->total_time / 100),
status->current_frequency / 1000 / 1000);
return 0;
}
static struct devfreq_dev_profile panfrost_devfreq_profile = {
.timer = DEVFREQ_TIMER_DELAYED,
.polling_ms = 50, /* ~3 frames */
.target = panfrost_devfreq_target,
.get_dev_status = panfrost_devfreq_get_dev_status,
};
static int panfrost_read_speedbin(struct device *dev)
{
u32 val;
int ret;
ret = nvmem_cell_read_variable_le_u32(dev, "speed-bin", &val);
if (ret) {
/*
* -ENOENT means that this platform doesn't support speedbins
* as it didn't declare any speed-bin nvmem: in this case, we
* keep going without it; any other error means that we are
* supposed to read the bin value, but we failed doing so.
*/
if (ret != -ENOENT && ret != -EOPNOTSUPP) {
DRM_DEV_ERROR(dev, "Cannot read speed-bin (%d).", ret);
return ret;
}
return 0;
}
DRM_DEV_DEBUG(dev, "Using speed-bin = 0x%x\n", val);
return devm_pm_opp_set_supported_hw(dev, &val, 1);
}
int panfrost_devfreq_init(struct panfrost_device *pfdev)
{
int ret;
struct dev_pm_opp *opp;
unsigned long cur_freq;
struct device *dev = &pfdev->pdev->dev;
struct devfreq *devfreq;
struct thermal_cooling_device *cooling;
struct panfrost_devfreq *pfdevfreq = &pfdev->pfdevfreq;
if (pfdev->comp->num_supplies > 1) {
/*
* GPUs with more than 1 supply require platform-specific handling:
* continue without devfreq
*/
DRM_DEV_INFO(dev, "More than 1 supply is not supported yet\n");
return 0;
}
ret = panfrost_read_speedbin(dev);
if (ret)
return ret;
ret = devm_pm_opp_set_regulators(dev, pfdev->comp->supply_names);
if (ret) {
/* Continue if the optional regulator is missing */
if (ret != -ENODEV) {
if (ret != -EPROBE_DEFER)
DRM_DEV_ERROR(dev, "Couldn't set OPP regulators\n");
return ret;
}
}
ret = devm_pm_opp_of_add_table(dev);
if (ret) {
/* Optional, continue without devfreq */
if (ret == -ENODEV)
ret = 0;
return ret;
}
pfdevfreq->opp_of_table_added = true;
spin_lock_init(&pfdevfreq->lock);
panfrost_devfreq_reset(pfdevfreq);
cur_freq = clk_get_rate(pfdev->clock);
opp = devfreq_recommended_opp(dev, &cur_freq, 0);
if (IS_ERR(opp))
return PTR_ERR(opp);
panfrost_devfreq_profile.initial_freq = cur_freq;
/*
* Set the recommended OPP; this will enable and configure the regulator,
* if any, and will avoid a switch off by regulator_late_cleanup().
*/
ret = dev_pm_opp_set_opp(dev, opp);
if (ret) {
DRM_DEV_ERROR(dev, "Couldn't set recommended OPP\n");
return ret;
}
dev_pm_opp_put(opp);
/*
* Setup default thresholds for the simple_ondemand governor.
* The values are chosen based on experiments.
*/
pfdevfreq->gov_data.upthreshold = 45;
pfdevfreq->gov_data.downdifferential = 5;
devfreq = devm_devfreq_add_device(dev, &panfrost_devfreq_profile,
DEVFREQ_GOV_SIMPLE_ONDEMAND,
&pfdevfreq->gov_data);
if (IS_ERR(devfreq)) {
DRM_DEV_ERROR(dev, "Couldn't initialize GPU devfreq\n");
return PTR_ERR(devfreq);
}
pfdevfreq->devfreq = devfreq;
cooling = devfreq_cooling_em_register(devfreq, NULL);
if (IS_ERR(cooling))
DRM_DEV_INFO(dev, "Failed to register cooling device\n");
else
pfdevfreq->cooling = cooling;
return 0;
}
void panfrost_devfreq_fini(struct panfrost_device *pfdev)
{
struct panfrost_devfreq *pfdevfreq = &pfdev->pfdevfreq;
if (pfdevfreq->cooling) {
devfreq_cooling_unregister(pfdevfreq->cooling);
pfdevfreq->cooling = NULL;
}
}
void panfrost_devfreq_resume(struct panfrost_device *pfdev)
{
struct panfrost_devfreq *pfdevfreq = &pfdev->pfdevfreq;
if (!pfdevfreq->devfreq)
return;
panfrost_devfreq_reset(pfdevfreq);
devfreq_resume_device(pfdevfreq->devfreq);
}
void panfrost_devfreq_suspend(struct panfrost_device *pfdev)
{
struct panfrost_devfreq *pfdevfreq = &pfdev->pfdevfreq;
if (!pfdevfreq->devfreq)
return;
devfreq_suspend_device(pfdevfreq->devfreq);
}
void panfrost_devfreq_record_busy(struct panfrost_devfreq *pfdevfreq)
{
unsigned long irqflags;
if (!pfdevfreq->devfreq)
return;
spin_lock_irqsave(&pfdevfreq->lock, irqflags);
panfrost_devfreq_update_utilization(pfdevfreq);
pfdevfreq->busy_count++;
spin_unlock_irqrestore(&pfdevfreq->lock, irqflags);
}
void panfrost_devfreq_record_idle(struct panfrost_devfreq *pfdevfreq)
{
unsigned long irqflags;
if (!pfdevfreq->devfreq)
return;
spin_lock_irqsave(&pfdevfreq->lock, irqflags);
panfrost_devfreq_update_utilization(pfdevfreq);
WARN_ON(--pfdevfreq->busy_count < 0);
spin_unlock_irqrestore(&pfdevfreq->lock, irqflags);
}
| linux-master | drivers/gpu/drm/panfrost/panfrost_devfreq.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright 2018 Marty E. Plummer <[email protected]> */
/* Copyright 2019 Linaro, Ltd, Rob Herring <[email protected]> */
#include <linux/clk.h>
#include <linux/reset.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
#include "panfrost_device.h"
#include "panfrost_devfreq.h"
#include "panfrost_features.h"
#include "panfrost_issues.h"
#include "panfrost_gpu.h"
#include "panfrost_job.h"
#include "panfrost_mmu.h"
#include "panfrost_perfcnt.h"
static int panfrost_reset_init(struct panfrost_device *pfdev)
{
pfdev->rstc = devm_reset_control_array_get_optional_exclusive(pfdev->dev);
if (IS_ERR(pfdev->rstc)) {
dev_err(pfdev->dev, "get reset failed %ld\n", PTR_ERR(pfdev->rstc));
return PTR_ERR(pfdev->rstc);
}
return reset_control_deassert(pfdev->rstc);
}
static void panfrost_reset_fini(struct panfrost_device *pfdev)
{
reset_control_assert(pfdev->rstc);
}
static int panfrost_clk_init(struct panfrost_device *pfdev)
{
int err;
unsigned long rate;
pfdev->clock = devm_clk_get(pfdev->dev, NULL);
if (IS_ERR(pfdev->clock)) {
dev_err(pfdev->dev, "get clock failed %ld\n", PTR_ERR(pfdev->clock));
return PTR_ERR(pfdev->clock);
}
rate = clk_get_rate(pfdev->clock);
dev_info(pfdev->dev, "clock rate = %lu\n", rate);
err = clk_prepare_enable(pfdev->clock);
if (err)
return err;
pfdev->bus_clock = devm_clk_get_optional(pfdev->dev, "bus");
if (IS_ERR(pfdev->bus_clock)) {
dev_err(pfdev->dev, "get bus_clock failed %ld\n",
PTR_ERR(pfdev->bus_clock));
err = PTR_ERR(pfdev->bus_clock);
goto disable_clock;
}
if (pfdev->bus_clock) {
rate = clk_get_rate(pfdev->bus_clock);
dev_info(pfdev->dev, "bus_clock rate = %lu\n", rate);
err = clk_prepare_enable(pfdev->bus_clock);
if (err)
goto disable_clock;
}
return 0;
disable_clock:
clk_disable_unprepare(pfdev->clock);
return err;
}
static void panfrost_clk_fini(struct panfrost_device *pfdev)
{
clk_disable_unprepare(pfdev->bus_clock);
clk_disable_unprepare(pfdev->clock);
}
static int panfrost_regulator_init(struct panfrost_device *pfdev)
{
int ret, i;
pfdev->regulators = devm_kcalloc(pfdev->dev, pfdev->comp->num_supplies,
sizeof(*pfdev->regulators),
GFP_KERNEL);
if (!pfdev->regulators)
return -ENOMEM;
for (i = 0; i < pfdev->comp->num_supplies; i++)
pfdev->regulators[i].supply = pfdev->comp->supply_names[i];
ret = devm_regulator_bulk_get(pfdev->dev,
pfdev->comp->num_supplies,
pfdev->regulators);
if (ret < 0) {
if (ret != -EPROBE_DEFER)
dev_err(pfdev->dev, "failed to get regulators: %d\n",
ret);
return ret;
}
ret = regulator_bulk_enable(pfdev->comp->num_supplies,
pfdev->regulators);
if (ret < 0) {
dev_err(pfdev->dev, "failed to enable regulators: %d\n", ret);
return ret;
}
return 0;
}
static void panfrost_regulator_fini(struct panfrost_device *pfdev)
{
if (!pfdev->regulators)
return;
regulator_bulk_disable(pfdev->comp->num_supplies, pfdev->regulators);
}
static void panfrost_pm_domain_fini(struct panfrost_device *pfdev)
{
int i;
for (i = 0; i < ARRAY_SIZE(pfdev->pm_domain_devs); i++) {
if (!pfdev->pm_domain_devs[i])
break;
if (pfdev->pm_domain_links[i])
device_link_del(pfdev->pm_domain_links[i]);
dev_pm_domain_detach(pfdev->pm_domain_devs[i], true);
}
}
static int panfrost_pm_domain_init(struct panfrost_device *pfdev)
{
int err;
int i, num_domains;
num_domains = of_count_phandle_with_args(pfdev->dev->of_node,
"power-domains",
"#power-domain-cells");
/*
* A single power domain is handled by the core and, if only a single
* power domain is requested, the property is optional.
*/
if (num_domains < 2 && pfdev->comp->num_pm_domains < 2)
return 0;
if (num_domains != pfdev->comp->num_pm_domains) {
dev_err(pfdev->dev,
"Incorrect number of power domains: %d provided, %d needed\n",
num_domains, pfdev->comp->num_pm_domains);
return -EINVAL;
}
if (WARN(num_domains > ARRAY_SIZE(pfdev->pm_domain_devs),
"Too many supplies in compatible structure.\n"))
return -EINVAL;
for (i = 0; i < num_domains; i++) {
pfdev->pm_domain_devs[i] =
dev_pm_domain_attach_by_name(pfdev->dev,
pfdev->comp->pm_domain_names[i]);
if (IS_ERR_OR_NULL(pfdev->pm_domain_devs[i])) {
err = PTR_ERR(pfdev->pm_domain_devs[i]) ? : -ENODATA;
pfdev->pm_domain_devs[i] = NULL;
dev_err(pfdev->dev,
"failed to get pm-domain %s(%d): %d\n",
pfdev->comp->pm_domain_names[i], i, err);
goto err;
}
pfdev->pm_domain_links[i] = device_link_add(pfdev->dev,
pfdev->pm_domain_devs[i], DL_FLAG_PM_RUNTIME |
DL_FLAG_STATELESS | DL_FLAG_RPM_ACTIVE);
if (!pfdev->pm_domain_links[i]) {
dev_err(pfdev->pm_domain_devs[i],
"adding device link failed!\n");
err = -ENODEV;
goto err;
}
}
return 0;
err:
panfrost_pm_domain_fini(pfdev);
return err;
}
int panfrost_device_init(struct panfrost_device *pfdev)
{
int err;
mutex_init(&pfdev->sched_lock);
INIT_LIST_HEAD(&pfdev->scheduled_jobs);
INIT_LIST_HEAD(&pfdev->as_lru_list);
spin_lock_init(&pfdev->as_lock);
err = panfrost_clk_init(pfdev);
if (err) {
dev_err(pfdev->dev, "clk init failed %d\n", err);
return err;
}
err = panfrost_devfreq_init(pfdev);
if (err) {
if (err != -EPROBE_DEFER)
dev_err(pfdev->dev, "devfreq init failed %d\n", err);
goto out_clk;
}
/* OPP will handle regulators */
if (!pfdev->pfdevfreq.opp_of_table_added) {
err = panfrost_regulator_init(pfdev);
if (err)
goto out_devfreq;
}
err = panfrost_reset_init(pfdev);
if (err) {
dev_err(pfdev->dev, "reset init failed %d\n", err);
goto out_regulator;
}
err = panfrost_pm_domain_init(pfdev);
if (err)
goto out_reset;
pfdev->iomem = devm_platform_ioremap_resource(pfdev->pdev, 0);
if (IS_ERR(pfdev->iomem)) {
err = PTR_ERR(pfdev->iomem);
goto out_pm_domain;
}
err = panfrost_gpu_init(pfdev);
if (err)
goto out_pm_domain;
err = panfrost_mmu_init(pfdev);
if (err)
goto out_gpu;
err = panfrost_job_init(pfdev);
if (err)
goto out_mmu;
err = panfrost_perfcnt_init(pfdev);
if (err)
goto out_job;
return 0;
out_job:
panfrost_job_fini(pfdev);
out_mmu:
panfrost_mmu_fini(pfdev);
out_gpu:
panfrost_gpu_fini(pfdev);
out_pm_domain:
panfrost_pm_domain_fini(pfdev);
out_reset:
panfrost_reset_fini(pfdev);
out_regulator:
panfrost_regulator_fini(pfdev);
out_devfreq:
panfrost_devfreq_fini(pfdev);
out_clk:
panfrost_clk_fini(pfdev);
return err;
}
void panfrost_device_fini(struct panfrost_device *pfdev)
{
panfrost_perfcnt_fini(pfdev);
panfrost_job_fini(pfdev);
panfrost_mmu_fini(pfdev);
panfrost_gpu_fini(pfdev);
panfrost_pm_domain_fini(pfdev);
panfrost_reset_fini(pfdev);
panfrost_devfreq_fini(pfdev);
panfrost_regulator_fini(pfdev);
panfrost_clk_fini(pfdev);
}
#define PANFROST_EXCEPTION(id) \
[DRM_PANFROST_EXCEPTION_ ## id] = { \
.name = #id, \
}
struct panfrost_exception_info {
const char *name;
};
static const struct panfrost_exception_info panfrost_exception_infos[] = {
PANFROST_EXCEPTION(OK),
PANFROST_EXCEPTION(DONE),
PANFROST_EXCEPTION(INTERRUPTED),
PANFROST_EXCEPTION(STOPPED),
PANFROST_EXCEPTION(TERMINATED),
PANFROST_EXCEPTION(KABOOM),
PANFROST_EXCEPTION(EUREKA),
PANFROST_EXCEPTION(ACTIVE),
PANFROST_EXCEPTION(JOB_CONFIG_FAULT),
PANFROST_EXCEPTION(JOB_POWER_FAULT),
PANFROST_EXCEPTION(JOB_READ_FAULT),
PANFROST_EXCEPTION(JOB_WRITE_FAULT),
PANFROST_EXCEPTION(JOB_AFFINITY_FAULT),
PANFROST_EXCEPTION(JOB_BUS_FAULT),
PANFROST_EXCEPTION(INSTR_INVALID_PC),
PANFROST_EXCEPTION(INSTR_INVALID_ENC),
PANFROST_EXCEPTION(INSTR_TYPE_MISMATCH),
PANFROST_EXCEPTION(INSTR_OPERAND_FAULT),
PANFROST_EXCEPTION(INSTR_TLS_FAULT),
PANFROST_EXCEPTION(INSTR_BARRIER_FAULT),
PANFROST_EXCEPTION(INSTR_ALIGN_FAULT),
PANFROST_EXCEPTION(DATA_INVALID_FAULT),
PANFROST_EXCEPTION(TILE_RANGE_FAULT),
PANFROST_EXCEPTION(ADDR_RANGE_FAULT),
PANFROST_EXCEPTION(IMPRECISE_FAULT),
PANFROST_EXCEPTION(OOM),
PANFROST_EXCEPTION(OOM_AFBC),
PANFROST_EXCEPTION(UNKNOWN),
PANFROST_EXCEPTION(DELAYED_BUS_FAULT),
PANFROST_EXCEPTION(GPU_SHAREABILITY_FAULT),
PANFROST_EXCEPTION(SYS_SHAREABILITY_FAULT),
PANFROST_EXCEPTION(GPU_CACHEABILITY_FAULT),
PANFROST_EXCEPTION(TRANSLATION_FAULT_0),
PANFROST_EXCEPTION(TRANSLATION_FAULT_1),
PANFROST_EXCEPTION(TRANSLATION_FAULT_2),
PANFROST_EXCEPTION(TRANSLATION_FAULT_3),
PANFROST_EXCEPTION(TRANSLATION_FAULT_4),
PANFROST_EXCEPTION(TRANSLATION_FAULT_IDENTITY),
PANFROST_EXCEPTION(PERM_FAULT_0),
PANFROST_EXCEPTION(PERM_FAULT_1),
PANFROST_EXCEPTION(PERM_FAULT_2),
PANFROST_EXCEPTION(PERM_FAULT_3),
PANFROST_EXCEPTION(TRANSTAB_BUS_FAULT_0),
PANFROST_EXCEPTION(TRANSTAB_BUS_FAULT_1),
PANFROST_EXCEPTION(TRANSTAB_BUS_FAULT_2),
PANFROST_EXCEPTION(TRANSTAB_BUS_FAULT_3),
PANFROST_EXCEPTION(ACCESS_FLAG_0),
PANFROST_EXCEPTION(ACCESS_FLAG_1),
PANFROST_EXCEPTION(ACCESS_FLAG_2),
PANFROST_EXCEPTION(ACCESS_FLAG_3),
PANFROST_EXCEPTION(ADDR_SIZE_FAULT_IN0),
PANFROST_EXCEPTION(ADDR_SIZE_FAULT_IN1),
PANFROST_EXCEPTION(ADDR_SIZE_FAULT_IN2),
PANFROST_EXCEPTION(ADDR_SIZE_FAULT_IN3),
PANFROST_EXCEPTION(ADDR_SIZE_FAULT_OUT0),
PANFROST_EXCEPTION(ADDR_SIZE_FAULT_OUT1),
PANFROST_EXCEPTION(ADDR_SIZE_FAULT_OUT2),
PANFROST_EXCEPTION(ADDR_SIZE_FAULT_OUT3),
PANFROST_EXCEPTION(MEM_ATTR_FAULT_0),
PANFROST_EXCEPTION(MEM_ATTR_FAULT_1),
PANFROST_EXCEPTION(MEM_ATTR_FAULT_2),
PANFROST_EXCEPTION(MEM_ATTR_FAULT_3),
PANFROST_EXCEPTION(MEM_ATTR_NONCACHE_0),
PANFROST_EXCEPTION(MEM_ATTR_NONCACHE_1),
PANFROST_EXCEPTION(MEM_ATTR_NONCACHE_2),
PANFROST_EXCEPTION(MEM_ATTR_NONCACHE_3),
};
const char *panfrost_exception_name(u32 exception_code)
{
if (WARN_ON(exception_code >= ARRAY_SIZE(panfrost_exception_infos) ||
!panfrost_exception_infos[exception_code].name))
return "Unknown exception type";
return panfrost_exception_infos[exception_code].name;
}
bool panfrost_exception_needs_reset(const struct panfrost_device *pfdev,
u32 exception_code)
{
/* If an occlusion query write causes a bus fault on affected GPUs,
* future fragment jobs may hang. Reset to work around this.
*/
if (exception_code == DRM_PANFROST_EXCEPTION_JOB_BUS_FAULT)
return panfrost_has_hw_issue(pfdev, HW_ISSUE_TTRX_3076);
/* No other GPUs we support need a reset */
return false;
}
void panfrost_device_reset(struct panfrost_device *pfdev)
{
panfrost_gpu_soft_reset(pfdev);
panfrost_gpu_power_on(pfdev);
panfrost_mmu_reset(pfdev);
panfrost_job_enable_interrupts(pfdev);
}
static int panfrost_device_resume(struct device *dev)
{
struct panfrost_device *pfdev = dev_get_drvdata(dev);
panfrost_device_reset(pfdev);
panfrost_devfreq_resume(pfdev);
return 0;
}
static int panfrost_device_suspend(struct device *dev)
{
struct panfrost_device *pfdev = dev_get_drvdata(dev);
if (!panfrost_job_is_idle(pfdev))
return -EBUSY;
panfrost_devfreq_suspend(pfdev);
panfrost_gpu_power_off(pfdev);
return 0;
}
EXPORT_GPL_RUNTIME_DEV_PM_OPS(panfrost_pm_ops, panfrost_device_suspend,
panfrost_device_resume, NULL);
| linux-master | drivers/gpu/drm/panfrost/panfrost_device.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2019 Intel Corporation.
*
* Authors:
* Ramalingam C <[email protected]>
*/
#include <linux/device.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <drm/display/drm_hdcp_helper.h>
#include <drm/drm_sysfs.h>
#include <drm/drm_print.h>
#include <drm/drm_device.h>
#include <drm/drm_property.h>
#include <drm/drm_mode_object.h>
#include <drm/drm_connector.h>
static inline void drm_hdcp_print_ksv(const u8 *ksv)
{
DRM_DEBUG("\t%#02x, %#02x, %#02x, %#02x, %#02x\n",
ksv[0], ksv[1], ksv[2], ksv[3], ksv[4]);
}
static u32 drm_hdcp_get_revoked_ksv_count(const u8 *buf, u32 vrls_length)
{
u32 parsed_bytes = 0, ksv_count = 0, vrl_ksv_cnt, vrl_sz;
while (parsed_bytes < vrls_length) {
vrl_ksv_cnt = *buf;
ksv_count += vrl_ksv_cnt;
vrl_sz = (vrl_ksv_cnt * DRM_HDCP_KSV_LEN) + 1;
buf += vrl_sz;
parsed_bytes += vrl_sz;
}
/*
* When the VRLs are not valid, the KSVs are not considered.
* Hence the SRM will be discarded.
*/
if (parsed_bytes != vrls_length)
ksv_count = 0;
return ksv_count;
}
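/* Each first-generation VRL handled here is one device-count byte followed
 * by that many 5-byte KSVs, so e.g. a VRL revoking two devices occupies
 * 1 + 2 * DRM_HDCP_KSV_LEN == 11 bytes.
 */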
static u32 drm_hdcp_get_revoked_ksvs(const u8 *buf, u8 **revoked_ksv_list,
u32 vrls_length)
{
u32 vrl_ksv_cnt, vrl_ksv_sz, vrl_idx = 0;
u32 parsed_bytes = 0, ksv_count = 0;
do {
vrl_ksv_cnt = *buf;
vrl_ksv_sz = vrl_ksv_cnt * DRM_HDCP_KSV_LEN;
buf++;
DRM_DEBUG("vrl: %d, Revoked KSVs: %d\n", vrl_idx++,
vrl_ksv_cnt);
memcpy((*revoked_ksv_list) + (ksv_count * DRM_HDCP_KSV_LEN),
buf, vrl_ksv_sz);
ksv_count += vrl_ksv_cnt;
buf += vrl_ksv_sz;
parsed_bytes += (vrl_ksv_sz + 1);
} while (parsed_bytes < vrls_length);
return ksv_count;
}
static inline u32 get_vrl_length(const u8 *buf)
{
return drm_hdcp_be24_to_cpu(buf);
}
static int drm_hdcp_parse_hdcp1_srm(const u8 *buf, size_t count,
u8 **revoked_ksv_list, u32 *revoked_ksv_cnt)
{
struct hdcp_srm_header *header;
u32 vrl_length, ksv_count;
if (count < (sizeof(struct hdcp_srm_header) +
DRM_HDCP_1_4_VRL_LENGTH_SIZE + DRM_HDCP_1_4_DCP_SIG_SIZE)) {
DRM_ERROR("Invalid blob length\n");
return -EINVAL;
}
header = (struct hdcp_srm_header *)buf;
DRM_DEBUG("SRM ID: 0x%x, SRM Ver: 0x%x, SRM Gen No: 0x%x\n",
header->srm_id,
be16_to_cpu(header->srm_version), header->srm_gen_no);
WARN_ON(header->reserved);
buf = buf + sizeof(*header);
vrl_length = get_vrl_length(buf);
if (count < (sizeof(struct hdcp_srm_header) + vrl_length) ||
vrl_length < (DRM_HDCP_1_4_VRL_LENGTH_SIZE +
DRM_HDCP_1_4_DCP_SIG_SIZE)) {
DRM_ERROR("Invalid blob length or vrl length\n");
return -EINVAL;
}
/* Length of the all vrls combined */
vrl_length -= (DRM_HDCP_1_4_VRL_LENGTH_SIZE +
DRM_HDCP_1_4_DCP_SIG_SIZE);
if (!vrl_length) {
DRM_ERROR("No vrl found\n");
return -EINVAL;
}
buf += DRM_HDCP_1_4_VRL_LENGTH_SIZE;
ksv_count = drm_hdcp_get_revoked_ksv_count(buf, vrl_length);
if (!ksv_count) {
DRM_DEBUG("Revoked KSV count is 0\n");
return 0;
}
*revoked_ksv_list = kcalloc(ksv_count, DRM_HDCP_KSV_LEN, GFP_KERNEL);
if (!*revoked_ksv_list) {
DRM_ERROR("Out of Memory\n");
return -ENOMEM;
}
if (drm_hdcp_get_revoked_ksvs(buf, revoked_ksv_list,
vrl_length) != ksv_count) {
*revoked_ksv_cnt = 0;
kfree(*revoked_ksv_list);
return -EINVAL;
}
*revoked_ksv_cnt = ksv_count;
return 0;
}
static int drm_hdcp_parse_hdcp2_srm(const u8 *buf, size_t count,
u8 **revoked_ksv_list, u32 *revoked_ksv_cnt)
{
struct hdcp_srm_header *header;
u32 vrl_length, ksv_count, ksv_sz;
if (count < (sizeof(struct hdcp_srm_header) +
DRM_HDCP_2_VRL_LENGTH_SIZE + DRM_HDCP_2_DCP_SIG_SIZE)) {
DRM_ERROR("Invalid blob length\n");
return -EINVAL;
}
header = (struct hdcp_srm_header *)buf;
DRM_DEBUG("SRM ID: 0x%x, SRM Ver: 0x%x, SRM Gen No: 0x%x\n",
header->srm_id & DRM_HDCP_SRM_ID_MASK,
be16_to_cpu(header->srm_version), header->srm_gen_no);
if (header->reserved)
return -EINVAL;
buf = buf + sizeof(*header);
vrl_length = get_vrl_length(buf);
if (count < (sizeof(struct hdcp_srm_header) + vrl_length) ||
vrl_length < (DRM_HDCP_2_VRL_LENGTH_SIZE +
DRM_HDCP_2_DCP_SIG_SIZE)) {
DRM_ERROR("Invalid blob length or vrl length\n");
return -EINVAL;
}
/* Length of the all vrls combined */
vrl_length -= (DRM_HDCP_2_VRL_LENGTH_SIZE +
DRM_HDCP_2_DCP_SIG_SIZE);
if (!vrl_length) {
DRM_ERROR("No vrl found\n");
return -EINVAL;
}
buf += DRM_HDCP_2_VRL_LENGTH_SIZE;
ksv_count = (*buf << 2) | DRM_HDCP_2_KSV_COUNT_2_LSBITS(*(buf + 1));
if (!ksv_count) {
DRM_DEBUG("Revoked KSV count is 0\n");
return 0;
}
*revoked_ksv_list = kcalloc(ksv_count, DRM_HDCP_KSV_LEN, GFP_KERNEL);
if (!*revoked_ksv_list) {
DRM_ERROR("Out of Memory\n");
return -ENOMEM;
}
ksv_sz = ksv_count * DRM_HDCP_KSV_LEN;
buf += DRM_HDCP_2_NO_OF_DEV_PLUS_RESERVED_SZ;
DRM_DEBUG("Revoked KSVs: %d\n", ksv_count);
memcpy(*revoked_ksv_list, buf, ksv_sz);
*revoked_ksv_cnt = ksv_count;
return 0;
}
static inline bool is_srm_version_hdcp1(const u8 *buf)
{
return *buf == (u8)(DRM_HDCP_1_4_SRM_ID << 4);
}
static inline bool is_srm_version_hdcp2(const u8 *buf)
{
return *buf == (u8)(DRM_HDCP_2_SRM_ID << 4 | DRM_HDCP_2_INDICATOR);
}
static int drm_hdcp_srm_update(const u8 *buf, size_t count,
u8 **revoked_ksv_list, u32 *revoked_ksv_cnt)
{
if (count < sizeof(struct hdcp_srm_header))
return -EINVAL;
if (is_srm_version_hdcp1(buf))
return drm_hdcp_parse_hdcp1_srm(buf, count, revoked_ksv_list,
revoked_ksv_cnt);
else if (is_srm_version_hdcp2(buf))
return drm_hdcp_parse_hdcp2_srm(buf, count, revoked_ksv_list,
revoked_ksv_cnt);
else
return -EINVAL;
}
static int drm_hdcp_request_srm(struct drm_device *drm_dev,
u8 **revoked_ksv_list, u32 *revoked_ksv_cnt)
{
char fw_name[36] = "display_hdcp_srm.bin";
const struct firmware *fw;
int ret;
ret = request_firmware_direct(&fw, (const char *)fw_name,
drm_dev->dev);
if (ret < 0) {
*revoked_ksv_cnt = 0;
*revoked_ksv_list = NULL;
ret = 0;
goto exit;
}
if (fw->size && fw->data)
ret = drm_hdcp_srm_update(fw->data, fw->size, revoked_ksv_list,
revoked_ksv_cnt);
exit:
release_firmware(fw);
return ret;
}
/**
* drm_hdcp_check_ksvs_revoked - Check the revoked status of the IDs
*
* @drm_dev: drm_device for which HDCP revocation check is requested
* @ksvs: List of KSVs (HDCP receiver IDs)
* @ksv_count: KSV count passed in through @ksvs
*
 * This function reads the HDCP System Renewability Message (SRM table)
 * from userspace as firmware and parses it for the HDCP KSVs (receiver IDs)
 * revoked by DCP LLC. Once the revoked KSVs are known, the KSVs passed in
 * by the display driver are checked against that list and the number of
 * revoked entries found is returned.
 *
 * The SRM should be provided as a firmware blob named
 * "display_hdcp_srm.bin".
*
* Format of the SRM table, that userspace needs to write into the binary file,
* is defined at:
* 1. Renewability chapter on 55th page of HDCP 1.4 specification
* https://www.digital-cp.com/sites/default/files/specifications/HDCP%20Specification%20Rev1_4_Secure.pdf
* 2. Renewability chapter on 63rd page of HDCP 2.2 specification
* https://www.digital-cp.com/sites/default/files/specifications/HDCP%20on%20HDMI%20Specification%20Rev2_2_Final1.pdf
*
* Returns:
 * Count of the revoked KSVs, or a negative error code on failure.
*/
int drm_hdcp_check_ksvs_revoked(struct drm_device *drm_dev, u8 *ksvs,
u32 ksv_count)
{
u32 revoked_ksv_cnt = 0, i, j;
u8 *revoked_ksv_list = NULL;
int ret = 0;
ret = drm_hdcp_request_srm(drm_dev, &revoked_ksv_list,
&revoked_ksv_cnt);
if (ret)
return ret;
/* revoked_ksv_cnt will be zero when above function failed */
for (i = 0; i < revoked_ksv_cnt; i++)
for (j = 0; j < ksv_count; j++)
if (!memcmp(&ksvs[j * DRM_HDCP_KSV_LEN],
&revoked_ksv_list[i * DRM_HDCP_KSV_LEN],
DRM_HDCP_KSV_LEN)) {
DRM_DEBUG("Revoked KSV is ");
drm_hdcp_print_ksv(&ksvs[j * DRM_HDCP_KSV_LEN]);
ret++;
}
kfree(revoked_ksv_list);
return ret;
}
EXPORT_SYMBOL_GPL(drm_hdcp_check_ksvs_revoked);
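/*
 * A minimal usage sketch of the helper above (not part of the original file):
 * a display driver's HDCP code would typically run the revocation check after
 * reading the receiver/repeater KSVs. "dev", "ksv_fifo" and "num_ksvs" are
 * placeholder names, not a real driver API.
 *
 *     int revoked;
 *
 *     revoked = drm_hdcp_check_ksvs_revoked(dev, ksv_fifo, num_ksvs);
 *     if (revoked < 0)
 *         return revoked;    // SRM read/parse failure
 *     if (revoked > 0)
 *         return -EPERM;     // at least one KSV is revoked, abort auth
 */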
static struct drm_prop_enum_list drm_cp_enum_list[] = {
{ DRM_MODE_CONTENT_PROTECTION_UNDESIRED, "Undesired" },
{ DRM_MODE_CONTENT_PROTECTION_DESIRED, "Desired" },
{ DRM_MODE_CONTENT_PROTECTION_ENABLED, "Enabled" },
};
DRM_ENUM_NAME_FN(drm_get_content_protection_name, drm_cp_enum_list)
static struct drm_prop_enum_list drm_hdcp_content_type_enum_list[] = {
{ DRM_MODE_HDCP_CONTENT_TYPE0, "HDCP Type0" },
{ DRM_MODE_HDCP_CONTENT_TYPE1, "HDCP Type1" },
};
DRM_ENUM_NAME_FN(drm_get_hdcp_content_type_name,
drm_hdcp_content_type_enum_list)
/**
* drm_connector_attach_content_protection_property - attach content protection
* property
*
* @connector: connector to attach CP property on.
* @hdcp_content_type: is HDCP Content Type property needed for connector
*
* This is used to add support for content protection on select connectors.
* Content Protection is intentionally vague to allow for different underlying
 * technologies; however, it is most commonly implemented using HDCP.
*
 * When hdcp_content_type is true, an enum property called "HDCP Content Type"
 * is created (if it does not already exist) and attached to the connector.
 *
 * This property is used for sending the protected content's stream type
 * from userspace to the kernel on selected connectors. The protected content
 * provider decides the type of its content and declares it to the kernel.
*
* Content type will be used during the HDCP 2.2 authentication.
* Content type will be set to &drm_connector_state.hdcp_content_type.
*
* The content protection will be set to &drm_connector_state.content_protection
*
 * For kernel-triggered content protection state changes such as
 * DESIRED->ENABLED and ENABLED->DESIRED, drivers use
 * drm_hdcp_update_content_protection() to update the content protection
 * state of a connector.
*
* Returns:
* Zero on success, negative errno on failure.
*/
int drm_connector_attach_content_protection_property(
struct drm_connector *connector, bool hdcp_content_type)
{
struct drm_device *dev = connector->dev;
struct drm_property *prop =
dev->mode_config.content_protection_property;
if (!prop)
prop = drm_property_create_enum(dev, 0, "Content Protection",
drm_cp_enum_list,
ARRAY_SIZE(drm_cp_enum_list));
if (!prop)
return -ENOMEM;
drm_object_attach_property(&connector->base, prop,
DRM_MODE_CONTENT_PROTECTION_UNDESIRED);
dev->mode_config.content_protection_property = prop;
if (!hdcp_content_type)
return 0;
prop = dev->mode_config.hdcp_content_type_property;
if (!prop)
prop = drm_property_create_enum(dev, 0, "HDCP Content Type",
drm_hdcp_content_type_enum_list,
ARRAY_SIZE(
drm_hdcp_content_type_enum_list));
if (!prop)
return -ENOMEM;
drm_object_attach_property(&connector->base, prop,
DRM_MODE_HDCP_CONTENT_TYPE0);
dev->mode_config.hdcp_content_type_property = prop;
return 0;
}
EXPORT_SYMBOL(drm_connector_attach_content_protection_property);
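/*
 * A hedged usage sketch (not from the original file): a connector init path
 * that supports HDCP would typically attach the property once, passing true
 * to also get the optional "HDCP Content Type" property when HDCP 2.2 is
 * supported. "connector" stands for the driver's drm_connector.
 *
 *     ret = drm_connector_attach_content_protection_property(connector,
 *                                                            true);
 *     if (ret)
 *         return ret;
 */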
/**
* drm_hdcp_update_content_protection - Updates the content protection state
* of a connector
*
* @connector: drm_connector on which content protection state needs an update
* @val: New state of the content protection property
*
 * This function can be used by display drivers to propagate kernel-triggered
 * content protection state changes of a drm_connector, such as
 * DESIRED->ENABLED and ENABLED->DESIRED. No uevent is sent for
 * DESIRED->UNDESIRED or ENABLED->UNDESIRED, as userspace triggers such state
 * changes and the kernel performs them without fail. This function updates
 * the new state of the property in the connector's state and generates a
 * uevent to notify userspace.
*/
void drm_hdcp_update_content_protection(struct drm_connector *connector,
u64 val)
{
struct drm_device *dev = connector->dev;
struct drm_connector_state *state = connector->state;
WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
if (state->content_protection == val)
return;
state->content_protection = val;
drm_sysfs_connector_property_event(connector,
dev->mode_config.content_protection_property);
}
EXPORT_SYMBOL(drm_hdcp_update_content_protection);
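/*
 * Illustrative sketch only (assumed driver-side code, not from this file):
 * the helper asserts that connection_mutex is held, so a driver's HDCP worker
 * would update the state roughly like this, with "dev", "connector" and "ctx"
 * supplied by the caller:
 *
 *     drm_modeset_lock(&dev->mode_config.connection_mutex, ctx);
 *     drm_hdcp_update_content_protection(connector,
 *                                        DRM_MODE_CONTENT_PROTECTION_ENABLED);
 *     drm_modeset_unlock(&dev->mode_config.connection_mutex);
 */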
| linux-master | drivers/gpu/drm/display/drm_hdcp_helper.c |
// SPDX-License-Identifier: MIT
/*
* Copyright © 2018 Intel Corp
*
* Author:
* Manasi Navare <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/byteorder/generic.h>
#include <drm/display/drm_dp_helper.h>
#include <drm/display/drm_dsc_helper.h>
#include <drm/drm_print.h>
/**
* DOC: dsc helpers
*
* VESA specification for DP 1.4 adds a new feature called Display Stream
* Compression (DSC) used to compress the pixel bits before sending it on
* DP/eDP/MIPI DSI interface. DSC is required to be enabled so that the existing
 * display interfaces can support high resolutions at higher frame rates using
* the maximum available link capacity of these interfaces.
*
* These functions contain some common logic and helpers to deal with VESA
* Display Stream Compression standard required for DSC on Display Port/eDP or
* MIPI display interfaces.
*/
/**
* drm_dsc_dp_pps_header_init() - Initializes the PPS Header
* for DisplayPort as per the DP 1.4 spec.
* @pps_header: Secondary data packet header for DSC Picture
* Parameter Set as defined in &struct dp_sdp_header
*
* DP 1.4 spec defines the secondary data packet for sending the
* picture parameter infoframes from the source to the sink.
* This function populates the SDP header defined in
* &struct dp_sdp_header.
*/
void drm_dsc_dp_pps_header_init(struct dp_sdp_header *pps_header)
{
memset(pps_header, 0, sizeof(*pps_header));
pps_header->HB1 = DP_SDP_PPS;
pps_header->HB2 = DP_SDP_PPS_HEADER_PAYLOAD_BYTES_MINUS_1;
}
EXPORT_SYMBOL(drm_dsc_dp_pps_header_init);
/**
* drm_dsc_dp_rc_buffer_size - get rc buffer size in bytes
* @rc_buffer_block_size: block size code, according to DPCD offset 62h
* @rc_buffer_size: number of blocks - 1, according to DPCD offset 63h
*
* return:
* buffer size in bytes, or 0 on invalid input
*/
int drm_dsc_dp_rc_buffer_size(u8 rc_buffer_block_size, u8 rc_buffer_size)
{
int size = 1024 * (rc_buffer_size + 1);
switch (rc_buffer_block_size) {
case DP_DSC_RC_BUF_BLK_SIZE_1:
return 1 * size;
case DP_DSC_RC_BUF_BLK_SIZE_4:
return 4 * size;
case DP_DSC_RC_BUF_BLK_SIZE_16:
return 16 * size;
case DP_DSC_RC_BUF_BLK_SIZE_64:
return 64 * size;
default:
return 0;
}
}
EXPORT_SYMBOL(drm_dsc_dp_rc_buffer_size);
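/*
 * A worked example of the helper above (not original code): with
 * rc_buffer_block_size = DP_DSC_RC_BUF_BLK_SIZE_16 (16 kB blocks) and
 * rc_buffer_size = 15 (i.e. 16 blocks), the helper returns
 * 16 * 1024 * (15 + 1) = 262144 bytes (256 kB).
 */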
/**
* drm_dsc_pps_payload_pack() - Populates the DSC PPS
*
* @pps_payload:
* Bitwise struct for DSC Picture Parameter Set. This is defined
* by &struct drm_dsc_picture_parameter_set
* @dsc_cfg:
* DSC Configuration data filled by driver as defined by
* &struct drm_dsc_config
*
* DSC source device sends a picture parameter set (PPS) containing the
* information required by the sink to decode the compressed frame. Driver
* populates the DSC PPS struct using the DSC configuration parameters in
* the order expected by the DSC Display Sink device. For the DSC, the sink
* device expects the PPS payload in big endian format for fields
* that span more than 1 byte.
*/
void drm_dsc_pps_payload_pack(struct drm_dsc_picture_parameter_set *pps_payload,
const struct drm_dsc_config *dsc_cfg)
{
int i;
/* Protect against someone accidentally changing struct size */
BUILD_BUG_ON(sizeof(*pps_payload) !=
DP_SDP_PPS_HEADER_PAYLOAD_BYTES_MINUS_1 + 1);
memset(pps_payload, 0, sizeof(*pps_payload));
/* PPS 0 */
pps_payload->dsc_version =
dsc_cfg->dsc_version_minor |
dsc_cfg->dsc_version_major << DSC_PPS_VERSION_MAJOR_SHIFT;
/* PPS 1, 2 is 0 */
/* PPS 3 */
pps_payload->pps_3 =
dsc_cfg->line_buf_depth |
dsc_cfg->bits_per_component << DSC_PPS_BPC_SHIFT;
/* PPS 4 */
pps_payload->pps_4 =
((dsc_cfg->bits_per_pixel & DSC_PPS_BPP_HIGH_MASK) >>
DSC_PPS_MSB_SHIFT) |
dsc_cfg->vbr_enable << DSC_PPS_VBR_EN_SHIFT |
dsc_cfg->simple_422 << DSC_PPS_SIMPLE422_SHIFT |
dsc_cfg->convert_rgb << DSC_PPS_CONVERT_RGB_SHIFT |
dsc_cfg->block_pred_enable << DSC_PPS_BLOCK_PRED_EN_SHIFT;
/* PPS 5 */
pps_payload->bits_per_pixel_low =
(dsc_cfg->bits_per_pixel & DSC_PPS_LSB_MASK);
/*
	 * The DSC panel expects PPS fields that span 2 bytes to be in big
	 * endian format. cpu_to_be16() performs the conversion: on a little
	 * endian host it swaps the bytes, otherwise it leaves the value
	 * unchanged.
*/
/* PPS 6, 7 */
pps_payload->pic_height = cpu_to_be16(dsc_cfg->pic_height);
/* PPS 8, 9 */
pps_payload->pic_width = cpu_to_be16(dsc_cfg->pic_width);
/* PPS 10, 11 */
pps_payload->slice_height = cpu_to_be16(dsc_cfg->slice_height);
/* PPS 12, 13 */
pps_payload->slice_width = cpu_to_be16(dsc_cfg->slice_width);
/* PPS 14, 15 */
pps_payload->chunk_size = cpu_to_be16(dsc_cfg->slice_chunk_size);
/* PPS 16 */
pps_payload->initial_xmit_delay_high =
((dsc_cfg->initial_xmit_delay &
DSC_PPS_INIT_XMIT_DELAY_HIGH_MASK) >>
DSC_PPS_MSB_SHIFT);
/* PPS 17 */
pps_payload->initial_xmit_delay_low =
(dsc_cfg->initial_xmit_delay & DSC_PPS_LSB_MASK);
/* PPS 18, 19 */
pps_payload->initial_dec_delay =
cpu_to_be16(dsc_cfg->initial_dec_delay);
/* PPS 20 is 0 */
/* PPS 21 */
pps_payload->initial_scale_value =
dsc_cfg->initial_scale_value;
/* PPS 22, 23 */
pps_payload->scale_increment_interval =
cpu_to_be16(dsc_cfg->scale_increment_interval);
/* PPS 24 */
pps_payload->scale_decrement_interval_high =
((dsc_cfg->scale_decrement_interval &
DSC_PPS_SCALE_DEC_INT_HIGH_MASK) >>
DSC_PPS_MSB_SHIFT);
/* PPS 25 */
pps_payload->scale_decrement_interval_low =
(dsc_cfg->scale_decrement_interval & DSC_PPS_LSB_MASK);
/* PPS 26[7:0], PPS 27[7:5] RESERVED */
/* PPS 27 */
pps_payload->first_line_bpg_offset =
dsc_cfg->first_line_bpg_offset;
/* PPS 28, 29 */
pps_payload->nfl_bpg_offset =
cpu_to_be16(dsc_cfg->nfl_bpg_offset);
/* PPS 30, 31 */
pps_payload->slice_bpg_offset =
cpu_to_be16(dsc_cfg->slice_bpg_offset);
/* PPS 32, 33 */
pps_payload->initial_offset =
cpu_to_be16(dsc_cfg->initial_offset);
/* PPS 34, 35 */
pps_payload->final_offset = cpu_to_be16(dsc_cfg->final_offset);
/* PPS 36 */
pps_payload->flatness_min_qp = dsc_cfg->flatness_min_qp;
/* PPS 37 */
pps_payload->flatness_max_qp = dsc_cfg->flatness_max_qp;
/* PPS 38, 39 */
pps_payload->rc_model_size = cpu_to_be16(dsc_cfg->rc_model_size);
/* PPS 40 */
pps_payload->rc_edge_factor = DSC_RC_EDGE_FACTOR_CONST;
/* PPS 41 */
pps_payload->rc_quant_incr_limit0 =
dsc_cfg->rc_quant_incr_limit0;
/* PPS 42 */
pps_payload->rc_quant_incr_limit1 =
dsc_cfg->rc_quant_incr_limit1;
/* PPS 43 */
pps_payload->rc_tgt_offset = DSC_RC_TGT_OFFSET_LO_CONST |
DSC_RC_TGT_OFFSET_HI_CONST << DSC_PPS_RC_TGT_OFFSET_HI_SHIFT;
/* PPS 44 - 57 */
for (i = 0; i < DSC_NUM_BUF_RANGES - 1; i++)
pps_payload->rc_buf_thresh[i] =
dsc_cfg->rc_buf_thresh[i];
/* PPS 58 - 87 */
/*
* For DSC sink programming the RC Range parameter fields
* are as follows: Min_qp[15:11], max_qp[10:6], offset[5:0]
*/
for (i = 0; i < DSC_NUM_BUF_RANGES; i++) {
pps_payload->rc_range_parameters[i] =
cpu_to_be16((dsc_cfg->rc_range_params[i].range_min_qp <<
DSC_PPS_RC_RANGE_MINQP_SHIFT) |
(dsc_cfg->rc_range_params[i].range_max_qp <<
DSC_PPS_RC_RANGE_MAXQP_SHIFT) |
(dsc_cfg->rc_range_params[i].range_bpg_offset));
}
/* PPS 88 */
pps_payload->native_422_420 = dsc_cfg->native_422 |
dsc_cfg->native_420 << DSC_PPS_NATIVE_420_SHIFT;
/* PPS 89 */
pps_payload->second_line_bpg_offset =
dsc_cfg->second_line_bpg_offset;
/* PPS 90, 91 */
pps_payload->nsl_bpg_offset =
cpu_to_be16(dsc_cfg->nsl_bpg_offset);
/* PPS 92, 93 */
pps_payload->second_line_offset_adj =
cpu_to_be16(dsc_cfg->second_line_offset_adj);
	/* PPS 94 - 127 are 0 */
}
EXPORT_SYMBOL(drm_dsc_pps_payload_pack);
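/*
 * A hedged usage sketch (not part of the original file): a DP driver
 * typically builds the full PPS SDP from the two helpers above and then
 * writes it to its hardware-specific PPS/SDP registers. "dsc_cfg" stands for
 * the driver's already-computed struct drm_dsc_config.
 *
 *     struct dp_sdp_header pps_header;
 *     struct drm_dsc_picture_parameter_set pps_payload;
 *
 *     drm_dsc_dp_pps_header_init(&pps_header);
 *     drm_dsc_pps_payload_pack(&pps_payload, dsc_cfg);
 *     // transmit pps_header followed by pps_payload as the PPS SDP
 */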
/**
* drm_dsc_set_const_params() - Set DSC parameters considered typically
* constant across operation modes
*
* @vdsc_cfg:
* DSC Configuration data partially filled by driver
*/
void drm_dsc_set_const_params(struct drm_dsc_config *vdsc_cfg)
{
if (!vdsc_cfg->rc_model_size)
vdsc_cfg->rc_model_size = DSC_RC_MODEL_SIZE_CONST;
vdsc_cfg->rc_edge_factor = DSC_RC_EDGE_FACTOR_CONST;
vdsc_cfg->rc_tgt_offset_high = DSC_RC_TGT_OFFSET_HI_CONST;
vdsc_cfg->rc_tgt_offset_low = DSC_RC_TGT_OFFSET_LO_CONST;
if (vdsc_cfg->bits_per_component <= 10)
vdsc_cfg->mux_word_size = DSC_MUX_WORD_SIZE_8_10_BPC;
else
vdsc_cfg->mux_word_size = DSC_MUX_WORD_SIZE_12_BPC;
}
EXPORT_SYMBOL(drm_dsc_set_const_params);
/* From DSC_v1.11 spec, rc_parameter_Set syntax element typically constant */
static const u16 drm_dsc_rc_buf_thresh[] = {
896, 1792, 2688, 3584, 4480, 5376, 6272, 6720, 7168, 7616,
7744, 7872, 8000, 8064
};
/**
* drm_dsc_set_rc_buf_thresh() - Set thresholds for the RC model
* in accordance with the DSC 1.2 specification.
*
* @vdsc_cfg: DSC Configuration data partially filled by driver
*/
void drm_dsc_set_rc_buf_thresh(struct drm_dsc_config *vdsc_cfg)
{
int i;
BUILD_BUG_ON(ARRAY_SIZE(drm_dsc_rc_buf_thresh) !=
DSC_NUM_BUF_RANGES - 1);
BUILD_BUG_ON(ARRAY_SIZE(drm_dsc_rc_buf_thresh) !=
ARRAY_SIZE(vdsc_cfg->rc_buf_thresh));
for (i = 0; i < ARRAY_SIZE(drm_dsc_rc_buf_thresh); i++)
vdsc_cfg->rc_buf_thresh[i] = drm_dsc_rc_buf_thresh[i] >> 6;
/*
* For 6bpp, RC Buffer threshold 12 and 13 need a different value
* as per C Model
*/
if (vdsc_cfg->bits_per_pixel == 6 << 4) {
vdsc_cfg->rc_buf_thresh[12] = 7936 >> 6;
vdsc_cfg->rc_buf_thresh[13] = 8000 >> 6;
}
}
EXPORT_SYMBOL(drm_dsc_set_rc_buf_thresh);
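/*
 * Note on the scaling above (a worked example, not original code): the spec
 * table stores the thresholds in full precision, while the PPS field only
 * carries bits [13:6] of each threshold, hence the right shift by 6. The
 * first threshold, 896, is therefore programmed as 896 >> 6 = 14.
 */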
struct rc_parameters {
u16 initial_xmit_delay;
u8 first_line_bpg_offset;
u16 initial_offset;
u8 flatness_min_qp;
u8 flatness_max_qp;
u8 rc_quant_incr_limit0;
u8 rc_quant_incr_limit1;
struct drm_dsc_rc_range_parameters rc_range_params[DSC_NUM_BUF_RANGES];
};
struct rc_parameters_data {
u8 bpp;
u8 bpc;
struct rc_parameters params;
};
#define DSC_BPP(bpp) ((bpp) << 4)
/*
* Rate Control Related Parameter Recommended Values from DSC_v1.1 spec prior
* to DSC 1.1 fractional bpp underflow SCR (DSC_v1.1_E1.pdf)
*
* Cross-checked against C Model releases: DSC_model_20161212 and 20210623
*/
static const struct rc_parameters_data rc_parameters_pre_scr[] = {
{
.bpp = DSC_BPP(6), .bpc = 8,
{ 683, 15, 6144, 3, 13, 11, 11, {
{ 0, 2, 0 }, { 1, 4, -2 }, { 3, 6, -2 }, { 4, 6, -4 },
{ 5, 7, -6 }, { 5, 7, -6 }, { 6, 7, -6 }, { 6, 8, -8 },
{ 7, 9, -8 }, { 8, 10, -10 }, { 9, 11, -10 }, { 10, 12, -12 },
{ 10, 13, -12 }, { 12, 14, -12 }, { 15, 15, -12 }
}
}
},
{
.bpp = DSC_BPP(8), .bpc = 8,
{ 512, 12, 6144, 3, 12, 11, 11, {
{ 0, 4, 2 }, { 0, 4, 0 }, { 1, 5, 0 }, { 1, 6, -2 },
{ 3, 7, -4 }, { 3, 7, -6 }, { 3, 7, -8 }, { 3, 8, -8 },
{ 3, 9, -8 }, { 3, 10, -10 }, { 5, 11, -10 }, { 5, 12, -12 },
{ 5, 13, -12 }, { 7, 13, -12 }, { 13, 15, -12 }
}
}
},
{
.bpp = DSC_BPP(8), .bpc = 10,
{ 512, 12, 6144, 7, 16, 15, 15, {
/*
* DSC model/pre-SCR-cfg has 8 for range_max_qp[0], however
* VESA DSC 1.1 Table E-5 sets it to 4.
*/
{ 0, 4, 2 }, { 4, 8, 0 }, { 5, 9, 0 }, { 5, 10, -2 },
{ 7, 11, -4 }, { 7, 11, -6 }, { 7, 11, -8 }, { 7, 12, -8 },
{ 7, 13, -8 }, { 7, 14, -10 }, { 9, 15, -10 }, { 9, 16, -12 },
{ 9, 17, -12 }, { 11, 17, -12 }, { 17, 19, -12 }
}
}
},
{
.bpp = DSC_BPP(8), .bpc = 12,
{ 512, 12, 6144, 11, 20, 19, 19, {
{ 0, 12, 2 }, { 4, 12, 0 }, { 9, 13, 0 }, { 9, 14, -2 },
{ 11, 15, -4 }, { 11, 15, -6 }, { 11, 15, -8 }, { 11, 16, -8 },
{ 11, 17, -8 }, { 11, 18, -10 }, { 13, 19, -10 },
{ 13, 20, -12 }, { 13, 21, -12 }, { 15, 21, -12 },
{ 21, 23, -12 }
}
}
},
{
.bpp = DSC_BPP(10), .bpc = 8,
{ 410, 12, 5632, 3, 12, 11, 11, {
{ 0, 3, 2 }, { 0, 4, 0 }, { 1, 5, 0 }, { 2, 6, -2 },
{ 3, 7, -4 }, { 3, 7, -6 }, { 3, 7, -8 }, { 3, 8, -8 },
{ 3, 9, -8 }, { 3, 9, -10 }, { 5, 10, -10 }, { 5, 11, -10 },
{ 5, 12, -12 }, { 7, 13, -12 }, { 13, 15, -12 }
}
}
},
{
.bpp = DSC_BPP(10), .bpc = 10,
{ 410, 12, 5632, 7, 16, 15, 15, {
{ 0, 7, 2 }, { 4, 8, 0 }, { 5, 9, 0 }, { 6, 10, -2 },
{ 7, 11, -4 }, { 7, 11, -6 }, { 7, 11, -8 }, { 7, 12, -8 },
{ 7, 13, -8 }, { 7, 13, -10 }, { 9, 14, -10 }, { 9, 15, -10 },
{ 9, 16, -12 }, { 11, 17, -12 }, { 17, 19, -12 }
}
}
},
{
.bpp = DSC_BPP(10), .bpc = 12,
{ 410, 12, 5632, 11, 20, 19, 19, {
{ 0, 11, 2 }, { 4, 12, 0 }, { 9, 13, 0 }, { 10, 14, -2 },
{ 11, 15, -4 }, { 11, 15, -6 }, { 11, 15, -8 }, { 11, 16, -8 },
{ 11, 17, -8 }, { 11, 17, -10 }, { 13, 18, -10 },
{ 13, 19, -10 }, { 13, 20, -12 }, { 15, 21, -12 },
{ 21, 23, -12 }
}
}
},
{
.bpp = DSC_BPP(12), .bpc = 8,
{ 341, 15, 2048, 3, 12, 11, 11, {
{ 0, 2, 2 }, { 0, 4, 0 }, { 1, 5, 0 }, { 1, 6, -2 },
{ 3, 7, -4 }, { 3, 7, -6 }, { 3, 7, -8 }, { 3, 8, -8 },
{ 3, 9, -8 }, { 3, 10, -10 }, { 5, 11, -10 },
{ 5, 12, -12 }, { 5, 13, -12 }, { 7, 13, -12 }, { 13, 15, -12 }
}
}
},
{
.bpp = DSC_BPP(12), .bpc = 10,
{ 341, 15, 2048, 7, 16, 15, 15, {
{ 0, 2, 2 }, { 2, 5, 0 }, { 3, 7, 0 }, { 4, 8, -2 },
{ 6, 9, -4 }, { 7, 10, -6 }, { 7, 11, -8 }, { 7, 12, -8 },
{ 7, 13, -8 }, { 7, 14, -10 }, { 9, 15, -10 }, { 9, 16, -12 },
{ 9, 17, -12 }, { 11, 17, -12 }, { 17, 19, -12 }
}
}
},
{
.bpp = DSC_BPP(12), .bpc = 12,
{ 341, 15, 2048, 11, 20, 19, 19, {
{ 0, 6, 2 }, { 4, 9, 0 }, { 7, 11, 0 }, { 8, 12, -2 },
{ 10, 13, -4 }, { 11, 14, -6 }, { 11, 15, -8 }, { 11, 16, -8 },
{ 11, 17, -8 }, { 11, 18, -10 }, { 13, 19, -10 },
{ 13, 20, -12 }, { 13, 21, -12 }, { 15, 21, -12 },
{ 21, 23, -12 }
}
}
},
{
.bpp = DSC_BPP(15), .bpc = 8,
{ 273, 15, 2048, 3, 12, 11, 11, {
{ 0, 0, 10 }, { 0, 1, 8 }, { 0, 1, 6 }, { 0, 2, 4 },
{ 1, 2, 2 }, { 1, 3, 0 }, { 1, 4, -2 }, { 2, 4, -4 },
{ 3, 4, -6 }, { 3, 5, -8 }, { 4, 6, -10 }, { 5, 7, -10 },
{ 5, 8, -12 }, { 7, 13, -12 }, { 13, 15, -12 }
}
}
},
{
.bpp = DSC_BPP(15), .bpc = 10,
{ 273, 15, 2048, 7, 16, 15, 15, {
{ 0, 2, 10 }, { 2, 5, 8 }, { 3, 5, 6 }, { 4, 6, 4 },
{ 5, 6, 2 }, { 5, 7, 0 }, { 5, 8, -2 }, { 6, 8, -4 },
{ 7, 8, -6 }, { 7, 9, -8 }, { 8, 10, -10 }, { 9, 11, -10 },
{ 9, 12, -12 }, { 11, 17, -12 }, { 17, 19, -12 }
}
}
},
{
.bpp = DSC_BPP(15), .bpc = 12,
{ 273, 15, 2048, 11, 20, 19, 19, {
{ 0, 4, 10 }, { 2, 7, 8 }, { 4, 9, 6 }, { 6, 11, 4 },
{ 9, 11, 2 }, { 9, 11, 0 }, { 9, 12, -2 }, { 10, 12, -4 },
{ 11, 12, -6 }, { 11, 13, -8 }, { 12, 14, -10 },
{ 13, 15, -10 }, { 13, 16, -12 }, { 15, 21, -12 },
{ 21, 23, -12 }
}
}
},
{ /* sentinel */ }
};
/*
* Selected Rate Control Related Parameter Recommended Values from DSC v1.2, v1.2a, v1.2b and
* DSC_v1.1_E1 specs.
*
* Cross-checked against C Model releases: DSC_model_20161212 and 20210623
*/
static const struct rc_parameters_data rc_parameters_1_2_444[] = {
{
.bpp = DSC_BPP(6), .bpc = 8,
{ 768, 15, 6144, 3, 13, 11, 11, {
{ 0, 4, 0 }, { 1, 6, -2 }, { 3, 8, -2 }, { 4, 8, -4 },
{ 5, 9, -6 }, { 5, 9, -6 }, { 6, 9, -6 }, { 6, 10, -8 },
{ 7, 11, -8 }, { 8, 12, -10 }, { 9, 12, -10 }, { 10, 12, -12 },
{ 10, 12, -12 }, { 11, 12, -12 }, { 13, 14, -12 }
}
}
},
{
.bpp = DSC_BPP(6), .bpc = 10,
{ 768, 15, 6144, 7, 17, 15, 15, {
{ 0, 8, 0 }, { 3, 10, -2 }, { 7, 12, -2 }, { 8, 12, -4 },
{ 9, 13, -6 }, { 9, 13, -6 }, { 10, 13, -6 }, { 10, 14, -8 },
{ 11, 15, -8 }, { 12, 16, -10 }, { 13, 16, -10 },
{ 14, 16, -12 }, { 14, 16, -12 }, { 15, 16, -12 },
{ 17, 18, -12 }
}
}
},
{
.bpp = DSC_BPP(6), .bpc = 12,
{ 768, 15, 6144, 11, 21, 19, 19, {
{ 0, 12, 0 }, { 5, 14, -2 }, { 11, 16, -2 }, { 12, 16, -4 },
{ 13, 17, -6 }, { 13, 17, -6 }, { 14, 17, -6 }, { 14, 18, -8 },
{ 15, 19, -8 }, { 16, 20, -10 }, { 17, 20, -10 },
{ 18, 20, -12 }, { 18, 20, -12 }, { 19, 20, -12 },
{ 21, 22, -12 }
}
}
},
{
.bpp = DSC_BPP(6), .bpc = 14,
{ 768, 15, 6144, 15, 25, 23, 23, {
{ 0, 16, 0 }, { 7, 18, -2 }, { 15, 20, -2 }, { 16, 20, -4 },
{ 17, 21, -6 }, { 17, 21, -6 }, { 18, 21, -6 }, { 18, 22, -8 },
{ 19, 23, -8 }, { 20, 24, -10 }, { 21, 24, -10 },
{ 22, 24, -12 }, { 22, 24, -12 }, { 23, 24, -12 },
{ 25, 26, -12 }
}
}
},
{
.bpp = DSC_BPP(6), .bpc = 16,
{ 768, 15, 6144, 19, 29, 27, 27, {
{ 0, 20, 0 }, { 9, 22, -2 }, { 19, 24, -2 }, { 20, 24, -4 },
{ 21, 25, -6 }, { 21, 25, -6 }, { 22, 25, -6 }, { 22, 26, -8 },
{ 23, 27, -8 }, { 24, 28, -10 }, { 25, 28, -10 },
{ 26, 28, -12 }, { 26, 28, -12 }, { 27, 28, -12 },
{ 29, 30, -12 }
}
}
},
{
.bpp = DSC_BPP(8), .bpc = 8,
{ 512, 12, 6144, 3, 12, 11, 11, {
{ 0, 4, 2 }, { 0, 4, 0 }, { 1, 5, 0 }, { 1, 6, -2 },
{ 3, 7, -4 }, { 3, 7, -6 }, { 3, 7, -8 }, { 3, 8, -8 },
{ 3, 9, -8 }, { 3, 10, -10 }, { 5, 10, -10 }, { 5, 11, -12 },
{ 5, 11, -12 }, { 9, 12, -12 }, { 12, 13, -12 }
}
}
},
{
.bpp = DSC_BPP(8), .bpc = 10,
{ 512, 12, 6144, 7, 16, 15, 15, {
{ 0, 8, 2 }, { 4, 8, 0 }, { 5, 9, 0 }, { 5, 10, -2 },
{ 7, 11, -4 }, { 7, 11, -6 }, { 7, 11, -8 }, { 7, 12, -8 },
{ 7, 13, -8 }, { 7, 14, -10 }, { 9, 14, -10 }, { 9, 15, -12 },
{ 9, 15, -12 }, { 13, 16, -12 }, { 16, 17, -12 }
}
}
},
{
.bpp = DSC_BPP(8), .bpc = 12,
{ 512, 12, 6144, 11, 20, 19, 19, {
{ 0, 12, 2 }, { 4, 12, 0 }, { 9, 13, 0 }, { 9, 14, -2 },
{ 11, 15, -4 }, { 11, 15, -6 }, { 11, 15, -8 }, { 11, 16, -8 },
{ 11, 17, -8 }, { 11, 18, -10 }, { 13, 18, -10 },
{ 13, 19, -12 }, { 13, 19, -12 }, { 17, 20, -12 },
{ 20, 21, -12 }
}
}
},
{
.bpp = DSC_BPP(8), .bpc = 14,
{ 512, 12, 6144, 15, 24, 23, 23, {
{ 0, 12, 2 }, { 5, 13, 0 }, { 11, 15, 0 }, { 12, 17, -2 },
{ 15, 19, -4 }, { 15, 19, -6 }, { 15, 19, -8 }, { 15, 20, -8 },
{ 15, 21, -8 }, { 15, 22, -10 }, { 17, 22, -10 },
{ 17, 23, -12 }, { 17, 23, -12 }, { 21, 24, -12 },
{ 24, 25, -12 }
}
}
},
{
.bpp = DSC_BPP(8), .bpc = 16,
{ 512, 12, 6144, 19, 28, 27, 27, {
{ 0, 12, 2 }, { 6, 14, 0 }, { 13, 17, 0 }, { 15, 20, -2 },
{ 19, 23, -4 }, { 19, 23, -6 }, { 19, 23, -8 }, { 19, 24, -8 },
{ 19, 25, -8 }, { 19, 26, -10 }, { 21, 26, -10 },
{ 21, 27, -12 }, { 21, 27, -12 }, { 25, 28, -12 },
{ 28, 29, -12 }
}
}
},
{
.bpp = DSC_BPP(10), .bpc = 8,
{ 410, 15, 5632, 3, 12, 11, 11, {
{ 0, 3, 2 }, { 0, 4, 0 }, { 1, 5, 0 }, { 2, 6, -2 },
{ 3, 7, -4 }, { 3, 7, -6 }, { 3, 7, -8 }, { 3, 8, -8 },
{ 3, 9, -8 }, { 3, 9, -10 }, { 5, 10, -10 }, { 5, 10, -10 },
{ 5, 11, -12 }, { 7, 11, -12 }, { 11, 12, -12 }
}
}
},
{
.bpp = DSC_BPP(10), .bpc = 10,
{ 410, 15, 5632, 7, 16, 15, 15, {
{ 0, 7, 2 }, { 4, 8, 0 }, { 5, 9, 0 }, { 6, 10, -2 },
{ 7, 11, -4 }, { 7, 11, -6 }, { 7, 11, -8 }, { 7, 12, -8 },
{ 7, 13, -8 }, { 7, 13, -10 }, { 9, 14, -10 }, { 9, 14, -10 },
{ 9, 15, -12 }, { 11, 15, -12 }, { 15, 16, -12 }
}
}
},
{
.bpp = DSC_BPP(10), .bpc = 12,
{ 410, 15, 5632, 11, 20, 19, 19, {
{ 0, 11, 2 }, { 4, 12, 0 }, { 9, 13, 0 }, { 10, 14, -2 },
{ 11, 15, -4 }, { 11, 15, -6 }, { 11, 15, -8 }, { 11, 16, -8 },
{ 11, 17, -8 }, { 11, 17, -10 }, { 13, 18, -10 },
{ 13, 18, -10 }, { 13, 19, -12 }, { 15, 19, -12 },
{ 19, 20, -12 }
}
}
},
{
.bpp = DSC_BPP(10), .bpc = 14,
{ 410, 15, 5632, 15, 24, 23, 23, {
{ 0, 11, 2 }, { 5, 13, 0 }, { 11, 15, 0 }, { 13, 18, -2 },
{ 15, 19, -4 }, { 15, 19, -6 }, { 15, 19, -8 }, { 15, 20, -8 },
{ 15, 21, -8 }, { 15, 21, -10 }, { 17, 22, -10 },
{ 17, 22, -10 }, { 17, 23, -12 }, { 19, 23, -12 },
{ 23, 24, -12 }
}
}
},
{
.bpp = DSC_BPP(10), .bpc = 16,
{ 410, 15, 5632, 19, 28, 27, 27, {
{ 0, 11, 2 }, { 6, 14, 0 }, { 13, 17, 0 }, { 16, 20, -2 },
{ 19, 23, -4 }, { 19, 23, -6 }, { 19, 23, -8 }, { 19, 24, -8 },
{ 19, 25, -8 }, { 19, 25, -10 }, { 21, 26, -10 },
{ 21, 26, -10 }, { 21, 27, -12 }, { 23, 27, -12 },
{ 27, 28, -12 }
}
}
},
{
.bpp = DSC_BPP(12), .bpc = 8,
{ 341, 15, 2048, 3, 12, 11, 11, {
{ 0, 2, 2 }, { 0, 4, 0 }, { 1, 5, 0 }, { 1, 6, -2 },
{ 3, 7, -4 }, { 3, 7, -6 }, { 3, 7, -8 }, { 3, 8, -8 },
{ 3, 8, -8 }, { 3, 9, -10 }, { 5, 9, -10 }, { 5, 9, -12 },
{ 5, 9, -12 }, { 7, 10, -12 }, { 10, 11, -12 }
}
}
},
{
.bpp = DSC_BPP(12), .bpc = 10,
{ 341, 15, 2048, 7, 16, 15, 15, {
{ 0, 2, 2 }, { 2, 5, 0 }, { 3, 7, 0 }, { 4, 8, -2 },
{ 6, 9, -4 }, { 7, 10, -6 }, { 7, 11, -8 }, { 7, 12, -8 },
{ 7, 12, -8 }, { 7, 13, -10 }, { 9, 13, -10 }, { 9, 13, -12 },
{ 9, 13, -12 }, { 11, 14, -12 }, { 14, 15, -12 }
}
}
},
{
.bpp = DSC_BPP(12), .bpc = 12,
{ 341, 15, 2048, 11, 20, 19, 19, {
{ 0, 6, 2 }, { 4, 9, 0 }, { 7, 11, 0 }, { 8, 12, -2 },
{ 10, 13, -4 }, { 11, 14, -6 }, { 11, 15, -8 }, { 11, 16, -8 },
{ 11, 16, -8 }, { 11, 17, -10 }, { 13, 17, -10 },
{ 13, 17, -12 }, { 13, 17, -12 }, { 15, 18, -12 },
{ 18, 19, -12 }
}
}
},
{
.bpp = DSC_BPP(12), .bpc = 14,
{ 341, 15, 2048, 15, 24, 23, 23, {
{ 0, 6, 2 }, { 7, 10, 0 }, { 9, 13, 0 }, { 11, 16, -2 },
{ 14, 17, -4 }, { 15, 18, -6 }, { 15, 19, -8 }, { 15, 20, -8 },
{ 15, 20, -8 }, { 15, 21, -10 }, { 17, 21, -10 },
{ 17, 21, -12 }, { 17, 21, -12 }, { 19, 22, -12 },
{ 22, 23, -12 }
}
}
},
{
.bpp = DSC_BPP(12), .bpc = 16,
{ 341, 15, 2048, 19, 28, 27, 27, {
{ 0, 6, 2 }, { 6, 11, 0 }, { 11, 15, 0 }, { 14, 18, -2 },
{ 18, 21, -4 }, { 19, 22, -6 }, { 19, 23, -8 }, { 19, 24, -8 },
{ 19, 24, -8 }, { 19, 25, -10 }, { 21, 25, -10 },
{ 21, 25, -12 }, { 21, 25, -12 }, { 23, 26, -12 },
{ 26, 27, -12 }
}
}
},
{
.bpp = DSC_BPP(15), .bpc = 8,
{ 273, 15, 2048, 3, 12, 11, 11, {
{ 0, 0, 10 }, { 0, 1, 8 }, { 0, 1, 6 }, { 0, 2, 4 },
{ 1, 2, 2 }, { 1, 3, 0 }, { 1, 3, -2 }, { 2, 4, -4 },
{ 2, 5, -6 }, { 3, 5, -8 }, { 4, 6, -10 }, { 4, 7, -10 },
{ 5, 7, -12 }, { 7, 8, -12 }, { 8, 9, -12 }
}
}
},
{
.bpp = DSC_BPP(15), .bpc = 10,
{ 273, 15, 2048, 7, 16, 15, 15, {
{ 0, 2, 10 }, { 2, 5, 8 }, { 3, 5, 6 }, { 4, 6, 4 },
{ 5, 6, 2 }, { 5, 7, 0 }, { 5, 7, -2 }, { 6, 8, -4 },
{ 6, 9, -6 }, { 7, 9, -8 }, { 8, 10, -10 }, { 8, 11, -10 },
{ 9, 11, -12 }, { 11, 12, -12 }, { 12, 13, -12 }
}
}
},
{
.bpp = DSC_BPP(15), .bpc = 12,
{ 273, 15, 2048, 11, 20, 19, 19, {
{ 0, 4, 10 }, { 2, 7, 8 }, { 4, 9, 6 }, { 6, 11, 4 },
{ 9, 11, 2 }, { 9, 11, 0 }, { 9, 12, -2 }, { 10, 12, -4 },
{ 11, 13, -6 }, { 11, 13, -8 }, { 12, 14, -10 },
{ 13, 15, -10 }, { 13, 15, -12 }, { 15, 16, -12 },
{ 16, 17, -12 }
}
}
},
{
.bpp = DSC_BPP(15), .bpc = 14,
{ 273, 15, 2048, 15, 24, 23, 23, {
{ 0, 4, 10 }, { 3, 8, 8 }, { 6, 11, 6 }, { 9, 14, 4 },
{ 13, 15, 2 }, { 13, 15, 0 }, { 13, 16, -2 }, { 14, 16, -4 },
{ 15, 17, -6 }, { 15, 17, -8 }, { 16, 18, -10 },
{ 17, 19, -10 }, { 17, 19, -12 }, { 19, 20, -12 },
{ 20, 21, -12 }
}
}
},
{
.bpp = DSC_BPP(15), .bpc = 16,
{ 273, 15, 2048, 19, 28, 27, 27, {
{ 0, 4, 10 }, { 4, 9, 8 }, { 8, 13, 6 }, { 12, 17, 4 },
{ 17, 19, 2 }, { 17, 20, 0 }, { 17, 20, -2 }, { 18, 20, -4 },
{ 19, 21, -6 }, { 19, 21, -8 }, { 20, 22, -10 },
{ 21, 23, -10 }, { 21, 23, -12 }, { 23, 24, -12 },
{ 24, 25, -12 }
}
}
},
{ /* sentinel */ }
};
/*
* Selected Rate Control Related Parameter Recommended Values for 4:2:2 from
* DSC v1.2, v1.2a, v1.2b
*
* Cross-checked against C Model releases: DSC_model_20161212 and 20210623
*/
static const struct rc_parameters_data rc_parameters_1_2_422[] = {
{
.bpp = DSC_BPP(6), .bpc = 8,
{ 512, 15, 6144, 3, 12, 11, 11, {
{ 0, 4, 2 }, { 0, 4, 0 }, { 1, 5, 0 }, { 1, 6, -2 },
{ 3, 7, -4 }, { 3, 7, -6 }, { 3, 7, -8 }, { 3, 8, -8 },
{ 3, 9, -8 }, { 3, 10, -10 }, { 5, 10, -10 }, { 5, 11, -12 },
{ 5, 11, -12 }, { 9, 12, -12 }, { 12, 13, -12 }
}
}
},
{
.bpp = DSC_BPP(6), .bpc = 10,
{ 512, 15, 6144, 7, 16, 15, 15, {
{ 0, 8, 2 }, { 4, 8, 0 }, { 5, 9, 0 }, { 5, 10, -2 },
{ 7, 11, -4 }, { 7, 11, -6 }, { 7, 11, -8 }, { 7, 12, -8 },
{ 7, 13, -8 }, { 7, 14, -10 }, { 9, 14, -10 }, { 9, 15, -12 },
{ 9, 15, -12 }, { 13, 16, -12 }, { 16, 17, -12 }
}
}
},
{
.bpp = DSC_BPP(6), .bpc = 12,
{ 512, 15, 6144, 11, 20, 19, 19, {
{ 0, 12, 2 }, { 4, 12, 0 }, { 9, 13, 0 }, { 9, 14, -2 },
{ 11, 15, -4 }, { 11, 15, -6 }, { 11, 15, -8 }, { 11, 16, -8 },
{ 11, 17, -8 }, { 11, 18, -10 }, { 13, 18, -10 },
{ 13, 19, -12 }, { 13, 19, -12 }, { 17, 20, -12 },
{ 20, 21, -12 }
}
}
},
{
.bpp = DSC_BPP(6), .bpc = 14,
{ 512, 15, 6144, 15, 24, 23, 23, {
{ 0, 12, 2 }, { 5, 13, 0 }, { 11, 15, 0 }, { 12, 17, -2 },
{ 15, 19, -4 }, { 15, 19, -6 }, { 15, 19, -8 }, { 15, 20, -8 },
{ 15, 21, -8 }, { 15, 22, -10 }, { 17, 22, -10 },
{ 17, 23, -12 }, { 17, 23, -12 }, { 21, 24, -12 },
{ 24, 25, -12 }
}
}
},
{
.bpp = DSC_BPP(6), .bpc = 16,
{ 512, 15, 6144, 19, 28, 27, 27, {
{ 0, 12, 2 }, { 6, 14, 0 }, { 13, 17, 0 }, { 15, 20, -2 },
{ 19, 23, -4 }, { 19, 23, -6 }, { 19, 23, -8 }, { 19, 24, -8 },
{ 19, 25, -8 }, { 19, 26, -10 }, { 21, 26, -10 },
{ 21, 27, -12 }, { 21, 27, -12 }, { 25, 28, -12 },
{ 28, 29, -12 }
}
}
},
{
.bpp = DSC_BPP(7), .bpc = 8,
{ 410, 15, 5632, 3, 12, 11, 11, {
{ 0, 3, 2 }, { 0, 4, 0 }, { 1, 5, 0 }, { 2, 6, -2 },
{ 3, 7, -4 }, { 3, 7, -6 }, { 3, 7, -8 }, { 3, 8, -8 },
{ 3, 9, -8 }, { 3, 9, -10 }, { 5, 10, -10 }, { 5, 10, -10 },
{ 5, 11, -12 }, { 7, 11, -12 }, { 11, 12, -12 }
}
}
},
{
.bpp = DSC_BPP(7), .bpc = 10,
{ 410, 15, 5632, 7, 16, 15, 15, {
{ 0, 7, 2 }, { 4, 8, 0 }, { 5, 9, 0 }, { 6, 10, -2 },
{ 7, 11, -4 }, { 7, 11, -6 }, { 7, 11, -8 }, { 7, 12, -8 },
{ 7, 13, -8 }, { 7, 13, -10 }, { 9, 14, -10 }, { 9, 14, -10 },
{ 9, 15, -12 }, { 11, 15, -12 }, { 15, 16, -12 }
}
}
},
{
.bpp = DSC_BPP(7), .bpc = 12,
{ 410, 15, 5632, 11, 20, 19, 19, {
{ 0, 11, 2 }, { 4, 12, 0 }, { 9, 13, 0 }, { 10, 14, -2 },
{ 11, 15, -4 }, { 11, 15, -6 }, { 11, 15, -8 }, { 11, 16, -8 },
{ 11, 17, -8 }, { 11, 17, -10 }, { 13, 18, -10 },
{ 13, 18, -10 }, { 13, 19, -12 }, { 15, 19, -12 },
{ 19, 20, -12 }
}
}
},
{
.bpp = DSC_BPP(7), .bpc = 14,
{ 410, 15, 5632, 15, 24, 23, 23, {
{ 0, 11, 2 }, { 5, 13, 0 }, { 11, 15, 0 }, { 13, 18, -2 },
{ 15, 19, -4 }, { 15, 19, -6 }, { 15, 19, -8 }, { 15, 20, -8 },
{ 15, 21, -8 }, { 15, 21, -10 }, { 17, 22, -10 },
{ 17, 22, -10 }, { 17, 23, -12 }, { 19, 23, -12 },
{ 23, 24, -12 }
}
}
},
{
.bpp = DSC_BPP(7), .bpc = 16,
{ 410, 15, 5632, 19, 28, 27, 27, {
{ 0, 11, 2 }, { 6, 14, 0 }, { 13, 17, 0 }, { 16, 20, -2 },
{ 19, 23, -4 }, { 19, 23, -6 }, { 19, 23, -8 }, { 19, 24, -8 },
{ 19, 25, -8 }, { 19, 25, -10 }, { 21, 26, -10 },
{ 21, 26, -10 }, { 21, 27, -12 }, { 23, 27, -12 },
{ 27, 28, -12 }
}
}
},
{
.bpp = DSC_BPP(8), .bpc = 8,
{ 341, 15, 2048, 3, 12, 11, 11, {
{ 0, 2, 2 }, { 0, 4, 0 }, { 1, 5, 0 }, { 1, 6, -2 },
{ 3, 7, -4 }, { 3, 7, -6 }, { 3, 7, -8 }, { 3, 8, -8 },
{ 3, 8, -8 }, { 3, 9, -10 }, { 5, 9, -10 }, { 5, 9, -12 },
{ 5, 9, -12 }, { 7, 10, -12 }, { 10, 11, -12 }
}
}
},
{
.bpp = DSC_BPP(8), .bpc = 10,
{ 341, 15, 2048, 7, 16, 15, 15, {
{ 0, 2, 2 }, { 2, 5, 0 }, { 3, 7, 0 }, { 4, 8, -2 },
{ 6, 9, -4 }, { 7, 10, -6 }, { 7, 11, -8 }, { 7, 12, -8 },
{ 7, 12, -8 }, { 7, 13, -10 }, { 9, 13, -10 }, { 9, 13, -12 },
{ 9, 13, -12 }, { 11, 14, -12 }, { 14, 15, -12 }
}
}
},
{
.bpp = DSC_BPP(8), .bpc = 12,
{ 341, 15, 2048, 11, 20, 19, 19, {
{ 0, 6, 2 }, { 4, 9, 0 }, { 7, 11, 0 }, { 8, 12, -2 },
{ 10, 13, -4 }, { 11, 14, -6 }, { 11, 15, -8 }, { 11, 16, -8 },
{ 11, 16, -8 }, { 11, 17, -10 }, { 13, 17, -10 },
{ 13, 17, -12 }, { 13, 17, -12 }, { 15, 18, -12 },
{ 18, 19, -12 }
}
}
},
{
.bpp = DSC_BPP(8), .bpc = 14,
{ 341, 15, 2048, 15, 24, 23, 23, {
{ 0, 6, 2 }, { 7, 10, 0 }, { 9, 13, 0 }, { 11, 16, -2 },
{ 14, 17, -4 }, { 15, 18, -6 }, { 15, 19, -8 }, { 15, 20, -8 },
{ 15, 20, -8 }, { 15, 21, -10 }, { 17, 21, -10 },
{ 17, 21, -12 }, { 17, 21, -12 }, { 19, 22, -12 },
{ 22, 23, -12 }
}
}
},
{
.bpp = DSC_BPP(8), .bpc = 16,
{ 341, 15, 2048, 19, 28, 27, 27, {
{ 0, 6, 2 }, { 6, 11, 0 }, { 11, 15, 0 }, { 14, 18, -2 },
{ 18, 21, -4 }, { 19, 22, -6 }, { 19, 23, -8 }, { 19, 24, -8 },
{ 19, 24, -8 }, { 19, 25, -10 }, { 21, 25, -10 },
{ 21, 25, -12 }, { 21, 25, -12 }, { 23, 26, -12 },
{ 26, 27, -12 }
}
}
},
{
.bpp = DSC_BPP(10), .bpc = 8,
{ 273, 15, 2048, 3, 12, 11, 11, {
{ 0, 0, 10 }, { 0, 1, 8 }, { 0, 1, 6 }, { 0, 2, 4 },
{ 1, 2, 2 }, { 1, 3, 0 }, { 1, 3, -2 }, { 2, 4, -4 },
{ 2, 5, -6 }, { 3, 5, -8 }, { 4, 6, -10 }, { 4, 7, -10 },
{ 5, 7, -12 }, { 7, 8, -12 }, { 8, 9, -12 }
}
}
},
{
.bpp = DSC_BPP(10), .bpc = 10,
{ 273, 15, 2048, 7, 16, 15, 15, {
{ 0, 2, 10 }, { 2, 5, 8 }, { 3, 5, 6 }, { 4, 6, 4 },
{ 5, 6, 2 }, { 5, 7, 0 }, { 5, 7, -2 }, { 6, 8, -4 },
{ 6, 9, -6 }, { 7, 9, -8 }, { 8, 10, -10 }, { 8, 11, -10 },
{ 9, 11, -12 }, { 11, 12, -12 }, { 12, 13, -12 }
}
}
},
{
.bpp = DSC_BPP(10), .bpc = 12,
{ 273, 15, 2048, 11, 20, 19, 19, {
{ 0, 4, 10 }, { 2, 7, 8 }, { 4, 9, 6 }, { 6, 11, 4 },
{ 9, 11, 2 }, { 9, 11, 0 }, { 9, 12, -2 }, { 10, 12, -4 },
{ 11, 13, -6 }, { 11, 13, -8 }, { 12, 14, -10 },
{ 13, 15, -10 }, { 13, 15, -12 }, { 15, 16, -12 },
{ 16, 17, -12 }
}
}
},
{
.bpp = DSC_BPP(10), .bpc = 14,
{ 273, 15, 2048, 15, 24, 23, 23, {
{ 0, 4, 10 }, { 3, 8, 8 }, { 6, 11, 6 }, { 9, 14, 4 },
{ 13, 15, 2 }, { 13, 15, 0 }, { 13, 16, -2 }, { 14, 16, -4 },
{ 15, 17, -6 }, { 15, 17, -8 }, { 16, 18, -10 },
{ 17, 19, -10 }, { 17, 19, -12 }, { 19, 20, -12 },
{ 20, 21, -12 }
}
}
},
{
.bpp = DSC_BPP(10), .bpc = 16,
{ 273, 15, 2048, 19, 28, 27, 27, {
{ 0, 4, 10 }, { 4, 9, 8 }, { 8, 13, 6 }, { 12, 17, 4 },
{ 17, 19, 2 }, { 17, 20, 0 }, { 17, 20, -2 }, { 18, 20, -4 },
{ 19, 21, -6 }, { 19, 21, -8 }, { 20, 22, -10 },
{ 21, 23, -10 }, { 21, 23, -12 }, { 23, 24, -12 },
{ 24, 25, -12 }
}
}
},
{ /* sentinel */ }
};
/*
 * Selected Rate Control Related Parameter Recommended Values for 4:2:0 from
* DSC v1.2, v1.2a, v1.2b
*
* Cross-checked against C Model releases: DSC_model_20161212 and 20210623
*/
static const struct rc_parameters_data rc_parameters_1_2_420[] = {
{
.bpp = DSC_BPP(4), .bpc = 8,
{ 512, 12, 6144, 3, 12, 11, 11, {
{ 0, 4, 2 }, { 0, 4, 0 }, { 1, 5, 0 }, { 1, 6, -2 },
{ 3, 7, -4 }, { 3, 7, -6 }, { 3, 7, -8 }, { 3, 8, -8 },
{ 3, 9, -8 }, { 3, 10, -10 }, { 5, 10, -10 }, { 5, 11, -12 },
{ 5, 11, -12 }, { 9, 12, -12 }, { 12, 13, -12 }
}
}
},
{
.bpp = DSC_BPP(4), .bpc = 10,
{ 512, 12, 6144, 7, 16, 15, 15, {
{ 0, 8, 2 }, { 4, 8, 0 }, { 5, 9, 0 }, { 5, 10, -2 },
{ 7, 11, -4 }, { 7, 11, -6 }, { 7, 11, -8 }, { 7, 12, -8 },
{ 7, 13, -8 }, { 7, 14, -10 }, { 9, 14, -10 }, { 9, 15, -12 },
{ 9, 15, -12 }, { 13, 16, -12 }, { 16, 17, -12 }
}
}
},
{
.bpp = DSC_BPP(4), .bpc = 12,
{ 512, 12, 6144, 11, 20, 19, 19, {
{ 0, 12, 2 }, { 4, 12, 0 }, { 9, 13, 0 }, { 9, 14, -2 },
{ 11, 15, -4 }, { 11, 15, -6 }, { 11, 15, -8 }, { 11, 16, -8 },
{ 11, 17, -8 }, { 11, 18, -10 }, { 13, 18, -10 },
{ 13, 19, -12 }, { 13, 19, -12 }, { 17, 20, -12 },
{ 20, 21, -12 }
}
}
},
{
.bpp = DSC_BPP(4), .bpc = 14,
{ 512, 12, 6144, 15, 24, 23, 23, {
{ 0, 12, 2 }, { 5, 13, 0 }, { 11, 15, 0 }, { 12, 17, -2 },
{ 15, 19, -4 }, { 15, 19, -6 }, { 15, 19, -8 }, { 15, 20, -8 },
{ 15, 21, -8 }, { 15, 22, -10 }, { 17, 22, -10 },
{ 17, 23, -12 }, { 17, 23, -12 }, { 21, 24, -12 },
{ 24, 25, -12 }
}
}
},
{
.bpp = DSC_BPP(4), .bpc = 16,
{ 512, 12, 6144, 19, 28, 27, 27, {
{ 0, 12, 2 }, { 6, 14, 0 }, { 13, 17, 0 }, { 15, 20, -2 },
{ 19, 23, -4 }, { 19, 23, -6 }, { 19, 23, -8 }, { 19, 24, -8 },
{ 19, 25, -8 }, { 19, 26, -10 }, { 21, 26, -10 },
{ 21, 27, -12 }, { 21, 27, -12 }, { 25, 28, -12 },
{ 28, 29, -12 }
}
}
},
{
.bpp = DSC_BPP(5), .bpc = 8,
{ 410, 15, 5632, 3, 12, 11, 11, {
{ 0, 3, 2 }, { 0, 4, 0 }, { 1, 5, 0 }, { 2, 6, -2 },
{ 3, 7, -4 }, { 3, 7, -6 }, { 3, 7, -8 }, { 3, 8, -8 },
{ 3, 9, -8 }, { 3, 9, -10 }, { 5, 10, -10 }, { 5, 10, -10 },
{ 5, 11, -12 }, { 7, 11, -12 }, { 11, 12, -12 }
}
}
},
{
.bpp = DSC_BPP(5), .bpc = 10,
{ 410, 15, 5632, 7, 16, 15, 15, {
{ 0, 7, 2 }, { 4, 8, 0 }, { 5, 9, 0 }, { 6, 10, -2 },
{ 7, 11, -4 }, { 7, 11, -6 }, { 7, 11, -8 }, { 7, 12, -8 },
{ 7, 13, -8 }, { 7, 13, -10 }, { 9, 14, -10 }, { 9, 14, -10 },
{ 9, 15, -12 }, { 11, 15, -12 }, { 15, 16, -12 }
}
}
},
{
.bpp = DSC_BPP(5), .bpc = 12,
{ 410, 15, 5632, 11, 20, 19, 19, {
{ 0, 11, 2 }, { 4, 12, 0 }, { 9, 13, 0 }, { 10, 14, -2 },
{ 11, 15, -4 }, { 11, 15, -6 }, { 11, 15, -8 }, { 11, 16, -8 },
{ 11, 17, -8 }, { 11, 17, -10 }, { 13, 18, -10 },
{ 13, 18, -10 }, { 13, 19, -12 }, { 15, 19, -12 },
{ 19, 20, -12 }
}
}
},
{
.bpp = DSC_BPP(5), .bpc = 14,
{ 410, 15, 5632, 15, 24, 23, 23, {
{ 0, 11, 2 }, { 5, 13, 0 }, { 11, 15, 0 }, { 13, 18, -2 },
{ 15, 19, -4 }, { 15, 19, -6 }, { 15, 19, -8 }, { 15, 20, -8 },
{ 15, 21, -8 }, { 15, 21, -10 }, { 17, 22, -10 },
{ 17, 22, -10 }, { 17, 23, -12 }, { 19, 23, -12 },
{ 23, 24, -12 }
}
}
},
{
.bpp = DSC_BPP(5), .bpc = 16,
{ 410, 15, 5632, 19, 28, 27, 27, {
{ 0, 11, 2 }, { 6, 14, 0 }, { 13, 17, 0 }, { 16, 20, -2 },
{ 19, 23, -4 }, { 19, 23, -6 }, { 19, 23, -8 }, { 19, 24, -8 },
{ 19, 25, -8 }, { 19, 25, -10 }, { 21, 26, -10 },
{ 21, 26, -10 }, { 21, 27, -12 }, { 23, 27, -12 },
{ 27, 28, -12 }
}
}
},
{
.bpp = DSC_BPP(6), .bpc = 8,
{ 341, 15, 2048, 3, 12, 11, 11, {
{ 0, 2, 2 }, { 0, 4, 0 }, { 1, 5, 0 }, { 1, 6, -2 },
{ 3, 7, -4 }, { 3, 7, -6 }, { 3, 7, -8 }, { 3, 8, -8 },
{ 3, 8, -8 }, { 3, 9, -10 }, { 5, 9, -10 }, { 5, 9, -12 },
{ 5, 9, -12 }, { 7, 10, -12 }, { 10, 12, -12 }
}
}
},
{
.bpp = DSC_BPP(6), .bpc = 10,
{ 341, 15, 2048, 7, 16, 15, 15, {
{ 0, 2, 2 }, { 2, 5, 0 }, { 3, 7, 0 }, { 4, 8, -2 },
{ 6, 9, -4 }, { 7, 10, -6 }, { 7, 11, -8 }, { 7, 12, -8 },
{ 7, 12, -8 }, { 7, 13, -10 }, { 9, 13, -10 }, { 9, 13, -12 },
{ 9, 13, -12 }, { 11, 14, -12 }, { 14, 15, -12 }
}
}
},
{
.bpp = DSC_BPP(6), .bpc = 12,
{ 341, 15, 2048, 11, 20, 19, 19, {
{ 0, 6, 2 }, { 4, 9, 0 }, { 7, 11, 0 }, { 8, 12, -2 },
{ 10, 13, -4 }, { 11, 14, -6 }, { 11, 15, -8 }, { 11, 16, -8 },
{ 11, 16, -8 }, { 11, 17, -10 }, { 13, 17, -10 },
{ 13, 17, -12 }, { 13, 17, -12 }, { 15, 18, -12 },
{ 18, 19, -12 }
}
}
},
{
.bpp = DSC_BPP(6), .bpc = 14,
{ 341, 15, 2048, 15, 24, 23, 23, {
{ 0, 6, 2 }, { 7, 10, 0 }, { 9, 13, 0 }, { 11, 16, -2 },
{ 14, 17, -4 }, { 15, 18, -6 }, { 15, 19, -8 }, { 15, 20, -8 },
{ 15, 20, -8 }, { 15, 21, -10 }, { 17, 21, -10 },
{ 17, 21, -12 }, { 17, 21, -12 }, { 19, 22, -12 },
{ 22, 23, -12 }
}
}
},
{
.bpp = DSC_BPP(6), .bpc = 16,
{ 341, 15, 2048, 19, 28, 27, 27, {
{ 0, 6, 2 }, { 6, 11, 0 }, { 11, 15, 0 }, { 14, 18, -2 },
{ 18, 21, -4 }, { 19, 22, -6 }, { 19, 23, -8 }, { 19, 24, -8 },
{ 19, 24, -8 }, { 19, 25, -10 }, { 21, 25, -10 },
{ 21, 25, -12 }, { 21, 25, -12 }, { 23, 26, -12 },
{ 26, 27, -12 }
}
}
},
{
.bpp = DSC_BPP(8), .bpc = 8,
{ 256, 15, 2048, 3, 12, 11, 11, {
{ 0, 0, 10 }, { 0, 1, 8 }, { 0, 1, 6 }, { 0, 2, 4 },
{ 1, 2, 2 }, { 1, 3, 0 }, { 1, 3, -2 }, { 2, 4, -4 },
{ 2, 5, -6 }, { 3, 5, -8 }, { 4, 6, -10 }, { 4, 7, -10 },
{ 5, 7, -12 }, { 7, 8, -12 }, { 8, 9, -12 }
}
}
},
{
.bpp = DSC_BPP(8), .bpc = 10,
{ 256, 15, 2048, 7, 16, 15, 15, {
{ 0, 2, 10 }, { 2, 5, 8 }, { 3, 5, 6 }, { 4, 6, 4 },
{ 5, 6, 2 }, { 5, 7, 0 }, { 5, 7, -2 }, { 6, 8, -4 },
{ 6, 9, -6 }, { 7, 9, -8 }, { 8, 10, -10 }, { 8, 11, -10 },
{ 9, 11, -12 }, { 11, 12, -12 }, { 12, 13, -12 }
}
}
},
{
.bpp = DSC_BPP(8), .bpc = 12,
{ 256, 15, 2048, 11, 20, 19, 19, {
{ 0, 4, 10 }, { 2, 7, 8 }, { 4, 9, 6 }, { 6, 11, 4 },
{ 9, 11, 2 }, { 9, 11, 0 }, { 9, 12, -2 }, { 10, 12, -4 },
{ 11, 13, -6 }, { 11, 13, -8 }, { 12, 14, -10 },
{ 13, 15, -10 }, { 13, 15, -12 }, { 15, 16, -12 },
{ 16, 17, -12 }
}
}
},
{
.bpp = DSC_BPP(8), .bpc = 14,
{ 256, 15, 2048, 15, 24, 23, 23, {
{ 0, 4, 10 }, { 3, 8, 8 }, { 6, 11, 6 }, { 9, 14, 4 },
{ 13, 15, 2 }, { 13, 15, 0 }, { 13, 16, -2 }, { 14, 16, -4 },
{ 15, 17, -6 }, { 15, 17, -8 }, { 16, 18, -10 },
{ 17, 19, -10 }, { 17, 19, -12 }, { 19, 20, -12 },
{ 20, 21, -12 }
}
}
},
{
.bpp = DSC_BPP(8), .bpc = 16,
{ 256, 15, 2048, 19, 28, 27, 27, {
{ 0, 4, 10 }, { 4, 9, 8 }, { 8, 13, 6 }, { 12, 17, 4 },
{ 17, 19, 2 }, { 17, 20, 0 }, { 17, 20, -2 }, { 18, 20, -4 },
{ 19, 21, -6 }, { 19, 21, -8 }, { 20, 22, -10 },
{ 21, 23, -10 }, { 21, 23, -12 }, { 23, 24, -12 },
{ 24, 25, -12 }
}
}
},
{ /* sentinel */ }
};
static const struct rc_parameters *get_rc_params(const struct rc_parameters_data *rc_parameters,
u16 dsc_bpp,
u8 bits_per_component)
{
int i;
for (i = 0; rc_parameters[i].bpp; i++)
if (rc_parameters[i].bpp == dsc_bpp &&
rc_parameters[i].bpc == bits_per_component)
return &rc_parameters[i].params;
return NULL;
}
/**
* drm_dsc_setup_rc_params() - Set parameters and limits for RC model in
* accordance with the DSC 1.1 or 1.2 specification and DSC C Model
 * Requires bits_per_pixel and bits_per_component to be set before calling
 * this function.
*
* @vdsc_cfg: DSC Configuration data partially filled by driver
* @type: operating mode and standard to follow
*
 * Return: 0 on success, or a negative error code on failure
*/
int drm_dsc_setup_rc_params(struct drm_dsc_config *vdsc_cfg, enum drm_dsc_params_type type)
{
const struct rc_parameters_data *data;
const struct rc_parameters *rc_params;
int i;
if (WARN_ON_ONCE(!vdsc_cfg->bits_per_pixel ||
!vdsc_cfg->bits_per_component))
return -EINVAL;
switch (type) {
case DRM_DSC_1_2_444:
data = rc_parameters_1_2_444;
break;
case DRM_DSC_1_1_PRE_SCR:
data = rc_parameters_pre_scr;
break;
case DRM_DSC_1_2_422:
data = rc_parameters_1_2_422;
break;
case DRM_DSC_1_2_420:
data = rc_parameters_1_2_420;
break;
default:
return -EINVAL;
}
rc_params = get_rc_params(data,
vdsc_cfg->bits_per_pixel,
vdsc_cfg->bits_per_component);
if (!rc_params)
return -EINVAL;
vdsc_cfg->first_line_bpg_offset = rc_params->first_line_bpg_offset;
vdsc_cfg->initial_xmit_delay = rc_params->initial_xmit_delay;
vdsc_cfg->initial_offset = rc_params->initial_offset;
vdsc_cfg->flatness_min_qp = rc_params->flatness_min_qp;
vdsc_cfg->flatness_max_qp = rc_params->flatness_max_qp;
vdsc_cfg->rc_quant_incr_limit0 = rc_params->rc_quant_incr_limit0;
vdsc_cfg->rc_quant_incr_limit1 = rc_params->rc_quant_incr_limit1;
for (i = 0; i < DSC_NUM_BUF_RANGES; i++) {
vdsc_cfg->rc_range_params[i].range_min_qp =
rc_params->rc_range_params[i].range_min_qp;
vdsc_cfg->rc_range_params[i].range_max_qp =
rc_params->rc_range_params[i].range_max_qp;
/*
		 * Range BPG Offset uses 2's complement and is only 6 bits
		 * wide, so mask it to keep only those 6 bits.
*/
vdsc_cfg->rc_range_params[i].range_bpg_offset =
rc_params->rc_range_params[i].range_bpg_offset &
DSC_RANGE_BPG_OFFSET_MASK;
}
return 0;
}
EXPORT_SYMBOL(drm_dsc_setup_rc_params);
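/*
 * A hedged sketch of the typical driver flow (assumed code; "dsc" is the
 * driver's struct drm_dsc_config): bits_per_pixel (in 1/16 bpp units) and
 * bits_per_component must be set before the RC table lookup, and the derived
 * rate control parameters are computed last. Slice geometry and the other
 * mandatory fields must also have been filled in by the driver beforehand.
 *
 *     dsc->bits_per_component = 8;
 *     dsc->bits_per_pixel = 8 << 4;      // 8.0 bpp in 1/16th units
 *     drm_dsc_set_const_params(dsc);
 *     drm_dsc_set_rc_buf_thresh(dsc);
 *     ret = drm_dsc_setup_rc_params(dsc, DRM_DSC_1_2_444);
 *     if (ret)
 *         return ret;
 *     ret = drm_dsc_compute_rc_parameters(dsc);
 */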
/**
* drm_dsc_compute_rc_parameters() - Write rate control
* parameters to the dsc configuration defined in
* &struct drm_dsc_config in accordance with the DSC 1.2
* specification. Some configuration fields must be present
* beforehand.
*
* @vdsc_cfg:
* DSC Configuration data partially filled by driver
*/
int drm_dsc_compute_rc_parameters(struct drm_dsc_config *vdsc_cfg)
{
unsigned long groups_per_line = 0;
unsigned long groups_total = 0;
unsigned long num_extra_mux_bits = 0;
unsigned long slice_bits = 0;
unsigned long hrd_delay = 0;
unsigned long final_scale = 0;
unsigned long rbs_min = 0;
if (vdsc_cfg->native_420 || vdsc_cfg->native_422) {
/* Number of groups used to code each line of a slice */
groups_per_line = DIV_ROUND_UP(vdsc_cfg->slice_width / 2,
DSC_RC_PIXELS_PER_GROUP);
/* chunksize in Bytes */
vdsc_cfg->slice_chunk_size = DIV_ROUND_UP(vdsc_cfg->slice_width / 2 *
vdsc_cfg->bits_per_pixel,
(8 * 16));
} else {
/* Number of groups used to code each line of a slice */
groups_per_line = DIV_ROUND_UP(vdsc_cfg->slice_width,
DSC_RC_PIXELS_PER_GROUP);
/* chunksize in Bytes */
vdsc_cfg->slice_chunk_size = DIV_ROUND_UP(vdsc_cfg->slice_width *
vdsc_cfg->bits_per_pixel,
(8 * 16));
}
if (vdsc_cfg->convert_rgb)
num_extra_mux_bits = 3 * (vdsc_cfg->mux_word_size +
(4 * vdsc_cfg->bits_per_component + 4)
- 2);
else if (vdsc_cfg->native_422)
num_extra_mux_bits = 4 * vdsc_cfg->mux_word_size +
(4 * vdsc_cfg->bits_per_component + 4) +
3 * (4 * vdsc_cfg->bits_per_component) - 2;
else
num_extra_mux_bits = 3 * vdsc_cfg->mux_word_size +
(4 * vdsc_cfg->bits_per_component + 4) +
2 * (4 * vdsc_cfg->bits_per_component) - 2;
/* Number of bits in one Slice */
slice_bits = 8 * vdsc_cfg->slice_chunk_size * vdsc_cfg->slice_height;
while ((num_extra_mux_bits > 0) &&
((slice_bits - num_extra_mux_bits) % vdsc_cfg->mux_word_size))
num_extra_mux_bits--;
if (groups_per_line < vdsc_cfg->initial_scale_value - 8)
vdsc_cfg->initial_scale_value = groups_per_line + 8;
/* scale_decrement_interval calculation according to DSC spec 1.11 */
if (vdsc_cfg->initial_scale_value > 8)
vdsc_cfg->scale_decrement_interval = groups_per_line /
(vdsc_cfg->initial_scale_value - 8);
else
vdsc_cfg->scale_decrement_interval = DSC_SCALE_DECREMENT_INTERVAL_MAX;
vdsc_cfg->final_offset = vdsc_cfg->rc_model_size -
(vdsc_cfg->initial_xmit_delay *
vdsc_cfg->bits_per_pixel + 8) / 16 + num_extra_mux_bits;
if (vdsc_cfg->final_offset >= vdsc_cfg->rc_model_size) {
DRM_DEBUG_KMS("FinalOfs < RcModelSze for this InitialXmitDelay\n");
return -ERANGE;
}
final_scale = (vdsc_cfg->rc_model_size * 8) /
(vdsc_cfg->rc_model_size - vdsc_cfg->final_offset);
if (vdsc_cfg->slice_height > 1)
/*
* NflBpgOffset is 16 bit value with 11 fractional bits
* hence we multiply by 2^11 for preserving the
* fractional part
*/
vdsc_cfg->nfl_bpg_offset = DIV_ROUND_UP((vdsc_cfg->first_line_bpg_offset << 11),
(vdsc_cfg->slice_height - 1));
else
vdsc_cfg->nfl_bpg_offset = 0;
/* Number of groups used to code the entire slice */
groups_total = groups_per_line * vdsc_cfg->slice_height;
/* slice_bpg_offset is 16 bit value with 11 fractional bits */
vdsc_cfg->slice_bpg_offset = DIV_ROUND_UP(((vdsc_cfg->rc_model_size -
vdsc_cfg->initial_offset +
num_extra_mux_bits) << 11),
groups_total);
if (final_scale > 9) {
/*
* ScaleIncrementInterval =
* finaloffset/((NflBpgOffset + SliceBpgOffset)*8(finalscale - 1.125))
* as (NflBpgOffset + SliceBpgOffset) has 11 bit fractional value,
* we need divide by 2^11 from pstDscCfg values
*/
vdsc_cfg->scale_increment_interval =
(vdsc_cfg->final_offset * (1 << 11)) /
((vdsc_cfg->nfl_bpg_offset +
vdsc_cfg->slice_bpg_offset) *
(final_scale - 9));
} else {
/*
* If finalScaleValue is less than or equal to 9, a value of 0 should
* be used to disable the scale increment at the end of the slice
*/
vdsc_cfg->scale_increment_interval = 0;
}
/*
* DSC spec mentions that bits_per_pixel specifies the target
* bits/pixel (bpp) rate that is used by the encoder,
* in steps of 1/16 of a bit per pixel
*/
rbs_min = vdsc_cfg->rc_model_size - vdsc_cfg->initial_offset +
DIV_ROUND_UP(vdsc_cfg->initial_xmit_delay *
vdsc_cfg->bits_per_pixel, 16) +
groups_per_line * vdsc_cfg->first_line_bpg_offset;
hrd_delay = DIV_ROUND_UP((rbs_min * 16), vdsc_cfg->bits_per_pixel);
vdsc_cfg->rc_bits = (hrd_delay * vdsc_cfg->bits_per_pixel) / 16;
vdsc_cfg->initial_dec_delay = hrd_delay - vdsc_cfg->initial_xmit_delay;
return 0;
}
EXPORT_SYMBOL(drm_dsc_compute_rc_parameters);
/**
* drm_dsc_get_bpp_int() - Get integer bits per pixel value for the given DRM DSC config
* @vdsc_cfg: Pointer to DRM DSC config struct
*
* Return: Integer BPP value
*/
u32 drm_dsc_get_bpp_int(const struct drm_dsc_config *vdsc_cfg)
{
WARN_ON_ONCE(vdsc_cfg->bits_per_pixel & 0xf);
return vdsc_cfg->bits_per_pixel >> 4;
}
EXPORT_SYMBOL(drm_dsc_get_bpp_int);
/**
* drm_dsc_initial_scale_value() - Calculate the initial scale value for the given DSC config
* @dsc: Pointer to DRM DSC config struct
*
* Return: Calculated initial scale value
*/
u8 drm_dsc_initial_scale_value(const struct drm_dsc_config *dsc)
{
return 8 * dsc->rc_model_size / (dsc->rc_model_size - dsc->initial_offset);
}
EXPORT_SYMBOL(drm_dsc_initial_scale_value);
/**
* drm_dsc_flatness_det_thresh() - Calculate the flatness_det_thresh for the given DSC config
* @dsc: Pointer to DRM DSC config struct
*
* Return: Calculated flatness det thresh value
*/
u32 drm_dsc_flatness_det_thresh(const struct drm_dsc_config *dsc)
{
return 2 << (dsc->bits_per_component - 8);
}
EXPORT_SYMBOL(drm_dsc_flatness_det_thresh);
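/*
 * Quick numeric checks of the two small helpers above (worked examples, not
 * original code): with rc_model_size = 8192 and initial_offset = 6144,
 * drm_dsc_initial_scale_value() returns 8 * 8192 / (8192 - 6144) = 32.
 * With bits_per_component = 10, drm_dsc_flatness_det_thresh() returns
 * 2 << (10 - 8) = 8.
 */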
| linux-master | drivers/gpu/drm/display/drm_dsc_helper.c |
// SPDX-License-Identifier: MIT
#include <linux/module.h>
#include "drm_dp_helper_internal.h"
MODULE_DESCRIPTION("DRM display adapter helper");
MODULE_LICENSE("GPL and additional rights");
static int __init drm_display_helper_module_init(void)
{
return drm_dp_aux_dev_init();
}
static void __exit drm_display_helper_module_exit(void)
{
/* Call exit functions from specific dp helpers here */
drm_dp_aux_dev_exit();
}
module_init(drm_display_helper_module_init);
module_exit(drm_display_helper_module_exit);
| linux-master | drivers/gpu/drm/display/drm_display_helper_mod.c |
// SPDX-License-Identifier: GPL-2.0
/*
* DisplayPort CEC-Tunneling-over-AUX support
*
* Copyright 2018 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <media/cec.h>
#include <drm/display/drm_dp_helper.h>
#include <drm/drm_connector.h>
#include <drm/drm_device.h>
/*
* Unfortunately it turns out that we have a chicken-and-egg situation
* here. Quite a few active (mini-)DP-to-HDMI or USB-C-to-HDMI adapters
* have a converter chip that supports CEC-Tunneling-over-AUX (usually the
* Parade PS176), but they do not wire up the CEC pin, thus making CEC
* useless. Note that MegaChips 2900-based adapters appear to have good
* support for CEC tunneling. Those adapters that I have tested using
* this chipset all have the CEC line connected.
*
* Sadly there is no way for this driver to know this. What happens is
* that a /dev/cecX device is created that is isolated and unable to see
* any of the other CEC devices. Quite literally the CEC wire is cut
* (or in this case, never connected in the first place).
*
* The reason so few adapters support this is that this tunneling protocol
* was never supported by any OS. So there was no easy way of testing it,
* and no incentive to correctly wire up the CEC pin.
*
* Hopefully by creating this driver it will be easier for vendors to
* finally fix their adapters and test the CEC functionality.
*
* I keep a list of known working adapters here:
*
* https://hverkuil.home.xs4all.nl/cec-status.txt
*
* Please mail me ([email protected]) if you find an adapter that works
* and is not yet listed there.
*
* Note that the current implementation does not support CEC over an MST hub.
* As far as I can see there is no mechanism defined in the DisplayPort
* standard to transport CEC interrupts over an MST device. It might be
* possible to do this through polling, but I have not been able to get that
* to work.
*/
/**
* DOC: dp cec helpers
*
* These functions take care of supporting the CEC-Tunneling-over-AUX
* feature of DisplayPort-to-HDMI adapters.
*/
/*
* When the EDID is unset because the HPD went low, then the CEC DPCD registers
* typically can no longer be read (true for a DP-to-HDMI adapter since it is
* powered by the HPD). However, some displays toggle the HPD off and on for a
* short period for one reason or another, and that would cause the CEC adapter
* to be removed and added again, even though nothing else changed.
*
* This module parameter sets a delay in seconds before the CEC adapter is
* actually unregistered. Only if the HPD does not return within that time will
* the CEC adapter be unregistered.
*
* If it is set to a value >= NEVER_UNREG_DELAY, then the CEC adapter will never
* be unregistered for as long as the connector remains registered.
*
* If it is set to 0, then the CEC adapter will be unregistered immediately as
* soon as the HPD disappears.
*
* The default is one second to prevent short HPD glitches from unregistering
* the CEC adapter.
*
* Note that for integrated HDMI branch devices that support CEC the DPCD
* registers remain available even if the HPD goes low since it is not powered
* by the HPD. In that case the CEC adapter will never be unregistered during
* the life time of the connector. At least, this is the theory since I do not
* have hardware with an integrated HDMI branch device that supports CEC.
*/
#define NEVER_UNREG_DELAY 1000
static unsigned int drm_dp_cec_unregister_delay = 1;
module_param(drm_dp_cec_unregister_delay, uint, 0600);
MODULE_PARM_DESC(drm_dp_cec_unregister_delay,
"CEC unregister delay in seconds, 0: no delay, >= 1000: never unregister");
static int drm_dp_cec_adap_enable(struct cec_adapter *adap, bool enable)
{
struct drm_dp_aux *aux = cec_get_drvdata(adap);
u32 val = enable ? DP_CEC_TUNNELING_ENABLE : 0;
ssize_t err = 0;
err = drm_dp_dpcd_writeb(aux, DP_CEC_TUNNELING_CONTROL, val);
return (enable && err < 0) ? err : 0;
}
static int drm_dp_cec_adap_log_addr(struct cec_adapter *adap, u8 addr)
{
struct drm_dp_aux *aux = cec_get_drvdata(adap);
/* Bit 15 (logical address 15) should always be set */
u16 la_mask = 1 << CEC_LOG_ADDR_BROADCAST;
u8 mask[2];
ssize_t err;
if (addr != CEC_LOG_ADDR_INVALID)
la_mask |= adap->log_addrs.log_addr_mask | (1 << addr);
mask[0] = la_mask & 0xff;
mask[1] = la_mask >> 8;
err = drm_dp_dpcd_write(aux, DP_CEC_LOGICAL_ADDRESS_MASK, mask, 2);
return (addr != CEC_LOG_ADDR_INVALID && err < 0) ? err : 0;
}
static int drm_dp_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
u32 signal_free_time, struct cec_msg *msg)
{
struct drm_dp_aux *aux = cec_get_drvdata(adap);
unsigned int retries = min(5, attempts - 1);
ssize_t err;
err = drm_dp_dpcd_write(aux, DP_CEC_TX_MESSAGE_BUFFER,
msg->msg, msg->len);
if (err < 0)
return err;
err = drm_dp_dpcd_writeb(aux, DP_CEC_TX_MESSAGE_INFO,
(msg->len - 1) | (retries << 4) |
DP_CEC_TX_MESSAGE_SEND);
return err < 0 ? err : 0;
}
static int drm_dp_cec_adap_monitor_all_enable(struct cec_adapter *adap,
bool enable)
{
struct drm_dp_aux *aux = cec_get_drvdata(adap);
ssize_t err;
u8 val;
if (!(adap->capabilities & CEC_CAP_MONITOR_ALL))
return 0;
err = drm_dp_dpcd_readb(aux, DP_CEC_TUNNELING_CONTROL, &val);
if (err >= 0) {
if (enable)
val |= DP_CEC_SNOOPING_ENABLE;
else
val &= ~DP_CEC_SNOOPING_ENABLE;
err = drm_dp_dpcd_writeb(aux, DP_CEC_TUNNELING_CONTROL, val);
}
return (enable && err < 0) ? err : 0;
}
static void drm_dp_cec_adap_status(struct cec_adapter *adap,
struct seq_file *file)
{
struct drm_dp_aux *aux = cec_get_drvdata(adap);
struct drm_dp_desc desc;
struct drm_dp_dpcd_ident *id = &desc.ident;
if (drm_dp_read_desc(aux, &desc, true))
return;
seq_printf(file, "OUI: %*phD\n",
(int)sizeof(id->oui), id->oui);
seq_printf(file, "ID: %*pE\n",
(int)strnlen(id->device_id, sizeof(id->device_id)),
id->device_id);
seq_printf(file, "HW Rev: %d.%d\n", id->hw_rev >> 4, id->hw_rev & 0xf);
/*
* Show this both in decimal and hex: at least one vendor
* always reports this in hex.
*/
seq_printf(file, "FW/SW Rev: %d.%d (0x%02x.0x%02x)\n",
id->sw_major_rev, id->sw_minor_rev,
id->sw_major_rev, id->sw_minor_rev);
}
static const struct cec_adap_ops drm_dp_cec_adap_ops = {
.adap_enable = drm_dp_cec_adap_enable,
.adap_log_addr = drm_dp_cec_adap_log_addr,
.adap_transmit = drm_dp_cec_adap_transmit,
.adap_monitor_all_enable = drm_dp_cec_adap_monitor_all_enable,
.adap_status = drm_dp_cec_adap_status,
};
static int drm_dp_cec_received(struct drm_dp_aux *aux)
{
struct cec_adapter *adap = aux->cec.adap;
struct cec_msg msg;
u8 rx_msg_info;
ssize_t err;
err = drm_dp_dpcd_readb(aux, DP_CEC_RX_MESSAGE_INFO, &rx_msg_info);
if (err < 0)
return err;
if (!(rx_msg_info & DP_CEC_RX_MESSAGE_ENDED))
return 0;
msg.len = (rx_msg_info & DP_CEC_RX_MESSAGE_LEN_MASK) + 1;
err = drm_dp_dpcd_read(aux, DP_CEC_RX_MESSAGE_BUFFER, msg.msg, msg.len);
if (err < 0)
return err;
cec_received_msg(adap, &msg);
return 0;
}
static void drm_dp_cec_handle_irq(struct drm_dp_aux *aux)
{
struct cec_adapter *adap = aux->cec.adap;
u8 flags;
if (drm_dp_dpcd_readb(aux, DP_CEC_TUNNELING_IRQ_FLAGS, &flags) < 0)
return;
if (flags & DP_CEC_RX_MESSAGE_INFO_VALID)
drm_dp_cec_received(aux);
if (flags & DP_CEC_TX_MESSAGE_SENT)
cec_transmit_attempt_done(adap, CEC_TX_STATUS_OK);
else if (flags & DP_CEC_TX_LINE_ERROR)
cec_transmit_attempt_done(adap, CEC_TX_STATUS_ERROR |
CEC_TX_STATUS_MAX_RETRIES);
else if (flags &
(DP_CEC_TX_ADDRESS_NACK_ERROR | DP_CEC_TX_DATA_NACK_ERROR))
cec_transmit_attempt_done(adap, CEC_TX_STATUS_NACK |
CEC_TX_STATUS_MAX_RETRIES);
drm_dp_dpcd_writeb(aux, DP_CEC_TUNNELING_IRQ_FLAGS, flags);
}
/**
* drm_dp_cec_irq() - handle CEC interrupt, if any
* @aux: DisplayPort AUX channel
*
* Should be called when handling an IRQ_HPD request. If CEC-tunneling-over-AUX
* is present, then it will check for a CEC_IRQ and handle it accordingly.
*/
void drm_dp_cec_irq(struct drm_dp_aux *aux)
{
u8 cec_irq;
int ret;
/* No transfer function was set, so not a DP connector */
if (!aux->transfer)
return;
mutex_lock(&aux->cec.lock);
if (!aux->cec.adap)
goto unlock;
ret = drm_dp_dpcd_readb(aux, DP_DEVICE_SERVICE_IRQ_VECTOR_ESI1,
&cec_irq);
if (ret < 0 || !(cec_irq & DP_CEC_IRQ))
goto unlock;
drm_dp_cec_handle_irq(aux);
drm_dp_dpcd_writeb(aux, DP_DEVICE_SERVICE_IRQ_VECTOR_ESI1, DP_CEC_IRQ);
unlock:
mutex_unlock(&aux->cec.lock);
}
EXPORT_SYMBOL(drm_dp_cec_irq);
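/*
 * Illustrative sketch only, not part of the original file: how a driver
 * might hook drm_dp_cec_irq() into its HPD short-pulse (IRQ_HPD) handling.
 * The example_* name is invented; real drivers call this from their own
 * IRQ/hotplug paths.
 */
static void example_handle_hpd_short_pulse(struct drm_dp_aux *aux)
{
	/*
	 * Let the CEC helper check DP_CEC_IRQ and handle any tunneled CEC
	 * traffic before the driver-specific link/sink IRQ handling runs.
	 */
	drm_dp_cec_irq(aux);
}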
static bool drm_dp_cec_cap(struct drm_dp_aux *aux, u8 *cec_cap)
{
u8 cap = 0;
if (drm_dp_dpcd_readb(aux, DP_CEC_TUNNELING_CAPABILITY, &cap) != 1 ||
!(cap & DP_CEC_TUNNELING_CAPABLE))
return false;
if (cec_cap)
*cec_cap = cap;
return true;
}
/*
* Called if the HPD was low for more than drm_dp_cec_unregister_delay
* seconds. This unregisters the CEC adapter.
*/
static void drm_dp_cec_unregister_work(struct work_struct *work)
{
struct drm_dp_aux *aux = container_of(work, struct drm_dp_aux,
cec.unregister_work.work);
mutex_lock(&aux->cec.lock);
cec_unregister_adapter(aux->cec.adap);
aux->cec.adap = NULL;
mutex_unlock(&aux->cec.lock);
}
/*
* A new EDID is set. If there is no CEC adapter, then create one. If
* there was a CEC adapter, then check if the CEC adapter properties
* were unchanged and just update the CEC physical address. Otherwise
* unregister the old CEC adapter and create a new one.
*/
void drm_dp_cec_set_edid(struct drm_dp_aux *aux, const struct edid *edid)
{
struct drm_connector *connector = aux->cec.connector;
u32 cec_caps = CEC_CAP_DEFAULTS | CEC_CAP_NEEDS_HPD |
CEC_CAP_CONNECTOR_INFO;
struct cec_connector_info conn_info;
unsigned int num_las = 1;
u8 cap;
/* No transfer function was set, so not a DP connector */
if (!aux->transfer)
return;
#ifndef CONFIG_MEDIA_CEC_RC
/*
* CEC_CAP_RC is part of CEC_CAP_DEFAULTS, but it is stripped by
* cec_allocate_adapter() if CONFIG_MEDIA_CEC_RC is undefined.
*
* Do this here as well to ensure the tests against cec_caps are
* correct.
*/
cec_caps &= ~CEC_CAP_RC;
#endif
cancel_delayed_work_sync(&aux->cec.unregister_work);
mutex_lock(&aux->cec.lock);
if (!drm_dp_cec_cap(aux, &cap)) {
/* CEC is not supported, unregister any existing adapter */
cec_unregister_adapter(aux->cec.adap);
aux->cec.adap = NULL;
goto unlock;
}
if (cap & DP_CEC_SNOOPING_CAPABLE)
cec_caps |= CEC_CAP_MONITOR_ALL;
if (cap & DP_CEC_MULTIPLE_LA_CAPABLE)
num_las = CEC_MAX_LOG_ADDRS;
if (aux->cec.adap) {
if (aux->cec.adap->capabilities == cec_caps &&
aux->cec.adap->available_log_addrs == num_las) {
/* Unchanged, so just set the phys addr */
cec_s_phys_addr_from_edid(aux->cec.adap, edid);
goto unlock;
}
/*
* The capabilities changed, so unregister the old
* adapter first.
*/
cec_unregister_adapter(aux->cec.adap);
}
/* Create a new adapter */
aux->cec.adap = cec_allocate_adapter(&drm_dp_cec_adap_ops,
aux, connector->name, cec_caps,
num_las);
if (IS_ERR(aux->cec.adap)) {
aux->cec.adap = NULL;
goto unlock;
}
cec_fill_conn_info_from_drm(&conn_info, connector);
cec_s_conn_info(aux->cec.adap, &conn_info);
if (cec_register_adapter(aux->cec.adap, connector->dev->dev)) {
cec_delete_adapter(aux->cec.adap);
aux->cec.adap = NULL;
} else {
/*
* Update the phys addr for the new CEC adapter. When called
* from drm_dp_cec_register_connector() edid == NULL, so in
* that case the phys addr is just invalidated.
*/
cec_s_phys_addr_from_edid(aux->cec.adap, edid);
}
unlock:
mutex_unlock(&aux->cec.lock);
}
EXPORT_SYMBOL(drm_dp_cec_set_edid);
/*
* The EDID disappeared (likely because of the HPD going down).
*/
void drm_dp_cec_unset_edid(struct drm_dp_aux *aux)
{
/* No transfer function was set, so not a DP connector */
if (!aux->transfer)
return;
cancel_delayed_work_sync(&aux->cec.unregister_work);
mutex_lock(&aux->cec.lock);
if (!aux->cec.adap)
goto unlock;
cec_phys_addr_invalidate(aux->cec.adap);
/*
* We're done if we want to keep the CEC device
* (drm_dp_cec_unregister_delay is >= NEVER_UNREG_DELAY) or if the
* DPCD still indicates the CEC capability (expected for an integrated
* HDMI branch device).
*/
if (drm_dp_cec_unregister_delay < NEVER_UNREG_DELAY &&
!drm_dp_cec_cap(aux, NULL)) {
/*
* Unregister the CEC adapter after drm_dp_cec_unregister_delay
		 * seconds. This is to debounce short HPD off-and-on cycles from
* displays.
*/
schedule_delayed_work(&aux->cec.unregister_work,
drm_dp_cec_unregister_delay * HZ);
}
unlock:
mutex_unlock(&aux->cec.lock);
}
EXPORT_SYMBOL(drm_dp_cec_unset_edid);
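/*
 * Illustrative sketch only, not part of the original file: the expected
 * pairing of drm_dp_cec_set_edid() and drm_dp_cec_unset_edid() in a
 * driver's detect path. The example_* name is invented; drivers call these
 * wherever they read or drop the sink's EDID.
 */
static void example_handle_detect(struct drm_dp_aux *aux,
				  const struct edid *edid)
{
	if (edid)
		drm_dp_cec_set_edid(aux, edid);	/* sink present, EDID read */
	else
		drm_dp_cec_unset_edid(aux);	/* EDID gone, e.g. HPD low */
}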
/**
* drm_dp_cec_register_connector() - register a new connector
* @aux: DisplayPort AUX channel
* @connector: drm connector
*
* A new connector was registered with associated CEC adapter name and
 * CEC adapter parent device. After registering the name and parent,
* drm_dp_cec_set_edid() is called to check if the connector supports
* CEC and to register a CEC adapter if that is the case.
*/
void drm_dp_cec_register_connector(struct drm_dp_aux *aux,
struct drm_connector *connector)
{
WARN_ON(aux->cec.adap);
if (WARN_ON(!aux->transfer))
return;
aux->cec.connector = connector;
INIT_DELAYED_WORK(&aux->cec.unregister_work,
drm_dp_cec_unregister_work);
}
EXPORT_SYMBOL(drm_dp_cec_register_connector);
/**
* drm_dp_cec_unregister_connector() - unregister the CEC adapter, if any
* @aux: DisplayPort AUX channel
*/
void drm_dp_cec_unregister_connector(struct drm_dp_aux *aux)
{
if (!aux->cec.adap)
return;
cancel_delayed_work_sync(&aux->cec.unregister_work);
cec_unregister_adapter(aux->cec.adap);
aux->cec.adap = NULL;
}
EXPORT_SYMBOL(drm_dp_cec_unregister_connector);
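/*
 * Illustrative sketch only, not part of the original file: the overall
 * connector lifecycle as seen by these CEC helpers, with invented
 * example_* wrappers standing in for a driver's connector init and
 * destroy paths.
 */
static void example_connector_init(struct drm_dp_aux *aux,
				   struct drm_connector *connector)
{
	/* After drm_connector_register(): record the CEC name and parent. */
	drm_dp_cec_register_connector(aux, connector);
}
static void example_connector_destroy(struct drm_dp_aux *aux)
{
	/* Before the connector goes away: drop any registered CEC adapter. */
	drm_dp_cec_unregister_connector(aux);
}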
| linux-master | drivers/gpu/drm/display/drm_dp_cec.c |
/*
* Copyright © 2015 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Authors:
* Rafael Antognolli <[email protected]>
*
*/
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/uio.h>
#include <drm/display/drm_dp_helper.h>
#include <drm/display/drm_dp_mst_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_print.h>
#include "drm_dp_helper_internal.h"
struct drm_dp_aux_dev {
unsigned index;
struct drm_dp_aux *aux;
struct device *dev;
struct kref refcount;
atomic_t usecount;
};
#define DRM_AUX_MINORS 256
#define AUX_MAX_OFFSET (1 << 20)
static DEFINE_IDR(aux_idr);
static DEFINE_MUTEX(aux_idr_mutex);
static struct class *drm_dp_aux_dev_class;
static int drm_dev_major = -1;
static struct drm_dp_aux_dev *drm_dp_aux_dev_get_by_minor(unsigned index)
{
struct drm_dp_aux_dev *aux_dev = NULL;
mutex_lock(&aux_idr_mutex);
aux_dev = idr_find(&aux_idr, index);
if (aux_dev && !kref_get_unless_zero(&aux_dev->refcount))
aux_dev = NULL;
mutex_unlock(&aux_idr_mutex);
return aux_dev;
}
static struct drm_dp_aux_dev *alloc_drm_dp_aux_dev(struct drm_dp_aux *aux)
{
struct drm_dp_aux_dev *aux_dev;
int index;
aux_dev = kzalloc(sizeof(*aux_dev), GFP_KERNEL);
if (!aux_dev)
return ERR_PTR(-ENOMEM);
aux_dev->aux = aux;
atomic_set(&aux_dev->usecount, 1);
kref_init(&aux_dev->refcount);
mutex_lock(&aux_idr_mutex);
index = idr_alloc(&aux_idr, aux_dev, 0, DRM_AUX_MINORS, GFP_KERNEL);
mutex_unlock(&aux_idr_mutex);
if (index < 0) {
kfree(aux_dev);
return ERR_PTR(index);
}
aux_dev->index = index;
return aux_dev;
}
static void release_drm_dp_aux_dev(struct kref *ref)
{
struct drm_dp_aux_dev *aux_dev =
container_of(ref, struct drm_dp_aux_dev, refcount);
kfree(aux_dev);
}
static ssize_t name_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
ssize_t res;
struct drm_dp_aux_dev *aux_dev =
drm_dp_aux_dev_get_by_minor(MINOR(dev->devt));
if (!aux_dev)
return -ENODEV;
res = sprintf(buf, "%s\n", aux_dev->aux->name);
kref_put(&aux_dev->refcount, release_drm_dp_aux_dev);
return res;
}
static DEVICE_ATTR_RO(name);
static struct attribute *drm_dp_aux_attrs[] = {
&dev_attr_name.attr,
NULL,
};
ATTRIBUTE_GROUPS(drm_dp_aux);
static int auxdev_open(struct inode *inode, struct file *file)
{
unsigned int minor = iminor(inode);
struct drm_dp_aux_dev *aux_dev;
aux_dev = drm_dp_aux_dev_get_by_minor(minor);
if (!aux_dev)
return -ENODEV;
file->private_data = aux_dev;
return 0;
}
static loff_t auxdev_llseek(struct file *file, loff_t offset, int whence)
{
return fixed_size_llseek(file, offset, whence, AUX_MAX_OFFSET);
}
static ssize_t auxdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
struct drm_dp_aux_dev *aux_dev = iocb->ki_filp->private_data;
loff_t pos = iocb->ki_pos;
ssize_t res = 0;
if (!atomic_inc_not_zero(&aux_dev->usecount))
return -ENODEV;
iov_iter_truncate(to, AUX_MAX_OFFSET - pos);
while (iov_iter_count(to)) {
uint8_t buf[DP_AUX_MAX_PAYLOAD_BYTES];
ssize_t todo = min(iov_iter_count(to), sizeof(buf));
if (signal_pending(current)) {
res = -ERESTARTSYS;
break;
}
res = drm_dp_dpcd_read(aux_dev->aux, pos, buf, todo);
if (res <= 0)
break;
if (copy_to_iter(buf, res, to) != res) {
res = -EFAULT;
break;
}
pos += res;
}
if (pos != iocb->ki_pos)
res = pos - iocb->ki_pos;
iocb->ki_pos = pos;
if (atomic_dec_and_test(&aux_dev->usecount))
wake_up_var(&aux_dev->usecount);
return res;
}
static ssize_t auxdev_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
struct drm_dp_aux_dev *aux_dev = iocb->ki_filp->private_data;
loff_t pos = iocb->ki_pos;
ssize_t res = 0;
if (!atomic_inc_not_zero(&aux_dev->usecount))
return -ENODEV;
iov_iter_truncate(from, AUX_MAX_OFFSET - pos);
while (iov_iter_count(from)) {
uint8_t buf[DP_AUX_MAX_PAYLOAD_BYTES];
ssize_t todo = min(iov_iter_count(from), sizeof(buf));
if (signal_pending(current)) {
res = -ERESTARTSYS;
break;
}
if (!copy_from_iter_full(buf, todo, from)) {
res = -EFAULT;
break;
}
res = drm_dp_dpcd_write(aux_dev->aux, pos, buf, todo);
if (res <= 0)
break;
pos += res;
}
if (pos != iocb->ki_pos)
res = pos - iocb->ki_pos;
iocb->ki_pos = pos;
if (atomic_dec_and_test(&aux_dev->usecount))
wake_up_var(&aux_dev->usecount);
return res;
}
static int auxdev_release(struct inode *inode, struct file *file)
{
struct drm_dp_aux_dev *aux_dev = file->private_data;
kref_put(&aux_dev->refcount, release_drm_dp_aux_dev);
return 0;
}
static const struct file_operations auxdev_fops = {
.owner = THIS_MODULE,
.llseek = auxdev_llseek,
.read_iter = auxdev_read_iter,
.write_iter = auxdev_write_iter,
.open = auxdev_open,
.release = auxdev_release,
};
#define to_auxdev(d) container_of(d, struct drm_dp_aux_dev, aux)
static struct drm_dp_aux_dev *drm_dp_aux_dev_get_by_aux(struct drm_dp_aux *aux)
{
struct drm_dp_aux_dev *iter, *aux_dev = NULL;
int id;
/* don't increase kref count here because this function should only be
* used by drm_dp_aux_unregister_devnode. Thus, it will always have at
* least one reference - the one that drm_dp_aux_register_devnode
* created
*/
mutex_lock(&aux_idr_mutex);
idr_for_each_entry(&aux_idr, iter, id) {
if (iter->aux == aux) {
aux_dev = iter;
break;
}
}
mutex_unlock(&aux_idr_mutex);
return aux_dev;
}
void drm_dp_aux_unregister_devnode(struct drm_dp_aux *aux)
{
struct drm_dp_aux_dev *aux_dev;
unsigned int minor;
aux_dev = drm_dp_aux_dev_get_by_aux(aux);
if (!aux_dev) /* attach must have failed */
return;
/*
* As some AUX adapters may exist as platform devices which outlive their respective DRM
* devices, we clear drm_dev to ensure that we never accidentally reference a stale pointer
*/
aux->drm_dev = NULL;
mutex_lock(&aux_idr_mutex);
idr_remove(&aux_idr, aux_dev->index);
mutex_unlock(&aux_idr_mutex);
atomic_dec(&aux_dev->usecount);
wait_var_event(&aux_dev->usecount, !atomic_read(&aux_dev->usecount));
minor = aux_dev->index;
if (aux_dev->dev)
device_destroy(drm_dp_aux_dev_class,
MKDEV(drm_dev_major, minor));
DRM_DEBUG("drm_dp_aux_dev: aux [%s] unregistering\n", aux->name);
kref_put(&aux_dev->refcount, release_drm_dp_aux_dev);
}
int drm_dp_aux_register_devnode(struct drm_dp_aux *aux)
{
struct drm_dp_aux_dev *aux_dev;
int res;
aux_dev = alloc_drm_dp_aux_dev(aux);
if (IS_ERR(aux_dev))
return PTR_ERR(aux_dev);
aux_dev->dev = device_create(drm_dp_aux_dev_class, aux->dev,
MKDEV(drm_dev_major, aux_dev->index), NULL,
"drm_dp_aux%d", aux_dev->index);
if (IS_ERR(aux_dev->dev)) {
res = PTR_ERR(aux_dev->dev);
aux_dev->dev = NULL;
goto error;
}
DRM_DEBUG("drm_dp_aux_dev: aux [%s] registered as minor %d\n",
aux->name, aux_dev->index);
return 0;
error:
drm_dp_aux_unregister_devnode(aux);
return res;
}
int drm_dp_aux_dev_init(void)
{
int res;
drm_dp_aux_dev_class = class_create("drm_dp_aux_dev");
if (IS_ERR(drm_dp_aux_dev_class)) {
return PTR_ERR(drm_dp_aux_dev_class);
}
drm_dp_aux_dev_class->dev_groups = drm_dp_aux_groups;
res = register_chrdev(0, "aux", &auxdev_fops);
if (res < 0)
goto out;
drm_dev_major = res;
return 0;
out:
class_destroy(drm_dp_aux_dev_class);
return res;
}
void drm_dp_aux_dev_exit(void)
{
unregister_chrdev(drm_dev_major, "aux");
class_destroy(drm_dp_aux_dev_class);
}
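/*
 * Illustrative sketch only, not part of the original file: what the
 * character device implemented above looks like from userspace. This is
 * plain POSIX C rather than kernel code (hence the #if 0 guard); the
 * device index (0) and the DPCD offset 0x000 (DPCD_REV) are arbitrary
 * example values.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
int main(void)
{
	unsigned char rev;
	int fd = open("/dev/drm_dp_aux0", O_RDONLY);
	if (fd < 0)
		return 1;
	/* pread() ends up in auxdev_read_iter(); the offset is the DPCD address. */
	if (pread(fd, &rev, 1, 0x000) == 1)
		printf("DPCD revision: %x.%x\n", rev >> 4, rev & 0xf);
	close(fd);
	return 0;
}
#endif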
| linux-master | drivers/gpu/drm/display/drm_dp_aux_dev.c |
// SPDX-License-Identifier: MIT
#include <linux/module.h>
#include <drm/display/drm_hdmi_helper.h>
#include <drm/drm_connector.h>
#include <drm/drm_edid.h>
#include <drm/drm_modes.h>
#include <drm/drm_print.h>
#include <drm/drm_property.h>
static inline bool is_eotf_supported(u8 output_eotf, u8 sink_eotf)
{
return sink_eotf & BIT(output_eotf);
}
/**
* drm_hdmi_infoframe_set_hdr_metadata() - fill an HDMI DRM infoframe with
* HDR metadata from userspace
* @frame: HDMI DRM infoframe
* @conn_state: Connector state containing HDR metadata
*
* Return: 0 on success or a negative error code on failure.
*/
int drm_hdmi_infoframe_set_hdr_metadata(struct hdmi_drm_infoframe *frame,
const struct drm_connector_state *conn_state)
{
struct drm_connector *connector;
struct hdr_output_metadata *hdr_metadata;
int err;
if (!frame || !conn_state)
return -EINVAL;
connector = conn_state->connector;
if (!conn_state->hdr_output_metadata)
return -EINVAL;
hdr_metadata = conn_state->hdr_output_metadata->data;
if (!hdr_metadata || !connector)
return -EINVAL;
	/* The sink EOTF field is a bit map, while the infoframe carries absolute values */
if (!is_eotf_supported(hdr_metadata->hdmi_metadata_type1.eotf,
connector->hdr_sink_metadata.hdmi_type1.eotf))
DRM_DEBUG_KMS("Unknown EOTF %d\n", hdr_metadata->hdmi_metadata_type1.eotf);
err = hdmi_drm_infoframe_init(frame);
if (err < 0)
return err;
frame->eotf = hdr_metadata->hdmi_metadata_type1.eotf;
frame->metadata_type = hdr_metadata->hdmi_metadata_type1.metadata_type;
BUILD_BUG_ON(sizeof(frame->display_primaries) !=
sizeof(hdr_metadata->hdmi_metadata_type1.display_primaries));
BUILD_BUG_ON(sizeof(frame->white_point) !=
sizeof(hdr_metadata->hdmi_metadata_type1.white_point));
memcpy(&frame->display_primaries,
&hdr_metadata->hdmi_metadata_type1.display_primaries,
sizeof(frame->display_primaries));
memcpy(&frame->white_point,
&hdr_metadata->hdmi_metadata_type1.white_point,
sizeof(frame->white_point));
frame->max_display_mastering_luminance =
hdr_metadata->hdmi_metadata_type1.max_display_mastering_luminance;
frame->min_display_mastering_luminance =
hdr_metadata->hdmi_metadata_type1.min_display_mastering_luminance;
frame->max_fall = hdr_metadata->hdmi_metadata_type1.max_fall;
frame->max_cll = hdr_metadata->hdmi_metadata_type1.max_cll;
return 0;
}
EXPORT_SYMBOL(drm_hdmi_infoframe_set_hdr_metadata);
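/*
 * Illustrative sketch only, not part of the original file: typical use of
 * drm_hdmi_infoframe_set_hdr_metadata() in an encoder's atomic enable
 * path. The example_* name is invented, and the use of
 * hdmi_drm_infoframe_pack() and HDMI_INFOFRAME_SIZE(DRM) from
 * <linux/hdmi.h> is an assumption of this sketch.
 */
static void example_write_hdr_infoframe(const struct drm_connector_state *conn_state)
{
	struct hdmi_drm_infoframe frame;
	u8 buf[HDMI_INFOFRAME_SIZE(DRM)];
	if (drm_hdmi_infoframe_set_hdr_metadata(&frame, conn_state) < 0)
		return;	/* no (or invalid) HDR metadata on this state */
	if (hdmi_drm_infoframe_pack(&frame, buf, sizeof(buf)) < 0)
		return;
	/* hand 'buf' to the hardware-specific infoframe write helper */
}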
/* HDMI Colorspace Spec Definitions */
#define FULL_COLORIMETRY_MASK 0x1FF
#define NORMAL_COLORIMETRY_MASK 0x3
#define EXTENDED_COLORIMETRY_MASK 0x7
#define EXTENDED_ACE_COLORIMETRY_MASK 0xF
#define C(x) ((x) << 0)
#define EC(x) ((x) << 2)
#define ACE(x) ((x) << 5)
#define HDMI_COLORIMETRY_NO_DATA 0x0
#define HDMI_COLORIMETRY_SMPTE_170M_YCC (C(1) | EC(0) | ACE(0))
#define HDMI_COLORIMETRY_BT709_YCC (C(2) | EC(0) | ACE(0))
#define HDMI_COLORIMETRY_XVYCC_601 (C(3) | EC(0) | ACE(0))
#define HDMI_COLORIMETRY_XVYCC_709 (C(3) | EC(1) | ACE(0))
#define HDMI_COLORIMETRY_SYCC_601 (C(3) | EC(2) | ACE(0))
#define HDMI_COLORIMETRY_OPYCC_601 (C(3) | EC(3) | ACE(0))
#define HDMI_COLORIMETRY_OPRGB (C(3) | EC(4) | ACE(0))
#define HDMI_COLORIMETRY_BT2020_CYCC (C(3) | EC(5) | ACE(0))
#define HDMI_COLORIMETRY_BT2020_RGB (C(3) | EC(6) | ACE(0))
#define HDMI_COLORIMETRY_BT2020_YCC (C(3) | EC(6) | ACE(0))
#define HDMI_COLORIMETRY_DCI_P3_RGB_D65 (C(3) | EC(7) | ACE(0))
#define HDMI_COLORIMETRY_DCI_P3_RGB_THEATER (C(3) | EC(7) | ACE(1))
static const u32 hdmi_colorimetry_val[] = {
[DRM_MODE_COLORIMETRY_NO_DATA] = HDMI_COLORIMETRY_NO_DATA,
[DRM_MODE_COLORIMETRY_SMPTE_170M_YCC] = HDMI_COLORIMETRY_SMPTE_170M_YCC,
[DRM_MODE_COLORIMETRY_BT709_YCC] = HDMI_COLORIMETRY_BT709_YCC,
[DRM_MODE_COLORIMETRY_XVYCC_601] = HDMI_COLORIMETRY_XVYCC_601,
[DRM_MODE_COLORIMETRY_XVYCC_709] = HDMI_COLORIMETRY_XVYCC_709,
[DRM_MODE_COLORIMETRY_SYCC_601] = HDMI_COLORIMETRY_SYCC_601,
[DRM_MODE_COLORIMETRY_OPYCC_601] = HDMI_COLORIMETRY_OPYCC_601,
[DRM_MODE_COLORIMETRY_OPRGB] = HDMI_COLORIMETRY_OPRGB,
[DRM_MODE_COLORIMETRY_BT2020_CYCC] = HDMI_COLORIMETRY_BT2020_CYCC,
[DRM_MODE_COLORIMETRY_BT2020_RGB] = HDMI_COLORIMETRY_BT2020_RGB,
[DRM_MODE_COLORIMETRY_BT2020_YCC] = HDMI_COLORIMETRY_BT2020_YCC,
};
#undef C
#undef EC
#undef ACE
/**
* drm_hdmi_avi_infoframe_colorimetry() - fill the HDMI AVI infoframe
* colorimetry information
* @frame: HDMI AVI infoframe
* @conn_state: connector state
*/
void drm_hdmi_avi_infoframe_colorimetry(struct hdmi_avi_infoframe *frame,
const struct drm_connector_state *conn_state)
{
u32 colorimetry_val;
u32 colorimetry_index = conn_state->colorspace & FULL_COLORIMETRY_MASK;
if (colorimetry_index >= ARRAY_SIZE(hdmi_colorimetry_val))
colorimetry_val = HDMI_COLORIMETRY_NO_DATA;
else
colorimetry_val = hdmi_colorimetry_val[colorimetry_index];
frame->colorimetry = colorimetry_val & NORMAL_COLORIMETRY_MASK;
/*
* ToDo: Extend it for ACE formats as well. Modify the infoframe
* structure and extend it in drivers/video/hdmi
*/
frame->extended_colorimetry = (colorimetry_val >> 2) &
EXTENDED_COLORIMETRY_MASK;
}
EXPORT_SYMBOL(drm_hdmi_avi_infoframe_colorimetry);
/**
* drm_hdmi_avi_infoframe_bars() - fill the HDMI AVI infoframe
* bar information
* @frame: HDMI AVI infoframe
* @conn_state: connector state
*/
void drm_hdmi_avi_infoframe_bars(struct hdmi_avi_infoframe *frame,
const struct drm_connector_state *conn_state)
{
frame->right_bar = conn_state->tv.margins.right;
frame->left_bar = conn_state->tv.margins.left;
frame->top_bar = conn_state->tv.margins.top;
frame->bottom_bar = conn_state->tv.margins.bottom;
}
EXPORT_SYMBOL(drm_hdmi_avi_infoframe_bars);
/**
* drm_hdmi_avi_infoframe_content_type() - fill the HDMI AVI infoframe
* content type information, based
 * on the corresponding DRM property.
* @frame: HDMI AVI infoframe
* @conn_state: DRM display connector state
*
*/
void drm_hdmi_avi_infoframe_content_type(struct hdmi_avi_infoframe *frame,
const struct drm_connector_state *conn_state)
{
switch (conn_state->content_type) {
case DRM_MODE_CONTENT_TYPE_GRAPHICS:
frame->content_type = HDMI_CONTENT_TYPE_GRAPHICS;
break;
case DRM_MODE_CONTENT_TYPE_CINEMA:
frame->content_type = HDMI_CONTENT_TYPE_CINEMA;
break;
case DRM_MODE_CONTENT_TYPE_GAME:
frame->content_type = HDMI_CONTENT_TYPE_GAME;
break;
case DRM_MODE_CONTENT_TYPE_PHOTO:
frame->content_type = HDMI_CONTENT_TYPE_PHOTO;
break;
default:
		/* Graphics is the default (0) */
frame->content_type = HDMI_CONTENT_TYPE_GRAPHICS;
}
frame->itc = conn_state->content_type != DRM_MODE_CONTENT_TYPE_NO_DATA;
}
EXPORT_SYMBOL(drm_hdmi_avi_infoframe_content_type);
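/*
 * Illustrative sketch only, not part of the original file: the three AVI
 * infoframe fill helpers above are typically applied together, after the
 * frame has been initialized from the display mode (for instance with
 * drm_hdmi_avi_infoframe_from_display_mode(), assumed here). The
 * example_* name is invented.
 */
static void example_fill_avi_infoframe(struct hdmi_avi_infoframe *frame,
				       const struct drm_connector_state *conn_state)
{
	/* colorimetry/extended colorimetry from the Colorspace property */
	drm_hdmi_avi_infoframe_colorimetry(frame, conn_state);
	/* bar (margin) information from the TV margin properties */
	drm_hdmi_avi_infoframe_bars(frame, conn_state);
	/* IT content type and the ITC bit from the content type property */
	drm_hdmi_avi_infoframe_content_type(frame, conn_state);
}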
| linux-master | drivers/gpu/drm/display/drm_hdmi_helper.c |
/*
* Copyright © 2014 Red Hat
*
* Permission to use, copy, modify, distribute, and sell this software and its
* documentation for any purpose is hereby granted without fee, provided that
* the above copyright notice appear in all copies and that both that copyright
* notice and this permission notice appear in supporting documentation, and
* that the name of the copyright holders not be used in advertising or
* publicity pertaining to distribution of the software without specific,
* written prior permission. The copyright holders make no representations
* about the suitability of this software for any purpose. It is provided "as
* is" without express or implied warranty.
*
* THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
* INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
* EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
* CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
* DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
* OF THIS SOFTWARE.
*/
#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/iopoll.h>
#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
#include <linux/stacktrace.h>
#include <linux/sort.h>
#include <linux/timekeeping.h>
#include <linux/math64.h>
#endif
#include <drm/display/drm_dp_mst_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_edid.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include "drm_dp_helper_internal.h"
#include "drm_dp_mst_topology_internal.h"
/**
* DOC: dp mst helper
*
* These functions contain parts of the DisplayPort 1.2a MultiStream Transport
* protocol. The helpers contain a topology manager and bandwidth manager.
 * The helpers encapsulate the sending and receiving of sideband msgs.
*/
struct drm_dp_pending_up_req {
struct drm_dp_sideband_msg_hdr hdr;
struct drm_dp_sideband_msg_req_body msg;
struct list_head next;
};
static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
char *buf);
static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port);
static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
int id, u8 start_slot, u8 num_slots);
static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port,
int offset, int size, u8 *bytes);
static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port,
int offset, int size, u8 *bytes);
static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_branch *mstb);
static void
drm_dp_send_clear_payload_id_table(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_branch *mstb);
static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_branch *mstb,
struct drm_dp_mst_port *port);
static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
u8 *guid);
static int drm_dp_mst_register_i2c_bus(struct drm_dp_mst_port *port);
static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_mst_port *port);
static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr);
static bool drm_dp_mst_port_downstream_of_branch(struct drm_dp_mst_port *port,
struct drm_dp_mst_branch *branch);
#define DBG_PREFIX "[dp_mst]"
#define DP_STR(x) [DP_ ## x] = #x
static const char *drm_dp_mst_req_type_str(u8 req_type)
{
static const char * const req_type_str[] = {
DP_STR(GET_MSG_TRANSACTION_VERSION),
DP_STR(LINK_ADDRESS),
DP_STR(CONNECTION_STATUS_NOTIFY),
DP_STR(ENUM_PATH_RESOURCES),
DP_STR(ALLOCATE_PAYLOAD),
DP_STR(QUERY_PAYLOAD),
DP_STR(RESOURCE_STATUS_NOTIFY),
DP_STR(CLEAR_PAYLOAD_ID_TABLE),
DP_STR(REMOTE_DPCD_READ),
DP_STR(REMOTE_DPCD_WRITE),
DP_STR(REMOTE_I2C_READ),
DP_STR(REMOTE_I2C_WRITE),
DP_STR(POWER_UP_PHY),
DP_STR(POWER_DOWN_PHY),
DP_STR(SINK_EVENT_NOTIFY),
DP_STR(QUERY_STREAM_ENC_STATUS),
};
if (req_type >= ARRAY_SIZE(req_type_str) ||
!req_type_str[req_type])
return "unknown";
return req_type_str[req_type];
}
#undef DP_STR
#define DP_STR(x) [DP_NAK_ ## x] = #x
static const char *drm_dp_mst_nak_reason_str(u8 nak_reason)
{
static const char * const nak_reason_str[] = {
DP_STR(WRITE_FAILURE),
DP_STR(INVALID_READ),
DP_STR(CRC_FAILURE),
DP_STR(BAD_PARAM),
DP_STR(DEFER),
DP_STR(LINK_FAILURE),
DP_STR(NO_RESOURCES),
DP_STR(DPCD_FAIL),
DP_STR(I2C_NAK),
DP_STR(ALLOCATE_FAIL),
};
if (nak_reason >= ARRAY_SIZE(nak_reason_str) ||
!nak_reason_str[nak_reason])
return "unknown";
return nak_reason_str[nak_reason];
}
#undef DP_STR
#define DP_STR(x) [DRM_DP_SIDEBAND_TX_ ## x] = #x
static const char *drm_dp_mst_sideband_tx_state_str(int state)
{
static const char * const sideband_reason_str[] = {
DP_STR(QUEUED),
DP_STR(START_SEND),
DP_STR(SENT),
DP_STR(RX),
DP_STR(TIMEOUT),
};
if (state >= ARRAY_SIZE(sideband_reason_str) ||
!sideband_reason_str[state])
return "unknown";
return sideband_reason_str[state];
}
static int
drm_dp_mst_rad_to_str(const u8 rad[8], u8 lct, char *out, size_t len)
{
int i;
u8 unpacked_rad[16];
for (i = 0; i < lct; i++) {
if (i % 2)
unpacked_rad[i] = rad[i / 2] >> 4;
else
unpacked_rad[i] = rad[i / 2] & BIT_MASK(4);
}
/* TODO: Eventually add something to printk so we can format the rad
* like this: 1.2.3
*/
return snprintf(out, len, "%*phC", lct, unpacked_rad);
}
/* sideband msg handling */
static u8 drm_dp_msg_header_crc4(const uint8_t *data, size_t num_nibbles)
{
u8 bitmask = 0x80;
u8 bitshift = 7;
u8 array_index = 0;
int number_of_bits = num_nibbles * 4;
u8 remainder = 0;
while (number_of_bits != 0) {
number_of_bits--;
remainder <<= 1;
remainder |= (data[array_index] & bitmask) >> bitshift;
bitmask >>= 1;
bitshift--;
if (bitmask == 0) {
bitmask = 0x80;
bitshift = 7;
array_index++;
}
if ((remainder & 0x10) == 0x10)
remainder ^= 0x13;
}
number_of_bits = 4;
while (number_of_bits != 0) {
number_of_bits--;
remainder <<= 1;
if ((remainder & 0x10) != 0)
remainder ^= 0x13;
}
return remainder;
}
static u8 drm_dp_msg_data_crc4(const uint8_t *data, u8 number_of_bytes)
{
u8 bitmask = 0x80;
u8 bitshift = 7;
u8 array_index = 0;
int number_of_bits = number_of_bytes * 8;
u16 remainder = 0;
while (number_of_bits != 0) {
number_of_bits--;
remainder <<= 1;
remainder |= (data[array_index] & bitmask) >> bitshift;
bitmask >>= 1;
bitshift--;
if (bitmask == 0) {
bitmask = 0x80;
bitshift = 7;
array_index++;
}
if ((remainder & 0x100) == 0x100)
remainder ^= 0xd5;
}
number_of_bits = 8;
while (number_of_bits != 0) {
number_of_bits--;
remainder <<= 1;
if ((remainder & 0x100) != 0)
remainder ^= 0xd5;
}
return remainder & 0xff;
}
static inline u8 drm_dp_calc_sb_hdr_size(struct drm_dp_sideband_msg_hdr *hdr)
{
u8 size = 3;
size += (hdr->lct / 2);
return size;
}
static void drm_dp_encode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
u8 *buf, int *len)
{
int idx = 0;
int i;
u8 crc4;
buf[idx++] = ((hdr->lct & 0xf) << 4) | (hdr->lcr & 0xf);
for (i = 0; i < (hdr->lct / 2); i++)
buf[idx++] = hdr->rad[i];
buf[idx++] = (hdr->broadcast << 7) | (hdr->path_msg << 6) |
(hdr->msg_len & 0x3f);
buf[idx++] = (hdr->somt << 7) | (hdr->eomt << 6) | (hdr->seqno << 4);
crc4 = drm_dp_msg_header_crc4(buf, (idx * 2) - 1);
buf[idx - 1] |= (crc4 & 0xf);
*len = idx;
}
static bool drm_dp_decode_sideband_msg_hdr(const struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_sideband_msg_hdr *hdr,
u8 *buf, int buflen, u8 *hdrlen)
{
u8 crc4;
u8 len;
int i;
u8 idx;
if (buf[0] == 0)
return false;
len = 3;
len += ((buf[0] & 0xf0) >> 4) / 2;
if (len > buflen)
return false;
crc4 = drm_dp_msg_header_crc4(buf, (len * 2) - 1);
if ((crc4 & 0xf) != (buf[len - 1] & 0xf)) {
drm_dbg_kms(mgr->dev, "crc4 mismatch 0x%x 0x%x\n", crc4, buf[len - 1]);
return false;
}
hdr->lct = (buf[0] & 0xf0) >> 4;
hdr->lcr = (buf[0] & 0xf);
idx = 1;
for (i = 0; i < (hdr->lct / 2); i++)
hdr->rad[i] = buf[idx++];
hdr->broadcast = (buf[idx] >> 7) & 0x1;
hdr->path_msg = (buf[idx] >> 6) & 0x1;
hdr->msg_len = buf[idx] & 0x3f;
idx++;
hdr->somt = (buf[idx] >> 7) & 0x1;
hdr->eomt = (buf[idx] >> 6) & 0x1;
hdr->seqno = (buf[idx] >> 4) & 0x1;
idx++;
*hdrlen = idx;
return true;
}
void
drm_dp_encode_sideband_req(const struct drm_dp_sideband_msg_req_body *req,
struct drm_dp_sideband_msg_tx *raw)
{
int idx = 0;
int i;
u8 *buf = raw->msg;
buf[idx++] = req->req_type & 0x7f;
switch (req->req_type) {
case DP_ENUM_PATH_RESOURCES:
case DP_POWER_DOWN_PHY:
case DP_POWER_UP_PHY:
buf[idx] = (req->u.port_num.port_number & 0xf) << 4;
idx++;
break;
case DP_ALLOCATE_PAYLOAD:
buf[idx] = (req->u.allocate_payload.port_number & 0xf) << 4 |
(req->u.allocate_payload.number_sdp_streams & 0xf);
idx++;
buf[idx] = (req->u.allocate_payload.vcpi & 0x7f);
idx++;
buf[idx] = (req->u.allocate_payload.pbn >> 8);
idx++;
buf[idx] = (req->u.allocate_payload.pbn & 0xff);
idx++;
for (i = 0; i < req->u.allocate_payload.number_sdp_streams / 2; i++) {
buf[idx] = ((req->u.allocate_payload.sdp_stream_sink[i * 2] & 0xf) << 4) |
(req->u.allocate_payload.sdp_stream_sink[i * 2 + 1] & 0xf);
idx++;
}
if (req->u.allocate_payload.number_sdp_streams & 1) {
i = req->u.allocate_payload.number_sdp_streams - 1;
buf[idx] = (req->u.allocate_payload.sdp_stream_sink[i] & 0xf) << 4;
idx++;
}
break;
case DP_QUERY_PAYLOAD:
buf[idx] = (req->u.query_payload.port_number & 0xf) << 4;
idx++;
buf[idx] = (req->u.query_payload.vcpi & 0x7f);
idx++;
break;
case DP_REMOTE_DPCD_READ:
buf[idx] = (req->u.dpcd_read.port_number & 0xf) << 4;
buf[idx] |= ((req->u.dpcd_read.dpcd_address & 0xf0000) >> 16) & 0xf;
idx++;
buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff00) >> 8;
idx++;
buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff);
idx++;
buf[idx] = (req->u.dpcd_read.num_bytes);
idx++;
break;
case DP_REMOTE_DPCD_WRITE:
buf[idx] = (req->u.dpcd_write.port_number & 0xf) << 4;
buf[idx] |= ((req->u.dpcd_write.dpcd_address & 0xf0000) >> 16) & 0xf;
idx++;
buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff00) >> 8;
idx++;
buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff);
idx++;
buf[idx] = (req->u.dpcd_write.num_bytes);
idx++;
memcpy(&buf[idx], req->u.dpcd_write.bytes, req->u.dpcd_write.num_bytes);
idx += req->u.dpcd_write.num_bytes;
break;
case DP_REMOTE_I2C_READ:
buf[idx] = (req->u.i2c_read.port_number & 0xf) << 4;
buf[idx] |= (req->u.i2c_read.num_transactions & 0x3);
idx++;
for (i = 0; i < (req->u.i2c_read.num_transactions & 0x3); i++) {
buf[idx] = req->u.i2c_read.transactions[i].i2c_dev_id & 0x7f;
idx++;
buf[idx] = req->u.i2c_read.transactions[i].num_bytes;
idx++;
memcpy(&buf[idx], req->u.i2c_read.transactions[i].bytes, req->u.i2c_read.transactions[i].num_bytes);
idx += req->u.i2c_read.transactions[i].num_bytes;
buf[idx] = (req->u.i2c_read.transactions[i].no_stop_bit & 0x1) << 4;
buf[idx] |= (req->u.i2c_read.transactions[i].i2c_transaction_delay & 0xf);
idx++;
}
buf[idx] = (req->u.i2c_read.read_i2c_device_id) & 0x7f;
idx++;
buf[idx] = (req->u.i2c_read.num_bytes_read);
idx++;
break;
case DP_REMOTE_I2C_WRITE:
buf[idx] = (req->u.i2c_write.port_number & 0xf) << 4;
idx++;
buf[idx] = (req->u.i2c_write.write_i2c_device_id) & 0x7f;
idx++;
buf[idx] = (req->u.i2c_write.num_bytes);
idx++;
memcpy(&buf[idx], req->u.i2c_write.bytes, req->u.i2c_write.num_bytes);
idx += req->u.i2c_write.num_bytes;
break;
case DP_QUERY_STREAM_ENC_STATUS: {
const struct drm_dp_query_stream_enc_status *msg;
msg = &req->u.enc_status;
buf[idx] = msg->stream_id;
idx++;
memcpy(&buf[idx], msg->client_id, sizeof(msg->client_id));
idx += sizeof(msg->client_id);
buf[idx] = 0;
buf[idx] |= FIELD_PREP(GENMASK(1, 0), msg->stream_event);
buf[idx] |= msg->valid_stream_event ? BIT(2) : 0;
buf[idx] |= FIELD_PREP(GENMASK(4, 3), msg->stream_behavior);
buf[idx] |= msg->valid_stream_behavior ? BIT(5) : 0;
idx++;
}
break;
}
raw->cur_len = idx;
}
EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_dp_encode_sideband_req);
/* Decode a sideband request we've encoded, mainly used for debugging */
int
drm_dp_decode_sideband_req(const struct drm_dp_sideband_msg_tx *raw,
struct drm_dp_sideband_msg_req_body *req)
{
const u8 *buf = raw->msg;
int i, idx = 0;
req->req_type = buf[idx++] & 0x7f;
switch (req->req_type) {
case DP_ENUM_PATH_RESOURCES:
case DP_POWER_DOWN_PHY:
case DP_POWER_UP_PHY:
req->u.port_num.port_number = (buf[idx] >> 4) & 0xf;
break;
case DP_ALLOCATE_PAYLOAD:
{
struct drm_dp_allocate_payload *a =
&req->u.allocate_payload;
a->number_sdp_streams = buf[idx] & 0xf;
a->port_number = (buf[idx] >> 4) & 0xf;
WARN_ON(buf[++idx] & 0x80);
a->vcpi = buf[idx] & 0x7f;
a->pbn = buf[++idx] << 8;
a->pbn |= buf[++idx];
idx++;
for (i = 0; i < a->number_sdp_streams; i++) {
a->sdp_stream_sink[i] =
(buf[idx + (i / 2)] >> ((i % 2) ? 0 : 4)) & 0xf;
}
}
break;
case DP_QUERY_PAYLOAD:
req->u.query_payload.port_number = (buf[idx] >> 4) & 0xf;
WARN_ON(buf[++idx] & 0x80);
req->u.query_payload.vcpi = buf[idx] & 0x7f;
break;
case DP_REMOTE_DPCD_READ:
{
struct drm_dp_remote_dpcd_read *r = &req->u.dpcd_read;
r->port_number = (buf[idx] >> 4) & 0xf;
r->dpcd_address = (buf[idx] << 16) & 0xf0000;
r->dpcd_address |= (buf[++idx] << 8) & 0xff00;
r->dpcd_address |= buf[++idx] & 0xff;
r->num_bytes = buf[++idx];
}
break;
case DP_REMOTE_DPCD_WRITE:
{
struct drm_dp_remote_dpcd_write *w =
&req->u.dpcd_write;
w->port_number = (buf[idx] >> 4) & 0xf;
w->dpcd_address = (buf[idx] << 16) & 0xf0000;
w->dpcd_address |= (buf[++idx] << 8) & 0xff00;
w->dpcd_address |= buf[++idx] & 0xff;
w->num_bytes = buf[++idx];
w->bytes = kmemdup(&buf[++idx], w->num_bytes,
GFP_KERNEL);
if (!w->bytes)
return -ENOMEM;
}
break;
case DP_REMOTE_I2C_READ:
{
struct drm_dp_remote_i2c_read *r = &req->u.i2c_read;
struct drm_dp_remote_i2c_read_tx *tx;
bool failed = false;
r->num_transactions = buf[idx] & 0x3;
r->port_number = (buf[idx] >> 4) & 0xf;
for (i = 0; i < r->num_transactions; i++) {
tx = &r->transactions[i];
tx->i2c_dev_id = buf[++idx] & 0x7f;
tx->num_bytes = buf[++idx];
tx->bytes = kmemdup(&buf[++idx],
tx->num_bytes,
GFP_KERNEL);
if (!tx->bytes) {
failed = true;
break;
}
idx += tx->num_bytes;
tx->no_stop_bit = (buf[idx] >> 5) & 0x1;
tx->i2c_transaction_delay = buf[idx] & 0xf;
}
if (failed) {
for (i = 0; i < r->num_transactions; i++) {
tx = &r->transactions[i];
kfree(tx->bytes);
}
return -ENOMEM;
}
r->read_i2c_device_id = buf[++idx] & 0x7f;
r->num_bytes_read = buf[++idx];
}
break;
case DP_REMOTE_I2C_WRITE:
{
struct drm_dp_remote_i2c_write *w = &req->u.i2c_write;
w->port_number = (buf[idx] >> 4) & 0xf;
w->write_i2c_device_id = buf[++idx] & 0x7f;
w->num_bytes = buf[++idx];
w->bytes = kmemdup(&buf[++idx], w->num_bytes,
GFP_KERNEL);
if (!w->bytes)
return -ENOMEM;
}
break;
case DP_QUERY_STREAM_ENC_STATUS:
req->u.enc_status.stream_id = buf[idx++];
for (i = 0; i < sizeof(req->u.enc_status.client_id); i++)
req->u.enc_status.client_id[i] = buf[idx++];
req->u.enc_status.stream_event = FIELD_GET(GENMASK(1, 0),
buf[idx]);
req->u.enc_status.valid_stream_event = FIELD_GET(BIT(2),
buf[idx]);
req->u.enc_status.stream_behavior = FIELD_GET(GENMASK(4, 3),
buf[idx]);
req->u.enc_status.valid_stream_behavior = FIELD_GET(BIT(5),
buf[idx]);
break;
}
return 0;
}
EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_dp_decode_sideband_req);
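/*
 * Illustrative sketch only, not part of the original file: a minimal
 * encode/decode round trip using the two helpers above, of the kind the
 * self-tests perform. The example_* name is invented and error handling
 * is reduced to the -ENOMEM case decoding may return.
 */
static int example_sideband_round_trip(void)
{
	struct drm_dp_sideband_msg_tx txmsg = {};
	struct drm_dp_sideband_msg_req_body in = {}, out = {};
	in.req_type = DP_ENUM_PATH_RESOURCES;
	in.u.port_num.port_number = 1;
	drm_dp_encode_sideband_req(&in, &txmsg);
	return drm_dp_decode_sideband_req(&txmsg, &out);
}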
void
drm_dp_dump_sideband_msg_req_body(const struct drm_dp_sideband_msg_req_body *req,
int indent, struct drm_printer *printer)
{
int i;
#define P(f, ...) drm_printf_indent(printer, indent, f, ##__VA_ARGS__)
if (req->req_type == DP_LINK_ADDRESS) {
/* No contents to print */
P("type=%s\n", drm_dp_mst_req_type_str(req->req_type));
return;
}
P("type=%s contents:\n", drm_dp_mst_req_type_str(req->req_type));
indent++;
switch (req->req_type) {
case DP_ENUM_PATH_RESOURCES:
case DP_POWER_DOWN_PHY:
case DP_POWER_UP_PHY:
P("port=%d\n", req->u.port_num.port_number);
break;
case DP_ALLOCATE_PAYLOAD:
P("port=%d vcpi=%d pbn=%d sdp_streams=%d %*ph\n",
req->u.allocate_payload.port_number,
req->u.allocate_payload.vcpi, req->u.allocate_payload.pbn,
req->u.allocate_payload.number_sdp_streams,
req->u.allocate_payload.number_sdp_streams,
req->u.allocate_payload.sdp_stream_sink);
break;
case DP_QUERY_PAYLOAD:
P("port=%d vcpi=%d\n",
req->u.query_payload.port_number,
req->u.query_payload.vcpi);
break;
case DP_REMOTE_DPCD_READ:
P("port=%d dpcd_addr=%05x len=%d\n",
req->u.dpcd_read.port_number, req->u.dpcd_read.dpcd_address,
req->u.dpcd_read.num_bytes);
break;
case DP_REMOTE_DPCD_WRITE:
P("port=%d addr=%05x len=%d: %*ph\n",
req->u.dpcd_write.port_number,
req->u.dpcd_write.dpcd_address,
req->u.dpcd_write.num_bytes, req->u.dpcd_write.num_bytes,
req->u.dpcd_write.bytes);
break;
case DP_REMOTE_I2C_READ:
P("port=%d num_tx=%d id=%d size=%d:\n",
req->u.i2c_read.port_number,
req->u.i2c_read.num_transactions,
req->u.i2c_read.read_i2c_device_id,
req->u.i2c_read.num_bytes_read);
indent++;
for (i = 0; i < req->u.i2c_read.num_transactions; i++) {
const struct drm_dp_remote_i2c_read_tx *rtx =
&req->u.i2c_read.transactions[i];
P("%d: id=%03d size=%03d no_stop_bit=%d tx_delay=%03d: %*ph\n",
i, rtx->i2c_dev_id, rtx->num_bytes,
rtx->no_stop_bit, rtx->i2c_transaction_delay,
rtx->num_bytes, rtx->bytes);
}
break;
case DP_REMOTE_I2C_WRITE:
P("port=%d id=%d size=%d: %*ph\n",
req->u.i2c_write.port_number,
req->u.i2c_write.write_i2c_device_id,
req->u.i2c_write.num_bytes, req->u.i2c_write.num_bytes,
req->u.i2c_write.bytes);
break;
case DP_QUERY_STREAM_ENC_STATUS:
P("stream_id=%u client_id=%*ph stream_event=%x "
"valid_event=%d stream_behavior=%x valid_behavior=%d",
req->u.enc_status.stream_id,
(int)ARRAY_SIZE(req->u.enc_status.client_id),
req->u.enc_status.client_id, req->u.enc_status.stream_event,
req->u.enc_status.valid_stream_event,
req->u.enc_status.stream_behavior,
req->u.enc_status.valid_stream_behavior);
break;
default:
P("???\n");
break;
}
#undef P
}
EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_dp_dump_sideband_msg_req_body);
static inline void
drm_dp_mst_dump_sideband_msg_tx(struct drm_printer *p,
const struct drm_dp_sideband_msg_tx *txmsg)
{
struct drm_dp_sideband_msg_req_body req;
char buf[64];
int ret;
int i;
drm_dp_mst_rad_to_str(txmsg->dst->rad, txmsg->dst->lct, buf,
sizeof(buf));
drm_printf(p, "txmsg cur_offset=%x cur_len=%x seqno=%x state=%s path_msg=%d dst=%s\n",
txmsg->cur_offset, txmsg->cur_len, txmsg->seqno,
drm_dp_mst_sideband_tx_state_str(txmsg->state),
txmsg->path_msg, buf);
ret = drm_dp_decode_sideband_req(txmsg, &req);
if (ret) {
drm_printf(p, "<failed to decode sideband req: %d>\n", ret);
return;
}
drm_dp_dump_sideband_msg_req_body(&req, 1, p);
switch (req.req_type) {
case DP_REMOTE_DPCD_WRITE:
kfree(req.u.dpcd_write.bytes);
break;
case DP_REMOTE_I2C_READ:
for (i = 0; i < req.u.i2c_read.num_transactions; i++)
kfree(req.u.i2c_read.transactions[i].bytes);
break;
case DP_REMOTE_I2C_WRITE:
kfree(req.u.i2c_write.bytes);
break;
}
}
static void drm_dp_crc_sideband_chunk_req(u8 *msg, u8 len)
{
u8 crc4;
crc4 = drm_dp_msg_data_crc4(msg, len);
msg[len] = crc4;
}
static void drm_dp_encode_sideband_reply(struct drm_dp_sideband_msg_reply_body *rep,
struct drm_dp_sideband_msg_tx *raw)
{
int idx = 0;
u8 *buf = raw->msg;
buf[idx++] = (rep->reply_type & 0x1) << 7 | (rep->req_type & 0x7f);
raw->cur_len = idx;
}
static bool drm_dp_sideband_msg_set_header(struct drm_dp_sideband_msg_rx *msg,
struct drm_dp_sideband_msg_hdr *hdr,
u8 hdrlen)
{
/*
* ignore out-of-order messages or messages that are part of a
* failed transaction
*/
if (!hdr->somt && !msg->have_somt)
return false;
/* get length contained in this portion */
msg->curchunk_idx = 0;
msg->curchunk_len = hdr->msg_len;
msg->curchunk_hdrlen = hdrlen;
/* we have already gotten an somt - don't bother parsing */
if (hdr->somt && msg->have_somt)
return false;
if (hdr->somt) {
memcpy(&msg->initial_hdr, hdr,
sizeof(struct drm_dp_sideband_msg_hdr));
msg->have_somt = true;
}
if (hdr->eomt)
msg->have_eomt = true;
return true;
}
/* this adds a chunk of msg to the builder to get the final msg */
static bool drm_dp_sideband_append_payload(struct drm_dp_sideband_msg_rx *msg,
u8 *replybuf, u8 replybuflen)
{
u8 crc4;
memcpy(&msg->chunk[msg->curchunk_idx], replybuf, replybuflen);
msg->curchunk_idx += replybuflen;
if (msg->curchunk_idx >= msg->curchunk_len) {
/* do CRC */
crc4 = drm_dp_msg_data_crc4(msg->chunk, msg->curchunk_len - 1);
if (crc4 != msg->chunk[msg->curchunk_len - 1])
print_hex_dump(KERN_DEBUG, "wrong crc",
DUMP_PREFIX_NONE, 16, 1,
msg->chunk, msg->curchunk_len, false);
/* copy chunk into bigger msg */
memcpy(&msg->msg[msg->curlen], msg->chunk, msg->curchunk_len - 1);
msg->curlen += msg->curchunk_len - 1;
}
return true;
}
static bool drm_dp_sideband_parse_link_address(const struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_sideband_msg_rx *raw,
struct drm_dp_sideband_msg_reply_body *repmsg)
{
int idx = 1;
int i;
memcpy(repmsg->u.link_addr.guid, &raw->msg[idx], 16);
idx += 16;
repmsg->u.link_addr.nports = raw->msg[idx] & 0xf;
idx++;
if (idx > raw->curlen)
goto fail_len;
for (i = 0; i < repmsg->u.link_addr.nports; i++) {
if (raw->msg[idx] & 0x80)
repmsg->u.link_addr.ports[i].input_port = 1;
repmsg->u.link_addr.ports[i].peer_device_type = (raw->msg[idx] >> 4) & 0x7;
repmsg->u.link_addr.ports[i].port_number = (raw->msg[idx] & 0xf);
idx++;
if (idx > raw->curlen)
goto fail_len;
repmsg->u.link_addr.ports[i].mcs = (raw->msg[idx] >> 7) & 0x1;
repmsg->u.link_addr.ports[i].ddps = (raw->msg[idx] >> 6) & 0x1;
if (repmsg->u.link_addr.ports[i].input_port == 0)
repmsg->u.link_addr.ports[i].legacy_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
idx++;
if (idx > raw->curlen)
goto fail_len;
if (repmsg->u.link_addr.ports[i].input_port == 0) {
repmsg->u.link_addr.ports[i].dpcd_revision = (raw->msg[idx]);
idx++;
if (idx > raw->curlen)
goto fail_len;
memcpy(repmsg->u.link_addr.ports[i].peer_guid, &raw->msg[idx], 16);
idx += 16;
if (idx > raw->curlen)
goto fail_len;
repmsg->u.link_addr.ports[i].num_sdp_streams = (raw->msg[idx] >> 4) & 0xf;
repmsg->u.link_addr.ports[i].num_sdp_stream_sinks = (raw->msg[idx] & 0xf);
idx++;
}
if (idx > raw->curlen)
goto fail_len;
}
return true;
fail_len:
DRM_DEBUG_KMS("link address reply parse length fail %d %d\n", idx, raw->curlen);
return false;
}
static bool drm_dp_sideband_parse_remote_dpcd_read(struct drm_dp_sideband_msg_rx *raw,
struct drm_dp_sideband_msg_reply_body *repmsg)
{
int idx = 1;
repmsg->u.remote_dpcd_read_ack.port_number = raw->msg[idx] & 0xf;
idx++;
if (idx > raw->curlen)
goto fail_len;
repmsg->u.remote_dpcd_read_ack.num_bytes = raw->msg[idx];
idx++;
if (idx > raw->curlen)
goto fail_len;
memcpy(repmsg->u.remote_dpcd_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_dpcd_read_ack.num_bytes);
return true;
fail_len:
DRM_DEBUG_KMS("link address reply parse length fail %d %d\n", idx, raw->curlen);
return false;
}
static bool drm_dp_sideband_parse_remote_dpcd_write(struct drm_dp_sideband_msg_rx *raw,
struct drm_dp_sideband_msg_reply_body *repmsg)
{
int idx = 1;
repmsg->u.remote_dpcd_write_ack.port_number = raw->msg[idx] & 0xf;
idx++;
if (idx > raw->curlen)
goto fail_len;
return true;
fail_len:
DRM_DEBUG_KMS("parse length fail %d %d\n", idx, raw->curlen);
return false;
}
static bool drm_dp_sideband_parse_remote_i2c_read_ack(struct drm_dp_sideband_msg_rx *raw,
struct drm_dp_sideband_msg_reply_body *repmsg)
{
int idx = 1;
repmsg->u.remote_i2c_read_ack.port_number = (raw->msg[idx] & 0xf);
idx++;
if (idx > raw->curlen)
goto fail_len;
repmsg->u.remote_i2c_read_ack.num_bytes = raw->msg[idx];
idx++;
/* TODO check */
memcpy(repmsg->u.remote_i2c_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_i2c_read_ack.num_bytes);
return true;
fail_len:
DRM_DEBUG_KMS("remote i2c reply parse length fail %d %d\n", idx, raw->curlen);
return false;
}
static bool drm_dp_sideband_parse_enum_path_resources_ack(struct drm_dp_sideband_msg_rx *raw,
struct drm_dp_sideband_msg_reply_body *repmsg)
{
int idx = 1;
repmsg->u.path_resources.port_number = (raw->msg[idx] >> 4) & 0xf;
repmsg->u.path_resources.fec_capable = raw->msg[idx] & 0x1;
idx++;
if (idx > raw->curlen)
goto fail_len;
repmsg->u.path_resources.full_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
idx += 2;
if (idx > raw->curlen)
goto fail_len;
repmsg->u.path_resources.avail_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
idx += 2;
if (idx > raw->curlen)
goto fail_len;
return true;
fail_len:
DRM_DEBUG_KMS("enum resource parse length fail %d %d\n", idx, raw->curlen);
return false;
}
static bool drm_dp_sideband_parse_allocate_payload_ack(struct drm_dp_sideband_msg_rx *raw,
struct drm_dp_sideband_msg_reply_body *repmsg)
{
int idx = 1;
repmsg->u.allocate_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
idx++;
if (idx > raw->curlen)
goto fail_len;
repmsg->u.allocate_payload.vcpi = raw->msg[idx];
idx++;
if (idx > raw->curlen)
goto fail_len;
repmsg->u.allocate_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
idx += 2;
if (idx > raw->curlen)
goto fail_len;
return true;
fail_len:
DRM_DEBUG_KMS("allocate payload parse length fail %d %d\n", idx, raw->curlen);
return false;
}
static bool drm_dp_sideband_parse_query_payload_ack(struct drm_dp_sideband_msg_rx *raw,
struct drm_dp_sideband_msg_reply_body *repmsg)
{
int idx = 1;
repmsg->u.query_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
idx++;
if (idx > raw->curlen)
goto fail_len;
repmsg->u.query_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
idx += 2;
if (idx > raw->curlen)
goto fail_len;
return true;
fail_len:
DRM_DEBUG_KMS("query payload parse length fail %d %d\n", idx, raw->curlen);
return false;
}
static bool drm_dp_sideband_parse_power_updown_phy_ack(struct drm_dp_sideband_msg_rx *raw,
struct drm_dp_sideband_msg_reply_body *repmsg)
{
int idx = 1;
repmsg->u.port_number.port_number = (raw->msg[idx] >> 4) & 0xf;
idx++;
if (idx > raw->curlen) {
DRM_DEBUG_KMS("power up/down phy parse length fail %d %d\n",
idx, raw->curlen);
return false;
}
return true;
}
static bool
drm_dp_sideband_parse_query_stream_enc_status(
struct drm_dp_sideband_msg_rx *raw,
struct drm_dp_sideband_msg_reply_body *repmsg)
{
struct drm_dp_query_stream_enc_status_ack_reply *reply;
reply = &repmsg->u.enc_status;
reply->stream_id = raw->msg[3];
reply->reply_signed = raw->msg[2] & BIT(0);
/*
* NOTE: It's my impression from reading the spec that the below parsing
* is correct. However I noticed while testing with an HDCP 1.4 display
* through an HDCP 2.2 hub that only bit 3 was set. In that case, I
* would expect both bits to be set. So keep the parsing following the
* spec, but beware reality might not match the spec (at least for some
* configurations).
*/
reply->hdcp_1x_device_present = raw->msg[2] & BIT(4);
reply->hdcp_2x_device_present = raw->msg[2] & BIT(3);
reply->query_capable_device_present = raw->msg[2] & BIT(5);
reply->legacy_device_present = raw->msg[2] & BIT(6);
reply->unauthorizable_device_present = raw->msg[2] & BIT(7);
reply->auth_completed = !!(raw->msg[1] & BIT(3));
reply->encryption_enabled = !!(raw->msg[1] & BIT(4));
reply->repeater_present = !!(raw->msg[1] & BIT(5));
reply->state = (raw->msg[1] & GENMASK(7, 6)) >> 6;
return true;
}
static bool drm_dp_sideband_parse_reply(const struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_sideband_msg_rx *raw,
struct drm_dp_sideband_msg_reply_body *msg)
{
memset(msg, 0, sizeof(*msg));
msg->reply_type = (raw->msg[0] & 0x80) >> 7;
msg->req_type = (raw->msg[0] & 0x7f);
if (msg->reply_type == DP_SIDEBAND_REPLY_NAK) {
memcpy(msg->u.nak.guid, &raw->msg[1], 16);
msg->u.nak.reason = raw->msg[17];
msg->u.nak.nak_data = raw->msg[18];
return false;
}
switch (msg->req_type) {
case DP_LINK_ADDRESS:
return drm_dp_sideband_parse_link_address(mgr, raw, msg);
case DP_QUERY_PAYLOAD:
return drm_dp_sideband_parse_query_payload_ack(raw, msg);
case DP_REMOTE_DPCD_READ:
return drm_dp_sideband_parse_remote_dpcd_read(raw, msg);
case DP_REMOTE_DPCD_WRITE:
return drm_dp_sideband_parse_remote_dpcd_write(raw, msg);
case DP_REMOTE_I2C_READ:
return drm_dp_sideband_parse_remote_i2c_read_ack(raw, msg);
case DP_REMOTE_I2C_WRITE:
return true; /* since there's nothing to parse */
case DP_ENUM_PATH_RESOURCES:
return drm_dp_sideband_parse_enum_path_resources_ack(raw, msg);
case DP_ALLOCATE_PAYLOAD:
return drm_dp_sideband_parse_allocate_payload_ack(raw, msg);
case DP_POWER_DOWN_PHY:
case DP_POWER_UP_PHY:
return drm_dp_sideband_parse_power_updown_phy_ack(raw, msg);
case DP_CLEAR_PAYLOAD_ID_TABLE:
return true; /* since there's nothing to parse */
case DP_QUERY_STREAM_ENC_STATUS:
return drm_dp_sideband_parse_query_stream_enc_status(raw, msg);
default:
drm_err(mgr->dev, "Got unknown reply 0x%02x (%s)\n",
msg->req_type, drm_dp_mst_req_type_str(msg->req_type));
return false;
}
}
static bool
drm_dp_sideband_parse_connection_status_notify(const struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_sideband_msg_rx *raw,
struct drm_dp_sideband_msg_req_body *msg)
{
int idx = 1;
msg->u.conn_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
idx++;
if (idx > raw->curlen)
goto fail_len;
memcpy(msg->u.conn_stat.guid, &raw->msg[idx], 16);
idx += 16;
if (idx > raw->curlen)
goto fail_len;
msg->u.conn_stat.legacy_device_plug_status = (raw->msg[idx] >> 6) & 0x1;
msg->u.conn_stat.displayport_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
msg->u.conn_stat.message_capability_status = (raw->msg[idx] >> 4) & 0x1;
msg->u.conn_stat.input_port = (raw->msg[idx] >> 3) & 0x1;
msg->u.conn_stat.peer_device_type = (raw->msg[idx] & 0x7);
idx++;
return true;
fail_len:
drm_dbg_kms(mgr->dev, "connection status reply parse length fail %d %d\n",
idx, raw->curlen);
return false;
}
static bool drm_dp_sideband_parse_resource_status_notify(const struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_sideband_msg_rx *raw,
struct drm_dp_sideband_msg_req_body *msg)
{
int idx = 1;
msg->u.resource_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
idx++;
if (idx > raw->curlen)
goto fail_len;
memcpy(msg->u.resource_stat.guid, &raw->msg[idx], 16);
idx += 16;
if (idx > raw->curlen)
goto fail_len;
msg->u.resource_stat.available_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
idx++;
return true;
fail_len:
drm_dbg_kms(mgr->dev, "resource status reply parse length fail %d %d\n", idx, raw->curlen);
return false;
}
static bool drm_dp_sideband_parse_req(const struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_sideband_msg_rx *raw,
struct drm_dp_sideband_msg_req_body *msg)
{
memset(msg, 0, sizeof(*msg));
msg->req_type = (raw->msg[0] & 0x7f);
switch (msg->req_type) {
case DP_CONNECTION_STATUS_NOTIFY:
return drm_dp_sideband_parse_connection_status_notify(mgr, raw, msg);
case DP_RESOURCE_STATUS_NOTIFY:
return drm_dp_sideband_parse_resource_status_notify(mgr, raw, msg);
default:
drm_err(mgr->dev, "Got unknown request 0x%02x (%s)\n",
msg->req_type, drm_dp_mst_req_type_str(msg->req_type));
return false;
}
}
static void build_dpcd_write(struct drm_dp_sideband_msg_tx *msg,
u8 port_num, u32 offset, u8 num_bytes, u8 *bytes)
{
struct drm_dp_sideband_msg_req_body req;
req.req_type = DP_REMOTE_DPCD_WRITE;
req.u.dpcd_write.port_number = port_num;
req.u.dpcd_write.dpcd_address = offset;
req.u.dpcd_write.num_bytes = num_bytes;
req.u.dpcd_write.bytes = bytes;
drm_dp_encode_sideband_req(&req, msg);
}
static void build_link_address(struct drm_dp_sideband_msg_tx *msg)
{
struct drm_dp_sideband_msg_req_body req;
req.req_type = DP_LINK_ADDRESS;
drm_dp_encode_sideband_req(&req, msg);
}
static void build_clear_payload_id_table(struct drm_dp_sideband_msg_tx *msg)
{
struct drm_dp_sideband_msg_req_body req;
req.req_type = DP_CLEAR_PAYLOAD_ID_TABLE;
drm_dp_encode_sideband_req(&req, msg);
msg->path_msg = true;
}
static int build_enum_path_resources(struct drm_dp_sideband_msg_tx *msg,
int port_num)
{
struct drm_dp_sideband_msg_req_body req;
req.req_type = DP_ENUM_PATH_RESOURCES;
req.u.port_num.port_number = port_num;
drm_dp_encode_sideband_req(&req, msg);
msg->path_msg = true;
return 0;
}
static void build_allocate_payload(struct drm_dp_sideband_msg_tx *msg,
int port_num,
u8 vcpi, uint16_t pbn,
u8 number_sdp_streams,
u8 *sdp_stream_sink)
{
struct drm_dp_sideband_msg_req_body req;
memset(&req, 0, sizeof(req));
req.req_type = DP_ALLOCATE_PAYLOAD;
req.u.allocate_payload.port_number = port_num;
req.u.allocate_payload.vcpi = vcpi;
req.u.allocate_payload.pbn = pbn;
req.u.allocate_payload.number_sdp_streams = number_sdp_streams;
memcpy(req.u.allocate_payload.sdp_stream_sink, sdp_stream_sink,
number_sdp_streams);
drm_dp_encode_sideband_req(&req, msg);
msg->path_msg = true;
}
static void build_power_updown_phy(struct drm_dp_sideband_msg_tx *msg,
int port_num, bool power_up)
{
struct drm_dp_sideband_msg_req_body req;
if (power_up)
req.req_type = DP_POWER_UP_PHY;
else
req.req_type = DP_POWER_DOWN_PHY;
req.u.port_num.port_number = port_num;
drm_dp_encode_sideband_req(&req, msg);
msg->path_msg = true;
}
static int
build_query_stream_enc_status(struct drm_dp_sideband_msg_tx *msg, u8 stream_id,
u8 *q_id)
{
struct drm_dp_sideband_msg_req_body req;
req.req_type = DP_QUERY_STREAM_ENC_STATUS;
req.u.enc_status.stream_id = stream_id;
memcpy(req.u.enc_status.client_id, q_id,
sizeof(req.u.enc_status.client_id));
req.u.enc_status.stream_event = 0;
req.u.enc_status.valid_stream_event = false;
req.u.enc_status.stream_behavior = 0;
req.u.enc_status.valid_stream_behavior = false;
drm_dp_encode_sideband_req(&req, msg);
return 0;
}
static bool check_txmsg_state(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_sideband_msg_tx *txmsg)
{
unsigned int state;
/*
* All updates to txmsg->state are protected by mgr->qlock, and the two
* cases we check here are terminal states. For those the barriers
* provided by the wake_up/wait_event pair are enough.
*/
state = READ_ONCE(txmsg->state);
return (state == DRM_DP_SIDEBAND_TX_RX ||
state == DRM_DP_SIDEBAND_TX_TIMEOUT);
}
static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb,
struct drm_dp_sideband_msg_tx *txmsg)
{
struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
unsigned long wait_timeout = msecs_to_jiffies(4000);
unsigned long wait_expires = jiffies + wait_timeout;
int ret;
for (;;) {
/*
* If the driver provides a way for this, change to
* poll-waiting for the MST reply interrupt if we didn't receive
* it for 50 msec. This would cater for cases where the HPD
* pulse signal got lost somewhere, even though the sink raised
* the corresponding MST interrupt correctly. One example is the
* Club 3D CAC-1557 TypeC -> DP adapter which for some reason
* filters out short pulses with a duration less than ~540 usec.
*
* The poll period is 50 msec to avoid missing an interrupt
* after the sink has cleared it (after a 110msec timeout
* since it raised the interrupt).
*/
ret = wait_event_timeout(mgr->tx_waitq,
check_txmsg_state(mgr, txmsg),
mgr->cbs->poll_hpd_irq ?
msecs_to_jiffies(50) :
wait_timeout);
if (ret || !mgr->cbs->poll_hpd_irq ||
time_after(jiffies, wait_expires))
break;
mgr->cbs->poll_hpd_irq(mgr);
}
mutex_lock(&mgr->qlock);
if (ret > 0) {
if (txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT) {
ret = -EIO;
goto out;
}
} else {
drm_dbg_kms(mgr->dev, "timedout msg send %p %d %d\n",
txmsg, txmsg->state, txmsg->seqno);
/* dump some state */
ret = -EIO;
/* remove from q */
if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED ||
txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND ||
txmsg->state == DRM_DP_SIDEBAND_TX_SENT)
list_del(&txmsg->next);
}
out:
if (unlikely(ret == -EIO) && drm_debug_enabled(DRM_UT_DP)) {
struct drm_printer p = drm_debug_printer(DBG_PREFIX);
drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
}
mutex_unlock(&mgr->qlock);
drm_dp_mst_kick_tx(mgr);
return ret;
}
static struct drm_dp_mst_branch *drm_dp_add_mst_branch_device(u8 lct, u8 *rad)
{
struct drm_dp_mst_branch *mstb;
mstb = kzalloc(sizeof(*mstb), GFP_KERNEL);
if (!mstb)
return NULL;
mstb->lct = lct;
if (lct > 1)
memcpy(mstb->rad, rad, lct / 2);
INIT_LIST_HEAD(&mstb->ports);
kref_init(&mstb->topology_kref);
kref_init(&mstb->malloc_kref);
return mstb;
}
static void drm_dp_free_mst_branch_device(struct kref *kref)
{
struct drm_dp_mst_branch *mstb =
container_of(kref, struct drm_dp_mst_branch, malloc_kref);
if (mstb->port_parent)
drm_dp_mst_put_port_malloc(mstb->port_parent);
kfree(mstb);
}
/**
* DOC: Branch device and port refcounting
*
* Topology refcount overview
* ~~~~~~~~~~~~~~~~~~~~~~~~~~
*
* The refcounting schemes for &struct drm_dp_mst_branch and &struct
* drm_dp_mst_port are somewhat unusual. Both ports and branch devices have
* two different kinds of refcounts: topology refcounts, and malloc refcounts.
*
* Topology refcounts are not exposed to drivers, and are handled internally
* by the DP MST helpers. The helpers use them in order to prevent the
* in-memory topology state from being changed in the middle of critical
* operations like changing the internal state of payload allocations. This
* means each branch and port will be considered to be connected to the rest
* of the topology until its topology refcount reaches zero. Additionally,
* for ports this means that their associated &struct drm_connector will stay
* registered with userspace until the port's refcount reaches 0.
*
* Malloc refcount overview
* ~~~~~~~~~~~~~~~~~~~~~~~~
*
* Malloc references are used to keep a &struct drm_dp_mst_port or &struct
* drm_dp_mst_branch allocated even after all of its topology references have
* been dropped, so that the driver or MST helpers can safely access each
* branch's last known state before it was disconnected from the topology.
* When the malloc refcount of a port or branch reaches 0, the memory
* allocation containing the &struct drm_dp_mst_branch or &struct
* drm_dp_mst_port respectively will be freed.
*
* For &struct drm_dp_mst_branch, malloc refcounts are not currently exposed
* to drivers. As of writing this documentation, there are no drivers that
 * have a use case for accessing &struct drm_dp_mst_branch outside of the MST
* helpers. Exposing this API to drivers in a race-free manner would take more
* tweaking of the refcounting scheme, however patches are welcome provided
 * there is a legitimate driver use case for this.
*
* Refcount relationships in a topology
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
* Let's take a look at why the relationship between topology and malloc
* refcounts is designed the way it is.
*
* .. kernel-figure:: dp-mst/topology-figure-1.dot
*
* An example of topology and malloc refs in a DP MST topology with two
* active payloads. Topology refcount increments are indicated by solid
* lines, and malloc refcount increments are indicated by dashed lines.
* Each starts from the branch which incremented the refcount, and ends at
 *    the branch to which the refcount belongs, i.e. the arrow points the
* same way as the C pointers used to reference a structure.
*
* As you can see in the above figure, every branch increments the topology
* refcount of its children, and increments the malloc refcount of its
* parent. Additionally, every payload increments the malloc refcount of its
* assigned port by 1.
*
* So, what would happen if MSTB #3 from the above figure was unplugged from
* the system, but the driver hadn't yet removed payload #2 from port #3? The
* topology would start to look like the figure below.
*
* .. kernel-figure:: dp-mst/topology-figure-2.dot
*
* Ports and branch devices which have been released from memory are
* colored grey, and references which have been removed are colored red.
*
* Whenever a port or branch device's topology refcount reaches zero, it will
* decrement the topology refcounts of all its children, the malloc refcount
* of its parent, and finally its own malloc refcount. For MSTB #4 and port
* #4, this means they both have been disconnected from the topology and freed
* from memory. But, because payload #2 is still holding a reference to port
* #3, port #3 is removed from the topology but its &struct drm_dp_mst_port
* is still accessible from memory. This also means port #3 has not yet
* decremented the malloc refcount of MSTB #3, so its &struct
* drm_dp_mst_branch will also stay allocated in memory until port #3's
* malloc refcount reaches 0.
*
* This relationship is necessary because in order to release payload #2, we
* need to be able to figure out the last relative of port #3 that's still
* connected to the topology. In this case, we would travel up the topology as
* shown below.
*
* .. kernel-figure:: dp-mst/topology-figure-3.dot
*
* And finally, remove payload #2 by communicating with port #2 through
* sideband transactions.
*/
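/*
 * Illustrative sketch (not an exported helper): the validation helpers later
 * in this file follow exactly this pattern whenever a port must still be
 * reachable before it is messaged, while a malloc reference held by the
 * caller keeps the structure itself alive:
 *
 *     if (drm_dp_mst_topology_try_get_port(port)) {
 *             ... port is still in the topology, safe to message it ...
 *             drm_dp_mst_topology_put_port(port);
 *     } else {
 *             ... port was unplugged, only malloc-protected state is valid ...
 *     }
 */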
/**
* drm_dp_mst_get_mstb_malloc() - Increment the malloc refcount of a branch
* device
* @mstb: The &struct drm_dp_mst_branch to increment the malloc refcount of
*
* Increments &drm_dp_mst_branch.malloc_kref. When
* &drm_dp_mst_branch.malloc_kref reaches 0, the memory allocation for @mstb
* will be released and @mstb may no longer be used.
*
* See also: drm_dp_mst_put_mstb_malloc()
*/
static void
drm_dp_mst_get_mstb_malloc(struct drm_dp_mst_branch *mstb)
{
kref_get(&mstb->malloc_kref);
drm_dbg(mstb->mgr->dev, "mstb %p (%d)\n", mstb, kref_read(&mstb->malloc_kref));
}
/**
* drm_dp_mst_put_mstb_malloc() - Decrement the malloc refcount of a branch
* device
* @mstb: The &struct drm_dp_mst_branch to decrement the malloc refcount of
*
* Decrements &drm_dp_mst_branch.malloc_kref. When
* &drm_dp_mst_branch.malloc_kref reaches 0, the memory allocation for @mstb
* will be released and @mstb may no longer be used.
*
* See also: drm_dp_mst_get_mstb_malloc()
*/
static void
drm_dp_mst_put_mstb_malloc(struct drm_dp_mst_branch *mstb)
{
drm_dbg(mstb->mgr->dev, "mstb %p (%d)\n", mstb, kref_read(&mstb->malloc_kref) - 1);
kref_put(&mstb->malloc_kref, drm_dp_free_mst_branch_device);
}
static void drm_dp_free_mst_port(struct kref *kref)
{
struct drm_dp_mst_port *port =
container_of(kref, struct drm_dp_mst_port, malloc_kref);
drm_dp_mst_put_mstb_malloc(port->parent);
kfree(port);
}
/**
* drm_dp_mst_get_port_malloc() - Increment the malloc refcount of an MST port
* @port: The &struct drm_dp_mst_port to increment the malloc refcount of
*
* Increments &drm_dp_mst_port.malloc_kref. When &drm_dp_mst_port.malloc_kref
* reaches 0, the memory allocation for @port will be released and @port may
* no longer be used.
*
* Because @port could potentially be freed at any time by the DP MST helpers
* if &drm_dp_mst_port.malloc_kref reaches 0, including during a call to this
 * function, drivers that wish to make use of &struct drm_dp_mst_port should
* ensure that they grab at least one main malloc reference to their MST ports
* in &drm_dp_mst_topology_cbs.add_connector. This callback is called before
* there is any chance for &drm_dp_mst_port.malloc_kref to reach 0.
*
* See also: drm_dp_mst_put_port_malloc()
*/
void
drm_dp_mst_get_port_malloc(struct drm_dp_mst_port *port)
{
kref_get(&port->malloc_kref);
drm_dbg(port->mgr->dev, "port %p (%d)\n", port, kref_read(&port->malloc_kref));
}
EXPORT_SYMBOL(drm_dp_mst_get_port_malloc);
/**
* drm_dp_mst_put_port_malloc() - Decrement the malloc refcount of an MST port
* @port: The &struct drm_dp_mst_port to decrement the malloc refcount of
*
* Decrements &drm_dp_mst_port.malloc_kref. When &drm_dp_mst_port.malloc_kref
* reaches 0, the memory allocation for @port will be released and @port may
* no longer be used.
*
* See also: drm_dp_mst_get_port_malloc()
*/
void
drm_dp_mst_put_port_malloc(struct drm_dp_mst_port *port)
{
drm_dbg(port->mgr->dev, "port %p (%d)\n", port, kref_read(&port->malloc_kref) - 1);
kref_put(&port->malloc_kref, drm_dp_free_mst_port);
}
EXPORT_SYMBOL(drm_dp_mst_put_port_malloc);
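/*
 * Minimal sketch of the driver-side usage described above. The structure and
 * function names prefixed with "example_" are hypothetical; only the two
 * helpers and the &drm_dp_mst_topology_cbs.add_connector signature are real:
 *
 *     static struct drm_connector *
 *     example_add_connector(struct drm_dp_mst_topology_mgr *mgr,
 *                           struct drm_dp_mst_port *port, const char *path)
 *     {
 *             struct example_connector *conn = example_connector_alloc(mgr, path);
 *
 *             if (!conn)
 *                     return NULL;
 *
 *             conn->port = port;
 *             drm_dp_mst_get_port_malloc(port);
 *             return &conn->base;
 *     }
 *
 *     static void example_connector_destroy(struct drm_connector *connector)
 *     {
 *             struct example_connector *conn = to_example_connector(connector);
 *
 *             drm_dp_mst_put_port_malloc(conn->port);
 *             drm_connector_cleanup(connector);
 *             kfree(conn);
 *     }
 */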
#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
#define STACK_DEPTH 8
static noinline void
__topology_ref_save(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_topology_ref_history *history,
enum drm_dp_mst_topology_ref_type type)
{
struct drm_dp_mst_topology_ref_entry *entry = NULL;
depot_stack_handle_t backtrace;
ulong stack_entries[STACK_DEPTH];
uint n;
int i;
n = stack_trace_save(stack_entries, ARRAY_SIZE(stack_entries), 1);
backtrace = stack_depot_save(stack_entries, n, GFP_KERNEL);
if (!backtrace)
return;
/* Try to find an existing entry for this backtrace */
for (i = 0; i < history->len; i++) {
if (history->entries[i].backtrace == backtrace) {
entry = &history->entries[i];
break;
}
}
/* Otherwise add one */
if (!entry) {
struct drm_dp_mst_topology_ref_entry *new;
int new_len = history->len + 1;
new = krealloc(history->entries, sizeof(*new) * new_len,
GFP_KERNEL);
if (!new)
return;
entry = &new[history->len];
history->len = new_len;
history->entries = new;
entry->backtrace = backtrace;
entry->type = type;
entry->count = 0;
}
entry->count++;
entry->ts_nsec = ktime_get_ns();
}
static int
topology_ref_history_cmp(const void *a, const void *b)
{
const struct drm_dp_mst_topology_ref_entry *entry_a = a, *entry_b = b;
if (entry_a->ts_nsec > entry_b->ts_nsec)
return 1;
else if (entry_a->ts_nsec < entry_b->ts_nsec)
return -1;
else
return 0;
}
static inline const char *
topology_ref_type_to_str(enum drm_dp_mst_topology_ref_type type)
{
if (type == DRM_DP_MST_TOPOLOGY_REF_GET)
return "get";
else
return "put";
}
static void
__dump_topology_ref_history(struct drm_dp_mst_topology_ref_history *history,
void *ptr, const char *type_str)
{
struct drm_printer p = drm_debug_printer(DBG_PREFIX);
char *buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
int i;
if (!buf)
return;
if (!history->len)
goto out;
/* First, sort the list so that it goes from oldest to newest
* reference entry
*/
sort(history->entries, history->len, sizeof(*history->entries),
topology_ref_history_cmp, NULL);
drm_printf(&p, "%s (%p) topology count reached 0, dumping history:\n",
type_str, ptr);
for (i = 0; i < history->len; i++) {
const struct drm_dp_mst_topology_ref_entry *entry =
&history->entries[i];
u64 ts_nsec = entry->ts_nsec;
u32 rem_nsec = do_div(ts_nsec, 1000000000);
stack_depot_snprint(entry->backtrace, buf, PAGE_SIZE, 4);
drm_printf(&p, " %d %ss (last at %5llu.%06u):\n%s",
entry->count,
topology_ref_type_to_str(entry->type),
ts_nsec, rem_nsec / 1000, buf);
}
/* Now free the history, since this is the only time we expose it */
kfree(history->entries);
out:
kfree(buf);
}
static __always_inline void
drm_dp_mst_dump_mstb_topology_history(struct drm_dp_mst_branch *mstb)
{
__dump_topology_ref_history(&mstb->topology_ref_history, mstb,
"MSTB");
}
static __always_inline void
drm_dp_mst_dump_port_topology_history(struct drm_dp_mst_port *port)
{
__dump_topology_ref_history(&port->topology_ref_history, port,
"Port");
}
static __always_inline void
save_mstb_topology_ref(struct drm_dp_mst_branch *mstb,
enum drm_dp_mst_topology_ref_type type)
{
__topology_ref_save(mstb->mgr, &mstb->topology_ref_history, type);
}
static __always_inline void
save_port_topology_ref(struct drm_dp_mst_port *port,
enum drm_dp_mst_topology_ref_type type)
{
__topology_ref_save(port->mgr, &port->topology_ref_history, type);
}
static inline void
topology_ref_history_lock(struct drm_dp_mst_topology_mgr *mgr)
{
mutex_lock(&mgr->topology_ref_history_lock);
}
static inline void
topology_ref_history_unlock(struct drm_dp_mst_topology_mgr *mgr)
{
mutex_unlock(&mgr->topology_ref_history_lock);
}
#else
static inline void
topology_ref_history_lock(struct drm_dp_mst_topology_mgr *mgr) {}
static inline void
topology_ref_history_unlock(struct drm_dp_mst_topology_mgr *mgr) {}
static inline void
drm_dp_mst_dump_mstb_topology_history(struct drm_dp_mst_branch *mstb) {}
static inline void
drm_dp_mst_dump_port_topology_history(struct drm_dp_mst_port *port) {}
#define save_mstb_topology_ref(mstb, type)
#define save_port_topology_ref(port, type)
#endif
struct drm_dp_mst_atomic_payload *
drm_atomic_get_mst_payload_state(struct drm_dp_mst_topology_state *state,
struct drm_dp_mst_port *port)
{
struct drm_dp_mst_atomic_payload *payload;
list_for_each_entry(payload, &state->payloads, next)
if (payload->port == port)
return payload;
return NULL;
}
EXPORT_SYMBOL(drm_atomic_get_mst_payload_state);
static void drm_dp_destroy_mst_branch_device(struct kref *kref)
{
struct drm_dp_mst_branch *mstb =
container_of(kref, struct drm_dp_mst_branch, topology_kref);
struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
drm_dp_mst_dump_mstb_topology_history(mstb);
INIT_LIST_HEAD(&mstb->destroy_next);
/*
* This can get called under mgr->mutex, so we need to perform the
* actual destruction of the mstb in another worker
*/
mutex_lock(&mgr->delayed_destroy_lock);
list_add(&mstb->destroy_next, &mgr->destroy_branch_device_list);
mutex_unlock(&mgr->delayed_destroy_lock);
queue_work(mgr->delayed_destroy_wq, &mgr->delayed_destroy_work);
}
/**
* drm_dp_mst_topology_try_get_mstb() - Increment the topology refcount of a
* branch device unless it's zero
* @mstb: &struct drm_dp_mst_branch to increment the topology refcount of
*
* Attempts to grab a topology reference to @mstb, if it hasn't yet been
* removed from the topology (e.g. &drm_dp_mst_branch.topology_kref has
* reached 0). Holding a topology reference implies that a malloc reference
* will be held to @mstb as long as the user holds the topology reference.
*
* Care should be taken to ensure that the user has at least one malloc
* reference to @mstb. If you already have a topology reference to @mstb, you
* should use drm_dp_mst_topology_get_mstb() instead.
*
* See also:
* drm_dp_mst_topology_get_mstb()
* drm_dp_mst_topology_put_mstb()
*
* Returns:
* * 1: A topology reference was grabbed successfully
 * * 0: @mstb is no longer in the topology, no reference was grabbed
*/
static int __must_check
drm_dp_mst_topology_try_get_mstb(struct drm_dp_mst_branch *mstb)
{
int ret;
topology_ref_history_lock(mstb->mgr);
ret = kref_get_unless_zero(&mstb->topology_kref);
if (ret) {
drm_dbg(mstb->mgr->dev, "mstb %p (%d)\n", mstb, kref_read(&mstb->topology_kref));
save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_GET);
}
topology_ref_history_unlock(mstb->mgr);
return ret;
}
/**
* drm_dp_mst_topology_get_mstb() - Increment the topology refcount of a
* branch device
* @mstb: The &struct drm_dp_mst_branch to increment the topology refcount of
*
 * Increments &drm_dp_mst_branch.topology_kref without checking whether or
* not it's already reached 0. This is only valid to use in scenarios where
* you are already guaranteed to have at least one active topology reference
* to @mstb. Otherwise, drm_dp_mst_topology_try_get_mstb() must be used.
*
* See also:
* drm_dp_mst_topology_try_get_mstb()
* drm_dp_mst_topology_put_mstb()
*/
static void drm_dp_mst_topology_get_mstb(struct drm_dp_mst_branch *mstb)
{
topology_ref_history_lock(mstb->mgr);
save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_GET);
WARN_ON(kref_read(&mstb->topology_kref) == 0);
kref_get(&mstb->topology_kref);
drm_dbg(mstb->mgr->dev, "mstb %p (%d)\n", mstb, kref_read(&mstb->topology_kref));
topology_ref_history_unlock(mstb->mgr);
}
/**
* drm_dp_mst_topology_put_mstb() - release a topology reference to a branch
* device
* @mstb: The &struct drm_dp_mst_branch to release the topology reference from
*
* Releases a topology reference from @mstb by decrementing
* &drm_dp_mst_branch.topology_kref.
*
* See also:
* drm_dp_mst_topology_try_get_mstb()
* drm_dp_mst_topology_get_mstb()
*/
static void
drm_dp_mst_topology_put_mstb(struct drm_dp_mst_branch *mstb)
{
topology_ref_history_lock(mstb->mgr);
drm_dbg(mstb->mgr->dev, "mstb %p (%d)\n", mstb, kref_read(&mstb->topology_kref) - 1);
save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_PUT);
topology_ref_history_unlock(mstb->mgr);
kref_put(&mstb->topology_kref, drm_dp_destroy_mst_branch_device);
}
static void drm_dp_destroy_port(struct kref *kref)
{
struct drm_dp_mst_port *port =
container_of(kref, struct drm_dp_mst_port, topology_kref);
struct drm_dp_mst_topology_mgr *mgr = port->mgr;
drm_dp_mst_dump_port_topology_history(port);
/* There's nothing that needs locking to destroy an input port yet */
if (port->input) {
drm_dp_mst_put_port_malloc(port);
return;
}
drm_edid_free(port->cached_edid);
/*
* we can't destroy the connector here, as we might be holding the
* mode_config.mutex from an EDID retrieval
*/
mutex_lock(&mgr->delayed_destroy_lock);
list_add(&port->next, &mgr->destroy_port_list);
mutex_unlock(&mgr->delayed_destroy_lock);
queue_work(mgr->delayed_destroy_wq, &mgr->delayed_destroy_work);
}
/**
* drm_dp_mst_topology_try_get_port() - Increment the topology refcount of a
* port unless it's zero
* @port: &struct drm_dp_mst_port to increment the topology refcount of
*
* Attempts to grab a topology reference to @port, if it hasn't yet been
* removed from the topology (e.g. &drm_dp_mst_port.topology_kref has reached
* 0). Holding a topology reference implies that a malloc reference will be
* held to @port as long as the user holds the topology reference.
*
* Care should be taken to ensure that the user has at least one malloc
* reference to @port. If you already have a topology reference to @port, you
* should use drm_dp_mst_topology_get_port() instead.
*
* See also:
* drm_dp_mst_topology_get_port()
* drm_dp_mst_topology_put_port()
*
* Returns:
* * 1: A topology reference was grabbed successfully
* * 0: @port is no longer in the topology, no reference was grabbed
*/
static int __must_check
drm_dp_mst_topology_try_get_port(struct drm_dp_mst_port *port)
{
int ret;
topology_ref_history_lock(port->mgr);
ret = kref_get_unless_zero(&port->topology_kref);
if (ret) {
drm_dbg(port->mgr->dev, "port %p (%d)\n", port, kref_read(&port->topology_kref));
save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_GET);
}
topology_ref_history_unlock(port->mgr);
return ret;
}
/**
* drm_dp_mst_topology_get_port() - Increment the topology refcount of a port
* @port: The &struct drm_dp_mst_port to increment the topology refcount of
*
 * Increments &drm_dp_mst_port.topology_kref without checking whether or
* not it's already reached 0. This is only valid to use in scenarios where
* you are already guaranteed to have at least one active topology reference
* to @port. Otherwise, drm_dp_mst_topology_try_get_port() must be used.
*
* See also:
* drm_dp_mst_topology_try_get_port()
* drm_dp_mst_topology_put_port()
*/
static void drm_dp_mst_topology_get_port(struct drm_dp_mst_port *port)
{
topology_ref_history_lock(port->mgr);
WARN_ON(kref_read(&port->topology_kref) == 0);
kref_get(&port->topology_kref);
drm_dbg(port->mgr->dev, "port %p (%d)\n", port, kref_read(&port->topology_kref));
save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_GET);
topology_ref_history_unlock(port->mgr);
}
/**
* drm_dp_mst_topology_put_port() - release a topology reference to a port
* @port: The &struct drm_dp_mst_port to release the topology reference from
*
* Releases a topology reference from @port by decrementing
* &drm_dp_mst_port.topology_kref.
*
* See also:
* drm_dp_mst_topology_try_get_port()
* drm_dp_mst_topology_get_port()
*/
static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port)
{
topology_ref_history_lock(port->mgr);
drm_dbg(port->mgr->dev, "port %p (%d)\n", port, kref_read(&port->topology_kref) - 1);
save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_PUT);
topology_ref_history_unlock(port->mgr);
kref_put(&port->topology_kref, drm_dp_destroy_port);
}
static struct drm_dp_mst_branch *
drm_dp_mst_topology_get_mstb_validated_locked(struct drm_dp_mst_branch *mstb,
struct drm_dp_mst_branch *to_find)
{
struct drm_dp_mst_port *port;
struct drm_dp_mst_branch *rmstb;
if (to_find == mstb)
return mstb;
list_for_each_entry(port, &mstb->ports, next) {
if (port->mstb) {
rmstb = drm_dp_mst_topology_get_mstb_validated_locked(
port->mstb, to_find);
if (rmstb)
return rmstb;
}
}
return NULL;
}
static struct drm_dp_mst_branch *
drm_dp_mst_topology_get_mstb_validated(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_branch *mstb)
{
struct drm_dp_mst_branch *rmstb = NULL;
mutex_lock(&mgr->lock);
if (mgr->mst_primary) {
rmstb = drm_dp_mst_topology_get_mstb_validated_locked(
mgr->mst_primary, mstb);
if (rmstb && !drm_dp_mst_topology_try_get_mstb(rmstb))
rmstb = NULL;
}
mutex_unlock(&mgr->lock);
return rmstb;
}
static struct drm_dp_mst_port *
drm_dp_mst_topology_get_port_validated_locked(struct drm_dp_mst_branch *mstb,
struct drm_dp_mst_port *to_find)
{
struct drm_dp_mst_port *port, *mport;
list_for_each_entry(port, &mstb->ports, next) {
if (port == to_find)
return port;
if (port->mstb) {
mport = drm_dp_mst_topology_get_port_validated_locked(
port->mstb, to_find);
if (mport)
return mport;
}
}
return NULL;
}
static struct drm_dp_mst_port *
drm_dp_mst_topology_get_port_validated(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port)
{
struct drm_dp_mst_port *rport = NULL;
mutex_lock(&mgr->lock);
if (mgr->mst_primary) {
rport = drm_dp_mst_topology_get_port_validated_locked(
mgr->mst_primary, port);
if (rport && !drm_dp_mst_topology_try_get_port(rport))
rport = NULL;
}
mutex_unlock(&mgr->lock);
return rport;
}
static struct drm_dp_mst_port *drm_dp_get_port(struct drm_dp_mst_branch *mstb, u8 port_num)
{
struct drm_dp_mst_port *port;
int ret;
list_for_each_entry(port, &mstb->ports, next) {
if (port->port_num == port_num) {
ret = drm_dp_mst_topology_try_get_port(port);
return ret ? port : NULL;
}
}
return NULL;
}
/*
* calculate a new RAD for this MST branch device
* if parent has an LCT of 2 then it has 1 nibble of RAD,
* if parent has an LCT of 3 then it has 2 nibbles of RAD,
*/
static u8 drm_dp_calculate_rad(struct drm_dp_mst_port *port,
u8 *rad)
{
int parent_lct = port->parent->lct;
int shift = 4;
int idx = (parent_lct - 1) / 2;
if (parent_lct > 1) {
memcpy(rad, port->parent->rad, idx + 1);
shift = (parent_lct % 2) ? 4 : 0;
} else
rad[0] = 0;
rad[idx] |= port->port_num << shift;
return parent_lct + 1;
}
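/*
 * Worked example of the encoding above (topology assumed for illustration):
 * with the chain primary -> port 2 -> branch A -> port 3 -> branch B, branch A
 * has lct 2 and rad[0] = 0x20 (port 2 in the high nibble), and calling
 * drm_dp_calculate_rad() for port 3 of branch A yields lct 3 with
 * rad[0] = 0x23 (port 3 filled into the low nibble). build_mst_prop_path()
 * decodes this back into connector paths of the form "mst:<conn_base_id>-2-3-<port>".
 */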
static bool drm_dp_mst_is_end_device(u8 pdt, bool mcs)
{
switch (pdt) {
case DP_PEER_DEVICE_DP_LEGACY_CONV:
case DP_PEER_DEVICE_SST_SINK:
return true;
case DP_PEER_DEVICE_MST_BRANCHING:
/* For sst branch device */
if (!mcs)
return true;
return false;
}
return true;
}
static int
drm_dp_port_set_pdt(struct drm_dp_mst_port *port, u8 new_pdt,
bool new_mcs)
{
struct drm_dp_mst_topology_mgr *mgr = port->mgr;
struct drm_dp_mst_branch *mstb;
u8 rad[8], lct;
int ret = 0;
if (port->pdt == new_pdt && port->mcs == new_mcs)
return 0;
/* Teardown the old pdt, if there is one */
if (port->pdt != DP_PEER_DEVICE_NONE) {
if (drm_dp_mst_is_end_device(port->pdt, port->mcs)) {
/*
* If the new PDT would also have an i2c bus,
* don't bother with reregistering it
*/
if (new_pdt != DP_PEER_DEVICE_NONE &&
drm_dp_mst_is_end_device(new_pdt, new_mcs)) {
port->pdt = new_pdt;
port->mcs = new_mcs;
return 0;
}
/* remove i2c over sideband */
drm_dp_mst_unregister_i2c_bus(port);
} else {
mutex_lock(&mgr->lock);
drm_dp_mst_topology_put_mstb(port->mstb);
port->mstb = NULL;
mutex_unlock(&mgr->lock);
}
}
port->pdt = new_pdt;
port->mcs = new_mcs;
if (port->pdt != DP_PEER_DEVICE_NONE) {
if (drm_dp_mst_is_end_device(port->pdt, port->mcs)) {
/* add i2c over sideband */
ret = drm_dp_mst_register_i2c_bus(port);
} else {
lct = drm_dp_calculate_rad(port, rad);
mstb = drm_dp_add_mst_branch_device(lct, rad);
if (!mstb) {
ret = -ENOMEM;
drm_err(mgr->dev, "Failed to create MSTB for port %p", port);
goto out;
}
mutex_lock(&mgr->lock);
port->mstb = mstb;
mstb->mgr = port->mgr;
mstb->port_parent = port;
/*
* Make sure this port's memory allocation stays
* around until its child MSTB releases it
*/
drm_dp_mst_get_port_malloc(port);
mutex_unlock(&mgr->lock);
/* And make sure we send a link address for this */
ret = 1;
}
}
out:
if (ret < 0)
port->pdt = DP_PEER_DEVICE_NONE;
return ret;
}
/**
* drm_dp_mst_dpcd_read() - read a series of bytes from the DPCD via sideband
* @aux: Fake sideband AUX CH
* @offset: address of the (first) register to read
* @buffer: buffer to store the register values
* @size: number of bytes in @buffer
*
* Performs the same functionality for remote devices via
* sideband messaging as drm_dp_dpcd_read() does for local
* devices via actual AUX CH.
*
* Return: Number of bytes read, or negative error code on failure.
*/
ssize_t drm_dp_mst_dpcd_read(struct drm_dp_aux *aux,
unsigned int offset, void *buffer, size_t size)
{
struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port,
aux);
return drm_dp_send_dpcd_read(port->mgr, port,
offset, size, buffer);
}
/**
* drm_dp_mst_dpcd_write() - write a series of bytes to the DPCD via sideband
* @aux: Fake sideband AUX CH
* @offset: address of the (first) register to write
* @buffer: buffer containing the values to write
* @size: number of bytes in @buffer
*
* Performs the same functionality for remote devices via
* sideband messaging as drm_dp_dpcd_write() does for local
* devices via actual AUX CH.
*
* Return: number of bytes written on success, negative error code on failure.
*/
ssize_t drm_dp_mst_dpcd_write(struct drm_dp_aux *aux,
unsigned int offset, void *buffer, size_t size)
{
struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port,
aux);
return drm_dp_send_dpcd_write(port->mgr, port,
offset, size, buffer);
}
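/*
 * Illustrative sketch (assumed driver-side usage, not taken from this file):
 * callers normally reach the two helpers above through the port's fake AUX
 * channel rather than calling them directly, so remote and local DPCD
 * accesses look identical, e.g.:
 *
 *     u8 rev;
 *
 *     if (drm_dp_dpcd_readb(&port->aux, DP_DPCD_REV, &rev) == 1)
 *             ... rev now holds the remote sink's DPCD revision ...
 */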
static int drm_dp_check_mstb_guid(struct drm_dp_mst_branch *mstb, u8 *guid)
{
int ret = 0;
memcpy(mstb->guid, guid, 16);
if (!drm_dp_validate_guid(mstb->mgr, mstb->guid)) {
if (mstb->port_parent) {
ret = drm_dp_send_dpcd_write(mstb->mgr,
mstb->port_parent,
DP_GUID, 16, mstb->guid);
} else {
ret = drm_dp_dpcd_write(mstb->mgr->aux,
DP_GUID, mstb->guid, 16);
}
}
if (ret < 16 && ret > 0)
return -EPROTO;
return ret == 16 ? 0 : ret;
}
static void build_mst_prop_path(const struct drm_dp_mst_branch *mstb,
int pnum,
char *proppath,
size_t proppath_size)
{
int i;
char temp[8];
snprintf(proppath, proppath_size, "mst:%d", mstb->mgr->conn_base_id);
for (i = 0; i < (mstb->lct - 1); i++) {
int shift = (i % 2) ? 0 : 4;
int port_num = (mstb->rad[i / 2] >> shift) & 0xf;
snprintf(temp, sizeof(temp), "-%d", port_num);
strlcat(proppath, temp, proppath_size);
}
snprintf(temp, sizeof(temp), "-%d", pnum);
strlcat(proppath, temp, proppath_size);
}
/**
* drm_dp_mst_connector_late_register() - Late MST connector registration
* @connector: The MST connector
* @port: The MST port for this connector
*
* Helper to register the remote aux device for this MST port. Drivers should
* call this from their mst connector's late_register hook to enable MST aux
* devices.
*
* Return: 0 on success, negative error code on failure.
*/
int drm_dp_mst_connector_late_register(struct drm_connector *connector,
struct drm_dp_mst_port *port)
{
drm_dbg_kms(port->mgr->dev, "registering %s remote bus for %s\n",
port->aux.name, connector->kdev->kobj.name);
port->aux.dev = connector->kdev;
return drm_dp_aux_register_devnode(&port->aux);
}
EXPORT_SYMBOL(drm_dp_mst_connector_late_register);
/**
* drm_dp_mst_connector_early_unregister() - Early MST connector unregistration
* @connector: The MST connector
* @port: The MST port for this connector
*
* Helper to unregister the remote aux device for this MST port, registered by
* drm_dp_mst_connector_late_register(). Drivers should call this from their mst
* connector's early_unregister hook.
*/
void drm_dp_mst_connector_early_unregister(struct drm_connector *connector,
struct drm_dp_mst_port *port)
{
drm_dbg_kms(port->mgr->dev, "unregistering %s remote bus for %s\n",
port->aux.name, connector->kdev->kobj.name);
drm_dp_aux_unregister_devnode(&port->aux);
}
EXPORT_SYMBOL(drm_dp_mst_connector_early_unregister);
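/*
 * Minimal sketch (hypothetical driver code, "example_" names are assumed) of
 * wiring the two helpers above into an MST connector's &struct
 * drm_connector_funcs:
 *
 *     static int example_late_register(struct drm_connector *connector)
 *     {
 *             struct example_connector *conn = to_example_connector(connector);
 *
 *             return drm_dp_mst_connector_late_register(connector, conn->port);
 *     }
 *
 *     static void example_early_unregister(struct drm_connector *connector)
 *     {
 *             struct example_connector *conn = to_example_connector(connector);
 *
 *             drm_dp_mst_connector_early_unregister(connector, conn->port);
 *     }
 *
 *     static const struct drm_connector_funcs example_mst_connector_funcs = {
 *             ...
 *             .late_register = example_late_register,
 *             .early_unregister = example_early_unregister,
 *     };
 */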
static void
drm_dp_mst_port_add_connector(struct drm_dp_mst_branch *mstb,
struct drm_dp_mst_port *port)
{
struct drm_dp_mst_topology_mgr *mgr = port->mgr;
char proppath[255];
int ret;
build_mst_prop_path(mstb, port->port_num, proppath, sizeof(proppath));
port->connector = mgr->cbs->add_connector(mgr, port, proppath);
if (!port->connector) {
ret = -ENOMEM;
goto error;
}
if (port->pdt != DP_PEER_DEVICE_NONE &&
drm_dp_mst_is_end_device(port->pdt, port->mcs) &&
port->port_num >= DP_MST_LOGICAL_PORT_0)
port->cached_edid = drm_edid_read_ddc(port->connector,
&port->aux.ddc);
drm_connector_register(port->connector);
return;
error:
drm_err(mgr->dev, "Failed to create connector for port %p: %d\n", port, ret);
}
/*
* Drop a topology reference, and unlink the port from the in-memory topology
* layout
*/
static void
drm_dp_mst_topology_unlink_port(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port)
{
mutex_lock(&mgr->lock);
port->parent->num_ports--;
list_del(&port->next);
mutex_unlock(&mgr->lock);
drm_dp_mst_topology_put_port(port);
}
static struct drm_dp_mst_port *
drm_dp_mst_add_port(struct drm_device *dev,
struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_branch *mstb, u8 port_number)
{
struct drm_dp_mst_port *port = kzalloc(sizeof(*port), GFP_KERNEL);
if (!port)
return NULL;
kref_init(&port->topology_kref);
kref_init(&port->malloc_kref);
port->parent = mstb;
port->port_num = port_number;
port->mgr = mgr;
port->aux.name = "DPMST";
port->aux.dev = dev->dev;
port->aux.is_remote = true;
/* initialize the MST downstream port's AUX crc work queue */
port->aux.drm_dev = dev;
drm_dp_remote_aux_init(&port->aux);
/*
* Make sure the memory allocation for our parent branch stays
* around until our own memory allocation is released
*/
drm_dp_mst_get_mstb_malloc(mstb);
return port;
}
static int
drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb,
struct drm_device *dev,
struct drm_dp_link_addr_reply_port *port_msg)
{
struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
struct drm_dp_mst_port *port;
int old_ddps = 0, ret;
u8 new_pdt = DP_PEER_DEVICE_NONE;
bool new_mcs = 0;
bool created = false, send_link_addr = false, changed = false;
port = drm_dp_get_port(mstb, port_msg->port_number);
if (!port) {
port = drm_dp_mst_add_port(dev, mgr, mstb,
port_msg->port_number);
if (!port)
return -ENOMEM;
created = true;
changed = true;
} else if (!port->input && port_msg->input_port && port->connector) {
/* Since port->connector can't be changed here, we create a
* new port if input_port changes from 0 to 1
*/
drm_dp_mst_topology_unlink_port(mgr, port);
drm_dp_mst_topology_put_port(port);
port = drm_dp_mst_add_port(dev, mgr, mstb,
port_msg->port_number);
if (!port)
return -ENOMEM;
changed = true;
created = true;
} else if (port->input && !port_msg->input_port) {
changed = true;
} else if (port->connector) {
/* We're updating a port that's exposed to userspace, so do it
* under lock
*/
drm_modeset_lock(&mgr->base.lock, NULL);
old_ddps = port->ddps;
changed = port->ddps != port_msg->ddps ||
(port->ddps &&
(port->ldps != port_msg->legacy_device_plug_status ||
port->dpcd_rev != port_msg->dpcd_revision ||
port->mcs != port_msg->mcs ||
port->pdt != port_msg->peer_device_type ||
port->num_sdp_stream_sinks !=
port_msg->num_sdp_stream_sinks));
}
port->input = port_msg->input_port;
if (!port->input)
new_pdt = port_msg->peer_device_type;
new_mcs = port_msg->mcs;
port->ddps = port_msg->ddps;
port->ldps = port_msg->legacy_device_plug_status;
port->dpcd_rev = port_msg->dpcd_revision;
port->num_sdp_streams = port_msg->num_sdp_streams;
port->num_sdp_stream_sinks = port_msg->num_sdp_stream_sinks;
/* manage mstb port lists with mgr lock - take a reference
for this list */
if (created) {
mutex_lock(&mgr->lock);
drm_dp_mst_topology_get_port(port);
list_add(&port->next, &mstb->ports);
mstb->num_ports++;
mutex_unlock(&mgr->lock);
}
/*
* Reprobe PBN caps on both hotplug, and when re-probing the link
* for our parent mstb
*/
if (old_ddps != port->ddps || !created) {
if (port->ddps && !port->input) {
ret = drm_dp_send_enum_path_resources(mgr, mstb,
port);
if (ret == 1)
changed = true;
} else {
port->full_pbn = 0;
}
}
ret = drm_dp_port_set_pdt(port, new_pdt, new_mcs);
if (ret == 1) {
send_link_addr = true;
} else if (ret < 0) {
drm_err(dev, "Failed to change PDT on port %p: %d\n", port, ret);
goto fail;
}
/*
* If this port wasn't just created, then we're reprobing because
* we're coming out of suspend. In this case, always resend the link
* address if there's an MSTB on this port
*/
if (!created && port->pdt == DP_PEER_DEVICE_MST_BRANCHING &&
port->mcs)
send_link_addr = true;
if (port->connector)
drm_modeset_unlock(&mgr->base.lock);
else if (!port->input)
drm_dp_mst_port_add_connector(mstb, port);
if (send_link_addr && port->mstb) {
ret = drm_dp_send_link_address(mgr, port->mstb);
if (ret == 1) /* MSTB below us changed */
changed = true;
else if (ret < 0)
goto fail_put;
}
/* put reference to this port */
drm_dp_mst_topology_put_port(port);
return changed;
fail:
drm_dp_mst_topology_unlink_port(mgr, port);
if (port->connector)
drm_modeset_unlock(&mgr->base.lock);
fail_put:
drm_dp_mst_topology_put_port(port);
return ret;
}
static int
drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb,
struct drm_dp_connection_status_notify *conn_stat)
{
struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
struct drm_dp_mst_port *port;
int old_ddps, ret;
u8 new_pdt;
bool new_mcs;
bool dowork = false, create_connector = false;
port = drm_dp_get_port(mstb, conn_stat->port_number);
if (!port)
return 0;
if (port->connector) {
if (!port->input && conn_stat->input_port) {
/*
* We can't remove a connector from an already exposed
* port, so just throw the port out and make sure we
 * reprobe the link address of its parent MSTB
*/
drm_dp_mst_topology_unlink_port(mgr, port);
mstb->link_address_sent = false;
dowork = true;
goto out;
}
/* Locking is only needed if the port's exposed to userspace */
drm_modeset_lock(&mgr->base.lock, NULL);
} else if (port->input && !conn_stat->input_port) {
create_connector = true;
/* Reprobe link address so we get num_sdp_streams */
mstb->link_address_sent = false;
dowork = true;
}
old_ddps = port->ddps;
port->input = conn_stat->input_port;
port->ldps = conn_stat->legacy_device_plug_status;
port->ddps = conn_stat->displayport_device_plug_status;
if (old_ddps != port->ddps) {
if (port->ddps && !port->input)
drm_dp_send_enum_path_resources(mgr, mstb, port);
else
port->full_pbn = 0;
}
new_pdt = port->input ? DP_PEER_DEVICE_NONE : conn_stat->peer_device_type;
new_mcs = conn_stat->message_capability_status;
ret = drm_dp_port_set_pdt(port, new_pdt, new_mcs);
if (ret == 1) {
dowork = true;
} else if (ret < 0) {
drm_err(mgr->dev, "Failed to change PDT for port %p: %d\n", port, ret);
dowork = false;
}
if (port->connector)
drm_modeset_unlock(&mgr->base.lock);
else if (create_connector)
drm_dp_mst_port_add_connector(mstb, port);
out:
drm_dp_mst_topology_put_port(port);
return dowork;
}
static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_topology_mgr *mgr,
u8 lct, u8 *rad)
{
struct drm_dp_mst_branch *mstb;
struct drm_dp_mst_port *port;
int i, ret;
/* find the port by iterating down */
mutex_lock(&mgr->lock);
mstb = mgr->mst_primary;
if (!mstb)
goto out;
for (i = 0; i < lct - 1; i++) {
int shift = (i % 2) ? 0 : 4;
int port_num = (rad[i / 2] >> shift) & 0xf;
list_for_each_entry(port, &mstb->ports, next) {
if (port->port_num == port_num) {
mstb = port->mstb;
if (!mstb) {
drm_err(mgr->dev,
"failed to lookup MSTB with lct %d, rad %02x\n",
lct, rad[0]);
goto out;
}
break;
}
}
}
ret = drm_dp_mst_topology_try_get_mstb(mstb);
if (!ret)
mstb = NULL;
out:
mutex_unlock(&mgr->lock);
return mstb;
}
static struct drm_dp_mst_branch *get_mst_branch_device_by_guid_helper(
struct drm_dp_mst_branch *mstb,
const uint8_t *guid)
{
struct drm_dp_mst_branch *found_mstb;
struct drm_dp_mst_port *port;
if (memcmp(mstb->guid, guid, 16) == 0)
return mstb;
list_for_each_entry(port, &mstb->ports, next) {
if (!port->mstb)
continue;
found_mstb = get_mst_branch_device_by_guid_helper(port->mstb, guid);
if (found_mstb)
return found_mstb;
}
return NULL;
}
static struct drm_dp_mst_branch *
drm_dp_get_mst_branch_device_by_guid(struct drm_dp_mst_topology_mgr *mgr,
const uint8_t *guid)
{
struct drm_dp_mst_branch *mstb;
int ret;
/* find the port by iterating down */
mutex_lock(&mgr->lock);
mstb = get_mst_branch_device_by_guid_helper(mgr->mst_primary, guid);
if (mstb) {
ret = drm_dp_mst_topology_try_get_mstb(mstb);
if (!ret)
mstb = NULL;
}
mutex_unlock(&mgr->lock);
return mstb;
}
static int drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_branch *mstb)
{
struct drm_dp_mst_port *port;
int ret;
bool changed = false;
if (!mstb->link_address_sent) {
ret = drm_dp_send_link_address(mgr, mstb);
if (ret == 1)
changed = true;
else if (ret < 0)
return ret;
}
list_for_each_entry(port, &mstb->ports, next) {
if (port->input || !port->ddps || !port->mstb)
continue;
ret = drm_dp_check_and_send_link_address(mgr, port->mstb);
if (ret == 1)
changed = true;
else if (ret < 0)
return ret;
}
return changed;
}
static void drm_dp_mst_link_probe_work(struct work_struct *work)
{
struct drm_dp_mst_topology_mgr *mgr =
container_of(work, struct drm_dp_mst_topology_mgr, work);
struct drm_device *dev = mgr->dev;
struct drm_dp_mst_branch *mstb;
int ret;
bool clear_payload_id_table;
mutex_lock(&mgr->probe_lock);
mutex_lock(&mgr->lock);
clear_payload_id_table = !mgr->payload_id_table_cleared;
mgr->payload_id_table_cleared = true;
mstb = mgr->mst_primary;
if (mstb) {
ret = drm_dp_mst_topology_try_get_mstb(mstb);
if (!ret)
mstb = NULL;
}
mutex_unlock(&mgr->lock);
if (!mstb) {
mutex_unlock(&mgr->probe_lock);
return;
}
/*
* Certain branch devices seem to incorrectly report an available_pbn
* of 0 on downstream sinks, even after clearing the
* DP_PAYLOAD_ALLOCATE_* registers in
* drm_dp_mst_topology_mgr_set_mst(). Namely, the CableMatters USB-C
* 2x DP hub. Sending a CLEAR_PAYLOAD_ID_TABLE message seems to make
* things work again.
*/
if (clear_payload_id_table) {
drm_dbg_kms(dev, "Clearing payload ID table\n");
drm_dp_send_clear_payload_id_table(mgr, mstb);
}
ret = drm_dp_check_and_send_link_address(mgr, mstb);
drm_dp_mst_topology_put_mstb(mstb);
mutex_unlock(&mgr->probe_lock);
if (ret > 0)
drm_kms_helper_hotplug_event(dev);
}
static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
u8 *guid)
{
u64 salt;
if (memchr_inv(guid, 0, 16))
return true;
salt = get_jiffies_64();
memcpy(&guid[0], &salt, sizeof(u64));
memcpy(&guid[8], &salt, sizeof(u64));
return false;
}
static void build_dpcd_read(struct drm_dp_sideband_msg_tx *msg,
u8 port_num, u32 offset, u8 num_bytes)
{
struct drm_dp_sideband_msg_req_body req;
req.req_type = DP_REMOTE_DPCD_READ;
req.u.dpcd_read.port_number = port_num;
req.u.dpcd_read.dpcd_address = offset;
req.u.dpcd_read.num_bytes = num_bytes;
drm_dp_encode_sideband_req(&req, msg);
}
static int drm_dp_send_sideband_msg(struct drm_dp_mst_topology_mgr *mgr,
bool up, u8 *msg, int len)
{
int ret;
int regbase = up ? DP_SIDEBAND_MSG_UP_REP_BASE : DP_SIDEBAND_MSG_DOWN_REQ_BASE;
int tosend, total, offset;
int retries = 0;
retry:
total = len;
offset = 0;
do {
tosend = min3(mgr->max_dpcd_transaction_bytes, 16, total);
ret = drm_dp_dpcd_write(mgr->aux, regbase + offset,
&msg[offset],
tosend);
if (ret != tosend) {
if (ret == -EIO && retries < 5) {
retries++;
goto retry;
}
drm_dbg_kms(mgr->dev, "failed to dpcd write %d %d\n", tosend, ret);
return -EIO;
}
offset += tosend;
total -= tosend;
} while (total > 0);
return 0;
}
static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr,
struct drm_dp_sideband_msg_tx *txmsg)
{
struct drm_dp_mst_branch *mstb = txmsg->dst;
u8 req_type;
req_type = txmsg->msg[0] & 0x7f;
if (req_type == DP_CONNECTION_STATUS_NOTIFY ||
req_type == DP_RESOURCE_STATUS_NOTIFY ||
req_type == DP_CLEAR_PAYLOAD_ID_TABLE)
hdr->broadcast = 1;
else
hdr->broadcast = 0;
hdr->path_msg = txmsg->path_msg;
if (hdr->broadcast) {
hdr->lct = 1;
hdr->lcr = 6;
} else {
hdr->lct = mstb->lct;
hdr->lcr = mstb->lct - 1;
}
memcpy(hdr->rad, mstb->rad, hdr->lct / 2);
return 0;
}
/*
* process a single block of the next message in the sideband queue
*/
static int process_single_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_sideband_msg_tx *txmsg,
bool up)
{
u8 chunk[48];
struct drm_dp_sideband_msg_hdr hdr;
int len, space, idx, tosend;
int ret;
if (txmsg->state == DRM_DP_SIDEBAND_TX_SENT)
return 0;
memset(&hdr, 0, sizeof(struct drm_dp_sideband_msg_hdr));
if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED)
txmsg->state = DRM_DP_SIDEBAND_TX_START_SEND;
/* make hdr from dst mst */
ret = set_hdr_from_dst_qlock(&hdr, txmsg);
if (ret < 0)
return ret;
/* amount left to send in this message */
len = txmsg->cur_len - txmsg->cur_offset;
/* 48 - sideband msg size - 1 byte for data CRC, x header bytes */
space = 48 - 1 - drm_dp_calc_sb_hdr_size(&hdr);
tosend = min(len, space);
if (len == txmsg->cur_len)
hdr.somt = 1;
if (space >= len)
hdr.eomt = 1;
hdr.msg_len = tosend + 1;
drm_dp_encode_sideband_msg_hdr(&hdr, chunk, &idx);
memcpy(&chunk[idx], &txmsg->msg[txmsg->cur_offset], tosend);
/* add crc at end */
drm_dp_crc_sideband_chunk_req(&chunk[idx], tosend);
idx += tosend + 1;
ret = drm_dp_send_sideband_msg(mgr, up, chunk, idx);
if (ret) {
if (drm_debug_enabled(DRM_UT_DP)) {
struct drm_printer p = drm_debug_printer(DBG_PREFIX);
drm_printf(&p, "sideband msg failed to send\n");
drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
}
return ret;
}
txmsg->cur_offset += tosend;
if (txmsg->cur_offset == txmsg->cur_len) {
txmsg->state = DRM_DP_SIDEBAND_TX_SENT;
return 1;
}
return 0;
}
static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
{
struct drm_dp_sideband_msg_tx *txmsg;
int ret;
WARN_ON(!mutex_is_locked(&mgr->qlock));
/* construct a chunk from the first msg in the tx_msg queue */
if (list_empty(&mgr->tx_msg_downq))
return;
txmsg = list_first_entry(&mgr->tx_msg_downq,
struct drm_dp_sideband_msg_tx, next);
ret = process_single_tx_qlock(mgr, txmsg, false);
if (ret < 0) {
drm_dbg_kms(mgr->dev, "failed to send msg in q %d\n", ret);
list_del(&txmsg->next);
txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
wake_up_all(&mgr->tx_waitq);
}
}
static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_sideband_msg_tx *txmsg)
{
mutex_lock(&mgr->qlock);
list_add_tail(&txmsg->next, &mgr->tx_msg_downq);
if (drm_debug_enabled(DRM_UT_DP)) {
struct drm_printer p = drm_debug_printer(DBG_PREFIX);
drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
}
if (list_is_singular(&mgr->tx_msg_downq))
process_single_down_tx_qlock(mgr);
mutex_unlock(&mgr->qlock);
}
static void
drm_dp_dump_link_address(const struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_link_address_ack_reply *reply)
{
struct drm_dp_link_addr_reply_port *port_reply;
int i;
for (i = 0; i < reply->nports; i++) {
port_reply = &reply->ports[i];
drm_dbg_kms(mgr->dev,
"port %d: input %d, pdt: %d, pn: %d, dpcd_rev: %02x, mcs: %d, ddps: %d, ldps %d, sdp %d/%d\n",
i,
port_reply->input_port,
port_reply->peer_device_type,
port_reply->port_number,
port_reply->dpcd_revision,
port_reply->mcs,
port_reply->ddps,
port_reply->legacy_device_plug_status,
port_reply->num_sdp_streams,
port_reply->num_sdp_stream_sinks);
}
}
static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_branch *mstb)
{
struct drm_dp_sideband_msg_tx *txmsg;
struct drm_dp_link_address_ack_reply *reply;
struct drm_dp_mst_port *port, *tmp;
int i, ret, port_mask = 0;
bool changed = false;
txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
if (!txmsg)
return -ENOMEM;
txmsg->dst = mstb;
build_link_address(txmsg);
mstb->link_address_sent = true;
drm_dp_queue_down_tx(mgr, txmsg);
/* FIXME: Actually do some real error handling here */
ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
if (ret <= 0) {
drm_err(mgr->dev, "Sending link address failed with %d\n", ret);
goto out;
}
if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
drm_err(mgr->dev, "link address NAK received\n");
ret = -EIO;
goto out;
}
reply = &txmsg->reply.u.link_addr;
drm_dbg_kms(mgr->dev, "link address reply: %d\n", reply->nports);
drm_dp_dump_link_address(mgr, reply);
ret = drm_dp_check_mstb_guid(mstb, reply->guid);
if (ret) {
char buf[64];
drm_dp_mst_rad_to_str(mstb->rad, mstb->lct, buf, sizeof(buf));
drm_err(mgr->dev, "GUID check on %s failed: %d\n", buf, ret);
goto out;
}
for (i = 0; i < reply->nports; i++) {
port_mask |= BIT(reply->ports[i].port_number);
ret = drm_dp_mst_handle_link_address_port(mstb, mgr->dev,
&reply->ports[i]);
if (ret == 1)
changed = true;
else if (ret < 0)
goto out;
}
/* Prune any ports that are currently a part of mstb in our in-memory
* topology, but were not seen in this link address. Usually this
* means that they were removed while the topology was out of sync,
* e.g. during suspend/resume
*/
mutex_lock(&mgr->lock);
list_for_each_entry_safe(port, tmp, &mstb->ports, next) {
if (port_mask & BIT(port->port_num))
continue;
drm_dbg_kms(mgr->dev, "port %d was not in link address, removing\n",
port->port_num);
list_del(&port->next);
drm_dp_mst_topology_put_port(port);
changed = true;
}
mutex_unlock(&mgr->lock);
out:
if (ret <= 0)
mstb->link_address_sent = false;
kfree(txmsg);
return ret < 0 ? ret : changed;
}
static void
drm_dp_send_clear_payload_id_table(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_branch *mstb)
{
struct drm_dp_sideband_msg_tx *txmsg;
int ret;
txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
if (!txmsg)
return;
txmsg->dst = mstb;
build_clear_payload_id_table(txmsg);
drm_dp_queue_down_tx(mgr, txmsg);
ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
if (ret > 0 && txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
drm_dbg_kms(mgr->dev, "clear payload table id nak received\n");
kfree(txmsg);
}
static int
drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_branch *mstb,
struct drm_dp_mst_port *port)
{
struct drm_dp_enum_path_resources_ack_reply *path_res;
struct drm_dp_sideband_msg_tx *txmsg;
int ret;
txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
if (!txmsg)
return -ENOMEM;
txmsg->dst = mstb;
build_enum_path_resources(txmsg, port->port_num);
drm_dp_queue_down_tx(mgr, txmsg);
ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
if (ret > 0) {
ret = 0;
path_res = &txmsg->reply.u.path_resources;
if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
drm_dbg_kms(mgr->dev, "enum path resources nak received\n");
} else {
if (port->port_num != path_res->port_number)
DRM_ERROR("got incorrect port in response\n");
drm_dbg_kms(mgr->dev, "enum path resources %d: %d %d\n",
path_res->port_number,
path_res->full_payload_bw_number,
path_res->avail_payload_bw_number);
/*
* If something changed, make sure we send a
* hotplug
*/
if (port->full_pbn != path_res->full_payload_bw_number ||
port->fec_capable != path_res->fec_capable)
ret = 1;
port->full_pbn = path_res->full_payload_bw_number;
port->fec_capable = path_res->fec_capable;
}
}
kfree(txmsg);
return ret;
}
static struct drm_dp_mst_port *drm_dp_get_last_connected_port_to_mstb(struct drm_dp_mst_branch *mstb)
{
if (!mstb->port_parent)
return NULL;
if (mstb->port_parent->mstb != mstb)
return mstb->port_parent;
return drm_dp_get_last_connected_port_to_mstb(mstb->port_parent->parent);
}
/*
* Searches upwards in the topology starting from mstb to try to find the
* closest available parent of mstb that's still connected to the rest of the
* topology. This can be used in order to perform operations like releasing
* payloads, where the branch device which owned the payload may no longer be
* around and thus would require that the payload on the last living relative
* be freed instead.
*/
static struct drm_dp_mst_branch *
drm_dp_get_last_connected_port_and_mstb(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_branch *mstb,
int *port_num)
{
struct drm_dp_mst_branch *rmstb = NULL;
struct drm_dp_mst_port *found_port;
mutex_lock(&mgr->lock);
if (!mgr->mst_primary)
goto out;
do {
found_port = drm_dp_get_last_connected_port_to_mstb(mstb);
if (!found_port)
break;
if (drm_dp_mst_topology_try_get_mstb(found_port->parent)) {
rmstb = found_port->parent;
*port_num = found_port->port_num;
} else {
/* Search again, starting from this parent */
mstb = found_port->parent;
}
} while (!rmstb);
out:
mutex_unlock(&mgr->lock);
return rmstb;
}
static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port,
int id,
int pbn)
{
struct drm_dp_sideband_msg_tx *txmsg;
struct drm_dp_mst_branch *mstb;
int ret, port_num;
u8 sinks[DRM_DP_MAX_SDP_STREAMS];
int i;
port_num = port->port_num;
mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
if (!mstb) {
mstb = drm_dp_get_last_connected_port_and_mstb(mgr,
port->parent,
&port_num);
if (!mstb)
return -EINVAL;
}
txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
if (!txmsg) {
ret = -ENOMEM;
goto fail_put;
}
for (i = 0; i < port->num_sdp_streams; i++)
sinks[i] = i;
txmsg->dst = mstb;
build_allocate_payload(txmsg, port_num,
id,
pbn, port->num_sdp_streams, sinks);
drm_dp_queue_down_tx(mgr, txmsg);
/*
* FIXME: there is a small chance that between getting the last
* connected mstb and sending the payload message, the last connected
* mstb could also be removed from the topology. In the future, this
* needs to be fixed by restarting the
* drm_dp_get_last_connected_port_and_mstb() search in the event of a
* timeout if the topology is still connected to the system.
*/
ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
if (ret > 0) {
if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
ret = -EINVAL;
else
ret = 0;
}
kfree(txmsg);
fail_put:
drm_dp_mst_topology_put_mstb(mstb);
return ret;
}
int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port, bool power_up)
{
struct drm_dp_sideband_msg_tx *txmsg;
int ret;
port = drm_dp_mst_topology_get_port_validated(mgr, port);
if (!port)
return -EINVAL;
txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
if (!txmsg) {
drm_dp_mst_topology_put_port(port);
return -ENOMEM;
}
txmsg->dst = port->parent;
build_power_updown_phy(txmsg, port->port_num, power_up);
drm_dp_queue_down_tx(mgr, txmsg);
ret = drm_dp_mst_wait_tx_reply(port->parent, txmsg);
if (ret > 0) {
if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
ret = -EINVAL;
else
ret = 0;
}
kfree(txmsg);
drm_dp_mst_topology_put_port(port);
return ret;
}
EXPORT_SYMBOL(drm_dp_send_power_updown_phy);
int drm_dp_send_query_stream_enc_status(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port,
struct drm_dp_query_stream_enc_status_ack_reply *status)
{
struct drm_dp_mst_topology_state *state;
struct drm_dp_mst_atomic_payload *payload;
struct drm_dp_sideband_msg_tx *txmsg;
u8 nonce[7];
int ret;
txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
if (!txmsg)
return -ENOMEM;
port = drm_dp_mst_topology_get_port_validated(mgr, port);
if (!port) {
ret = -EINVAL;
goto out_get_port;
}
get_random_bytes(nonce, sizeof(nonce));
drm_modeset_lock(&mgr->base.lock, NULL);
state = to_drm_dp_mst_topology_state(mgr->base.state);
payload = drm_atomic_get_mst_payload_state(state, port);
/*
* "Source device targets the QUERY_STREAM_ENCRYPTION_STATUS message
* transaction at the MST Branch device directly connected to the
* Source"
*/
txmsg->dst = mgr->mst_primary;
build_query_stream_enc_status(txmsg, payload->vcpi, nonce);
drm_dp_queue_down_tx(mgr, txmsg);
ret = drm_dp_mst_wait_tx_reply(mgr->mst_primary, txmsg);
if (ret < 0) {
goto out;
} else if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
drm_dbg_kms(mgr->dev, "query encryption status nak received\n");
ret = -ENXIO;
goto out;
}
ret = 0;
memcpy(status, &txmsg->reply.u.enc_status, sizeof(*status));
out:
drm_modeset_unlock(&mgr->base.lock);
drm_dp_mst_topology_put_port(port);
out_get_port:
kfree(txmsg);
return ret;
}
EXPORT_SYMBOL(drm_dp_send_query_stream_enc_status);
static int drm_dp_create_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_atomic_payload *payload)
{
return drm_dp_dpcd_write_payload(mgr, payload->vcpi, payload->vc_start_slot,
payload->time_slots);
}
static int drm_dp_create_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_atomic_payload *payload)
{
int ret;
struct drm_dp_mst_port *port = drm_dp_mst_topology_get_port_validated(mgr, payload->port);
if (!port)
return -EIO;
ret = drm_dp_payload_send_msg(mgr, port, payload->vcpi, payload->pbn);
drm_dp_mst_topology_put_port(port);
return ret;
}
static int drm_dp_destroy_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_topology_state *mst_state,
struct drm_dp_mst_atomic_payload *payload)
{
drm_dbg_kms(mgr->dev, "\n");
/* it's okay for these to fail */
drm_dp_payload_send_msg(mgr, payload->port, payload->vcpi, 0);
drm_dp_dpcd_write_payload(mgr, payload->vcpi, payload->vc_start_slot, 0);
return 0;
}
/**
* drm_dp_add_payload_part1() - Execute payload update part 1
* @mgr: Manager to use.
* @mst_state: The MST atomic state
* @payload: The payload to write
*
* Determines the starting time slot for the given payload, and programs the VCPI for this payload
* into hardware. After calling this, the driver should generate ACT and payload packets.
*
* Returns: 0 on success, error code on failure. In the event that this fails,
* @payload.vc_start_slot will also be set to -1.
*/
int drm_dp_add_payload_part1(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_topology_state *mst_state,
struct drm_dp_mst_atomic_payload *payload)
{
struct drm_dp_mst_port *port;
int ret;
port = drm_dp_mst_topology_get_port_validated(mgr, payload->port);
if (!port) {
drm_dbg_kms(mgr->dev,
"VCPI %d for port %p not in topology, not creating a payload\n",
payload->vcpi, payload->port);
payload->vc_start_slot = -1;
return 0;
}
if (mgr->payload_count == 0)
mgr->next_start_slot = mst_state->start_slot;
payload->vc_start_slot = mgr->next_start_slot;
ret = drm_dp_create_payload_step1(mgr, payload);
drm_dp_mst_topology_put_port(port);
if (ret < 0) {
drm_warn(mgr->dev, "Failed to create MST payload for port %p: %d\n",
payload->port, ret);
payload->vc_start_slot = -1;
return ret;
}
mgr->payload_count++;
mgr->next_start_slot += payload->time_slots;
return 0;
}
EXPORT_SYMBOL(drm_dp_add_payload_part1);
/**
* drm_dp_remove_payload() - Remove an MST payload
* @mgr: Manager to use.
* @mst_state: The MST atomic state
* @old_payload: The payload with its old state
* @new_payload: The payload to write
*
* Removes a payload from an MST topology if it was successfully assigned a start slot. Also updates
* the starting time slots of all other payloads which would have been shifted towards the start of
* the VC table as a result. After calling this, the driver should generate ACT and payload packets.
*/
void drm_dp_remove_payload(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_topology_state *mst_state,
const struct drm_dp_mst_atomic_payload *old_payload,
struct drm_dp_mst_atomic_payload *new_payload)
{
struct drm_dp_mst_atomic_payload *pos;
bool send_remove = false;
/* We failed to make the payload, so nothing to do */
if (new_payload->vc_start_slot == -1)
return;
mutex_lock(&mgr->lock);
send_remove = drm_dp_mst_port_downstream_of_branch(new_payload->port, mgr->mst_primary);
mutex_unlock(&mgr->lock);
if (send_remove)
drm_dp_destroy_payload_step1(mgr, mst_state, new_payload);
else
drm_dbg_kms(mgr->dev, "Payload for VCPI %d not in topology, not sending remove\n",
new_payload->vcpi);
list_for_each_entry(pos, &mst_state->payloads, next) {
if (pos != new_payload && pos->vc_start_slot > new_payload->vc_start_slot)
pos->vc_start_slot -= old_payload->time_slots;
}
new_payload->vc_start_slot = -1;
mgr->payload_count--;
mgr->next_start_slot -= old_payload->time_slots;
if (new_payload->delete)
drm_dp_mst_put_port_malloc(new_payload->port);
}
EXPORT_SYMBOL(drm_dp_remove_payload);
/**
* drm_dp_add_payload_part2() - Execute payload update part 2
* @mgr: Manager to use.
* @state: The global atomic state
* @payload: The payload to update
*
* If @payload was successfully assigned a starting time slot by drm_dp_add_payload_part1(), this
* function will send the sideband messages to finish allocating this payload.
*
* Returns: 0 on success, negative error code on failure.
*/
int drm_dp_add_payload_part2(struct drm_dp_mst_topology_mgr *mgr,
struct drm_atomic_state *state,
struct drm_dp_mst_atomic_payload *payload)
{
int ret = 0;
/* Skip failed payloads */
if (payload->vc_start_slot == -1) {
drm_dbg_kms(mgr->dev, "Part 1 of payload creation for %s failed, skipping part 2\n",
payload->port->connector->name);
return -EIO;
}
ret = drm_dp_create_payload_step2(mgr, payload);
if (ret < 0) {
if (!payload->delete)
drm_err(mgr->dev, "Step 2 of creating MST payload for %p failed: %d\n",
payload->port, ret);
else
drm_dbg_kms(mgr->dev, "Step 2 of removing MST payload for %p failed: %d\n",
payload->port, ret);
}
return ret;
}
EXPORT_SYMBOL(drm_dp_add_payload_part2);
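/*
 * Illustrative sketch (not part of this driver code): a typical commit-time
 * sequence built on the payload helpers above. The my_mgr/my_state names and
 * the exact point at which the stream is enabled are hypothetical and driver
 * specific.
 *
 *	payload = drm_atomic_get_mst_payload_state(mst_state, port);
 *	drm_dp_add_payload_part1(my_mgr, mst_state, payload);
 *	... enable the transcoder/stream, trigger the ACT ...
 *	drm_dp_check_act_status(my_mgr);
 *	drm_dp_add_payload_part2(my_mgr, my_state, payload);
 */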
static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port,
int offset, int size, u8 *bytes)
{
int ret = 0;
struct drm_dp_sideband_msg_tx *txmsg;
struct drm_dp_mst_branch *mstb;
mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
if (!mstb)
return -EINVAL;
txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
if (!txmsg) {
ret = -ENOMEM;
goto fail_put;
}
build_dpcd_read(txmsg, port->port_num, offset, size);
txmsg->dst = port->parent;
drm_dp_queue_down_tx(mgr, txmsg);
ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
if (ret < 0)
goto fail_free;
if (txmsg->reply.reply_type == 1) {
drm_dbg_kms(mgr->dev, "mstb %p port %d: DPCD read on addr 0x%x for %d bytes NAKed\n",
mstb, port->port_num, offset, size);
ret = -EIO;
goto fail_free;
}
if (txmsg->reply.u.remote_dpcd_read_ack.num_bytes != size) {
ret = -EPROTO;
goto fail_free;
}
ret = min_t(size_t, txmsg->reply.u.remote_dpcd_read_ack.num_bytes,
size);
memcpy(bytes, txmsg->reply.u.remote_dpcd_read_ack.bytes, ret);
fail_free:
kfree(txmsg);
fail_put:
drm_dp_mst_topology_put_mstb(mstb);
return ret;
}
static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port,
int offset, int size, u8 *bytes)
{
int ret;
struct drm_dp_sideband_msg_tx *txmsg;
struct drm_dp_mst_branch *mstb;
mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
if (!mstb)
return -EINVAL;
txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
if (!txmsg) {
ret = -ENOMEM;
goto fail_put;
}
build_dpcd_write(txmsg, port->port_num, offset, size, bytes);
txmsg->dst = mstb;
drm_dp_queue_down_tx(mgr, txmsg);
ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
if (ret > 0) {
if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
ret = -EIO;
else
ret = size;
}
kfree(txmsg);
fail_put:
drm_dp_mst_topology_put_mstb(mstb);
return ret;
}
static int drm_dp_encode_up_ack_reply(struct drm_dp_sideband_msg_tx *msg, u8 req_type)
{
struct drm_dp_sideband_msg_reply_body reply;
reply.reply_type = DP_SIDEBAND_REPLY_ACK;
reply.req_type = req_type;
drm_dp_encode_sideband_reply(&reply, msg);
return 0;
}
static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_branch *mstb,
int req_type, bool broadcast)
{
struct drm_dp_sideband_msg_tx *txmsg;
txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
if (!txmsg)
return -ENOMEM;
txmsg->dst = mstb;
drm_dp_encode_up_ack_reply(txmsg, req_type);
mutex_lock(&mgr->qlock);
/* construct a chunk from the first msg in the tx_msg queue */
process_single_tx_qlock(mgr, txmsg, true);
mutex_unlock(&mgr->qlock);
kfree(txmsg);
return 0;
}
/**
* drm_dp_get_vc_payload_bw - get the VC payload BW for an MST link
* @mgr: The &drm_dp_mst_topology_mgr to use
* @link_rate: link rate in 10kbits/s units
* @link_lane_count: lane count
*
* Calculate the total bandwidth of a MultiStream Transport link. The returned
* value is in units of PBNs/(timeslots/1 MTP). This value can be used to
* convert the number of PBNs required for a given stream to the number of
* timeslots this stream requires in each MTP.
*/
int drm_dp_get_vc_payload_bw(const struct drm_dp_mst_topology_mgr *mgr,
int link_rate, int link_lane_count)
{
if (link_rate == 0 || link_lane_count == 0)
drm_dbg_kms(mgr->dev, "invalid link rate/lane count: (%d / %d)\n",
link_rate, link_lane_count);
/* See DP v2.0 2.6.4.2, VCPayload_Bandwidth_for_OneTimeSlotPer_MTP_Allocation */
return link_rate * link_lane_count / 54000;
}
EXPORT_SYMBOL(drm_dp_get_vc_payload_bw);
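/*
 * Worked example: on an HBR2 link (5.4 Gb/s per lane, i.e. link_rate = 540000
 * in 10kbits/s units) with 4 lanes this returns 540000 * 4 / 54000 = 40 PBN
 * per time slot, so a stream needing 532 PBN would occupy
 * DIV_ROUND_UP(532, 40) = 14 time slots.
 */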
/**
* drm_dp_read_mst_cap() - check whether or not a sink supports MST
* @aux: The DP AUX channel to use
* @dpcd: A cached copy of the DPCD capabilities for this sink
*
 * Returns: %true if the sink supports MST, %false otherwise
*/
bool drm_dp_read_mst_cap(struct drm_dp_aux *aux,
const u8 dpcd[DP_RECEIVER_CAP_SIZE])
{
u8 mstm_cap;
if (dpcd[DP_DPCD_REV] < DP_DPCD_REV_12)
return false;
if (drm_dp_dpcd_readb(aux, DP_MSTM_CAP, &mstm_cap) != 1)
return false;
return mstm_cap & DP_MST_CAP;
}
EXPORT_SYMBOL(drm_dp_read_mst_cap);
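/*
 * Usage sketch (illustrative): gate MST setup on this helper, assuming the
 * DPCD caps were read into a local buffer first. The my_dp container is
 * hypothetical.
 *
 *	u8 dpcd[DP_RECEIVER_CAP_SIZE];
 *
 *	if (drm_dp_read_dpcd_caps(&my_dp->aux, dpcd) == 0 &&
 *	    drm_dp_read_mst_cap(&my_dp->aux, dpcd))
 *		my_dp->mst_capable = true;
 */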
/**
* drm_dp_mst_topology_mgr_set_mst() - Set the MST state for a topology manager
* @mgr: manager to set state for
* @mst_state: true to enable MST on this connector - false to disable.
*
* This is called by the driver when it detects an MST capable device plugged
* into a DP MST capable port, or when a DP MST capable device is unplugged.
*/
int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state)
{
int ret = 0;
struct drm_dp_mst_branch *mstb = NULL;
mutex_lock(&mgr->lock);
if (mst_state == mgr->mst_state)
goto out_unlock;
mgr->mst_state = mst_state;
/* set the device into MST mode */
if (mst_state) {
WARN_ON(mgr->mst_primary);
/* get dpcd info */
ret = drm_dp_read_dpcd_caps(mgr->aux, mgr->dpcd);
if (ret < 0) {
drm_dbg_kms(mgr->dev, "%s: failed to read DPCD, ret %d\n",
mgr->aux->name, ret);
goto out_unlock;
}
/* add initial branch device at LCT 1 */
mstb = drm_dp_add_mst_branch_device(1, NULL);
if (mstb == NULL) {
ret = -ENOMEM;
goto out_unlock;
}
mstb->mgr = mgr;
/* give this the main reference */
mgr->mst_primary = mstb;
drm_dp_mst_topology_get_mstb(mgr->mst_primary);
ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
DP_MST_EN |
DP_UP_REQ_EN |
DP_UPSTREAM_IS_SRC);
if (ret < 0)
goto out_unlock;
/* Write reset payload */
drm_dp_dpcd_write_payload(mgr, 0, 0, 0x3f);
queue_work(system_long_wq, &mgr->work);
ret = 0;
} else {
/* disable MST on the device */
mstb = mgr->mst_primary;
mgr->mst_primary = NULL;
/* this can fail if the device is gone */
drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 0);
ret = 0;
mgr->payload_id_table_cleared = false;
memset(&mgr->down_rep_recv, 0, sizeof(mgr->down_rep_recv));
memset(&mgr->up_req_recv, 0, sizeof(mgr->up_req_recv));
}
out_unlock:
mutex_unlock(&mgr->lock);
if (mstb)
drm_dp_mst_topology_put_mstb(mstb);
return ret;
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_set_mst);
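/*
 * Illustrative sketch: drivers typically flip MST mode from their detect/
 * long-pulse handling, keyed off drm_dp_read_mst_cap(). The my_dp container
 * below is hypothetical.
 *
 *	bool is_mst = drm_dp_read_mst_cap(&my_dp->aux, my_dp->dpcd);
 *
 *	if (is_mst != my_dp->is_mst) {
 *		my_dp->is_mst = is_mst;
 *		drm_dp_mst_topology_mgr_set_mst(&my_dp->mst_mgr, is_mst);
 *	}
 */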
static void
drm_dp_mst_topology_mgr_invalidate_mstb(struct drm_dp_mst_branch *mstb)
{
struct drm_dp_mst_port *port;
/* The link address will need to be re-sent on resume */
mstb->link_address_sent = false;
list_for_each_entry(port, &mstb->ports, next)
if (port->mstb)
drm_dp_mst_topology_mgr_invalidate_mstb(port->mstb);
}
/**
* drm_dp_mst_topology_mgr_suspend() - suspend the MST manager
* @mgr: manager to suspend
*
* This function tells the MST device that we can't handle UP messages
* anymore. This should stop it from sending any since we are suspended.
*/
void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr)
{
mutex_lock(&mgr->lock);
drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
DP_MST_EN | DP_UPSTREAM_IS_SRC);
mutex_unlock(&mgr->lock);
flush_work(&mgr->up_req_work);
flush_work(&mgr->work);
flush_work(&mgr->delayed_destroy_work);
mutex_lock(&mgr->lock);
if (mgr->mst_state && mgr->mst_primary)
drm_dp_mst_topology_mgr_invalidate_mstb(mgr->mst_primary);
mutex_unlock(&mgr->lock);
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_suspend);
/**
* drm_dp_mst_topology_mgr_resume() - resume the MST manager
* @mgr: manager to resume
* @sync: whether or not to perform topology reprobing synchronously
*
* This will fetch DPCD and see if the device is still there,
* if it is, it will rewrite the MSTM control bits, and return.
*
 * If the device is gone or fails any of these checks, this returns -1,
 * and the driver should do a full MST reprobe, in case we were undocked.
*
* During system resume (where it is assumed that the driver will be calling
* drm_atomic_helper_resume()) this function should be called beforehand with
* @sync set to true. In contexts like runtime resume where the driver is not
* expected to be calling drm_atomic_helper_resume(), this function should be
* called with @sync set to false in order to avoid deadlocking.
*
* Returns: -1 if the MST topology was removed while we were suspended, 0
* otherwise.
*/
int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr,
bool sync)
{
int ret;
u8 guid[16];
mutex_lock(&mgr->lock);
if (!mgr->mst_primary)
goto out_fail;
if (drm_dp_read_dpcd_caps(mgr->aux, mgr->dpcd) < 0) {
drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n");
goto out_fail;
}
ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
DP_MST_EN |
DP_UP_REQ_EN |
DP_UPSTREAM_IS_SRC);
if (ret < 0) {
drm_dbg_kms(mgr->dev, "mst write failed - undocked during suspend?\n");
goto out_fail;
}
/* Some hubs forget their guids after they resume */
ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16);
if (ret != 16) {
drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n");
goto out_fail;
}
ret = drm_dp_check_mstb_guid(mgr->mst_primary, guid);
if (ret) {
drm_dbg_kms(mgr->dev, "check mstb failed - undocked during suspend?\n");
goto out_fail;
}
/*
* For the final step of resuming the topology, we need to bring the
* state of our in-memory topology back into sync with reality. So,
* restart the probing process as if we're probing a new hub
*/
queue_work(system_long_wq, &mgr->work);
mutex_unlock(&mgr->lock);
if (sync) {
drm_dbg_kms(mgr->dev,
"Waiting for link probe work to finish re-syncing topology...\n");
flush_work(&mgr->work);
}
return 0;
out_fail:
mutex_unlock(&mgr->lock);
return -1;
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume);
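/*
 * Illustrative sketch of how the suspend/resume helpers above pair up in a
 * driver's system PM paths; my_dp is hypothetical, and tearing down MST mode
 * on failure is only one possible response.
 *
 *	suspend:
 *		drm_dp_mst_topology_mgr_suspend(&my_dp->mst_mgr);
 *	resume (before drm_atomic_helper_resume()):
 *		if (drm_dp_mst_topology_mgr_resume(&my_dp->mst_mgr, true) < 0)
 *			drm_dp_mst_topology_mgr_set_mst(&my_dp->mst_mgr, false);
 */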
static bool
drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up,
struct drm_dp_mst_branch **mstb)
{
int len;
u8 replyblock[32];
int replylen, curreply;
int ret;
u8 hdrlen;
struct drm_dp_sideband_msg_hdr hdr;
struct drm_dp_sideband_msg_rx *msg =
up ? &mgr->up_req_recv : &mgr->down_rep_recv;
int basereg = up ? DP_SIDEBAND_MSG_UP_REQ_BASE :
DP_SIDEBAND_MSG_DOWN_REP_BASE;
if (!up)
*mstb = NULL;
len = min(mgr->max_dpcd_transaction_bytes, 16);
ret = drm_dp_dpcd_read(mgr->aux, basereg, replyblock, len);
if (ret != len) {
drm_dbg_kms(mgr->dev, "failed to read DPCD down rep %d %d\n", len, ret);
return false;
}
ret = drm_dp_decode_sideband_msg_hdr(mgr, &hdr, replyblock, len, &hdrlen);
if (ret == false) {
print_hex_dump(KERN_DEBUG, "failed hdr", DUMP_PREFIX_NONE, 16,
1, replyblock, len, false);
drm_dbg_kms(mgr->dev, "ERROR: failed header\n");
return false;
}
if (!up) {
/* Caller is responsible for giving back this reference */
*mstb = drm_dp_get_mst_branch_device(mgr, hdr.lct, hdr.rad);
if (!*mstb) {
drm_dbg_kms(mgr->dev, "Got MST reply from unknown device %d\n", hdr.lct);
return false;
}
}
if (!drm_dp_sideband_msg_set_header(msg, &hdr, hdrlen)) {
drm_dbg_kms(mgr->dev, "sideband msg set header failed %d\n", replyblock[0]);
return false;
}
replylen = min(msg->curchunk_len, (u8)(len - hdrlen));
ret = drm_dp_sideband_append_payload(msg, replyblock + hdrlen, replylen);
if (!ret) {
drm_dbg_kms(mgr->dev, "sideband msg build failed %d\n", replyblock[0]);
return false;
}
replylen = msg->curchunk_len + msg->curchunk_hdrlen - len;
curreply = len;
while (replylen > 0) {
len = min3(replylen, mgr->max_dpcd_transaction_bytes, 16);
ret = drm_dp_dpcd_read(mgr->aux, basereg + curreply,
replyblock, len);
if (ret != len) {
drm_dbg_kms(mgr->dev, "failed to read a chunk (len %d, ret %d)\n",
len, ret);
return false;
}
ret = drm_dp_sideband_append_payload(msg, replyblock, len);
if (!ret) {
drm_dbg_kms(mgr->dev, "failed to build sideband msg\n");
return false;
}
curreply += len;
replylen -= len;
}
return true;
}
static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
{
struct drm_dp_sideband_msg_tx *txmsg;
struct drm_dp_mst_branch *mstb = NULL;
struct drm_dp_sideband_msg_rx *msg = &mgr->down_rep_recv;
if (!drm_dp_get_one_sb_msg(mgr, false, &mstb))
goto out_clear_reply;
/* Multi-packet message transmission, don't clear the reply */
if (!msg->have_eomt)
goto out;
/* find the message */
mutex_lock(&mgr->qlock);
txmsg = list_first_entry_or_null(&mgr->tx_msg_downq,
struct drm_dp_sideband_msg_tx, next);
mutex_unlock(&mgr->qlock);
/* Were we actually expecting a response, and from this mstb? */
if (!txmsg || txmsg->dst != mstb) {
struct drm_dp_sideband_msg_hdr *hdr;
hdr = &msg->initial_hdr;
drm_dbg_kms(mgr->dev, "Got MST reply with no msg %p %d %d %02x %02x\n",
mstb, hdr->seqno, hdr->lct, hdr->rad[0], msg->msg[0]);
goto out_clear_reply;
}
drm_dp_sideband_parse_reply(mgr, msg, &txmsg->reply);
if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
drm_dbg_kms(mgr->dev,
"Got NAK reply: req 0x%02x (%s), reason 0x%02x (%s), nak data 0x%02x\n",
txmsg->reply.req_type,
drm_dp_mst_req_type_str(txmsg->reply.req_type),
txmsg->reply.u.nak.reason,
drm_dp_mst_nak_reason_str(txmsg->reply.u.nak.reason),
txmsg->reply.u.nak.nak_data);
}
memset(msg, 0, sizeof(struct drm_dp_sideband_msg_rx));
drm_dp_mst_topology_put_mstb(mstb);
mutex_lock(&mgr->qlock);
txmsg->state = DRM_DP_SIDEBAND_TX_RX;
list_del(&txmsg->next);
mutex_unlock(&mgr->qlock);
wake_up_all(&mgr->tx_waitq);
return 0;
out_clear_reply:
memset(msg, 0, sizeof(struct drm_dp_sideband_msg_rx));
out:
if (mstb)
drm_dp_mst_topology_put_mstb(mstb);
return 0;
}
static inline bool
drm_dp_mst_process_up_req(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_pending_up_req *up_req)
{
struct drm_dp_mst_branch *mstb = NULL;
struct drm_dp_sideband_msg_req_body *msg = &up_req->msg;
struct drm_dp_sideband_msg_hdr *hdr = &up_req->hdr;
bool hotplug = false, dowork = false;
if (hdr->broadcast) {
const u8 *guid = NULL;
if (msg->req_type == DP_CONNECTION_STATUS_NOTIFY)
guid = msg->u.conn_stat.guid;
else if (msg->req_type == DP_RESOURCE_STATUS_NOTIFY)
guid = msg->u.resource_stat.guid;
if (guid)
mstb = drm_dp_get_mst_branch_device_by_guid(mgr, guid);
} else {
mstb = drm_dp_get_mst_branch_device(mgr, hdr->lct, hdr->rad);
}
if (!mstb) {
drm_dbg_kms(mgr->dev, "Got MST reply from unknown device %d\n", hdr->lct);
return false;
}
/* TODO: Add missing handler for DP_RESOURCE_STATUS_NOTIFY events */
if (msg->req_type == DP_CONNECTION_STATUS_NOTIFY) {
dowork = drm_dp_mst_handle_conn_stat(mstb, &msg->u.conn_stat);
hotplug = true;
}
drm_dp_mst_topology_put_mstb(mstb);
if (dowork)
queue_work(system_long_wq, &mgr->work);
return hotplug;
}
static void drm_dp_mst_up_req_work(struct work_struct *work)
{
struct drm_dp_mst_topology_mgr *mgr =
container_of(work, struct drm_dp_mst_topology_mgr,
up_req_work);
struct drm_dp_pending_up_req *up_req;
bool send_hotplug = false;
mutex_lock(&mgr->probe_lock);
while (true) {
mutex_lock(&mgr->up_req_lock);
up_req = list_first_entry_or_null(&mgr->up_req_list,
struct drm_dp_pending_up_req,
next);
if (up_req)
list_del(&up_req->next);
mutex_unlock(&mgr->up_req_lock);
if (!up_req)
break;
send_hotplug |= drm_dp_mst_process_up_req(mgr, up_req);
kfree(up_req);
}
mutex_unlock(&mgr->probe_lock);
if (send_hotplug)
drm_kms_helper_hotplug_event(mgr->dev);
}
static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
{
struct drm_dp_pending_up_req *up_req;
if (!drm_dp_get_one_sb_msg(mgr, true, NULL))
goto out;
if (!mgr->up_req_recv.have_eomt)
return 0;
up_req = kzalloc(sizeof(*up_req), GFP_KERNEL);
if (!up_req)
return -ENOMEM;
INIT_LIST_HEAD(&up_req->next);
drm_dp_sideband_parse_req(mgr, &mgr->up_req_recv, &up_req->msg);
if (up_req->msg.req_type != DP_CONNECTION_STATUS_NOTIFY &&
up_req->msg.req_type != DP_RESOURCE_STATUS_NOTIFY) {
drm_dbg_kms(mgr->dev, "Received unknown up req type, ignoring: %x\n",
up_req->msg.req_type);
kfree(up_req);
goto out;
}
drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, up_req->msg.req_type,
false);
if (up_req->msg.req_type == DP_CONNECTION_STATUS_NOTIFY) {
const struct drm_dp_connection_status_notify *conn_stat =
&up_req->msg.u.conn_stat;
drm_dbg_kms(mgr->dev, "Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n",
conn_stat->port_number,
conn_stat->legacy_device_plug_status,
conn_stat->displayport_device_plug_status,
conn_stat->message_capability_status,
conn_stat->input_port,
conn_stat->peer_device_type);
} else if (up_req->msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
const struct drm_dp_resource_status_notify *res_stat =
&up_req->msg.u.resource_stat;
drm_dbg_kms(mgr->dev, "Got RSN: pn: %d avail_pbn %d\n",
res_stat->port_number,
res_stat->available_pbn);
}
up_req->hdr = mgr->up_req_recv.initial_hdr;
mutex_lock(&mgr->up_req_lock);
list_add_tail(&up_req->next, &mgr->up_req_list);
mutex_unlock(&mgr->up_req_lock);
queue_work(system_long_wq, &mgr->up_req_work);
out:
memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
return 0;
}
/**
* drm_dp_mst_hpd_irq_handle_event() - MST hotplug IRQ handle MST event
* @mgr: manager to notify irq for.
* @esi: 4 bytes from SINK_COUNT_ESI
* @ack: 4 bytes used to ack events starting from SINK_COUNT_ESI
* @handled: whether the hpd interrupt was consumed or not
*
 * This should be called from the driver when it detects an HPD IRQ,
 * along with the value of the DEVICE_SERVICE_IRQ_VECTOR_ESI0. The
 * topology manager will process the sideband messages received
 * as indicated in the DEVICE_SERVICE_IRQ_VECTOR_ESI0 and set the
 * corresponding flags in @ack, which the driver has to use to acknowledge
 * the DP receiver later.
 *
 * Note that the driver shall also call
 * drm_dp_mst_hpd_irq_send_new_request() if @handled is set after
 * calling this function, to try to kick off a new request in the
 * queue if the previous message transaction is completed.
*
* See also:
* drm_dp_mst_hpd_irq_send_new_request()
*/
int drm_dp_mst_hpd_irq_handle_event(struct drm_dp_mst_topology_mgr *mgr, const u8 *esi,
u8 *ack, bool *handled)
{
int ret = 0;
int sc;
*handled = false;
sc = DP_GET_SINK_COUNT(esi[0]);
if (sc != mgr->sink_count) {
mgr->sink_count = sc;
*handled = true;
}
if (esi[1] & DP_DOWN_REP_MSG_RDY) {
ret = drm_dp_mst_handle_down_rep(mgr);
*handled = true;
ack[1] |= DP_DOWN_REP_MSG_RDY;
}
if (esi[1] & DP_UP_REQ_MSG_RDY) {
ret |= drm_dp_mst_handle_up_req(mgr);
*handled = true;
ack[1] |= DP_UP_REQ_MSG_RDY;
}
return ret;
}
EXPORT_SYMBOL(drm_dp_mst_hpd_irq_handle_event);
/**
* drm_dp_mst_hpd_irq_send_new_request() - MST hotplug IRQ kick off new request
* @mgr: manager to notify irq for.
*
 * This should be called from the driver once the MST IRQ event has been
 * handled and acked. Note that a new down request should only be sent
 * when the previous message transaction is completed; the source is not
 * supposed to generate interleaved message transactions.
*/
void drm_dp_mst_hpd_irq_send_new_request(struct drm_dp_mst_topology_mgr *mgr)
{
struct drm_dp_sideband_msg_tx *txmsg;
bool kick = true;
mutex_lock(&mgr->qlock);
txmsg = list_first_entry_or_null(&mgr->tx_msg_downq,
struct drm_dp_sideband_msg_tx, next);
	/* If the last transaction is not completed yet */
if (!txmsg ||
txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND ||
txmsg->state == DRM_DP_SIDEBAND_TX_SENT)
kick = false;
mutex_unlock(&mgr->qlock);
if (kick)
drm_dp_mst_kick_tx(mgr);
}
EXPORT_SYMBOL(drm_dp_mst_hpd_irq_send_new_request);
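/*
 * Illustrative sketch of an ESI-based HPD IRQ path using the two helpers
 * above. Reading the ESI bytes and writing back the ack bits is driver
 * specific; my_dp and the single-shot structure here are hypothetical.
 *
 *	u8 esi[4] = {}, ack[4] = {};
 *	bool handled;
 *
 *	drm_dp_dpcd_read(&my_dp->aux, DP_SINK_COUNT_ESI, esi, 4);
 *	drm_dp_mst_hpd_irq_handle_event(&my_dp->mst_mgr, esi, ack, &handled);
 *	if (handled) {
 *		drm_dp_dpcd_writeb(&my_dp->aux, DP_DEVICE_SERVICE_IRQ_VECTOR_ESI0,
 *				   ack[1]);
 *		drm_dp_mst_hpd_irq_send_new_request(&my_dp->mst_mgr);
 *	}
 */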
/**
* drm_dp_mst_detect_port() - get connection status for an MST port
* @connector: DRM connector for this port
* @ctx: The acquisition context to use for grabbing locks
* @mgr: manager for this port
* @port: pointer to a port
*
* This returns the current connection state for a port.
*/
int
drm_dp_mst_detect_port(struct drm_connector *connector,
struct drm_modeset_acquire_ctx *ctx,
struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port)
{
int ret;
/* we need to search for the port in the mgr in case it's gone */
port = drm_dp_mst_topology_get_port_validated(mgr, port);
if (!port)
return connector_status_disconnected;
ret = drm_modeset_lock(&mgr->base.lock, ctx);
if (ret)
goto out;
ret = connector_status_disconnected;
if (!port->ddps)
goto out;
switch (port->pdt) {
case DP_PEER_DEVICE_NONE:
break;
case DP_PEER_DEVICE_MST_BRANCHING:
if (!port->mcs)
ret = connector_status_connected;
break;
case DP_PEER_DEVICE_SST_SINK:
ret = connector_status_connected;
/* for logical ports - cache the EDID */
if (port->port_num >= DP_MST_LOGICAL_PORT_0 && !port->cached_edid)
port->cached_edid = drm_edid_read_ddc(connector, &port->aux.ddc);
break;
case DP_PEER_DEVICE_DP_LEGACY_CONV:
if (port->ldps)
ret = connector_status_connected;
break;
}
out:
drm_dp_mst_topology_put_port(port);
return ret;
}
EXPORT_SYMBOL(drm_dp_mst_detect_port);
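/*
 * Usage sketch (illustrative): an MST connector's .detect_ctx hook can simply
 * forward to the helper above; mst_conn is a hypothetical per-connector
 * wrapper.
 *
 *	return drm_dp_mst_detect_port(connector, ctx, &mst_conn->mgr,
 *				      mst_conn->port);
 */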
/**
* drm_dp_mst_edid_read() - get EDID for an MST port
* @connector: toplevel connector to get EDID for
* @mgr: manager for this port
* @port: unverified pointer to a port.
*
 * This returns an EDID for the port connected to a connector. It
 * validates that the pointer still exists, so the caller doesn't require
 * a reference.
*/
const struct drm_edid *drm_dp_mst_edid_read(struct drm_connector *connector,
struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port)
{
const struct drm_edid *drm_edid;
/* we need to search for the port in the mgr in case it's gone */
port = drm_dp_mst_topology_get_port_validated(mgr, port);
if (!port)
return NULL;
if (port->cached_edid)
drm_edid = drm_edid_dup(port->cached_edid);
else
drm_edid = drm_edid_read_ddc(connector, &port->aux.ddc);
drm_dp_mst_topology_put_port(port);
return drm_edid;
}
EXPORT_SYMBOL(drm_dp_mst_edid_read);
/**
* drm_dp_mst_get_edid() - get EDID for an MST port
* @connector: toplevel connector to get EDID for
* @mgr: manager for this port
* @port: unverified pointer to a port.
*
* This function is deprecated; please use drm_dp_mst_edid_read() instead.
*
 * This returns an EDID for the port connected to a connector. It
 * validates that the pointer still exists, so the caller doesn't require
 * a reference.
*/
struct edid *drm_dp_mst_get_edid(struct drm_connector *connector,
struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port)
{
const struct drm_edid *drm_edid;
struct edid *edid;
drm_edid = drm_dp_mst_edid_read(connector, mgr, port);
edid = drm_edid_duplicate(drm_edid_raw(drm_edid));
drm_edid_free(drm_edid);
return edid;
}
EXPORT_SYMBOL(drm_dp_mst_get_edid);
/**
* drm_dp_atomic_find_time_slots() - Find and add time slots to the state
* @state: global atomic state
* @mgr: MST topology manager for the port
* @port: port to find time slots for
* @pbn: bandwidth required for the mode in PBN
*
* Allocates time slots to @port, replacing any previous time slot allocations it may
* have had. Any atomic drivers which support MST must call this function in
* their &drm_encoder_helper_funcs.atomic_check() callback unconditionally to
* change the current time slot allocation for the new state, and ensure the MST
* atomic state is added whenever the state of payloads in the topology changes.
*
* Allocations set by this function are not checked against the bandwidth
* restraints of @mgr until the driver calls drm_dp_mst_atomic_check().
*
* Additionally, it is OK to call this function multiple times on the same
* @port as needed. It is not OK however, to call this function and
* drm_dp_atomic_release_time_slots() in the same atomic check phase.
*
* See also:
* drm_dp_atomic_release_time_slots()
* drm_dp_mst_atomic_check()
*
* Returns:
* Total slots in the atomic state assigned for this port, or a negative error
* code if the port no longer exists
*/
int drm_dp_atomic_find_time_slots(struct drm_atomic_state *state,
struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port, int pbn)
{
struct drm_dp_mst_topology_state *topology_state;
struct drm_dp_mst_atomic_payload *payload = NULL;
struct drm_connector_state *conn_state;
int prev_slots = 0, prev_bw = 0, req_slots;
topology_state = drm_atomic_get_mst_topology_state(state, mgr);
if (IS_ERR(topology_state))
return PTR_ERR(topology_state);
conn_state = drm_atomic_get_new_connector_state(state, port->connector);
topology_state->pending_crtc_mask |= drm_crtc_mask(conn_state->crtc);
/* Find the current allocation for this port, if any */
payload = drm_atomic_get_mst_payload_state(topology_state, port);
if (payload) {
prev_slots = payload->time_slots;
prev_bw = payload->pbn;
/*
* This should never happen, unless the driver tries
* releasing and allocating the same timeslot allocation,
* which is an error
*/
if (drm_WARN_ON(mgr->dev, payload->delete)) {
drm_err(mgr->dev,
"cannot allocate and release time slots on [MST PORT:%p] in the same state\n",
port);
return -EINVAL;
}
}
req_slots = DIV_ROUND_UP(pbn, topology_state->pbn_div);
drm_dbg_atomic(mgr->dev, "[CONNECTOR:%d:%s] [MST PORT:%p] TU %d -> %d\n",
port->connector->base.id, port->connector->name,
port, prev_slots, req_slots);
drm_dbg_atomic(mgr->dev, "[CONNECTOR:%d:%s] [MST PORT:%p] PBN %d -> %d\n",
port->connector->base.id, port->connector->name,
port, prev_bw, pbn);
/* Add the new allocation to the state, note the VCPI isn't assigned until the end */
if (!payload) {
payload = kzalloc(sizeof(*payload), GFP_KERNEL);
if (!payload)
return -ENOMEM;
drm_dp_mst_get_port_malloc(port);
payload->port = port;
payload->vc_start_slot = -1;
list_add(&payload->next, &topology_state->payloads);
}
payload->time_slots = req_slots;
payload->pbn = pbn;
return req_slots;
}
EXPORT_SYMBOL(drm_dp_atomic_find_time_slots);
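/*
 * Illustrative sketch of the encoder-side atomic check using the helper
 * above; the bpp/clock inputs and the mst_conn wrapper are hypothetical.
 *
 *	pbn = drm_dp_calc_pbn_mode(adjusted_mode->clock, bpp, false);
 *	slots = drm_dp_atomic_find_time_slots(state, &mst_conn->mgr,
 *					      mst_conn->port, pbn);
 *	if (slots < 0)
 *		return slots;
 */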
/**
* drm_dp_atomic_release_time_slots() - Release allocated time slots
* @state: global atomic state
* @mgr: MST topology manager for the port
* @port: The port to release the time slots from
*
* Releases any time slots that have been allocated to a port in the atomic
* state. Any atomic drivers which support MST must call this function
* unconditionally in their &drm_connector_helper_funcs.atomic_check() callback.
* This helper will check whether time slots would be released by the new state and
* respond accordingly, along with ensuring the MST state is always added to the
* atomic state whenever a new state would modify the state of payloads on the
* topology.
*
* It is OK to call this even if @port has been removed from the system.
* Additionally, it is OK to call this function multiple times on the same
* @port as needed. It is not OK however, to call this function and
* drm_dp_atomic_find_time_slots() on the same @port in a single atomic check
* phase.
*
* See also:
* drm_dp_atomic_find_time_slots()
* drm_dp_mst_atomic_check()
*
* Returns:
* 0 on success, negative error code otherwise
*/
int drm_dp_atomic_release_time_slots(struct drm_atomic_state *state,
struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port)
{
struct drm_dp_mst_topology_state *topology_state;
struct drm_dp_mst_atomic_payload *payload;
struct drm_connector_state *old_conn_state, *new_conn_state;
bool update_payload = true;
old_conn_state = drm_atomic_get_old_connector_state(state, port->connector);
if (!old_conn_state->crtc)
return 0;
	/* If the CRTC isn't disabled by this state, don't release its payload */
new_conn_state = drm_atomic_get_new_connector_state(state, port->connector);
if (new_conn_state->crtc) {
struct drm_crtc_state *crtc_state =
drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);
/* No modeset means no payload changes, so it's safe to not pull in the MST state */
if (!crtc_state || !drm_atomic_crtc_needs_modeset(crtc_state))
return 0;
if (!crtc_state->mode_changed && !crtc_state->connectors_changed)
update_payload = false;
}
topology_state = drm_atomic_get_mst_topology_state(state, mgr);
if (IS_ERR(topology_state))
return PTR_ERR(topology_state);
topology_state->pending_crtc_mask |= drm_crtc_mask(old_conn_state->crtc);
if (!update_payload)
return 0;
payload = drm_atomic_get_mst_payload_state(topology_state, port);
if (WARN_ON(!payload)) {
drm_err(mgr->dev, "No payload for [MST PORT:%p] found in mst state %p\n",
port, &topology_state->base);
return -EINVAL;
}
if (new_conn_state->crtc)
return 0;
drm_dbg_atomic(mgr->dev, "[MST PORT:%p] TU %d -> 0\n", port, payload->time_slots);
if (!payload->delete) {
payload->pbn = 0;
payload->delete = true;
topology_state->payload_mask &= ~BIT(payload->vcpi - 1);
}
return 0;
}
EXPORT_SYMBOL(drm_dp_atomic_release_time_slots);
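/*
 * Illustrative sketch of the connector-side counterpart, called
 * unconditionally from a hypothetical MST connector's
 * &drm_connector_helper_funcs.atomic_check:
 *
 *	static int mst_connector_atomic_check(struct drm_connector *connector,
 *					      struct drm_atomic_state *state)
 *	{
 *		struct mst_connector *c = to_mst_connector(connector);
 *
 *		return drm_dp_atomic_release_time_slots(state, &c->mgr,
 *							c->port);
 *	}
 */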
/**
* drm_dp_mst_atomic_setup_commit() - setup_commit hook for MST helpers
* @state: global atomic state
*
* This function saves all of the &drm_crtc_commit structs in an atomic state that touch any CRTCs
* currently assigned to an MST topology. Drivers must call this hook from their
* &drm_mode_config_helper_funcs.atomic_commit_setup hook.
*
* Returns:
* 0 if all CRTC commits were retrieved successfully, negative error code otherwise
*/
int drm_dp_mst_atomic_setup_commit(struct drm_atomic_state *state)
{
struct drm_dp_mst_topology_mgr *mgr;
struct drm_dp_mst_topology_state *mst_state;
struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;
int i, j, commit_idx, num_commit_deps;
for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
if (!mst_state->pending_crtc_mask)
continue;
num_commit_deps = hweight32(mst_state->pending_crtc_mask);
mst_state->commit_deps = kmalloc_array(num_commit_deps,
sizeof(*mst_state->commit_deps), GFP_KERNEL);
if (!mst_state->commit_deps)
return -ENOMEM;
mst_state->num_commit_deps = num_commit_deps;
commit_idx = 0;
for_each_new_crtc_in_state(state, crtc, crtc_state, j) {
if (mst_state->pending_crtc_mask & drm_crtc_mask(crtc)) {
mst_state->commit_deps[commit_idx++] =
drm_crtc_commit_get(crtc_state->commit);
}
}
}
return 0;
}
EXPORT_SYMBOL(drm_dp_mst_atomic_setup_commit);
/**
* drm_dp_mst_atomic_wait_for_dependencies() - Wait for all pending commits on MST topologies,
* prepare new MST state for commit
* @state: global atomic state
*
* Goes through any MST topologies in this atomic state, and waits for any pending commits which
* touched CRTCs that were/are on an MST topology to be programmed to hardware and flipped to before
* returning. This is to prevent multiple non-blocking commits affecting an MST topology from racing
 * with each other by forcing them to be executed sequentially in situations where the only resources
* the modeset objects in these commits share are an MST topology.
*
* This function also prepares the new MST state for commit by performing some state preparation
* which can't be done until this point, such as reading back the final VC start slots (which are
* determined at commit-time) from the previous state.
*
* All MST drivers must call this function after calling drm_atomic_helper_wait_for_dependencies(),
* or whatever their equivalent of that is.
*/
void drm_dp_mst_atomic_wait_for_dependencies(struct drm_atomic_state *state)
{
struct drm_dp_mst_topology_state *old_mst_state, *new_mst_state;
struct drm_dp_mst_topology_mgr *mgr;
struct drm_dp_mst_atomic_payload *old_payload, *new_payload;
int i, j, ret;
for_each_oldnew_mst_mgr_in_state(state, mgr, old_mst_state, new_mst_state, i) {
for (j = 0; j < old_mst_state->num_commit_deps; j++) {
ret = drm_crtc_commit_wait(old_mst_state->commit_deps[j]);
if (ret < 0)
drm_err(state->dev, "Failed to wait for %s: %d\n",
old_mst_state->commit_deps[j]->crtc->name, ret);
}
/* Now that previous state is committed, it's safe to copy over the start slot
* assignments
*/
list_for_each_entry(old_payload, &old_mst_state->payloads, next) {
if (old_payload->delete)
continue;
new_payload = drm_atomic_get_mst_payload_state(new_mst_state,
old_payload->port);
new_payload->vc_start_slot = old_payload->vc_start_slot;
}
}
}
EXPORT_SYMBOL(drm_dp_mst_atomic_wait_for_dependencies);
/**
* drm_dp_mst_root_conn_atomic_check() - Serialize CRTC commits on MST-capable connectors operating
* in SST mode
* @new_conn_state: The new connector state of the &drm_connector
* @mgr: The MST topology manager for the &drm_connector
*
* Since MST uses fake &drm_encoder structs, the generic atomic modesetting code isn't able to
* serialize non-blocking commits happening on the real DP connector of an MST topology switching
* into/away from MST mode - as the CRTC on the real DP connector and the CRTCs on the connector's
* MST topology will never share the same &drm_encoder.
*
* This function takes care of this serialization issue, by checking a root MST connector's atomic
* state to determine if it is about to have a modeset - and then pulling in the MST topology state
* if so, along with adding any relevant CRTCs to &drm_dp_mst_topology_state.pending_crtc_mask.
*
* Drivers implementing MST must call this function from the
* &drm_connector_helper_funcs.atomic_check hook of any physical DP &drm_connector capable of
* driving MST sinks.
*
* Returns:
* 0 on success, negative error code otherwise
*/
int drm_dp_mst_root_conn_atomic_check(struct drm_connector_state *new_conn_state,
struct drm_dp_mst_topology_mgr *mgr)
{
struct drm_atomic_state *state = new_conn_state->state;
struct drm_connector_state *old_conn_state =
drm_atomic_get_old_connector_state(state, new_conn_state->connector);
struct drm_crtc_state *crtc_state;
struct drm_dp_mst_topology_state *mst_state = NULL;
if (new_conn_state->crtc) {
crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);
if (crtc_state && drm_atomic_crtc_needs_modeset(crtc_state)) {
mst_state = drm_atomic_get_mst_topology_state(state, mgr);
if (IS_ERR(mst_state))
return PTR_ERR(mst_state);
mst_state->pending_crtc_mask |= drm_crtc_mask(new_conn_state->crtc);
}
}
if (old_conn_state->crtc) {
crtc_state = drm_atomic_get_new_crtc_state(state, old_conn_state->crtc);
if (crtc_state && drm_atomic_crtc_needs_modeset(crtc_state)) {
if (!mst_state) {
mst_state = drm_atomic_get_mst_topology_state(state, mgr);
if (IS_ERR(mst_state))
return PTR_ERR(mst_state);
}
mst_state->pending_crtc_mask |= drm_crtc_mask(old_conn_state->crtc);
}
}
return 0;
}
EXPORT_SYMBOL(drm_dp_mst_root_conn_atomic_check);
/**
 * drm_dp_mst_update_slots() - updates the slot info depending on the DP encoding format
 * @mst_state: mst_state to update
 * @link_encoding_cap: the encoding format on the link
*/
void drm_dp_mst_update_slots(struct drm_dp_mst_topology_state *mst_state, uint8_t link_encoding_cap)
{
if (link_encoding_cap == DP_CAP_ANSI_128B132B) {
mst_state->total_avail_slots = 64;
mst_state->start_slot = 0;
} else {
mst_state->total_avail_slots = 63;
mst_state->start_slot = 1;
}
DRM_DEBUG_KMS("%s encoding format on mst_state 0x%p\n",
(link_encoding_cap == DP_CAP_ANSI_128B132B) ? "128b/132b":"8b/10b",
mst_state);
}
EXPORT_SYMBOL(drm_dp_mst_update_slots);
static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
int id, u8 start_slot, u8 num_slots)
{
u8 payload_alloc[3], status;
int ret;
int retries = 0;
drm_dp_dpcd_writeb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS,
DP_PAYLOAD_TABLE_UPDATED);
payload_alloc[0] = id;
payload_alloc[1] = start_slot;
payload_alloc[2] = num_slots;
ret = drm_dp_dpcd_write(mgr->aux, DP_PAYLOAD_ALLOCATE_SET, payload_alloc, 3);
if (ret != 3) {
drm_dbg_kms(mgr->dev, "failed to write payload allocation %d\n", ret);
goto fail;
}
retry:
ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
if (ret < 0) {
drm_dbg_kms(mgr->dev, "failed to read payload table status %d\n", ret);
goto fail;
}
if (!(status & DP_PAYLOAD_TABLE_UPDATED)) {
retries++;
if (retries < 20) {
usleep_range(10000, 20000);
goto retry;
}
drm_dbg_kms(mgr->dev, "status not set after read payload table status %d\n",
status);
ret = -EINVAL;
goto fail;
}
ret = 0;
fail:
return ret;
}
static int do_get_act_status(struct drm_dp_aux *aux)
{
int ret;
u8 status;
ret = drm_dp_dpcd_readb(aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
if (ret < 0)
return ret;
return status;
}
/**
* drm_dp_check_act_status() - Polls for ACT handled status.
* @mgr: manager to use
*
 * Tries waiting for the MST hub to finish updating its payload table by
 * polling for the ACT handled bit for up to 3 seconds (yes, some hubs really
 * take that long).
*
* Returns:
* 0 if the ACT was handled in time, negative error code on failure.
*/
int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr)
{
/*
* There doesn't seem to be any recommended retry count or timeout in
* the MST specification. Since some hubs have been observed to take
* over 1 second to update their payload allocations under certain
* conditions, we use a rather large timeout value.
*/
const int timeout_ms = 3000;
int ret, status;
ret = readx_poll_timeout(do_get_act_status, mgr->aux, status,
status & DP_PAYLOAD_ACT_HANDLED || status < 0,
200, timeout_ms * USEC_PER_MSEC);
if (ret < 0 && status >= 0) {
drm_err(mgr->dev, "Failed to get ACT after %dms, last status: %02x\n",
timeout_ms, status);
return -EINVAL;
} else if (status < 0) {
/*
* Failure here isn't unexpected - the hub may have
* just been unplugged
*/
drm_dbg_kms(mgr->dev, "Failed to read payload table status: %d\n", status);
return status;
}
return 0;
}
EXPORT_SYMBOL(drm_dp_check_act_status);
/**
* drm_dp_calc_pbn_mode() - Calculate the PBN for a mode.
* @clock: dot clock for the mode
* @bpp: bpp for the mode.
* @dsc: DSC mode. If true, bpp has units of 1/16 of a bit per pixel
*
* This uses the formula in the spec to calculate the PBN value for a mode.
*/
int drm_dp_calc_pbn_mode(int clock, int bpp, bool dsc)
{
/*
* margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006
* The unit of 54/64Mbytes/sec is an arbitrary unit chosen based on
* common multiplier to render an integer PBN for all link rate/lane
* counts combinations
* calculate
* peak_kbps *= (1006/1000)
* peak_kbps *= (64/54)
* peak_kbps *= 8 convert to bytes
*
* If the bpp is in units of 1/16, further divide by 16. Put this
* factor in the numerator rather than the denominator to avoid
* integer overflow
*/
if (dsc)
return DIV_ROUND_UP_ULL(mul_u32_u32(clock * (bpp / 16), 64 * 1006),
8 * 54 * 1000 * 1000);
return DIV_ROUND_UP_ULL(mul_u32_u32(clock * bpp, 64 * 1006),
8 * 54 * 1000 * 1000);
}
EXPORT_SYMBOL(drm_dp_calc_pbn_mode);
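/*
 * Worked example: a 1920x1080@60 mode (148500 kHz dot clock) at 24 bpp gives
 * DIV_ROUND_UP(148500 * 24 * 64 * 1006, 8 * 54 * 1000 * 1000) = 532 PBN. On
 * an HBR2 x4 link (40 PBN per time slot, see drm_dp_get_vc_payload_bw()) that
 * stream therefore needs DIV_ROUND_UP(532, 40) = 14 time slots.
 */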
/* we want to kick the TX after we've ack the up/down IRQs. */
static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr)
{
queue_work(system_long_wq, &mgr->tx_work);
}
/*
* Helper function for parsing DP device types into convenient strings
* for use with dp_mst_topology
*/
static const char *pdt_to_string(u8 pdt)
{
switch (pdt) {
case DP_PEER_DEVICE_NONE:
return "NONE";
case DP_PEER_DEVICE_SOURCE_OR_SST:
return "SOURCE OR SST";
case DP_PEER_DEVICE_MST_BRANCHING:
return "MST BRANCHING";
case DP_PEER_DEVICE_SST_SINK:
return "SST SINK";
case DP_PEER_DEVICE_DP_LEGACY_CONV:
return "DP LEGACY CONV";
default:
return "ERR";
}
}
static void drm_dp_mst_dump_mstb(struct seq_file *m,
struct drm_dp_mst_branch *mstb)
{
struct drm_dp_mst_port *port;
int tabs = mstb->lct;
char prefix[10];
int i;
for (i = 0; i < tabs; i++)
prefix[i] = '\t';
prefix[i] = '\0';
seq_printf(m, "%smstb - [%p]: num_ports: %d\n", prefix, mstb, mstb->num_ports);
list_for_each_entry(port, &mstb->ports, next) {
seq_printf(m, "%sport %d - [%p] (%s - %s): ddps: %d, ldps: %d, sdp: %d/%d, fec: %s, conn: %p\n",
prefix,
port->port_num,
port,
port->input ? "input" : "output",
pdt_to_string(port->pdt),
port->ddps,
port->ldps,
port->num_sdp_streams,
port->num_sdp_stream_sinks,
port->fec_capable ? "true" : "false",
port->connector);
if (port->mstb)
drm_dp_mst_dump_mstb(m, port->mstb);
}
}
#define DP_PAYLOAD_TABLE_SIZE 64
static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
char *buf)
{
int i;
for (i = 0; i < DP_PAYLOAD_TABLE_SIZE; i += 16) {
if (drm_dp_dpcd_read(mgr->aux,
DP_PAYLOAD_TABLE_UPDATE_STATUS + i,
&buf[i], 16) != 16)
return false;
}
return true;
}
static void fetch_monitor_name(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port, char *name,
int namelen)
{
struct edid *mst_edid;
mst_edid = drm_dp_mst_get_edid(port->connector, mgr, port);
drm_edid_get_monitor_name(mst_edid, name, namelen);
kfree(mst_edid);
}
/**
* drm_dp_mst_dump_topology(): dump topology to seq file.
* @m: seq_file to dump output to
* @mgr: manager to dump current topology for.
*
* helper to dump MST topology to a seq file for debugfs.
*/
void drm_dp_mst_dump_topology(struct seq_file *m,
struct drm_dp_mst_topology_mgr *mgr)
{
struct drm_dp_mst_topology_state *state;
struct drm_dp_mst_atomic_payload *payload;
int i, ret;
mutex_lock(&mgr->lock);
if (mgr->mst_primary)
drm_dp_mst_dump_mstb(m, mgr->mst_primary);
/* dump VCPIs */
mutex_unlock(&mgr->lock);
ret = drm_modeset_lock_single_interruptible(&mgr->base.lock);
if (ret < 0)
return;
state = to_drm_dp_mst_topology_state(mgr->base.state);
seq_printf(m, "\n*** Atomic state info ***\n");
seq_printf(m, "payload_mask: %x, max_payloads: %d, start_slot: %u, pbn_div: %d\n",
state->payload_mask, mgr->max_payloads, state->start_slot, state->pbn_div);
seq_printf(m, "\n| idx | port | vcpi | slots | pbn | dsc | sink name |\n");
for (i = 0; i < mgr->max_payloads; i++) {
list_for_each_entry(payload, &state->payloads, next) {
char name[14];
if (payload->vcpi != i || payload->delete)
continue;
fetch_monitor_name(mgr, payload->port, name, sizeof(name));
seq_printf(m, " %5d %6d %6d %02d - %02d %5d %5s %19s\n",
i,
payload->port->port_num,
payload->vcpi,
payload->vc_start_slot,
payload->vc_start_slot + payload->time_slots - 1,
payload->pbn,
payload->dsc_enabled ? "Y" : "N",
(*name != 0) ? name : "Unknown");
}
}
seq_printf(m, "\n*** DPCD Info ***\n");
mutex_lock(&mgr->lock);
if (mgr->mst_primary) {
u8 buf[DP_PAYLOAD_TABLE_SIZE];
int ret;
if (drm_dp_read_dpcd_caps(mgr->aux, buf) < 0) {
seq_printf(m, "dpcd read failed\n");
goto out;
}
seq_printf(m, "dpcd: %*ph\n", DP_RECEIVER_CAP_SIZE, buf);
ret = drm_dp_dpcd_read(mgr->aux, DP_FAUX_CAP, buf, 2);
if (ret != 2) {
seq_printf(m, "faux/mst read failed\n");
goto out;
}
seq_printf(m, "faux/mst: %*ph\n", 2, buf);
ret = drm_dp_dpcd_read(mgr->aux, DP_MSTM_CTRL, buf, 1);
if (ret != 1) {
seq_printf(m, "mst ctrl read failed\n");
goto out;
}
seq_printf(m, "mst ctrl: %*ph\n", 1, buf);
/* dump the standard OUI branch header */
ret = drm_dp_dpcd_read(mgr->aux, DP_BRANCH_OUI, buf, DP_BRANCH_OUI_HEADER_SIZE);
if (ret != DP_BRANCH_OUI_HEADER_SIZE) {
seq_printf(m, "branch oui read failed\n");
goto out;
}
seq_printf(m, "branch oui: %*phN devid: ", 3, buf);
for (i = 0x3; i < 0x8 && buf[i]; i++)
seq_printf(m, "%c", buf[i]);
seq_printf(m, " revision: hw: %x.%x sw: %x.%x\n",
buf[0x9] >> 4, buf[0x9] & 0xf, buf[0xa], buf[0xb]);
if (dump_dp_payload_table(mgr, buf))
seq_printf(m, "payload table: %*ph\n", DP_PAYLOAD_TABLE_SIZE, buf);
}
out:
mutex_unlock(&mgr->lock);
drm_modeset_unlock(&mgr->base.lock);
}
EXPORT_SYMBOL(drm_dp_mst_dump_topology);
static void drm_dp_tx_work(struct work_struct *work)
{
struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, tx_work);
mutex_lock(&mgr->qlock);
if (!list_empty(&mgr->tx_msg_downq))
process_single_down_tx_qlock(mgr);
mutex_unlock(&mgr->qlock);
}
static inline void
drm_dp_delayed_destroy_port(struct drm_dp_mst_port *port)
{
drm_dp_port_set_pdt(port, DP_PEER_DEVICE_NONE, port->mcs);
if (port->connector) {
drm_connector_unregister(port->connector);
drm_connector_put(port->connector);
}
drm_dp_mst_put_port_malloc(port);
}
static inline void
drm_dp_delayed_destroy_mstb(struct drm_dp_mst_branch *mstb)
{
struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
struct drm_dp_mst_port *port, *port_tmp;
struct drm_dp_sideband_msg_tx *txmsg, *txmsg_tmp;
bool wake_tx = false;
mutex_lock(&mgr->lock);
list_for_each_entry_safe(port, port_tmp, &mstb->ports, next) {
list_del(&port->next);
drm_dp_mst_topology_put_port(port);
}
mutex_unlock(&mgr->lock);
/* drop any tx slot msg */
mutex_lock(&mstb->mgr->qlock);
list_for_each_entry_safe(txmsg, txmsg_tmp, &mgr->tx_msg_downq, next) {
if (txmsg->dst != mstb)
continue;
txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
list_del(&txmsg->next);
wake_tx = true;
}
mutex_unlock(&mstb->mgr->qlock);
if (wake_tx)
wake_up_all(&mstb->mgr->tx_waitq);
drm_dp_mst_put_mstb_malloc(mstb);
}
static void drm_dp_delayed_destroy_work(struct work_struct *work)
{
struct drm_dp_mst_topology_mgr *mgr =
container_of(work, struct drm_dp_mst_topology_mgr,
delayed_destroy_work);
bool send_hotplug = false, go_again;
/*
* Not a regular list traverse as we have to drop the destroy
* connector lock before destroying the mstb/port, to avoid AB->BA
* ordering between this lock and the config mutex.
*/
do {
go_again = false;
for (;;) {
struct drm_dp_mst_branch *mstb;
mutex_lock(&mgr->delayed_destroy_lock);
mstb = list_first_entry_or_null(&mgr->destroy_branch_device_list,
struct drm_dp_mst_branch,
destroy_next);
if (mstb)
list_del(&mstb->destroy_next);
mutex_unlock(&mgr->delayed_destroy_lock);
if (!mstb)
break;
drm_dp_delayed_destroy_mstb(mstb);
go_again = true;
}
for (;;) {
struct drm_dp_mst_port *port;
mutex_lock(&mgr->delayed_destroy_lock);
port = list_first_entry_or_null(&mgr->destroy_port_list,
struct drm_dp_mst_port,
next);
if (port)
list_del(&port->next);
mutex_unlock(&mgr->delayed_destroy_lock);
if (!port)
break;
drm_dp_delayed_destroy_port(port);
send_hotplug = true;
go_again = true;
}
} while (go_again);
if (send_hotplug)
drm_kms_helper_hotplug_event(mgr->dev);
}
static struct drm_private_state *
drm_dp_mst_duplicate_state(struct drm_private_obj *obj)
{
struct drm_dp_mst_topology_state *state, *old_state =
to_dp_mst_topology_state(obj->state);
struct drm_dp_mst_atomic_payload *pos, *payload;
state = kmemdup(old_state, sizeof(*state), GFP_KERNEL);
if (!state)
return NULL;
__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
INIT_LIST_HEAD(&state->payloads);
state->commit_deps = NULL;
state->num_commit_deps = 0;
state->pending_crtc_mask = 0;
list_for_each_entry(pos, &old_state->payloads, next) {
/* Prune leftover freed timeslot allocations */
if (pos->delete)
continue;
payload = kmemdup(pos, sizeof(*payload), GFP_KERNEL);
if (!payload)
goto fail;
drm_dp_mst_get_port_malloc(payload->port);
list_add(&payload->next, &state->payloads);
}
return &state->base;
fail:
list_for_each_entry_safe(pos, payload, &state->payloads, next) {
drm_dp_mst_put_port_malloc(pos->port);
kfree(pos);
}
kfree(state);
return NULL;
}
static void drm_dp_mst_destroy_state(struct drm_private_obj *obj,
struct drm_private_state *state)
{
struct drm_dp_mst_topology_state *mst_state =
to_dp_mst_topology_state(state);
struct drm_dp_mst_atomic_payload *pos, *tmp;
int i;
list_for_each_entry_safe(pos, tmp, &mst_state->payloads, next) {
/* We only keep references to ports with active payloads */
if (!pos->delete)
drm_dp_mst_put_port_malloc(pos->port);
kfree(pos);
}
for (i = 0; i < mst_state->num_commit_deps; i++)
drm_crtc_commit_put(mst_state->commit_deps[i]);
kfree(mst_state->commit_deps);
kfree(mst_state);
}
static bool drm_dp_mst_port_downstream_of_branch(struct drm_dp_mst_port *port,
struct drm_dp_mst_branch *branch)
{
while (port->parent) {
if (port->parent == branch)
return true;
if (port->parent->port_parent)
port = port->parent->port_parent;
else
break;
}
return false;
}
static int
drm_dp_mst_atomic_check_port_bw_limit(struct drm_dp_mst_port *port,
struct drm_dp_mst_topology_state *state);
static int
drm_dp_mst_atomic_check_mstb_bw_limit(struct drm_dp_mst_branch *mstb,
struct drm_dp_mst_topology_state *state)
{
struct drm_dp_mst_atomic_payload *payload;
struct drm_dp_mst_port *port;
int pbn_used = 0, ret;
bool found = false;
/* Check that we have at least one port in our state that's downstream
* of this branch, otherwise we can skip this branch
*/
list_for_each_entry(payload, &state->payloads, next) {
if (!payload->pbn ||
!drm_dp_mst_port_downstream_of_branch(payload->port, mstb))
continue;
found = true;
break;
}
if (!found)
return 0;
if (mstb->port_parent)
drm_dbg_atomic(mstb->mgr->dev,
"[MSTB:%p] [MST PORT:%p] Checking bandwidth limits on [MSTB:%p]\n",
mstb->port_parent->parent, mstb->port_parent, mstb);
else
drm_dbg_atomic(mstb->mgr->dev, "[MSTB:%p] Checking bandwidth limits\n", mstb);
list_for_each_entry(port, &mstb->ports, next) {
ret = drm_dp_mst_atomic_check_port_bw_limit(port, state);
if (ret < 0)
return ret;
pbn_used += ret;
}
return pbn_used;
}
static int
drm_dp_mst_atomic_check_port_bw_limit(struct drm_dp_mst_port *port,
struct drm_dp_mst_topology_state *state)
{
struct drm_dp_mst_atomic_payload *payload;
int pbn_used = 0;
if (port->pdt == DP_PEER_DEVICE_NONE)
return 0;
if (drm_dp_mst_is_end_device(port->pdt, port->mcs)) {
payload = drm_atomic_get_mst_payload_state(state, port);
if (!payload)
return 0;
/*
* This could happen if the sink deasserted its HPD line, but
* the branch device still reports it as attached (PDT != NONE).
*/
if (!port->full_pbn) {
drm_dbg_atomic(port->mgr->dev,
"[MSTB:%p] [MST PORT:%p] no BW available for the port\n",
port->parent, port);
return -EINVAL;
}
pbn_used = payload->pbn;
} else {
pbn_used = drm_dp_mst_atomic_check_mstb_bw_limit(port->mstb,
state);
if (pbn_used <= 0)
return pbn_used;
}
if (pbn_used > port->full_pbn) {
drm_dbg_atomic(port->mgr->dev,
"[MSTB:%p] [MST PORT:%p] required PBN of %d exceeds port limit of %d\n",
port->parent, port, pbn_used, port->full_pbn);
return -ENOSPC;
}
drm_dbg_atomic(port->mgr->dev, "[MSTB:%p] [MST PORT:%p] uses %d out of %d PBN\n",
port->parent, port, pbn_used, port->full_pbn);
return pbn_used;
}
static inline int
drm_dp_mst_atomic_check_payload_alloc_limits(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_topology_state *mst_state)
{
struct drm_dp_mst_atomic_payload *payload;
int avail_slots = mst_state->total_avail_slots, payload_count = 0;
list_for_each_entry(payload, &mst_state->payloads, next) {
		/* Releasing payloads is always OK, even if the port is gone */
if (payload->delete) {
drm_dbg_atomic(mgr->dev, "[MST PORT:%p] releases all time slots\n",
payload->port);
continue;
}
drm_dbg_atomic(mgr->dev, "[MST PORT:%p] requires %d time slots\n",
payload->port, payload->time_slots);
avail_slots -= payload->time_slots;
if (avail_slots < 0) {
drm_dbg_atomic(mgr->dev,
"[MST PORT:%p] not enough time slots in mst state %p (avail=%d)\n",
payload->port, mst_state, avail_slots + payload->time_slots);
return -ENOSPC;
}
if (++payload_count > mgr->max_payloads) {
drm_dbg_atomic(mgr->dev,
"[MST MGR:%p] state %p has too many payloads (max=%d)\n",
mgr, mst_state, mgr->max_payloads);
return -EINVAL;
}
/* Assign a VCPI */
if (!payload->vcpi) {
payload->vcpi = ffz(mst_state->payload_mask) + 1;
drm_dbg_atomic(mgr->dev, "[MST PORT:%p] assigned VCPI #%d\n",
payload->port, payload->vcpi);
mst_state->payload_mask |= BIT(payload->vcpi - 1);
}
}
if (!payload_count)
mst_state->pbn_div = 0;
drm_dbg_atomic(mgr->dev, "[MST MGR:%p] mst state %p TU pbn_div=%d avail=%d used=%d\n",
mgr, mst_state, mst_state->pbn_div, avail_slots,
mst_state->total_avail_slots - avail_slots);
return 0;
}
/**
 * drm_dp_mst_add_affected_dsc_crtcs() - trigger a modeset on CRTCs driving DSC-capable MST ports
 * @state: Pointer to the new struct drm_atomic_state
 * @mgr: MST topology manager
 *
 * Whenever there is a change in the MST topology, the DSC configuration
 * has to be recalculated, so a modeset needs to be triggered on all
 * affected CRTCs in that topology.
*
* See also:
* drm_dp_mst_atomic_enable_dsc()
*/
int drm_dp_mst_add_affected_dsc_crtcs(struct drm_atomic_state *state, struct drm_dp_mst_topology_mgr *mgr)
{
struct drm_dp_mst_topology_state *mst_state;
struct drm_dp_mst_atomic_payload *pos;
struct drm_connector *connector;
struct drm_connector_state *conn_state;
struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;
mst_state = drm_atomic_get_mst_topology_state(state, mgr);
if (IS_ERR(mst_state))
return PTR_ERR(mst_state);
list_for_each_entry(pos, &mst_state->payloads, next) {
connector = pos->port->connector;
if (!connector)
return -EINVAL;
conn_state = drm_atomic_get_connector_state(state, connector);
if (IS_ERR(conn_state))
return PTR_ERR(conn_state);
crtc = conn_state->crtc;
if (!crtc)
continue;
if (!drm_dp_mst_dsc_aux_for_port(pos->port))
continue;
crtc_state = drm_atomic_get_crtc_state(mst_state->base.state, crtc);
if (IS_ERR(crtc_state))
return PTR_ERR(crtc_state);
drm_dbg_atomic(mgr->dev, "[MST MGR:%p] Setting mode_changed flag on CRTC %p\n",
mgr, crtc);
crtc_state->mode_changed = true;
}
return 0;
}
EXPORT_SYMBOL(drm_dp_mst_add_affected_dsc_crtcs);
/**
* drm_dp_mst_atomic_enable_dsc - Set DSC Enable Flag to On/Off
* @state: Pointer to the new drm_atomic_state
* @port: Pointer to the affected MST Port
* @pbn: Newly recalculated bw required for link with DSC enabled
* @enable: Boolean flag to enable or disable DSC on the port
*
 * This function enables or disables DSC on the given port; when enabling,
 * it recalculates the port's time slot allocation from the provided PBN.
 * The dsc_enabled flag is set to keep track of which ports have DSC enabled.
 *
 * Returns: The number of time slots allocated to the port on success, or a
 * negative error code on failure.
 *
*/
int drm_dp_mst_atomic_enable_dsc(struct drm_atomic_state *state,
struct drm_dp_mst_port *port,
int pbn, bool enable)
{
struct drm_dp_mst_topology_state *mst_state;
struct drm_dp_mst_atomic_payload *payload;
int time_slots = 0;
mst_state = drm_atomic_get_mst_topology_state(state, port->mgr);
if (IS_ERR(mst_state))
return PTR_ERR(mst_state);
payload = drm_atomic_get_mst_payload_state(mst_state, port);
if (!payload) {
drm_dbg_atomic(state->dev,
"[MST PORT:%p] Couldn't find payload in mst state %p\n",
port, mst_state);
return -EINVAL;
}
if (payload->dsc_enabled == enable) {
drm_dbg_atomic(state->dev,
"[MST PORT:%p] DSC flag is already set to %d, returning %d time slots\n",
port, enable, payload->time_slots);
time_slots = payload->time_slots;
}
if (enable) {
time_slots = drm_dp_atomic_find_time_slots(state, port->mgr, port, pbn);
drm_dbg_atomic(state->dev,
"[MST PORT:%p] Enabling DSC flag, reallocating %d time slots on the port\n",
port, time_slots);
if (time_slots < 0)
return -EINVAL;
}
payload->dsc_enabled = enable;
return time_slots;
}
EXPORT_SYMBOL(drm_dp_mst_atomic_enable_dsc);
/**
* drm_dp_mst_atomic_check - Check that the new state of an MST topology in an
* atomic update is valid
 * @state: Pointer to the new &struct drm_atomic_state
*
* Checks the given topology state for an atomic update to ensure that it's
* valid. This includes checking whether there's enough bandwidth to support
* the new timeslot allocations in the atomic update.
*
* Any atomic drivers supporting DP MST must make sure to call this after
* checking the rest of their state in their
* &drm_mode_config_funcs.atomic_check() callback.
*
* See also:
* drm_dp_atomic_find_time_slots()
* drm_dp_atomic_release_time_slots()
*
* Returns:
*
* 0 if the new state is valid, negative error code otherwise.
*/
int drm_dp_mst_atomic_check(struct drm_atomic_state *state)
{
struct drm_dp_mst_topology_mgr *mgr;
struct drm_dp_mst_topology_state *mst_state;
int i, ret = 0;
for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
if (!mgr->mst_state)
continue;
ret = drm_dp_mst_atomic_check_payload_alloc_limits(mgr, mst_state);
if (ret)
break;
mutex_lock(&mgr->lock);
ret = drm_dp_mst_atomic_check_mstb_bw_limit(mgr->mst_primary,
mst_state);
mutex_unlock(&mgr->lock);
if (ret < 0)
break;
else
ret = 0;
}
return ret;
}
EXPORT_SYMBOL(drm_dp_mst_atomic_check);
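/*
 * Usage sketch (illustrative): run at the end of a driver's
 * &drm_mode_config_funcs.atomic_check implementation, after the generic
 * helpers.
 *
 *	ret = drm_atomic_helper_check(dev, state);
 *	if (ret)
 *		return ret;
 *	return drm_dp_mst_atomic_check(state);
 */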
const struct drm_private_state_funcs drm_dp_mst_topology_state_funcs = {
.atomic_duplicate_state = drm_dp_mst_duplicate_state,
.atomic_destroy_state = drm_dp_mst_destroy_state,
};
EXPORT_SYMBOL(drm_dp_mst_topology_state_funcs);
/**
* drm_atomic_get_mst_topology_state: get MST topology state
* @state: global atomic state
* @mgr: MST topology manager, also the private object in this case
*
* This function wraps drm_atomic_get_priv_obj_state() passing in the MST atomic
* state vtable so that the private object state returned is that of a MST
* topology object.
*
* RETURNS:
*
* The MST topology state or error pointer.
*/
struct drm_dp_mst_topology_state *drm_atomic_get_mst_topology_state(struct drm_atomic_state *state,
struct drm_dp_mst_topology_mgr *mgr)
{
return to_dp_mst_topology_state(drm_atomic_get_private_obj_state(state, &mgr->base));
}
EXPORT_SYMBOL(drm_atomic_get_mst_topology_state);
/**
* drm_atomic_get_old_mst_topology_state: get old MST topology state in atomic state, if any
* @state: global atomic state
* @mgr: MST topology manager, also the private object in this case
*
* This function wraps drm_atomic_get_old_private_obj_state() passing in the MST atomic
* state vtable so that the private object state returned is that of a MST
* topology object.
*
* Returns:
*
* The old MST topology state, or NULL if there's no topology state for this MST mgr
* in the global atomic state
*/
struct drm_dp_mst_topology_state *
drm_atomic_get_old_mst_topology_state(struct drm_atomic_state *state,
struct drm_dp_mst_topology_mgr *mgr)
{
struct drm_private_state *old_priv_state =
drm_atomic_get_old_private_obj_state(state, &mgr->base);
return old_priv_state ? to_dp_mst_topology_state(old_priv_state) : NULL;
}
EXPORT_SYMBOL(drm_atomic_get_old_mst_topology_state);
/**
* drm_atomic_get_new_mst_topology_state: get new MST topology state in atomic state, if any
* @state: global atomic state
* @mgr: MST topology manager, also the private object in this case
*
* This function wraps drm_atomic_get_new_private_obj_state() passing in the MST atomic
* state vtable so that the private object state returned is that of a MST
* topology object.
*
* Returns:
*
* The new MST topology state, or NULL if there's no topology state for this MST mgr
* in the global atomic state
*/
struct drm_dp_mst_topology_state *
drm_atomic_get_new_mst_topology_state(struct drm_atomic_state *state,
struct drm_dp_mst_topology_mgr *mgr)
{
struct drm_private_state *new_priv_state =
drm_atomic_get_new_private_obj_state(state, &mgr->base);
return new_priv_state ? to_dp_mst_topology_state(new_priv_state) : NULL;
}
EXPORT_SYMBOL(drm_atomic_get_new_mst_topology_state);
/**
* drm_dp_mst_topology_mgr_init - initialise a topology manager
* @mgr: manager struct to initialise
* @dev: device providing this structure - for i2c addition.
* @aux: DP helper aux channel to talk to this device
* @max_dpcd_transaction_bytes: hw specific DPCD transaction limit
* @max_payloads: maximum number of payloads this GPU can source
* @conn_base_id: the connector object ID the MST device is connected to.
*
* Return 0 for success, or negative error code on failure
*/
int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
struct drm_device *dev, struct drm_dp_aux *aux,
int max_dpcd_transaction_bytes, int max_payloads,
int conn_base_id)
{
struct drm_dp_mst_topology_state *mst_state;
mutex_init(&mgr->lock);
mutex_init(&mgr->qlock);
mutex_init(&mgr->delayed_destroy_lock);
mutex_init(&mgr->up_req_lock);
mutex_init(&mgr->probe_lock);
#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
mutex_init(&mgr->topology_ref_history_lock);
stack_depot_init();
#endif
INIT_LIST_HEAD(&mgr->tx_msg_downq);
INIT_LIST_HEAD(&mgr->destroy_port_list);
INIT_LIST_HEAD(&mgr->destroy_branch_device_list);
INIT_LIST_HEAD(&mgr->up_req_list);
/*
* delayed_destroy_work will be queued on a dedicated WQ, so that any
 * requeuing will also be flushed when deinitializing the topology manager.
*/
mgr->delayed_destroy_wq = alloc_ordered_workqueue("drm_dp_mst_wq", 0);
if (mgr->delayed_destroy_wq == NULL)
return -ENOMEM;
INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work);
INIT_WORK(&mgr->tx_work, drm_dp_tx_work);
INIT_WORK(&mgr->delayed_destroy_work, drm_dp_delayed_destroy_work);
INIT_WORK(&mgr->up_req_work, drm_dp_mst_up_req_work);
init_waitqueue_head(&mgr->tx_waitq);
mgr->dev = dev;
mgr->aux = aux;
mgr->max_dpcd_transaction_bytes = max_dpcd_transaction_bytes;
mgr->max_payloads = max_payloads;
mgr->conn_base_id = conn_base_id;
mst_state = kzalloc(sizeof(*mst_state), GFP_KERNEL);
if (mst_state == NULL)
return -ENOMEM;
mst_state->total_avail_slots = 63;
mst_state->start_slot = 1;
mst_state->mgr = mgr;
INIT_LIST_HEAD(&mst_state->payloads);
drm_atomic_private_obj_init(dev, &mgr->base,
&mst_state->base,
&drm_dp_mst_topology_state_funcs);
return 0;
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_init);
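/*
 * Example: a minimal sketch of initialising the topology manager from a
 * driver. The xyz and connector variables are assumed driver state, and the
 * values 16 and 4 stand in for the hardware's DPCD transaction and payload
 * limits.
 *
 *	ret = drm_dp_mst_topology_mgr_init(&xyz->mst_mgr, dev, &xyz->aux,
 *					   16, 4, connector->base.id);
 *	if (ret)
 *		return ret;
 */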
/**
* drm_dp_mst_topology_mgr_destroy() - destroy topology manager.
* @mgr: manager to destroy
*/
void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr)
{
drm_dp_mst_topology_mgr_set_mst(mgr, false);
flush_work(&mgr->work);
/* The following will also drain any requeued work on the WQ. */
if (mgr->delayed_destroy_wq) {
destroy_workqueue(mgr->delayed_destroy_wq);
mgr->delayed_destroy_wq = NULL;
}
mgr->dev = NULL;
mgr->aux = NULL;
drm_atomic_private_obj_fini(&mgr->base);
mgr->funcs = NULL;
mutex_destroy(&mgr->delayed_destroy_lock);
mutex_destroy(&mgr->qlock);
mutex_destroy(&mgr->lock);
mutex_destroy(&mgr->up_req_lock);
mutex_destroy(&mgr->probe_lock);
#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
mutex_destroy(&mgr->topology_ref_history_lock);
#endif
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_destroy);
static bool remote_i2c_read_ok(const struct i2c_msg msgs[], int num)
{
int i;
if (num - 1 > DP_REMOTE_I2C_READ_MAX_TRANSACTIONS)
return false;
for (i = 0; i < num - 1; i++) {
if (msgs[i].flags & I2C_M_RD ||
msgs[i].len > 0xff)
return false;
}
return msgs[num - 1].flags & I2C_M_RD &&
msgs[num - 1].len <= 0xff;
}
static bool remote_i2c_write_ok(const struct i2c_msg msgs[], int num)
{
int i;
for (i = 0; i < num - 1; i++) {
if (msgs[i].flags & I2C_M_RD || !(msgs[i].flags & I2C_M_STOP) ||
msgs[i].len > 0xff)
return false;
}
return !(msgs[num - 1].flags & I2C_M_RD) && msgs[num - 1].len <= 0xff;
}
static int drm_dp_mst_i2c_read(struct drm_dp_mst_branch *mstb,
struct drm_dp_mst_port *port,
struct i2c_msg *msgs, int num)
{
struct drm_dp_mst_topology_mgr *mgr = port->mgr;
unsigned int i;
struct drm_dp_sideband_msg_req_body msg;
struct drm_dp_sideband_msg_tx *txmsg = NULL;
int ret;
memset(&msg, 0, sizeof(msg));
msg.req_type = DP_REMOTE_I2C_READ;
msg.u.i2c_read.num_transactions = num - 1;
msg.u.i2c_read.port_number = port->port_num;
for (i = 0; i < num - 1; i++) {
msg.u.i2c_read.transactions[i].i2c_dev_id = msgs[i].addr;
msg.u.i2c_read.transactions[i].num_bytes = msgs[i].len;
msg.u.i2c_read.transactions[i].bytes = msgs[i].buf;
msg.u.i2c_read.transactions[i].no_stop_bit = !(msgs[i].flags & I2C_M_STOP);
}
msg.u.i2c_read.read_i2c_device_id = msgs[num - 1].addr;
msg.u.i2c_read.num_bytes_read = msgs[num - 1].len;
txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
if (!txmsg) {
ret = -ENOMEM;
goto out;
}
txmsg->dst = mstb;
drm_dp_encode_sideband_req(&msg, txmsg);
drm_dp_queue_down_tx(mgr, txmsg);
ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
if (ret > 0) {
if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
ret = -EREMOTEIO;
goto out;
}
if (txmsg->reply.u.remote_i2c_read_ack.num_bytes != msgs[num - 1].len) {
ret = -EIO;
goto out;
}
memcpy(msgs[num - 1].buf, txmsg->reply.u.remote_i2c_read_ack.bytes, msgs[num - 1].len);
ret = num;
}
out:
kfree(txmsg);
return ret;
}
static int drm_dp_mst_i2c_write(struct drm_dp_mst_branch *mstb,
struct drm_dp_mst_port *port,
struct i2c_msg *msgs, int num)
{
struct drm_dp_mst_topology_mgr *mgr = port->mgr;
unsigned int i;
struct drm_dp_sideband_msg_req_body msg;
struct drm_dp_sideband_msg_tx *txmsg = NULL;
int ret;
txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
if (!txmsg) {
ret = -ENOMEM;
goto out;
}
for (i = 0; i < num; i++) {
memset(&msg, 0, sizeof(msg));
msg.req_type = DP_REMOTE_I2C_WRITE;
msg.u.i2c_write.port_number = port->port_num;
msg.u.i2c_write.write_i2c_device_id = msgs[i].addr;
msg.u.i2c_write.num_bytes = msgs[i].len;
msg.u.i2c_write.bytes = msgs[i].buf;
memset(txmsg, 0, sizeof(*txmsg));
txmsg->dst = mstb;
drm_dp_encode_sideband_req(&msg, txmsg);
drm_dp_queue_down_tx(mgr, txmsg);
ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
if (ret > 0) {
if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
ret = -EREMOTEIO;
goto out;
}
} else {
goto out;
}
}
ret = num;
out:
kfree(txmsg);
return ret;
}
/* I2C device */
static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter,
struct i2c_msg *msgs, int num)
{
struct drm_dp_aux *aux = adapter->algo_data;
struct drm_dp_mst_port *port =
container_of(aux, struct drm_dp_mst_port, aux);
struct drm_dp_mst_branch *mstb;
struct drm_dp_mst_topology_mgr *mgr = port->mgr;
int ret;
mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
if (!mstb)
return -EREMOTEIO;
if (remote_i2c_read_ok(msgs, num)) {
ret = drm_dp_mst_i2c_read(mstb, port, msgs, num);
} else if (remote_i2c_write_ok(msgs, num)) {
ret = drm_dp_mst_i2c_write(mstb, port, msgs, num);
} else {
drm_dbg_kms(mgr->dev, "Unsupported I2C transaction for MST device\n");
ret = -EIO;
}
drm_dp_mst_topology_put_mstb(mstb);
return ret;
}
static u32 drm_dp_mst_i2c_functionality(struct i2c_adapter *adapter)
{
return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
I2C_FUNC_SMBUS_READ_BLOCK_DATA |
I2C_FUNC_SMBUS_BLOCK_PROC_CALL |
I2C_FUNC_10BIT_ADDR;
}
static const struct i2c_algorithm drm_dp_mst_i2c_algo = {
.functionality = drm_dp_mst_i2c_functionality,
.master_xfer = drm_dp_mst_i2c_xfer,
};
/**
* drm_dp_mst_register_i2c_bus() - register an I2C adapter for I2C-over-AUX
* @port: The port to add the I2C bus on
*
* Returns 0 on success or a negative error code on failure.
*/
static int drm_dp_mst_register_i2c_bus(struct drm_dp_mst_port *port)
{
struct drm_dp_aux *aux = &port->aux;
struct device *parent_dev = port->mgr->dev->dev;
aux->ddc.algo = &drm_dp_mst_i2c_algo;
aux->ddc.algo_data = aux;
aux->ddc.retries = 3;
aux->ddc.class = I2C_CLASS_DDC;
aux->ddc.owner = THIS_MODULE;
/* FIXME: set the kdev of the port's connector as parent */
aux->ddc.dev.parent = parent_dev;
aux->ddc.dev.of_node = parent_dev->of_node;
strscpy(aux->ddc.name, aux->name ? aux->name : dev_name(parent_dev),
sizeof(aux->ddc.name));
return i2c_add_adapter(&aux->ddc);
}
/**
* drm_dp_mst_unregister_i2c_bus() - unregister an I2C-over-AUX adapter
* @port: The port to remove the I2C bus from
*/
static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_mst_port *port)
{
i2c_del_adapter(&port->aux.ddc);
}
/**
* drm_dp_mst_is_virtual_dpcd() - Is the given port a virtual DP Peer Device
* @port: The port to check
*
* A single physical MST hub object can be represented in the topology
* by multiple branches, with virtual ports between those branches.
*
 * As of DP 1.4, an MST hub with internal (virtual) ports must expose
* certain DPCD registers over those ports. See sections 2.6.1.1.1
* and 2.6.1.1.2 of Display Port specification v1.4 for details.
*
* May acquire mgr->lock
*
* Returns:
* true if the port is a virtual DP peer device, false otherwise
*/
static bool drm_dp_mst_is_virtual_dpcd(struct drm_dp_mst_port *port)
{
struct drm_dp_mst_port *downstream_port;
if (!port || port->dpcd_rev < DP_DPCD_REV_14)
return false;
/* Virtual DP Sink (Internal Display Panel) */
if (port->port_num >= 8)
return true;
/* DP-to-HDMI Protocol Converter */
if (port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV &&
!port->mcs &&
port->ldps)
return true;
/* DP-to-DP */
mutex_lock(&port->mgr->lock);
if (port->pdt == DP_PEER_DEVICE_MST_BRANCHING &&
port->mstb &&
port->mstb->num_ports == 2) {
list_for_each_entry(downstream_port, &port->mstb->ports, next) {
if (downstream_port->pdt == DP_PEER_DEVICE_SST_SINK &&
!downstream_port->input) {
mutex_unlock(&port->mgr->lock);
return true;
}
}
}
mutex_unlock(&port->mgr->lock);
return false;
}
/**
* drm_dp_mst_dsc_aux_for_port() - Find the correct aux for DSC
* @port: The port to check. A leaf of the MST tree with an attached display.
*
* Depending on the situation, DSC may be enabled via the endpoint aux,
* the immediately upstream aux, or the connector's physical aux.
*
* This is both the correct aux to read DSC_CAPABILITY and the
* correct aux to write DSC_ENABLED.
*
* This operation can be expensive (up to four aux reads), so
* the caller should cache the return.
*
* Returns:
* NULL if DSC cannot be enabled on this port, otherwise the aux device
*/
struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port)
{
struct drm_dp_mst_port *immediate_upstream_port;
struct drm_dp_mst_port *fec_port;
struct drm_dp_desc desc = {};
u8 endpoint_fec;
u8 endpoint_dsc;
if (!port)
return NULL;
if (port->parent->port_parent)
immediate_upstream_port = port->parent->port_parent;
else
immediate_upstream_port = NULL;
fec_port = immediate_upstream_port;
while (fec_port) {
/*
* Each physical link (i.e. not a virtual port) between the
* output and the primary device must support FEC
*/
if (!drm_dp_mst_is_virtual_dpcd(fec_port) &&
!fec_port->fec_capable)
return NULL;
fec_port = fec_port->parent->port_parent;
}
/* DP-to-DP peer device */
if (drm_dp_mst_is_virtual_dpcd(immediate_upstream_port)) {
u8 upstream_dsc;
if (drm_dp_dpcd_read(&port->aux,
DP_DSC_SUPPORT, &endpoint_dsc, 1) != 1)
return NULL;
if (drm_dp_dpcd_read(&port->aux,
DP_FEC_CAPABILITY, &endpoint_fec, 1) != 1)
return NULL;
if (drm_dp_dpcd_read(&immediate_upstream_port->aux,
DP_DSC_SUPPORT, &upstream_dsc, 1) != 1)
return NULL;
		/* Endpoint decompression with DP-to-DP peer device */
if ((endpoint_dsc & DP_DSC_DECOMPRESSION_IS_SUPPORTED) &&
(endpoint_fec & DP_FEC_CAPABLE) &&
(upstream_dsc & DP_DSC_PASSTHROUGH_IS_SUPPORTED)) {
port->passthrough_aux = &immediate_upstream_port->aux;
return &port->aux;
}
/* Virtual DPCD decompression with DP-to-DP peer device */
return &immediate_upstream_port->aux;
}
/* Virtual DPCD decompression with DP-to-HDMI or Virtual DP Sink */
if (drm_dp_mst_is_virtual_dpcd(port))
return &port->aux;
/*
* Synaptics quirk
* Applies to ports for which:
* - Physical aux has Synaptics OUI
* - DPv1.4 or higher
* - Port is on primary branch device
* - Not a VGA adapter (DP_DWN_STRM_PORT_TYPE_ANALOG)
*/
if (drm_dp_read_desc(port->mgr->aux, &desc, true))
return NULL;
if (drm_dp_has_quirk(&desc, DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD) &&
port->mgr->dpcd[DP_DPCD_REV] >= DP_DPCD_REV_14 &&
port->parent == port->mgr->mst_primary) {
u8 dpcd_ext[DP_RECEIVER_CAP_SIZE];
if (drm_dp_read_dpcd_caps(port->mgr->aux, dpcd_ext) < 0)
return NULL;
if ((dpcd_ext[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT) &&
((dpcd_ext[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_TYPE_MASK)
!= DP_DWN_STRM_PORT_TYPE_ANALOG))
return port->mgr->aux;
}
/*
* The check below verifies if the MST sink
* connected to the GPU is capable of DSC -
* therefore the endpoint needs to be
* both DSC and FEC capable.
*/
if (drm_dp_dpcd_read(&port->aux,
DP_DSC_SUPPORT, &endpoint_dsc, 1) != 1)
return NULL;
if (drm_dp_dpcd_read(&port->aux,
DP_FEC_CAPABILITY, &endpoint_fec, 1) != 1)
return NULL;
if ((endpoint_dsc & DP_DSC_DECOMPRESSION_IS_SUPPORTED) &&
(endpoint_fec & DP_FEC_CAPABLE))
return &port->aux;
return NULL;
}
EXPORT_SYMBOL(drm_dp_mst_dsc_aux_for_port);
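/*
 * Example: a minimal sketch of using the returned aux to enable DSC
 * decompression. The xyz_state->dsc_aux field is hypothetical driver state;
 * caching the aux there avoids repeating the (expensive) lookup.
 *
 *	xyz_state->dsc_aux = drm_dp_mst_dsc_aux_for_port(port);
 *	if (xyz_state->dsc_aux)
 *		drm_dp_dpcd_writeb(xyz_state->dsc_aux, DP_DSC_ENABLE,
 *				   DP_DECOMPRESSION_EN);
 */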
| linux-master | drivers/gpu/drm/display/drm_dp_mst_topology.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2021 Google Inc.
*
* The DP AUX bus is used for devices that are connected over a DisplayPort
* AUX bus. The device on the far side of the bus is referred to as an
* endpoint in this code.
*
* There is only one device connected to the DP AUX bus: an eDP panel.
* Though historically panels (even DP panels) have been modeled as simple
* platform devices, putting them under the DP AUX bus allows the panel driver
* to perform transactions on that bus.
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <drm/display/drm_dp_aux_bus.h>
#include <drm/display/drm_dp_helper.h>
struct dp_aux_ep_device_with_data {
struct dp_aux_ep_device aux_ep;
int (*done_probing)(struct drm_dp_aux *aux);
};
/**
* dp_aux_ep_match() - The match function for the dp_aux_bus.
* @dev: The device to match.
* @drv: The driver to try to match against.
*
* At the moment, we just match on device tree.
*
* Return: True if this driver matches this device; false otherwise.
*/
static int dp_aux_ep_match(struct device *dev, struct device_driver *drv)
{
return !!of_match_device(drv->of_match_table, dev);
}
/**
* dp_aux_ep_probe() - The probe function for the dp_aux_bus.
* @dev: The device to probe.
*
* Calls through to the endpoint driver probe.
*
* Return: 0 if no error or negative error code.
*/
static int dp_aux_ep_probe(struct device *dev)
{
struct dp_aux_ep_driver *aux_ep_drv = to_dp_aux_ep_drv(dev->driver);
struct dp_aux_ep_device *aux_ep = to_dp_aux_ep_dev(dev);
struct dp_aux_ep_device_with_data *aux_ep_with_data =
container_of(aux_ep, struct dp_aux_ep_device_with_data, aux_ep);
int ret;
ret = dev_pm_domain_attach(dev, true);
if (ret)
return dev_err_probe(dev, ret, "Failed to attach to PM Domain\n");
ret = aux_ep_drv->probe(aux_ep);
if (ret)
goto err_attached;
if (aux_ep_with_data->done_probing) {
ret = aux_ep_with_data->done_probing(aux_ep->aux);
if (ret) {
/*
* The done_probing() callback should not return
* -EPROBE_DEFER to us. If it does, we treat it as an
* error. Passing it on as-is would cause the _panel_
* to defer.
*/
if (ret == -EPROBE_DEFER) {
dev_err(dev,
"DP AUX done_probing() can't defer\n");
ret = -EINVAL;
}
goto err_probed;
}
}
return 0;
err_probed:
if (aux_ep_drv->remove)
aux_ep_drv->remove(aux_ep);
err_attached:
dev_pm_domain_detach(dev, true);
return ret;
}
/**
* dp_aux_ep_remove() - The remove function for the dp_aux_bus.
* @dev: The device to remove.
*
* Calls through to the endpoint driver remove.
*/
static void dp_aux_ep_remove(struct device *dev)
{
struct dp_aux_ep_driver *aux_ep_drv = to_dp_aux_ep_drv(dev->driver);
struct dp_aux_ep_device *aux_ep = to_dp_aux_ep_dev(dev);
if (aux_ep_drv->remove)
aux_ep_drv->remove(aux_ep);
dev_pm_domain_detach(dev, true);
}
/**
* dp_aux_ep_shutdown() - The shutdown function for the dp_aux_bus.
* @dev: The device to shutdown.
*
* Calls through to the endpoint driver shutdown.
*/
static void dp_aux_ep_shutdown(struct device *dev)
{
struct dp_aux_ep_driver *aux_ep_drv;
if (!dev->driver)
return;
aux_ep_drv = to_dp_aux_ep_drv(dev->driver);
if (aux_ep_drv->shutdown)
aux_ep_drv->shutdown(to_dp_aux_ep_dev(dev));
}
static struct bus_type dp_aux_bus_type = {
.name = "dp-aux",
.match = dp_aux_ep_match,
.probe = dp_aux_ep_probe,
.remove = dp_aux_ep_remove,
.shutdown = dp_aux_ep_shutdown,
};
static ssize_t modalias_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return of_device_modalias(dev, buf, PAGE_SIZE);
}
static DEVICE_ATTR_RO(modalias);
static struct attribute *dp_aux_ep_dev_attrs[] = {
&dev_attr_modalias.attr,
NULL,
};
ATTRIBUTE_GROUPS(dp_aux_ep_dev);
/**
* dp_aux_ep_dev_release() - Free memory for the dp_aux_ep device
* @dev: The device to free.
*/
static void dp_aux_ep_dev_release(struct device *dev)
{
struct dp_aux_ep_device *aux_ep = to_dp_aux_ep_dev(dev);
struct dp_aux_ep_device_with_data *aux_ep_with_data =
container_of(aux_ep, struct dp_aux_ep_device_with_data, aux_ep);
kfree(aux_ep_with_data);
}
static int dp_aux_ep_dev_modalias(const struct device *dev, struct kobj_uevent_env *env)
{
return of_device_uevent_modalias(dev, env);
}
static struct device_type dp_aux_device_type_type = {
.groups = dp_aux_ep_dev_groups,
.uevent = dp_aux_ep_dev_modalias,
.release = dp_aux_ep_dev_release,
};
/**
* of_dp_aux_ep_destroy() - Destroy an DP AUX endpoint device
* @dev: The device to destroy.
* @data: Not used
*
* This is just used as a callback by of_dp_aux_depopulate_bus() and
* is called for _all_ of the child devices of the device providing the AUX bus.
* We'll only act on those that are of type "dp_aux_bus_type".
*
* This function is effectively an inverse of what's in
 * of_dp_aux_populate_bus(). NOTE: since we only populate one child device,
 * it's expected that only one device will match all the "if" tests in
 * this function and get to the device_unregister().
*
* Return: 0 if no error or negative error code.
*/
static int of_dp_aux_ep_destroy(struct device *dev, void *data)
{
struct device_node *np = dev->of_node;
if (dev->bus != &dp_aux_bus_type)
return 0;
if (!of_node_check_flag(np, OF_POPULATED))
return 0;
of_node_clear_flag(np, OF_POPULATED);
of_node_put(np);
device_unregister(dev);
return 0;
}
/**
* of_dp_aux_depopulate_bus() - Undo of_dp_aux_populate_bus
* @aux: The AUX channel whose device we want to depopulate
*
* This will destroy the device that was created
* by of_dp_aux_populate_bus().
*/
void of_dp_aux_depopulate_bus(struct drm_dp_aux *aux)
{
device_for_each_child_reverse(aux->dev, NULL, of_dp_aux_ep_destroy);
}
EXPORT_SYMBOL_GPL(of_dp_aux_depopulate_bus);
/**
* of_dp_aux_populate_bus() - Populate the endpoint device on the DP AUX
* @aux: The AUX channel whose device we want to populate. It is required that
* drm_dp_aux_init() has already been called for this AUX channel.
 * @done_probing: Callback function to call after the EP device finishes probing.
* Will not be called if there are no EP devices and this
* function will return -ENODEV.
*
* This will populate the device (expected to be an eDP panel) under the
* "aux-bus" node of the device providing the AUX channel (AKA aux->dev).
*
* When this function finishes, it is _possible_ (but not guaranteed) that
* our sub-device will have finished probing. It should be noted that if our
* sub-device returns -EPROBE_DEFER or is probing asynchronously for some
 * reason, we will not return any error codes ourselves, but our
* sub-device will _not_ have actually probed successfully yet.
*
* In many cases it's important for the caller of this function to be notified
* when our sub device finishes probing. Our sub device is expected to be an
* eDP panel and the caller is expected to be an eDP controller. The eDP
* controller needs to be able to get a reference to the panel when it finishes
* probing. For this reason the caller can pass in a function pointer that
* will be called when our sub-device finishes probing.
*
* If this function succeeds you should later make sure you call
* of_dp_aux_depopulate_bus() to undo it, or just use the devm version
* of this function.
*
* Return: 0 if no error or negative error code; returns -ENODEV if there are
* no children. The done_probing() function won't be called in that
* case.
*/
int of_dp_aux_populate_bus(struct drm_dp_aux *aux,
int (*done_probing)(struct drm_dp_aux *aux))
{
struct device_node *bus = NULL, *np = NULL;
struct dp_aux_ep_device *aux_ep;
struct dp_aux_ep_device_with_data *aux_ep_with_data;
int ret;
/* drm_dp_aux_init() should have been called already; warn if not */
WARN_ON_ONCE(!aux->ddc.algo);
if (!aux->dev->of_node)
return -ENODEV;
bus = of_get_child_by_name(aux->dev->of_node, "aux-bus");
if (!bus)
return -ENODEV;
np = of_get_next_available_child(bus, NULL);
of_node_put(bus);
if (!np)
return -ENODEV;
if (of_node_test_and_set_flag(np, OF_POPULATED)) {
dev_err(aux->dev, "DP AUX EP device already populated\n");
ret = -EINVAL;
goto err_did_get_np;
}
aux_ep_with_data = kzalloc(sizeof(*aux_ep_with_data), GFP_KERNEL);
if (!aux_ep_with_data) {
ret = -ENOMEM;
goto err_did_set_populated;
}
aux_ep_with_data->done_probing = done_probing;
aux_ep = &aux_ep_with_data->aux_ep;
aux_ep->aux = aux;
aux_ep->dev.parent = aux->dev;
aux_ep->dev.bus = &dp_aux_bus_type;
aux_ep->dev.type = &dp_aux_device_type_type;
aux_ep->dev.of_node = of_node_get(np);
dev_set_name(&aux_ep->dev, "aux-%s", dev_name(aux->dev));
ret = device_register(&aux_ep->dev);
if (ret) {
dev_err(aux->dev, "Failed to create AUX EP for %pOF: %d\n", np, ret);
/*
* As per docs of device_register(), call this instead
* of kfree() directly for error cases.
*/
put_device(&aux_ep->dev);
goto err_did_set_populated;
}
return 0;
err_did_set_populated:
of_node_clear_flag(np, OF_POPULATED);
err_did_get_np:
of_node_put(np);
return ret;
}
EXPORT_SYMBOL_GPL(of_dp_aux_populate_bus);
static void of_dp_aux_depopulate_bus_void(void *data)
{
of_dp_aux_depopulate_bus(data);
}
/**
* devm_of_dp_aux_populate_bus() - devm wrapper for of_dp_aux_populate_bus()
* @aux: The AUX channel whose device we want to populate
 * @done_probing: Callback function to call after the EP device finishes probing.
* Will not be called if there are no EP devices and this
* function will return -ENODEV.
*
* Handles freeing w/ devm on the device "aux->dev".
*
* Return: 0 if no error or negative error code; returns -ENODEV if there are
* no children. The done_probing() function won't be called in that
* case.
*/
int devm_of_dp_aux_populate_bus(struct drm_dp_aux *aux,
int (*done_probing)(struct drm_dp_aux *aux))
{
int ret;
ret = of_dp_aux_populate_bus(aux, done_probing);
if (ret)
return ret;
return devm_add_action_or_reset(aux->dev,
of_dp_aux_depopulate_bus_void, aux);
}
EXPORT_SYMBOL_GPL(devm_of_dp_aux_populate_bus);
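/*
 * Example: a minimal sketch of an eDP controller populating the bus after
 * drm_dp_aux_init(). The xyz_done_probing() and xyz_finish_bringup() names
 * are hypothetical; the callback is where the controller would typically
 * look up the now-probed panel and finish its own bring-up.
 *
 *	static int xyz_done_probing(struct drm_dp_aux *aux)
 *	{
 *		return xyz_finish_bringup(aux->dev);
 *	}
 *
 *	ret = devm_of_dp_aux_populate_bus(&xyz->aux, xyz_done_probing);
 *	if (ret && ret != -ENODEV)
 *		return ret;
 */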
int __dp_aux_dp_driver_register(struct dp_aux_ep_driver *drv, struct module *owner)
{
drv->driver.owner = owner;
drv->driver.bus = &dp_aux_bus_type;
return driver_register(&drv->driver);
}
EXPORT_SYMBOL_GPL(__dp_aux_dp_driver_register);
void dp_aux_dp_driver_unregister(struct dp_aux_ep_driver *drv)
{
driver_unregister(&drv->driver);
}
EXPORT_SYMBOL_GPL(dp_aux_dp_driver_unregister);
static int __init dp_aux_bus_init(void)
{
int ret;
ret = bus_register(&dp_aux_bus_type);
if (ret)
return ret;
return 0;
}
static void __exit dp_aux_bus_exit(void)
{
bus_unregister(&dp_aux_bus_type);
}
subsys_initcall(dp_aux_bus_init);
module_exit(dp_aux_bus_exit);
MODULE_AUTHOR("Douglas Anderson <[email protected]>");
MODULE_DESCRIPTION("DRM DisplayPort AUX bus");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/gpu/drm/display/drm_dp_aux_bus.c |
/*
* Copyright © 2016 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <drm/display/drm_dp_dual_mode_helper.h>
#include <drm/drm_device.h>
#include <drm/drm_print.h>
/**
* DOC: dp dual mode helpers
*
* Helper functions to deal with DP dual mode (aka. DP++) adaptors.
*
* Type 1:
* Adaptor registers (if any) and the sink DDC bus may be accessed via I2C.
*
* Type 2:
* Adaptor registers and sink DDC bus can be accessed either via I2C or
* I2C-over-AUX. Source devices may choose to implement either of these
* access methods.
*/
#define DP_DUAL_MODE_SLAVE_ADDRESS 0x40
/**
* drm_dp_dual_mode_read - Read from the DP dual mode adaptor register(s)
* @adapter: I2C adapter for the DDC bus
* @offset: register offset
* @buffer: buffer for return data
 * @size: size of the buffer
*
* Reads @size bytes from the DP dual mode adaptor registers
* starting at @offset.
*
* Returns:
* 0 on success, negative error code on failure
*/
ssize_t drm_dp_dual_mode_read(struct i2c_adapter *adapter,
u8 offset, void *buffer, size_t size)
{
u8 zero = 0;
char *tmpbuf = NULL;
/*
* As sub-addressing is not supported by all adaptors,
* always explicitly read from the start and discard
* any bytes that come before the requested offset.
* This way, no matter whether the adaptor supports it
* or not, we'll end up reading the proper data.
*/
struct i2c_msg msgs[] = {
{
.addr = DP_DUAL_MODE_SLAVE_ADDRESS,
.flags = 0,
.len = 1,
.buf = &zero,
},
{
.addr = DP_DUAL_MODE_SLAVE_ADDRESS,
.flags = I2C_M_RD,
.len = size + offset,
.buf = buffer,
},
};
int ret;
if (offset) {
tmpbuf = kmalloc(size + offset, GFP_KERNEL);
if (!tmpbuf)
return -ENOMEM;
msgs[1].buf = tmpbuf;
}
ret = i2c_transfer(adapter, msgs, ARRAY_SIZE(msgs));
if (tmpbuf)
memcpy(buffer, tmpbuf + offset, size);
kfree(tmpbuf);
if (ret < 0)
return ret;
if (ret != ARRAY_SIZE(msgs))
return -EPROTO;
return 0;
}
EXPORT_SYMBOL(drm_dp_dual_mode_read);
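/*
 * Example: a minimal sketch of reading the adaptor's HDMI ID string over
 * the DDC bus, much like drm_dp_dual_mode_detect() below does internally.
 * The adapter variable is an assumed I2C adapter for the DDC bus.
 *
 *	char hdmi_id[DP_DUAL_MODE_HDMI_ID_LEN] = {};
 *	ssize_t err;
 *
 *	err = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_HDMI_ID,
 *				    hdmi_id, sizeof(hdmi_id));
 *	if (err)
 *		return err;
 */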
/**
* drm_dp_dual_mode_write - Write to the DP dual mode adaptor register(s)
* @adapter: I2C adapter for the DDC bus
* @offset: register offset
* @buffer: buffer for write data
 * @size: size of the buffer
*
* Writes @size bytes to the DP dual mode adaptor registers
* starting at @offset.
*
* Returns:
* 0 on success, negative error code on failure
*/
ssize_t drm_dp_dual_mode_write(struct i2c_adapter *adapter,
u8 offset, const void *buffer, size_t size)
{
struct i2c_msg msg = {
.addr = DP_DUAL_MODE_SLAVE_ADDRESS,
.flags = 0,
.len = 1 + size,
.buf = NULL,
};
void *data;
int ret;
data = kmalloc(msg.len, GFP_KERNEL);
if (!data)
return -ENOMEM;
msg.buf = data;
memcpy(data, &offset, 1);
memcpy(data + 1, buffer, size);
ret = i2c_transfer(adapter, &msg, 1);
kfree(data);
if (ret < 0)
return ret;
if (ret != 1)
return -EPROTO;
return 0;
}
EXPORT_SYMBOL(drm_dp_dual_mode_write);
static bool is_hdmi_adaptor(const char hdmi_id[DP_DUAL_MODE_HDMI_ID_LEN])
{
static const char dp_dual_mode_hdmi_id[DP_DUAL_MODE_HDMI_ID_LEN] =
"DP-HDMI ADAPTOR\x04";
return memcmp(hdmi_id, dp_dual_mode_hdmi_id,
sizeof(dp_dual_mode_hdmi_id)) == 0;
}
static bool is_type1_adaptor(uint8_t adaptor_id)
{
return adaptor_id == 0 || adaptor_id == 0xff;
}
static bool is_type2_adaptor(uint8_t adaptor_id)
{
return adaptor_id == (DP_DUAL_MODE_TYPE_TYPE2 |
DP_DUAL_MODE_REV_TYPE2);
}
static bool is_lspcon_adaptor(const char hdmi_id[DP_DUAL_MODE_HDMI_ID_LEN],
const uint8_t adaptor_id)
{
return is_hdmi_adaptor(hdmi_id) &&
(adaptor_id == (DP_DUAL_MODE_TYPE_TYPE2 |
DP_DUAL_MODE_TYPE_HAS_DPCD));
}
/**
* drm_dp_dual_mode_detect - Identify the DP dual mode adaptor
* @dev: &drm_device to use
* @adapter: I2C adapter for the DDC bus
*
* Attempt to identify the type of the DP dual mode adaptor used.
*
* Note that when the answer is @DRM_DP_DUAL_MODE_UNKNOWN it's not
* certain whether we're dealing with a native HDMI port or
* a type 1 DVI dual mode adaptor. The driver will have to use
* some other hardware/driver specific mechanism to make that
* distinction.
*
* Returns:
* The type of the DP dual mode adaptor used
*/
enum drm_dp_dual_mode_type drm_dp_dual_mode_detect(const struct drm_device *dev,
struct i2c_adapter *adapter)
{
char hdmi_id[DP_DUAL_MODE_HDMI_ID_LEN] = {};
uint8_t adaptor_id = 0x00;
ssize_t ret;
/*
	 * Let's see if the adaptor is there by reading the
* HDMI ID registers.
*
	 * Note that type 1 DVI adaptors are not required to implement
* any registers, and that presents a problem for detection.
* If the i2c transfer is nacked, we may or may not be dealing
* with a type 1 DVI adaptor. Some other mechanism of detecting
* the presence of the adaptor is required. One way would be
	 * to check the state of the CONFIG1 pin. Another method would
* simply require the driver to know whether the port is a DP++
* port or a native HDMI port. Both of these methods are entirely
* hardware/driver specific so we can't deal with them here.
*/
ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_HDMI_ID,
hdmi_id, sizeof(hdmi_id));
drm_dbg_kms(dev, "DP dual mode HDMI ID: %*pE (err %zd)\n",
ret ? 0 : (int)sizeof(hdmi_id), hdmi_id, ret);
if (ret)
return DRM_DP_DUAL_MODE_UNKNOWN;
ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_ADAPTOR_ID,
&adaptor_id, sizeof(adaptor_id));
drm_dbg_kms(dev, "DP dual mode adaptor ID: %02x (err %zd)\n", adaptor_id, ret);
if (ret == 0) {
if (is_lspcon_adaptor(hdmi_id, adaptor_id))
return DRM_DP_DUAL_MODE_LSPCON;
if (is_type2_adaptor(adaptor_id)) {
if (is_hdmi_adaptor(hdmi_id))
return DRM_DP_DUAL_MODE_TYPE2_HDMI;
else
return DRM_DP_DUAL_MODE_TYPE2_DVI;
}
/*
* If not a proper type 1 ID, still assume type 1, but let
* the user know that we may have misdetected the type.
*/
if (!is_type1_adaptor(adaptor_id))
drm_err(dev, "Unexpected DP dual mode adaptor ID %02x\n", adaptor_id);
}
if (is_hdmi_adaptor(hdmi_id))
return DRM_DP_DUAL_MODE_TYPE1_HDMI;
else
return DRM_DP_DUAL_MODE_TYPE1_DVI;
}
EXPORT_SYMBOL(drm_dp_dual_mode_detect);
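/*
 * Example: a minimal sketch of how a driver might combine the helpers in
 * this file once it knows the port is DP++. The xyz fields, dev and adapter
 * are assumed driver state.
 *
 *	xyz->dp_dual_mode = drm_dp_dual_mode_detect(dev, adapter);
 *	xyz->max_tmds_clock = drm_dp_dual_mode_max_tmds_clock(dev,
 *							      xyz->dp_dual_mode,
 *							      adapter);
 *	drm_dp_dual_mode_set_tmds_output(dev, xyz->dp_dual_mode, adapter, true);
 */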
/**
* drm_dp_dual_mode_max_tmds_clock - Max TMDS clock for DP dual mode adaptor
* @dev: &drm_device to use
* @type: DP dual mode adaptor type
* @adapter: I2C adapter for the DDC bus
*
* Determine the max TMDS clock the adaptor supports based on the
* type of the dual mode adaptor and the DP_DUAL_MODE_MAX_TMDS_CLOCK
* register (on type2 adaptors). As some type 1 adaptors have
* problems with registers (see comments in drm_dp_dual_mode_detect())
* we don't read the register on those, instead we simply assume
* a 165 MHz limit based on the specification.
*
* Returns:
* Maximum supported TMDS clock rate for the DP dual mode adaptor in kHz.
*/
int drm_dp_dual_mode_max_tmds_clock(const struct drm_device *dev, enum drm_dp_dual_mode_type type,
struct i2c_adapter *adapter)
{
uint8_t max_tmds_clock;
ssize_t ret;
/* native HDMI so no limit */
if (type == DRM_DP_DUAL_MODE_NONE)
return 0;
/*
* Type 1 adaptors are limited to 165MHz
	 * Type 2 adaptors can tell us their limit
*/
if (type < DRM_DP_DUAL_MODE_TYPE2_DVI)
return 165000;
ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_MAX_TMDS_CLOCK,
&max_tmds_clock, sizeof(max_tmds_clock));
if (ret || max_tmds_clock == 0x00 || max_tmds_clock == 0xff) {
drm_dbg_kms(dev, "Failed to query max TMDS clock\n");
return 165000;
}
return max_tmds_clock * 5000 / 2;
}
EXPORT_SYMBOL(drm_dp_dual_mode_max_tmds_clock);
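/*
 * Worked example of the conversion above: the DP_DUAL_MODE_MAX_TMDS_CLOCK
 * register counts in 2.5 MHz units, so a value of 0x42 (66) yields
 * 66 * 5000 / 2 = 165000 kHz (165 MHz), while 0x78 (120) yields
 * 120 * 5000 / 2 = 300000 kHz (300 MHz).
 */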
/**
* drm_dp_dual_mode_get_tmds_output - Get the state of the TMDS output buffers in the DP dual mode adaptor
* @dev: &drm_device to use
* @type: DP dual mode adaptor type
* @adapter: I2C adapter for the DDC bus
* @enabled: current state of the TMDS output buffers
*
* Get the state of the TMDS output buffers in the adaptor. For
* type2 adaptors this is queried from the DP_DUAL_MODE_TMDS_OEN
* register. As some type 1 adaptors have problems with registers
* (see comments in drm_dp_dual_mode_detect()) we don't read the
* register on those, instead we simply assume that the buffers
* are always enabled.
*
* Returns:
* 0 on success, negative error code on failure
*/
int drm_dp_dual_mode_get_tmds_output(const struct drm_device *dev,
enum drm_dp_dual_mode_type type, struct i2c_adapter *adapter,
bool *enabled)
{
uint8_t tmds_oen;
ssize_t ret;
if (type < DRM_DP_DUAL_MODE_TYPE2_DVI) {
*enabled = true;
return 0;
}
ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_TMDS_OEN,
&tmds_oen, sizeof(tmds_oen));
if (ret) {
drm_dbg_kms(dev, "Failed to query state of TMDS output buffers\n");
return ret;
}
*enabled = !(tmds_oen & DP_DUAL_MODE_TMDS_DISABLE);
return 0;
}
EXPORT_SYMBOL(drm_dp_dual_mode_get_tmds_output);
/**
* drm_dp_dual_mode_set_tmds_output - Enable/disable TMDS output buffers in the DP dual mode adaptor
* @dev: &drm_device to use
* @type: DP dual mode adaptor type
* @adapter: I2C adapter for the DDC bus
* @enable: enable (as opposed to disable) the TMDS output buffers
*
* Set the state of the TMDS output buffers in the adaptor. For
* type2 this is set via the DP_DUAL_MODE_TMDS_OEN register.
* Type1 adaptors do not support any register writes.
*
* Returns:
* 0 on success, negative error code on failure
*/
int drm_dp_dual_mode_set_tmds_output(const struct drm_device *dev, enum drm_dp_dual_mode_type type,
struct i2c_adapter *adapter, bool enable)
{
uint8_t tmds_oen = enable ? 0 : DP_DUAL_MODE_TMDS_DISABLE;
ssize_t ret;
int retry;
if (type < DRM_DP_DUAL_MODE_TYPE2_DVI)
return 0;
/*
* LSPCON adapters in low-power state may ignore the first write, so
* read back and verify the written value a few times.
*/
for (retry = 0; retry < 3; retry++) {
uint8_t tmp;
ret = drm_dp_dual_mode_write(adapter, DP_DUAL_MODE_TMDS_OEN,
&tmds_oen, sizeof(tmds_oen));
if (ret) {
drm_dbg_kms(dev, "Failed to %s TMDS output buffers (%d attempts)\n",
enable ? "enable" : "disable", retry + 1);
return ret;
}
ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_TMDS_OEN,
&tmp, sizeof(tmp));
if (ret) {
drm_dbg_kms(dev,
"I2C read failed during TMDS output buffer %s (%d attempts)\n",
enable ? "enabling" : "disabling", retry + 1);
return ret;
}
if (tmp == tmds_oen)
return 0;
}
drm_dbg_kms(dev, "I2C write value mismatch during TMDS output buffer %s\n",
enable ? "enabling" : "disabling");
return -EIO;
}
EXPORT_SYMBOL(drm_dp_dual_mode_set_tmds_output);
/**
* drm_dp_get_dual_mode_type_name - Get the name of the DP dual mode adaptor type as a string
* @type: DP dual mode adaptor type
*
* Returns:
* String representation of the DP dual mode adaptor type
*/
const char *drm_dp_get_dual_mode_type_name(enum drm_dp_dual_mode_type type)
{
switch (type) {
case DRM_DP_DUAL_MODE_NONE:
return "none";
case DRM_DP_DUAL_MODE_TYPE1_DVI:
return "type 1 DVI";
case DRM_DP_DUAL_MODE_TYPE1_HDMI:
return "type 1 HDMI";
case DRM_DP_DUAL_MODE_TYPE2_DVI:
return "type 2 DVI";
case DRM_DP_DUAL_MODE_TYPE2_HDMI:
return "type 2 HDMI";
case DRM_DP_DUAL_MODE_LSPCON:
return "lspcon";
default:
WARN_ON(type != DRM_DP_DUAL_MODE_UNKNOWN);
return "unknown";
}
}
EXPORT_SYMBOL(drm_dp_get_dual_mode_type_name);
/**
* drm_lspcon_get_mode: Get LSPCON's current mode of operation by
* reading offset (0x80, 0x41)
* @dev: &drm_device to use
* @adapter: I2C-over-aux adapter
* @mode: current lspcon mode of operation output variable
*
* Returns:
 * 0 on success, with @mode set to the current mode of operation;
 * negative error code on failure
*/
int drm_lspcon_get_mode(const struct drm_device *dev, struct i2c_adapter *adapter,
enum drm_lspcon_mode *mode)
{
u8 data;
int ret = 0;
int retry;
if (!mode) {
drm_err(dev, "NULL input\n");
return -EINVAL;
}
/* Read Status: i2c over aux */
for (retry = 0; retry < 6; retry++) {
if (retry)
usleep_range(500, 1000);
ret = drm_dp_dual_mode_read(adapter,
DP_DUAL_MODE_LSPCON_CURRENT_MODE,
&data, sizeof(data));
if (!ret)
break;
}
if (ret < 0) {
drm_dbg_kms(dev, "LSPCON read(0x80, 0x41) failed\n");
return -EFAULT;
}
if (data & DP_DUAL_MODE_LSPCON_MODE_PCON)
*mode = DRM_LSPCON_MODE_PCON;
else
*mode = DRM_LSPCON_MODE_LS;
return 0;
}
EXPORT_SYMBOL(drm_lspcon_get_mode);
/**
* drm_lspcon_set_mode: Change LSPCON's mode of operation by
* writing offset (0x80, 0x40)
* @dev: &drm_device to use
* @adapter: I2C-over-aux adapter
* @mode: required mode of operation
*
* Returns:
 * 0 on success, negative error code on failure or timeout
*/
int drm_lspcon_set_mode(const struct drm_device *dev, struct i2c_adapter *adapter,
enum drm_lspcon_mode mode)
{
u8 data = 0;
int ret;
int time_out = 200;
enum drm_lspcon_mode current_mode;
if (mode == DRM_LSPCON_MODE_PCON)
data = DP_DUAL_MODE_LSPCON_MODE_PCON;
/* Change mode */
ret = drm_dp_dual_mode_write(adapter, DP_DUAL_MODE_LSPCON_MODE_CHANGE,
&data, sizeof(data));
if (ret < 0) {
drm_err(dev, "LSPCON mode change failed\n");
return ret;
}
/*
* Confirm mode change by reading the status bit.
* Sometimes, it takes a while to change the mode,
* so wait and retry until time out or done.
*/
do {
		ret = drm_lspcon_get_mode(dev, adapter, &current_mode);
if (ret) {
drm_err(dev, "can't confirm LSPCON mode change\n");
return ret;
} else {
if (current_mode != mode) {
msleep(10);
time_out -= 10;
} else {
drm_dbg_kms(dev, "LSPCON mode changed to %s\n",
mode == DRM_LSPCON_MODE_LS ? "LS" : "PCON");
return 0;
}
}
} while (time_out);
drm_err(dev, "LSPCON mode change timed out\n");
return -ETIMEDOUT;
}
EXPORT_SYMBOL(drm_lspcon_set_mode);
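/*
 * Example: a minimal sketch of forcing an LSPCON into PCON mode after
 * detection, as a source driver might do before enabling HDMI output. The
 * dev and adapter variables are assumed driver state.
 *
 *	if (drm_dp_dual_mode_detect(dev, adapter) == DRM_DP_DUAL_MODE_LSPCON) {
 *		ret = drm_lspcon_set_mode(dev, adapter, DRM_LSPCON_MODE_PCON);
 *		if (ret)
 *			return ret;
 *	}
 */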
| linux-master | drivers/gpu/drm/display/drm_dp_dual_mode_helper.c |
/*
* Copyright © 2009 Keith Packard
*
* Permission to use, copy, modify, distribute, and sell this software and its
* documentation for any purpose is hereby granted without fee, provided that
* the above copyright notice appear in all copies and that both that copyright
* notice and this permission notice appear in supporting documentation, and
* that the name of the copyright holders not be used in advertising or
* publicity pertaining to distribution of the software without specific,
* written prior permission. The copyright holders make no representations
* about the suitability of this software for any purpose. It is provided "as
* is" without express or implied warranty.
*
* THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
* INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
* EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
* CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
* DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
* OF THIS SOFTWARE.
*/
#include <linux/backlight.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/string_helpers.h>
#include <linux/dynamic_debug.h>
#include <drm/display/drm_dp_helper.h>
#include <drm/display/drm_dp_mst_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
#include <drm/drm_panel.h>
#include "drm_dp_helper_internal.h"
DECLARE_DYNDBG_CLASSMAP(drm_debug_classes, DD_CLASS_TYPE_DISJOINT_BITS, 0,
"DRM_UT_CORE",
"DRM_UT_DRIVER",
"DRM_UT_KMS",
"DRM_UT_PRIME",
"DRM_UT_ATOMIC",
"DRM_UT_VBL",
"DRM_UT_STATE",
"DRM_UT_LEASE",
"DRM_UT_DP",
"DRM_UT_DRMRES");
struct dp_aux_backlight {
struct backlight_device *base;
struct drm_dp_aux *aux;
struct drm_edp_backlight_info info;
bool enabled;
};
/**
* DOC: dp helpers
*
* These functions contain some common logic and helpers at various abstraction
* levels to deal with Display Port sink devices and related things like DP aux
* channel transfers, EDID reading over DP aux channels, decoding certain DPCD
* blocks, ...
*/
/* Helpers for DP link training */
static u8 dp_link_status(const u8 link_status[DP_LINK_STATUS_SIZE], int r)
{
return link_status[r - DP_LANE0_1_STATUS];
}
static u8 dp_get_lane_status(const u8 link_status[DP_LINK_STATUS_SIZE],
int lane)
{
int i = DP_LANE0_1_STATUS + (lane >> 1);
int s = (lane & 1) * 4;
u8 l = dp_link_status(link_status, i);
return (l >> s) & 0xf;
}
bool drm_dp_channel_eq_ok(const u8 link_status[DP_LINK_STATUS_SIZE],
int lane_count)
{
u8 lane_align;
u8 lane_status;
int lane;
lane_align = dp_link_status(link_status,
DP_LANE_ALIGN_STATUS_UPDATED);
if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0)
return false;
for (lane = 0; lane < lane_count; lane++) {
lane_status = dp_get_lane_status(link_status, lane);
if ((lane_status & DP_CHANNEL_EQ_BITS) != DP_CHANNEL_EQ_BITS)
return false;
}
return true;
}
EXPORT_SYMBOL(drm_dp_channel_eq_ok);
bool drm_dp_clock_recovery_ok(const u8 link_status[DP_LINK_STATUS_SIZE],
int lane_count)
{
int lane;
u8 lane_status;
for (lane = 0; lane < lane_count; lane++) {
lane_status = dp_get_lane_status(link_status, lane);
if ((lane_status & DP_LANE_CR_DONE) == 0)
return false;
}
return true;
}
EXPORT_SYMBOL(drm_dp_clock_recovery_ok);
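/*
 * Example: a minimal sketch of checking link status during 8b/10b link
 * training. Error handling is omitted and the xyz_* helpers are
 * hypothetical stand-ins for driver-specific retraining steps.
 *
 *	u8 link_status[DP_LINK_STATUS_SIZE];
 *
 *	drm_dp_dpcd_read_link_status(aux, link_status);
 *	if (!drm_dp_clock_recovery_ok(link_status, lane_count))
 *		xyz_update_drive_settings(link_status);
 *	else if (!drm_dp_channel_eq_ok(link_status, lane_count))
 *		xyz_retry_channel_equalization();
 */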
u8 drm_dp_get_adjust_request_voltage(const u8 link_status[DP_LINK_STATUS_SIZE],
int lane)
{
int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
int s = ((lane & 1) ?
DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT :
DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT);
u8 l = dp_link_status(link_status, i);
return ((l >> s) & 0x3) << DP_TRAIN_VOLTAGE_SWING_SHIFT;
}
EXPORT_SYMBOL(drm_dp_get_adjust_request_voltage);
u8 drm_dp_get_adjust_request_pre_emphasis(const u8 link_status[DP_LINK_STATUS_SIZE],
int lane)
{
int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
int s = ((lane & 1) ?
DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT :
DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT);
u8 l = dp_link_status(link_status, i);
return ((l >> s) & 0x3) << DP_TRAIN_PRE_EMPHASIS_SHIFT;
}
EXPORT_SYMBOL(drm_dp_get_adjust_request_pre_emphasis);
/* DP 2.0 128b/132b */
u8 drm_dp_get_adjust_tx_ffe_preset(const u8 link_status[DP_LINK_STATUS_SIZE],
int lane)
{
int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
int s = ((lane & 1) ?
DP_ADJUST_TX_FFE_PRESET_LANE1_SHIFT :
DP_ADJUST_TX_FFE_PRESET_LANE0_SHIFT);
u8 l = dp_link_status(link_status, i);
return (l >> s) & 0xf;
}
EXPORT_SYMBOL(drm_dp_get_adjust_tx_ffe_preset);
/* DP 2.0 errata for 128b/132b */
bool drm_dp_128b132b_lane_channel_eq_done(const u8 link_status[DP_LINK_STATUS_SIZE],
int lane_count)
{
u8 lane_align, lane_status;
int lane;
lane_align = dp_link_status(link_status, DP_LANE_ALIGN_STATUS_UPDATED);
if (!(lane_align & DP_INTERLANE_ALIGN_DONE))
return false;
for (lane = 0; lane < lane_count; lane++) {
lane_status = dp_get_lane_status(link_status, lane);
if (!(lane_status & DP_LANE_CHANNEL_EQ_DONE))
return false;
}
return true;
}
EXPORT_SYMBOL(drm_dp_128b132b_lane_channel_eq_done);
/* DP 2.0 errata for 128b/132b */
bool drm_dp_128b132b_lane_symbol_locked(const u8 link_status[DP_LINK_STATUS_SIZE],
int lane_count)
{
u8 lane_status;
int lane;
for (lane = 0; lane < lane_count; lane++) {
lane_status = dp_get_lane_status(link_status, lane);
if (!(lane_status & DP_LANE_SYMBOL_LOCKED))
return false;
}
return true;
}
EXPORT_SYMBOL(drm_dp_128b132b_lane_symbol_locked);
/* DP 2.0 errata for 128b/132b */
bool drm_dp_128b132b_eq_interlane_align_done(const u8 link_status[DP_LINK_STATUS_SIZE])
{
u8 status = dp_link_status(link_status, DP_LANE_ALIGN_STATUS_UPDATED);
return status & DP_128B132B_DPRX_EQ_INTERLANE_ALIGN_DONE;
}
EXPORT_SYMBOL(drm_dp_128b132b_eq_interlane_align_done);
/* DP 2.0 errata for 128b/132b */
bool drm_dp_128b132b_cds_interlane_align_done(const u8 link_status[DP_LINK_STATUS_SIZE])
{
u8 status = dp_link_status(link_status, DP_LANE_ALIGN_STATUS_UPDATED);
return status & DP_128B132B_DPRX_CDS_INTERLANE_ALIGN_DONE;
}
EXPORT_SYMBOL(drm_dp_128b132b_cds_interlane_align_done);
/* DP 2.0 errata for 128b/132b */
bool drm_dp_128b132b_link_training_failed(const u8 link_status[DP_LINK_STATUS_SIZE])
{
u8 status = dp_link_status(link_status, DP_LANE_ALIGN_STATUS_UPDATED);
return status & DP_128B132B_LT_FAILED;
}
EXPORT_SYMBOL(drm_dp_128b132b_link_training_failed);
static int __8b10b_clock_recovery_delay_us(const struct drm_dp_aux *aux, u8 rd_interval)
{
if (rd_interval > 4)
drm_dbg_kms(aux->drm_dev, "%s: invalid AUX interval 0x%02x (max 4)\n",
aux->name, rd_interval);
if (rd_interval == 0)
return 100;
return rd_interval * 4 * USEC_PER_MSEC;
}
static int __8b10b_channel_eq_delay_us(const struct drm_dp_aux *aux, u8 rd_interval)
{
if (rd_interval > 4)
drm_dbg_kms(aux->drm_dev, "%s: invalid AUX interval 0x%02x (max 4)\n",
aux->name, rd_interval);
if (rd_interval == 0)
return 400;
return rd_interval * 4 * USEC_PER_MSEC;
}
static int __128b132b_channel_eq_delay_us(const struct drm_dp_aux *aux, u8 rd_interval)
{
switch (rd_interval) {
default:
drm_dbg_kms(aux->drm_dev, "%s: invalid AUX interval 0x%02x\n",
aux->name, rd_interval);
fallthrough;
case DP_128B132B_TRAINING_AUX_RD_INTERVAL_400_US:
return 400;
case DP_128B132B_TRAINING_AUX_RD_INTERVAL_4_MS:
return 4000;
case DP_128B132B_TRAINING_AUX_RD_INTERVAL_8_MS:
return 8000;
case DP_128B132B_TRAINING_AUX_RD_INTERVAL_12_MS:
return 12000;
case DP_128B132B_TRAINING_AUX_RD_INTERVAL_16_MS:
return 16000;
case DP_128B132B_TRAINING_AUX_RD_INTERVAL_32_MS:
return 32000;
case DP_128B132B_TRAINING_AUX_RD_INTERVAL_64_MS:
return 64000;
}
}
/*
* The link training delays are different for:
*
* - Clock recovery vs. channel equalization
* - DPRX vs. LTTPR
* - 128b/132b vs. 8b/10b
* - DPCD rev 1.3 vs. later
*
* Get the correct delay in us, reading DPCD if necessary.
*/
static int __read_delay(struct drm_dp_aux *aux, const u8 dpcd[DP_RECEIVER_CAP_SIZE],
enum drm_dp_phy dp_phy, bool uhbr, bool cr)
{
int (*parse)(const struct drm_dp_aux *aux, u8 rd_interval);
unsigned int offset;
u8 rd_interval, mask;
if (dp_phy == DP_PHY_DPRX) {
if (uhbr) {
if (cr)
return 100;
offset = DP_128B132B_TRAINING_AUX_RD_INTERVAL;
mask = DP_128B132B_TRAINING_AUX_RD_INTERVAL_MASK;
parse = __128b132b_channel_eq_delay_us;
} else {
if (cr && dpcd[DP_DPCD_REV] >= DP_DPCD_REV_14)
return 100;
offset = DP_TRAINING_AUX_RD_INTERVAL;
mask = DP_TRAINING_AUX_RD_MASK;
if (cr)
parse = __8b10b_clock_recovery_delay_us;
else
parse = __8b10b_channel_eq_delay_us;
}
} else {
if (uhbr) {
offset = DP_128B132B_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER(dp_phy);
mask = DP_128B132B_TRAINING_AUX_RD_INTERVAL_MASK;
parse = __128b132b_channel_eq_delay_us;
} else {
if (cr)
return 100;
offset = DP_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER(dp_phy);
mask = DP_TRAINING_AUX_RD_MASK;
parse = __8b10b_channel_eq_delay_us;
}
}
if (offset < DP_RECEIVER_CAP_SIZE) {
rd_interval = dpcd[offset];
} else {
if (drm_dp_dpcd_readb(aux, offset, &rd_interval) != 1) {
drm_dbg_kms(aux->drm_dev, "%s: failed rd interval read\n",
aux->name);
/* arbitrary default delay */
return 400;
}
}
return parse(aux, rd_interval & mask);
}
int drm_dp_read_clock_recovery_delay(struct drm_dp_aux *aux, const u8 dpcd[DP_RECEIVER_CAP_SIZE],
enum drm_dp_phy dp_phy, bool uhbr)
{
return __read_delay(aux, dpcd, dp_phy, uhbr, true);
}
EXPORT_SYMBOL(drm_dp_read_clock_recovery_delay);
int drm_dp_read_channel_eq_delay(struct drm_dp_aux *aux, const u8 dpcd[DP_RECEIVER_CAP_SIZE],
enum drm_dp_phy dp_phy, bool uhbr)
{
return __read_delay(aux, dpcd, dp_phy, uhbr, false);
}
EXPORT_SYMBOL(drm_dp_read_channel_eq_delay);
/* Per DP 2.0 Errata */
int drm_dp_128b132b_read_aux_rd_interval(struct drm_dp_aux *aux)
{
int unit;
u8 val;
if (drm_dp_dpcd_readb(aux, DP_128B132B_TRAINING_AUX_RD_INTERVAL, &val) != 1) {
drm_err(aux->drm_dev, "%s: failed rd interval read\n",
aux->name);
/* default to max */
val = DP_128B132B_TRAINING_AUX_RD_INTERVAL_MASK;
}
unit = (val & DP_128B132B_TRAINING_AUX_RD_INTERVAL_1MS_UNIT) ? 1 : 2;
val &= DP_128B132B_TRAINING_AUX_RD_INTERVAL_MASK;
return (val + 1) * unit * 1000;
}
EXPORT_SYMBOL(drm_dp_128b132b_read_aux_rd_interval);
void drm_dp_link_train_clock_recovery_delay(const struct drm_dp_aux *aux,
const u8 dpcd[DP_RECEIVER_CAP_SIZE])
{
u8 rd_interval = dpcd[DP_TRAINING_AUX_RD_INTERVAL] &
DP_TRAINING_AUX_RD_MASK;
int delay_us;
if (dpcd[DP_DPCD_REV] >= DP_DPCD_REV_14)
delay_us = 100;
else
delay_us = __8b10b_clock_recovery_delay_us(aux, rd_interval);
usleep_range(delay_us, delay_us * 2);
}
EXPORT_SYMBOL(drm_dp_link_train_clock_recovery_delay);
static void __drm_dp_link_train_channel_eq_delay(const struct drm_dp_aux *aux,
u8 rd_interval)
{
int delay_us = __8b10b_channel_eq_delay_us(aux, rd_interval);
usleep_range(delay_us, delay_us * 2);
}
void drm_dp_link_train_channel_eq_delay(const struct drm_dp_aux *aux,
const u8 dpcd[DP_RECEIVER_CAP_SIZE])
{
__drm_dp_link_train_channel_eq_delay(aux,
dpcd[DP_TRAINING_AUX_RD_INTERVAL] &
DP_TRAINING_AUX_RD_MASK);
}
EXPORT_SYMBOL(drm_dp_link_train_channel_eq_delay);
/**
* drm_dp_phy_name() - Get the name of the given DP PHY
* @dp_phy: The DP PHY identifier
*
* Given the @dp_phy, get a user friendly name of the DP PHY, either "DPRX" or
* "LTTPR <N>", or "<INVALID DP PHY>" on errors. The returned string is always
* non-NULL and valid.
*
* Returns: Name of the DP PHY.
*/
const char *drm_dp_phy_name(enum drm_dp_phy dp_phy)
{
static const char * const phy_names[] = {
[DP_PHY_DPRX] = "DPRX",
[DP_PHY_LTTPR1] = "LTTPR 1",
[DP_PHY_LTTPR2] = "LTTPR 2",
[DP_PHY_LTTPR3] = "LTTPR 3",
[DP_PHY_LTTPR4] = "LTTPR 4",
[DP_PHY_LTTPR5] = "LTTPR 5",
[DP_PHY_LTTPR6] = "LTTPR 6",
[DP_PHY_LTTPR7] = "LTTPR 7",
[DP_PHY_LTTPR8] = "LTTPR 8",
};
if (dp_phy < 0 || dp_phy >= ARRAY_SIZE(phy_names) ||
WARN_ON(!phy_names[dp_phy]))
return "<INVALID DP PHY>";
return phy_names[dp_phy];
}
EXPORT_SYMBOL(drm_dp_phy_name);
void drm_dp_lttpr_link_train_clock_recovery_delay(void)
{
usleep_range(100, 200);
}
EXPORT_SYMBOL(drm_dp_lttpr_link_train_clock_recovery_delay);
static u8 dp_lttpr_phy_cap(const u8 phy_cap[DP_LTTPR_PHY_CAP_SIZE], int r)
{
return phy_cap[r - DP_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER1];
}
void drm_dp_lttpr_link_train_channel_eq_delay(const struct drm_dp_aux *aux,
const u8 phy_cap[DP_LTTPR_PHY_CAP_SIZE])
{
u8 interval = dp_lttpr_phy_cap(phy_cap,
DP_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER1) &
DP_TRAINING_AUX_RD_MASK;
__drm_dp_link_train_channel_eq_delay(aux, interval);
}
EXPORT_SYMBOL(drm_dp_lttpr_link_train_channel_eq_delay);
u8 drm_dp_link_rate_to_bw_code(int link_rate)
{
switch (link_rate) {
case 1000000:
return DP_LINK_BW_10;
case 1350000:
return DP_LINK_BW_13_5;
case 2000000:
return DP_LINK_BW_20;
default:
/* Spec says link_bw = link_rate / 0.27Gbps */
return link_rate / 27000;
}
}
EXPORT_SYMBOL(drm_dp_link_rate_to_bw_code);
int drm_dp_bw_code_to_link_rate(u8 link_bw)
{
switch (link_bw) {
case DP_LINK_BW_10:
return 1000000;
case DP_LINK_BW_13_5:
return 1350000;
case DP_LINK_BW_20:
return 2000000;
default:
/* Spec says link_rate = link_bw * 0.27Gbps */
return link_bw * 27000;
}
}
EXPORT_SYMBOL(drm_dp_bw_code_to_link_rate);
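/*
 * Worked example of the 8b/10b mapping above: DP_LINK_BW_2_7 is 0x0a (10),
 * so drm_dp_bw_code_to_link_rate() returns 10 * 27000 = 270000 (2.7 Gbps in
 * units of 10 kHz), and drm_dp_link_rate_to_bw_code(270000) gives back 0x0a.
 * The 128b/132b rates (10, 13.5 and 20 Gbps) are not multiples of 0.27 Gbps
 * and are therefore special-cased in both directions.
 */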
#define AUX_RETRY_INTERVAL 500 /* us */
static inline void
drm_dp_dump_access(const struct drm_dp_aux *aux,
u8 request, uint offset, void *buffer, int ret)
{
const char *arrow = request == DP_AUX_NATIVE_READ ? "->" : "<-";
if (ret > 0)
drm_dbg_dp(aux->drm_dev, "%s: 0x%05x AUX %s (ret=%3d) %*ph\n",
aux->name, offset, arrow, ret, min(ret, 20), buffer);
else
drm_dbg_dp(aux->drm_dev, "%s: 0x%05x AUX %s (ret=%3d)\n",
aux->name, offset, arrow, ret);
}
/**
* DOC: dp helpers
*
* The DisplayPort AUX channel is an abstraction to allow generic, driver-
* independent access to AUX functionality. Drivers can take advantage of
* this by filling in the fields of the drm_dp_aux structure.
*
* Transactions are described using a hardware-independent drm_dp_aux_msg
* structure, which is passed into a driver's .transfer() implementation.
* Both native and I2C-over-AUX transactions are supported.
*/
static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request,
unsigned int offset, void *buffer, size_t size)
{
struct drm_dp_aux_msg msg;
unsigned int retry, native_reply;
int err = 0, ret = 0;
memset(&msg, 0, sizeof(msg));
msg.address = offset;
msg.request = request;
msg.buffer = buffer;
msg.size = size;
mutex_lock(&aux->hw_mutex);
/*
* The specification doesn't give any recommendation on how often to
	 * retry native transactions. We used to retry 7 times like for
	 * aux i2c transactions, but for real world devices this wasn't
	 * sufficient; bump to 32, which makes Dell 4k monitors happier.
*/
for (retry = 0; retry < 32; retry++) {
if (ret != 0 && ret != -ETIMEDOUT) {
usleep_range(AUX_RETRY_INTERVAL,
AUX_RETRY_INTERVAL + 100);
}
ret = aux->transfer(aux, &msg);
if (ret >= 0) {
native_reply = msg.reply & DP_AUX_NATIVE_REPLY_MASK;
if (native_reply == DP_AUX_NATIVE_REPLY_ACK) {
if (ret == size)
goto unlock;
ret = -EPROTO;
} else
ret = -EIO;
}
/*
* We want the error we return to be the error we received on
* the first transaction, since we may get a different error the
* next time we retry
*/
if (!err)
err = ret;
}
drm_dbg_kms(aux->drm_dev, "%s: Too many retries, giving up. First error: %d\n",
aux->name, err);
ret = err;
unlock:
mutex_unlock(&aux->hw_mutex);
return ret;
}
/**
* drm_dp_dpcd_probe() - probe a given DPCD address with a 1-byte read access
* @aux: DisplayPort AUX channel (SST)
* @offset: address of the register to probe
*
* Probe the provided DPCD address by reading 1 byte from it. The function can
* be used to trigger some side-effect the read access has, like waking up the
* sink, without the need for the read-out value.
*
 * Returns 0 if the read access succeeded, or a negative error code on failure.
*/
int drm_dp_dpcd_probe(struct drm_dp_aux *aux, unsigned int offset)
{
u8 buffer;
int ret;
ret = drm_dp_dpcd_access(aux, DP_AUX_NATIVE_READ, offset, &buffer, 1);
WARN_ON(ret == 0);
drm_dp_dump_access(aux, DP_AUX_NATIVE_READ, offset, &buffer, ret);
return ret < 0 ? ret : 0;
}
EXPORT_SYMBOL(drm_dp_dpcd_probe);
/**
* drm_dp_dpcd_read() - read a series of bytes from the DPCD
* @aux: DisplayPort AUX channel (SST or MST)
* @offset: address of the (first) register to read
* @buffer: buffer to store the register values
* @size: number of bytes in @buffer
*
* Returns the number of bytes transferred on success, or a negative error
* code on failure. -EIO is returned if the request was NAKed by the sink or
* if the retry count was exceeded. If not all bytes were transferred, this
* function returns -EPROTO. Errors from the underlying AUX channel transfer
* function, with the exception of -EBUSY (which causes the transaction to
* be retried), are propagated to the caller.
*/
ssize_t drm_dp_dpcd_read(struct drm_dp_aux *aux, unsigned int offset,
void *buffer, size_t size)
{
int ret;
/*
* HP ZR24w corrupts the first DPCD access after entering power save
	 * mode. E.g. on a read, the entire buffer will be filled with the same
* byte. Do a throw away read to avoid corrupting anything we care
* about. Afterwards things will work correctly until the monitor
* gets woken up and subsequently re-enters power save mode.
*
* The user pressing any button on the monitor is enough to wake it
* up, so there is no particularly good place to do the workaround.
* We just have to do it before any DPCD access and hope that the
* monitor doesn't power down exactly after the throw away read.
*/
if (!aux->is_remote) {
ret = drm_dp_dpcd_probe(aux, DP_DPCD_REV);
if (ret < 0)
return ret;
}
if (aux->is_remote)
ret = drm_dp_mst_dpcd_read(aux, offset, buffer, size);
else
ret = drm_dp_dpcd_access(aux, DP_AUX_NATIVE_READ, offset,
buffer, size);
drm_dp_dump_access(aux, DP_AUX_NATIVE_READ, offset, buffer, ret);
return ret;
}
EXPORT_SYMBOL(drm_dp_dpcd_read);
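/*
 * Example: a minimal sketch of reading the receiver capability block in a
 * single transaction. A negative return already covers NAKs and short
 * transfers, per the documentation above.
 *
 *	u8 dpcd[DP_RECEIVER_CAP_SIZE];
 *	ssize_t ret;
 *
 *	ret = drm_dp_dpcd_read(aux, DP_DPCD_REV, dpcd, sizeof(dpcd));
 *	if (ret < 0)
 *		return ret;
 */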
/**
* drm_dp_dpcd_write() - write a series of bytes to the DPCD
* @aux: DisplayPort AUX channel (SST or MST)
* @offset: address of the (first) register to write
* @buffer: buffer containing the values to write
* @size: number of bytes in @buffer
*
* Returns the number of bytes transferred on success, or a negative error
* code on failure. -EIO is returned if the request was NAKed by the sink or
* if the retry count was exceeded. If not all bytes were transferred, this
* function returns -EPROTO. Errors from the underlying AUX channel transfer
* function, with the exception of -EBUSY (which causes the transaction to
* be retried), are propagated to the caller.
*/
ssize_t drm_dp_dpcd_write(struct drm_dp_aux *aux, unsigned int offset,
void *buffer, size_t size)
{
int ret;
if (aux->is_remote)
ret = drm_dp_mst_dpcd_write(aux, offset, buffer, size);
else
ret = drm_dp_dpcd_access(aux, DP_AUX_NATIVE_WRITE, offset,
buffer, size);
drm_dp_dump_access(aux, DP_AUX_NATIVE_WRITE, offset, buffer, ret);
return ret;
}
EXPORT_SYMBOL(drm_dp_dpcd_write);
/**
* drm_dp_dpcd_read_link_status() - read DPCD link status (bytes 0x202-0x207)
* @aux: DisplayPort AUX channel
* @status: buffer to store the link status in (must be at least 6 bytes)
*
* Returns the number of bytes transferred on success or a negative error
* code on failure.
*/
int drm_dp_dpcd_read_link_status(struct drm_dp_aux *aux,
u8 status[DP_LINK_STATUS_SIZE])
{
return drm_dp_dpcd_read(aux, DP_LANE0_1_STATUS, status,
DP_LINK_STATUS_SIZE);
}
EXPORT_SYMBOL(drm_dp_dpcd_read_link_status);
/**
* drm_dp_dpcd_read_phy_link_status - get the link status information for a DP PHY
* @aux: DisplayPort AUX channel
* @dp_phy: the DP PHY to get the link status for
* @link_status: buffer to return the status in
*
* Fetch the AUX DPCD registers for the DPRX or an LTTPR PHY link status. The
* layout of the returned @link_status matches the DPCD register layout of the
* DPRX PHY link status.
*
* Returns 0 if the information was read successfully or a negative error code
* on failure.
*/
int drm_dp_dpcd_read_phy_link_status(struct drm_dp_aux *aux,
enum drm_dp_phy dp_phy,
u8 link_status[DP_LINK_STATUS_SIZE])
{
int ret;
if (dp_phy == DP_PHY_DPRX) {
ret = drm_dp_dpcd_read(aux,
DP_LANE0_1_STATUS,
link_status,
DP_LINK_STATUS_SIZE);
if (ret < 0)
return ret;
WARN_ON(ret != DP_LINK_STATUS_SIZE);
return 0;
}
ret = drm_dp_dpcd_read(aux,
DP_LANE0_1_STATUS_PHY_REPEATER(dp_phy),
link_status,
DP_LINK_STATUS_SIZE - 1);
if (ret < 0)
return ret;
WARN_ON(ret != DP_LINK_STATUS_SIZE - 1);
/* Convert the LTTPR to the sink PHY link status layout */
memmove(&link_status[DP_SINK_STATUS - DP_LANE0_1_STATUS + 1],
&link_status[DP_SINK_STATUS - DP_LANE0_1_STATUS],
DP_LINK_STATUS_SIZE - (DP_SINK_STATUS - DP_LANE0_1_STATUS) - 1);
link_status[DP_SINK_STATUS - DP_LANE0_1_STATUS] = 0;
return 0;
}
EXPORT_SYMBOL(drm_dp_dpcd_read_phy_link_status);
static bool is_edid_digital_input_dp(const struct edid *edid)
{
return edid && edid->revision >= 4 &&
edid->input & DRM_EDID_INPUT_DIGITAL &&
(edid->input & DRM_EDID_DIGITAL_TYPE_MASK) == DRM_EDID_DIGITAL_TYPE_DP;
}
/**
* drm_dp_downstream_is_type() - is the downstream facing port of certain type?
* @dpcd: DisplayPort configuration data
* @port_cap: port capabilities
* @type: port type to be checked. Can be:
* %DP_DS_PORT_TYPE_DP, %DP_DS_PORT_TYPE_VGA, %DP_DS_PORT_TYPE_DVI,
* %DP_DS_PORT_TYPE_HDMI, %DP_DS_PORT_TYPE_NON_EDID,
* %DP_DS_PORT_TYPE_DP_DUALMODE or %DP_DS_PORT_TYPE_WIRELESS.
*
* Caveat: Only works with DPCD 1.1+ port caps.
*
* Returns: whether the downstream facing port matches the type.
*/
bool drm_dp_downstream_is_type(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
const u8 port_cap[4], u8 type)
{
return drm_dp_is_branch(dpcd) &&
dpcd[DP_DPCD_REV] >= 0x11 &&
(port_cap[0] & DP_DS_PORT_TYPE_MASK) == type;
}
EXPORT_SYMBOL(drm_dp_downstream_is_type);
/**
* drm_dp_downstream_is_tmds() - is the downstream facing port TMDS?
* @dpcd: DisplayPort configuration data
* @port_cap: port capabilities
* @edid: EDID
*
* Returns: whether the downstream facing port is TMDS (HDMI/DVI).
*/
bool drm_dp_downstream_is_tmds(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
const u8 port_cap[4],
const struct edid *edid)
{
if (dpcd[DP_DPCD_REV] < 0x11) {
switch (dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_TYPE_MASK) {
case DP_DWN_STRM_PORT_TYPE_TMDS:
return true;
default:
return false;
}
}
switch (port_cap[0] & DP_DS_PORT_TYPE_MASK) {
case DP_DS_PORT_TYPE_DP_DUALMODE:
if (is_edid_digital_input_dp(edid))
return false;
fallthrough;
case DP_DS_PORT_TYPE_DVI:
case DP_DS_PORT_TYPE_HDMI:
return true;
default:
return false;
}
}
EXPORT_SYMBOL(drm_dp_downstream_is_tmds);
/**
* drm_dp_send_real_edid_checksum() - send back real edid checksum value
* @aux: DisplayPort AUX channel
* @real_edid_checksum: real edid checksum for the last block
*
* Returns:
 * %true on success, %false otherwise
*/
bool drm_dp_send_real_edid_checksum(struct drm_dp_aux *aux,
u8 real_edid_checksum)
{
u8 link_edid_read = 0, auto_test_req = 0, test_resp = 0;
if (drm_dp_dpcd_read(aux, DP_DEVICE_SERVICE_IRQ_VECTOR,
&auto_test_req, 1) < 1) {
drm_err(aux->drm_dev, "%s: DPCD failed read at register 0x%x\n",
aux->name, DP_DEVICE_SERVICE_IRQ_VECTOR);
return false;
}
auto_test_req &= DP_AUTOMATED_TEST_REQUEST;
if (drm_dp_dpcd_read(aux, DP_TEST_REQUEST, &link_edid_read, 1) < 1) {
drm_err(aux->drm_dev, "%s: DPCD failed read at register 0x%x\n",
aux->name, DP_TEST_REQUEST);
return false;
}
link_edid_read &= DP_TEST_LINK_EDID_READ;
if (!auto_test_req || !link_edid_read) {
drm_dbg_kms(aux->drm_dev, "%s: Source DUT does not support TEST_EDID_READ\n",
aux->name);
return false;
}
if (drm_dp_dpcd_write(aux, DP_DEVICE_SERVICE_IRQ_VECTOR,
&auto_test_req, 1) < 1) {
drm_err(aux->drm_dev, "%s: DPCD failed write at register 0x%x\n",
aux->name, DP_DEVICE_SERVICE_IRQ_VECTOR);
return false;
}
/* send back checksum for the last edid extension block data */
if (drm_dp_dpcd_write(aux, DP_TEST_EDID_CHECKSUM,
&real_edid_checksum, 1) < 1) {
drm_err(aux->drm_dev, "%s: DPCD failed write at register 0x%x\n",
aux->name, DP_TEST_EDID_CHECKSUM);
return false;
}
test_resp |= DP_TEST_EDID_CHECKSUM_WRITE;
if (drm_dp_dpcd_write(aux, DP_TEST_RESPONSE, &test_resp, 1) < 1) {
drm_err(aux->drm_dev, "%s: DPCD failed write at register 0x%x\n",
aux->name, DP_TEST_RESPONSE);
return false;
}
return true;
}
EXPORT_SYMBOL(drm_dp_send_real_edid_checksum);
static u8 drm_dp_downstream_port_count(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
{
u8 port_count = dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_PORT_COUNT_MASK;
if (dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DETAILED_CAP_INFO_AVAILABLE && port_count > 4)
port_count = 4;
return port_count;
}
static int drm_dp_read_extended_dpcd_caps(struct drm_dp_aux *aux,
u8 dpcd[DP_RECEIVER_CAP_SIZE])
{
u8 dpcd_ext[DP_RECEIVER_CAP_SIZE];
int ret;
/*
* Prior to DP1.3 the bit represented by
* DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT was reserved.
* If it is set DP_DPCD_REV at 0000h could be at a value less than
* the true capability of the panel. The only way to check is to
* then compare 0000h and 2200h.
*/
if (!(dpcd[DP_TRAINING_AUX_RD_INTERVAL] &
DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT))
return 0;
ret = drm_dp_dpcd_read(aux, DP_DP13_DPCD_REV, &dpcd_ext,
sizeof(dpcd_ext));
if (ret < 0)
return ret;
if (ret != sizeof(dpcd_ext))
return -EIO;
if (dpcd[DP_DPCD_REV] > dpcd_ext[DP_DPCD_REV]) {
drm_dbg_kms(aux->drm_dev,
"%s: Extended DPCD rev less than base DPCD rev (%d > %d)\n",
aux->name, dpcd[DP_DPCD_REV], dpcd_ext[DP_DPCD_REV]);
return 0;
}
if (!memcmp(dpcd, dpcd_ext, sizeof(dpcd_ext)))
return 0;
drm_dbg_kms(aux->drm_dev, "%s: Base DPCD: %*ph\n", aux->name, DP_RECEIVER_CAP_SIZE, dpcd);
memcpy(dpcd, dpcd_ext, sizeof(dpcd_ext));
return 0;
}
/**
* drm_dp_read_dpcd_caps() - read DPCD caps and extended DPCD caps if
* available
* @aux: DisplayPort AUX channel
* @dpcd: Buffer to store the resulting DPCD in
*
* Attempts to read the base DPCD caps for @aux. Additionally, this function
* checks for and reads the extended DPRX caps (%DP_DP13_DPCD_REV) if
* present.
*
* Returns: %0 if the DPCD was read successfully, negative error code
* otherwise.
*/
int drm_dp_read_dpcd_caps(struct drm_dp_aux *aux,
u8 dpcd[DP_RECEIVER_CAP_SIZE])
{
int ret;
ret = drm_dp_dpcd_read(aux, DP_DPCD_REV, dpcd, DP_RECEIVER_CAP_SIZE);
if (ret < 0)
return ret;
if (ret != DP_RECEIVER_CAP_SIZE || dpcd[DP_DPCD_REV] == 0)
return -EIO;
ret = drm_dp_read_extended_dpcd_caps(aux, dpcd);
if (ret < 0)
return ret;
drm_dbg_kms(aux->drm_dev, "%s: DPCD: %*ph\n", aux->name, DP_RECEIVER_CAP_SIZE, dpcd);
return ret;
}
EXPORT_SYMBOL(drm_dp_read_dpcd_caps);
/**
* drm_dp_read_downstream_info() - read DPCD downstream port info if available
* @aux: DisplayPort AUX channel
* @dpcd: A cached copy of the port's DPCD
* @downstream_ports: buffer to store the downstream port info in
*
* See also:
* drm_dp_downstream_max_clock()
* drm_dp_downstream_max_bpc()
*
* Returns: 0 if either the downstream port info was read successfully or
* there was no downstream info to read, or a negative error code otherwise.
*/
int drm_dp_read_downstream_info(struct drm_dp_aux *aux,
const u8 dpcd[DP_RECEIVER_CAP_SIZE],
u8 downstream_ports[DP_MAX_DOWNSTREAM_PORTS])
{
int ret;
u8 len;
memset(downstream_ports, 0, DP_MAX_DOWNSTREAM_PORTS);
/* No downstream info to read */
if (!drm_dp_is_branch(dpcd) || dpcd[DP_DPCD_REV] == DP_DPCD_REV_10)
return 0;
/* Some branches advertise having 0 downstream ports, despite also advertising they have a
* downstream port present. The DP spec isn't clear on if this is allowed or not, but since
* some branches do it we need to handle it regardless.
*/
len = drm_dp_downstream_port_count(dpcd);
if (!len)
return 0;
if (dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DETAILED_CAP_INFO_AVAILABLE)
len *= 4;
ret = drm_dp_dpcd_read(aux, DP_DOWNSTREAM_PORT_0, downstream_ports, len);
if (ret < 0)
return ret;
if (ret != len)
return -EIO;
drm_dbg_kms(aux->drm_dev, "%s: DPCD DFP: %*ph\n", aux->name, len, downstream_ports);
return 0;
}
EXPORT_SYMBOL(drm_dp_read_downstream_info);
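/*
 * Illustrative detect-time flow (a sketch; @edid is assumed to have been
 * fetched by the driver): read the caps, then the DFP info, then derive
 * limits with the helpers below:
 *
 *	u8 dpcd[DP_RECEIVER_CAP_SIZE];
 *	u8 dfp[DP_MAX_DOWNSTREAM_PORTS];
 *	int max_tmds_khz = 0;
 *
 *	if (drm_dp_read_dpcd_caps(aux, dpcd) == 0 &&
 *	    drm_dp_read_downstream_info(aux, dpcd, dfp) == 0)
 *		max_tmds_khz = drm_dp_downstream_max_tmds_clock(dpcd, dfp, edid);
 */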
/**
* drm_dp_downstream_max_dotclock() - extract downstream facing port max dot clock
* @dpcd: DisplayPort configuration data
* @port_cap: port capabilities
*
* Returns: Downstream facing port max dot clock in kHz on success,
* or 0 if max clock not defined
*/
int drm_dp_downstream_max_dotclock(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
const u8 port_cap[4])
{
if (!drm_dp_is_branch(dpcd))
return 0;
if (dpcd[DP_DPCD_REV] < 0x11)
return 0;
switch (port_cap[0] & DP_DS_PORT_TYPE_MASK) {
case DP_DS_PORT_TYPE_VGA:
if ((dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DETAILED_CAP_INFO_AVAILABLE) == 0)
return 0;
return port_cap[1] * 8000;
default:
return 0;
}
}
EXPORT_SYMBOL(drm_dp_downstream_max_dotclock);
/**
* drm_dp_downstream_max_tmds_clock() - extract downstream facing port max TMDS clock
* @dpcd: DisplayPort configuration data
* @port_cap: port capabilities
* @edid: EDID
*
* Returns: HDMI/DVI downstream facing port max TMDS clock in kHz on success,
* or 0 if max TMDS clock not defined
*/
int drm_dp_downstream_max_tmds_clock(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
const u8 port_cap[4],
const struct edid *edid)
{
if (!drm_dp_is_branch(dpcd))
return 0;
if (dpcd[DP_DPCD_REV] < 0x11) {
switch (dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_TYPE_MASK) {
case DP_DWN_STRM_PORT_TYPE_TMDS:
return 165000;
default:
return 0;
}
}
switch (port_cap[0] & DP_DS_PORT_TYPE_MASK) {
case DP_DS_PORT_TYPE_DP_DUALMODE:
if (is_edid_digital_input_dp(edid))
return 0;
/*
* It's left up to the driver to check the
* DP dual mode adapter's max TMDS clock.
*
* Unfortunately it looks like branch devices
		 * may not forward the DP dual mode i2c
		 * access, so we usually just get an i2c nak :(
*/
fallthrough;
case DP_DS_PORT_TYPE_HDMI:
/*
* We should perhaps assume 165 MHz when detailed cap
* info is not available. But looks like many typical
* branch devices fall into that category and so we'd
* probably end up with users complaining that they can't
* get high resolution modes with their favorite dongle.
*
* So let's limit to 300 MHz instead since DPCD 1.4
* HDMI 2.0 DFPs are required to have the detailed cap
		 * info. So it's more likely we're dealing with an HDMI 1.4
		 * compatible device here.
*/
if ((dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DETAILED_CAP_INFO_AVAILABLE) == 0)
return 300000;
return port_cap[1] * 2500;
case DP_DS_PORT_TYPE_DVI:
if ((dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DETAILED_CAP_INFO_AVAILABLE) == 0)
return 165000;
/* FIXME what to do about DVI dual link? */
return port_cap[1] * 2500;
default:
return 0;
}
}
EXPORT_SYMBOL(drm_dp_downstream_max_tmds_clock);
/**
* drm_dp_downstream_min_tmds_clock() - extract downstream facing port min TMDS clock
* @dpcd: DisplayPort configuration data
* @port_cap: port capabilities
* @edid: EDID
*
* Returns: HDMI/DVI downstream facing port min TMDS clock in kHz on success,
 * or 0 if min TMDS clock not defined
*/
int drm_dp_downstream_min_tmds_clock(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
const u8 port_cap[4],
const struct edid *edid)
{
if (!drm_dp_is_branch(dpcd))
return 0;
if (dpcd[DP_DPCD_REV] < 0x11) {
switch (dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_TYPE_MASK) {
case DP_DWN_STRM_PORT_TYPE_TMDS:
return 25000;
default:
return 0;
}
}
switch (port_cap[0] & DP_DS_PORT_TYPE_MASK) {
case DP_DS_PORT_TYPE_DP_DUALMODE:
if (is_edid_digital_input_dp(edid))
return 0;
fallthrough;
case DP_DS_PORT_TYPE_DVI:
case DP_DS_PORT_TYPE_HDMI:
/*
* Unclear whether the protocol converter could
* utilize pixel replication. Assume it won't.
*/
return 25000;
default:
return 0;
}
}
EXPORT_SYMBOL(drm_dp_downstream_min_tmds_clock);
/**
* drm_dp_downstream_max_bpc() - extract downstream facing port max
* bits per component
* @dpcd: DisplayPort configuration data
* @port_cap: downstream facing port capabilities
* @edid: EDID
*
* Returns: Max bpc on success or 0 if max bpc not defined
*/
int drm_dp_downstream_max_bpc(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
const u8 port_cap[4],
const struct edid *edid)
{
if (!drm_dp_is_branch(dpcd))
return 0;
if (dpcd[DP_DPCD_REV] < 0x11) {
switch (dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_TYPE_MASK) {
case DP_DWN_STRM_PORT_TYPE_DP:
return 0;
default:
return 8;
}
}
switch (port_cap[0] & DP_DS_PORT_TYPE_MASK) {
case DP_DS_PORT_TYPE_DP:
return 0;
case DP_DS_PORT_TYPE_DP_DUALMODE:
if (is_edid_digital_input_dp(edid))
return 0;
fallthrough;
case DP_DS_PORT_TYPE_HDMI:
case DP_DS_PORT_TYPE_DVI:
case DP_DS_PORT_TYPE_VGA:
if ((dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DETAILED_CAP_INFO_AVAILABLE) == 0)
return 8;
switch (port_cap[2] & DP_DS_MAX_BPC_MASK) {
case DP_DS_8BPC:
return 8;
case DP_DS_10BPC:
return 10;
case DP_DS_12BPC:
return 12;
case DP_DS_16BPC:
return 16;
default:
return 8;
}
break;
default:
return 8;
}
}
EXPORT_SYMBOL(drm_dp_downstream_max_bpc);
/**
* drm_dp_downstream_420_passthrough() - determine downstream facing port
* YCbCr 4:2:0 pass-through capability
* @dpcd: DisplayPort configuration data
* @port_cap: downstream facing port capabilities
*
* Returns: whether the downstream facing port can pass through YCbCr 4:2:0
*/
bool drm_dp_downstream_420_passthrough(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
const u8 port_cap[4])
{
if (!drm_dp_is_branch(dpcd))
return false;
if (dpcd[DP_DPCD_REV] < 0x13)
return false;
switch (port_cap[0] & DP_DS_PORT_TYPE_MASK) {
case DP_DS_PORT_TYPE_DP:
return true;
case DP_DS_PORT_TYPE_HDMI:
if ((dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DETAILED_CAP_INFO_AVAILABLE) == 0)
return false;
return port_cap[3] & DP_DS_HDMI_YCBCR420_PASS_THROUGH;
default:
return false;
}
}
EXPORT_SYMBOL(drm_dp_downstream_420_passthrough);
/**
* drm_dp_downstream_444_to_420_conversion() - determine downstream facing port
* YCbCr 4:4:4->4:2:0 conversion capability
* @dpcd: DisplayPort configuration data
* @port_cap: downstream facing port capabilities
*
* Returns: whether the downstream facing port can convert YCbCr 4:4:4 to 4:2:0
*/
bool drm_dp_downstream_444_to_420_conversion(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
const u8 port_cap[4])
{
if (!drm_dp_is_branch(dpcd))
return false;
if (dpcd[DP_DPCD_REV] < 0x13)
return false;
switch (port_cap[0] & DP_DS_PORT_TYPE_MASK) {
case DP_DS_PORT_TYPE_HDMI:
if ((dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DETAILED_CAP_INFO_AVAILABLE) == 0)
return false;
return port_cap[3] & DP_DS_HDMI_YCBCR444_TO_420_CONV;
default:
return false;
}
}
EXPORT_SYMBOL(drm_dp_downstream_444_to_420_conversion);
/**
* drm_dp_downstream_rgb_to_ycbcr_conversion() - determine downstream facing port
* RGB->YCbCr conversion capability
* @dpcd: DisplayPort configuration data
* @port_cap: downstream facing port capabilities
* @color_spc: Colorspace for which conversion cap is sought
*
* Returns: whether the downstream facing port can convert RGB->YCbCr for a given
* colorspace.
*/
bool drm_dp_downstream_rgb_to_ycbcr_conversion(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
const u8 port_cap[4],
u8 color_spc)
{
if (!drm_dp_is_branch(dpcd))
return false;
if (dpcd[DP_DPCD_REV] < 0x13)
return false;
switch (port_cap[0] & DP_DS_PORT_TYPE_MASK) {
case DP_DS_PORT_TYPE_HDMI:
if ((dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DETAILED_CAP_INFO_AVAILABLE) == 0)
return false;
return port_cap[3] & color_spc;
default:
return false;
}
}
EXPORT_SYMBOL(drm_dp_downstream_rgb_to_ycbcr_conversion);
/**
* drm_dp_downstream_mode() - return a mode for downstream facing port
* @dev: DRM device
* @dpcd: DisplayPort configuration data
* @port_cap: port capabilities
*
* Provides a suitable mode for downstream facing ports without EDID.
*
* Returns: A new drm_display_mode on success or NULL on failure
*/
struct drm_display_mode *
drm_dp_downstream_mode(struct drm_device *dev,
const u8 dpcd[DP_RECEIVER_CAP_SIZE],
const u8 port_cap[4])
{
u8 vic;
if (!drm_dp_is_branch(dpcd))
return NULL;
if (dpcd[DP_DPCD_REV] < 0x11)
return NULL;
switch (port_cap[0] & DP_DS_PORT_TYPE_MASK) {
case DP_DS_PORT_TYPE_NON_EDID:
switch (port_cap[0] & DP_DS_NON_EDID_MASK) {
case DP_DS_NON_EDID_720x480i_60:
vic = 6;
break;
case DP_DS_NON_EDID_720x480i_50:
vic = 21;
break;
case DP_DS_NON_EDID_1920x1080i_60:
vic = 5;
break;
case DP_DS_NON_EDID_1920x1080i_50:
vic = 20;
break;
case DP_DS_NON_EDID_1280x720_60:
vic = 4;
break;
case DP_DS_NON_EDID_1280x720_50:
vic = 19;
break;
default:
return NULL;
}
return drm_display_mode_from_cea_vic(dev, vic);
default:
return NULL;
}
}
EXPORT_SYMBOL(drm_dp_downstream_mode);
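/*
 * Illustrative fallback in a connector's .get_modes() when no EDID could be
 * read through the branch device (dpcd and port_cap assumed cached by the
 * driver):
 *
 *	struct drm_display_mode *mode;
 *
 *	mode = drm_dp_downstream_mode(connector->dev, dpcd, port_cap);
 *	if (mode) {
 *		drm_mode_probed_add(connector, mode);
 *		return 1;
 *	}
 */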
/**
* drm_dp_downstream_id() - identify branch device
* @aux: DisplayPort AUX channel
* @id: DisplayPort branch device id
*
 * Returns the number of bytes transferred on success, or a negative error
 * code on failure. The branch device id is returned in @id.
*/
int drm_dp_downstream_id(struct drm_dp_aux *aux, char id[6])
{
return drm_dp_dpcd_read(aux, DP_BRANCH_ID, id, 6);
}
EXPORT_SYMBOL(drm_dp_downstream_id);
/**
* drm_dp_downstream_debug() - debug DP branch devices
* @m: pointer for debugfs file
* @dpcd: DisplayPort configuration data
* @port_cap: port capabilities
* @edid: EDID
* @aux: DisplayPort AUX channel
*
*/
void drm_dp_downstream_debug(struct seq_file *m,
const u8 dpcd[DP_RECEIVER_CAP_SIZE],
const u8 port_cap[4],
const struct edid *edid,
struct drm_dp_aux *aux)
{
bool detailed_cap_info = dpcd[DP_DOWNSTREAMPORT_PRESENT] &
DP_DETAILED_CAP_INFO_AVAILABLE;
int clk;
int bpc;
char id[7];
int len;
uint8_t rev[2];
int type = port_cap[0] & DP_DS_PORT_TYPE_MASK;
bool branch_device = drm_dp_is_branch(dpcd);
seq_printf(m, "\tDP branch device present: %s\n",
str_yes_no(branch_device));
if (!branch_device)
return;
switch (type) {
case DP_DS_PORT_TYPE_DP:
seq_puts(m, "\t\tType: DisplayPort\n");
break;
case DP_DS_PORT_TYPE_VGA:
seq_puts(m, "\t\tType: VGA\n");
break;
case DP_DS_PORT_TYPE_DVI:
seq_puts(m, "\t\tType: DVI\n");
break;
case DP_DS_PORT_TYPE_HDMI:
seq_puts(m, "\t\tType: HDMI\n");
break;
case DP_DS_PORT_TYPE_NON_EDID:
seq_puts(m, "\t\tType: others without EDID support\n");
break;
case DP_DS_PORT_TYPE_DP_DUALMODE:
seq_puts(m, "\t\tType: DP++\n");
break;
case DP_DS_PORT_TYPE_WIRELESS:
seq_puts(m, "\t\tType: Wireless\n");
break;
default:
seq_puts(m, "\t\tType: N/A\n");
}
memset(id, 0, sizeof(id));
drm_dp_downstream_id(aux, id);
seq_printf(m, "\t\tID: %s\n", id);
len = drm_dp_dpcd_read(aux, DP_BRANCH_HW_REV, &rev[0], 1);
if (len > 0)
seq_printf(m, "\t\tHW: %d.%d\n",
(rev[0] & 0xf0) >> 4, rev[0] & 0xf);
len = drm_dp_dpcd_read(aux, DP_BRANCH_SW_REV, rev, 2);
if (len > 0)
seq_printf(m, "\t\tSW: %d.%d\n", rev[0], rev[1]);
if (detailed_cap_info) {
clk = drm_dp_downstream_max_dotclock(dpcd, port_cap);
if (clk > 0)
seq_printf(m, "\t\tMax dot clock: %d kHz\n", clk);
clk = drm_dp_downstream_max_tmds_clock(dpcd, port_cap, edid);
if (clk > 0)
seq_printf(m, "\t\tMax TMDS clock: %d kHz\n", clk);
clk = drm_dp_downstream_min_tmds_clock(dpcd, port_cap, edid);
if (clk > 0)
seq_printf(m, "\t\tMin TMDS clock: %d kHz\n", clk);
bpc = drm_dp_downstream_max_bpc(dpcd, port_cap, edid);
if (bpc > 0)
seq_printf(m, "\t\tMax bpc: %d\n", bpc);
}
}
EXPORT_SYMBOL(drm_dp_downstream_debug);
/**
* drm_dp_subconnector_type() - get DP branch device type
* @dpcd: DisplayPort configuration data
* @port_cap: port capabilities
*/
enum drm_mode_subconnector
drm_dp_subconnector_type(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
const u8 port_cap[4])
{
int type;
if (!drm_dp_is_branch(dpcd))
return DRM_MODE_SUBCONNECTOR_Native;
/* DP 1.0 approach */
if (dpcd[DP_DPCD_REV] == DP_DPCD_REV_10) {
type = dpcd[DP_DOWNSTREAMPORT_PRESENT] &
DP_DWN_STRM_PORT_TYPE_MASK;
switch (type) {
case DP_DWN_STRM_PORT_TYPE_TMDS:
/* Can be HDMI or DVI-D, DVI-D is a safer option */
return DRM_MODE_SUBCONNECTOR_DVID;
case DP_DWN_STRM_PORT_TYPE_ANALOG:
/* Can be VGA or DVI-A, VGA is more popular */
return DRM_MODE_SUBCONNECTOR_VGA;
case DP_DWN_STRM_PORT_TYPE_DP:
return DRM_MODE_SUBCONNECTOR_DisplayPort;
case DP_DWN_STRM_PORT_TYPE_OTHER:
default:
return DRM_MODE_SUBCONNECTOR_Unknown;
}
}
type = port_cap[0] & DP_DS_PORT_TYPE_MASK;
switch (type) {
case DP_DS_PORT_TYPE_DP:
case DP_DS_PORT_TYPE_DP_DUALMODE:
return DRM_MODE_SUBCONNECTOR_DisplayPort;
case DP_DS_PORT_TYPE_VGA:
return DRM_MODE_SUBCONNECTOR_VGA;
case DP_DS_PORT_TYPE_DVI:
return DRM_MODE_SUBCONNECTOR_DVID;
case DP_DS_PORT_TYPE_HDMI:
return DRM_MODE_SUBCONNECTOR_HDMIA;
case DP_DS_PORT_TYPE_WIRELESS:
return DRM_MODE_SUBCONNECTOR_Wireless;
case DP_DS_PORT_TYPE_NON_EDID:
default:
return DRM_MODE_SUBCONNECTOR_Unknown;
}
}
EXPORT_SYMBOL(drm_dp_subconnector_type);
/**
* drm_dp_set_subconnector_property - set subconnector for DP connector
* @connector: connector to set property on
* @status: connector status
* @dpcd: DisplayPort configuration data
* @port_cap: port capabilities
*
* Called by a driver on every detect event.
*/
void drm_dp_set_subconnector_property(struct drm_connector *connector,
enum drm_connector_status status,
const u8 *dpcd,
const u8 port_cap[4])
{
enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
if (status == connector_status_connected)
subconnector = drm_dp_subconnector_type(dpcd, port_cap);
drm_object_property_set_value(&connector->base,
connector->dev->mode_config.dp_subconnector_property,
subconnector);
}
EXPORT_SYMBOL(drm_dp_set_subconnector_property);
/**
* drm_dp_read_sink_count_cap() - Check whether a given connector has a valid sink
* count
* @connector: The DRM connector to check
* @dpcd: A cached copy of the connector's DPCD RX capabilities
* @desc: A cached copy of the connector's DP descriptor
*
* See also: drm_dp_read_sink_count()
*
 * Returns: %true if the (e)DP connector has a valid sink count that should
 * be probed, %false otherwise.
*/
bool drm_dp_read_sink_count_cap(struct drm_connector *connector,
const u8 dpcd[DP_RECEIVER_CAP_SIZE],
const struct drm_dp_desc *desc)
{
/* Some eDP panels don't set a valid value for the sink count */
return connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
dpcd[DP_DPCD_REV] >= DP_DPCD_REV_11 &&
dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT &&
!drm_dp_has_quirk(desc, DP_DPCD_QUIRK_NO_SINK_COUNT);
}
EXPORT_SYMBOL(drm_dp_read_sink_count_cap);
/**
* drm_dp_read_sink_count() - Retrieve the sink count for a given sink
* @aux: The DP AUX channel to use
*
* See also: drm_dp_read_sink_count_cap()
*
* Returns: The current sink count reported by @aux, or a negative error code
* otherwise.
*/
int drm_dp_read_sink_count(struct drm_dp_aux *aux)
{
u8 count;
int ret;
ret = drm_dp_dpcd_readb(aux, DP_SINK_COUNT, &count);
if (ret < 0)
return ret;
if (ret != 1)
return -EIO;
return DP_GET_SINK_COUNT(count);
}
EXPORT_SYMBOL(drm_dp_read_sink_count);
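/*
 * Illustrative pairing of the two sink count helpers (dpcd and desc assumed
 * cached by the driver; error handling simplified): only poll DP_SINK_COUNT
 * when it is meaningful.
 *
 *	if (drm_dp_read_sink_count_cap(connector, dpcd, &desc)) {
 *		int count = drm_dp_read_sink_count(aux);
 *
 *		if (count <= 0)
 *			return connector_status_disconnected;
 *	}
 */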
/*
* I2C-over-AUX implementation
*/
static u32 drm_dp_i2c_functionality(struct i2c_adapter *adapter)
{
return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
I2C_FUNC_SMBUS_READ_BLOCK_DATA |
I2C_FUNC_SMBUS_BLOCK_PROC_CALL |
I2C_FUNC_10BIT_ADDR;
}
static void drm_dp_i2c_msg_write_status_update(struct drm_dp_aux_msg *msg)
{
/*
* In case of i2c defer or short i2c ack reply to a write,
* we need to switch to WRITE_STATUS_UPDATE to drain the
* rest of the message
*/
if ((msg->request & ~DP_AUX_I2C_MOT) == DP_AUX_I2C_WRITE) {
msg->request &= DP_AUX_I2C_MOT;
msg->request |= DP_AUX_I2C_WRITE_STATUS_UPDATE;
}
}
#define AUX_PRECHARGE_LEN 10 /* 10 to 16 */
#define AUX_SYNC_LEN (16 + 4) /* preamble + AUX_SYNC_END */
#define AUX_STOP_LEN 4
#define AUX_CMD_LEN 4
#define AUX_ADDRESS_LEN 20
#define AUX_REPLY_PAD_LEN 4
#define AUX_LENGTH_LEN 8
/*
* Calculate the duration of the AUX request/reply in usec. Gives the
* "best" case estimate, ie. successful while as short as possible.
*/
static int drm_dp_aux_req_duration(const struct drm_dp_aux_msg *msg)
{
int len = AUX_PRECHARGE_LEN + AUX_SYNC_LEN + AUX_STOP_LEN +
AUX_CMD_LEN + AUX_ADDRESS_LEN + AUX_LENGTH_LEN;
if ((msg->request & DP_AUX_I2C_READ) == 0)
len += msg->size * 8;
return len;
}
static int drm_dp_aux_reply_duration(const struct drm_dp_aux_msg *msg)
{
int len = AUX_PRECHARGE_LEN + AUX_SYNC_LEN + AUX_STOP_LEN +
AUX_CMD_LEN + AUX_REPLY_PAD_LEN;
/*
* For read we expect what was asked. For writes there will
* be 0 or 1 data bytes. Assume 0 for the "best" case.
*/
if (msg->request & DP_AUX_I2C_READ)
len += msg->size * 8;
return len;
}
#define I2C_START_LEN 1
#define I2C_STOP_LEN 1
#define I2C_ADDR_LEN 9 /* ADDRESS + R/W + ACK/NACK */
#define I2C_DATA_LEN 9 /* DATA + ACK/NACK */
/*
* Calculate the length of the i2c transfer in usec, assuming
* the i2c bus speed is as specified. Gives the "worst"
* case estimate, ie. successful while as long as possible.
* Doesn't account the "MOT" bit, and instead assumes each
* message includes a START, ADDRESS and STOP. Neither does it
* account for additional random variables such as clock stretching.
*/
static int drm_dp_i2c_msg_duration(const struct drm_dp_aux_msg *msg,
int i2c_speed_khz)
{
/* AUX bitrate is 1MHz, i2c bitrate as specified */
return DIV_ROUND_UP((I2C_START_LEN + I2C_ADDR_LEN +
msg->size * I2C_DATA_LEN +
I2C_STOP_LEN) * 1000, i2c_speed_khz);
}
/*
* Determine how many retries should be attempted to successfully transfer
* the specified message, based on the estimated durations of the
* i2c and AUX transfers.
*/
static int drm_dp_i2c_retry_count(const struct drm_dp_aux_msg *msg,
int i2c_speed_khz)
{
int aux_time_us = drm_dp_aux_req_duration(msg) +
drm_dp_aux_reply_duration(msg);
int i2c_time_us = drm_dp_i2c_msg_duration(msg, i2c_speed_khz);
return DIV_ROUND_UP(i2c_time_us, aux_time_us + AUX_RETRY_INTERVAL);
}
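/*
 * Worked example: for a 16 byte i2c read at the 10 kHz default assumed below,
 * drm_dp_i2c_msg_duration() gives (1 + 9 + 16 * 9 + 1) * 1000 / 10 = 15500 us,
 * while one AUX request/reply pair for the same read costs roughly 66 + 170 =
 * 236 us per the defines above. Assuming the 500 us AUX_RETRY_INTERVAL defined
 * earlier in this file, the retry count works out to about
 * DIV_ROUND_UP(15500, 736) = 22, well above the minimum of 7 required further
 * down.
 */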
/*
* FIXME currently assumes 10 kHz as some real world devices seem
* to require it. We should query/set the speed via DPCD if supported.
*/
static int dp_aux_i2c_speed_khz __read_mostly = 10;
module_param_unsafe(dp_aux_i2c_speed_khz, int, 0644);
MODULE_PARM_DESC(dp_aux_i2c_speed_khz,
"Assumed speed of the i2c bus in kHz, (1-400, default 10)");
/*
* Transfer a single I2C-over-AUX message and handle various error conditions,
* retrying the transaction as appropriate. It is assumed that the
* &drm_dp_aux.transfer function does not modify anything in the msg other than the
* reply field.
*
* Returns bytes transferred on success, or a negative error code on failure.
*/
static int drm_dp_i2c_do_msg(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
unsigned int retry, defer_i2c;
int ret;
/*
* DP1.2 sections 2.7.7.1.5.6.1 and 2.7.7.1.6.6.1: A DP Source device
* is required to retry at least seven times upon receiving AUX_DEFER
* before giving up the AUX transaction.
*
* We also try to account for the i2c bus speed.
*/
int max_retries = max(7, drm_dp_i2c_retry_count(msg, dp_aux_i2c_speed_khz));
for (retry = 0, defer_i2c = 0; retry < (max_retries + defer_i2c); retry++) {
ret = aux->transfer(aux, msg);
if (ret < 0) {
if (ret == -EBUSY)
continue;
/*
* While timeouts can be errors, they're usually normal
* behavior (for instance, when a driver tries to
* communicate with a non-existent DisplayPort device).
* Avoid spamming the kernel log with timeout errors.
*/
if (ret == -ETIMEDOUT)
drm_dbg_kms_ratelimited(aux->drm_dev, "%s: transaction timed out\n",
aux->name);
else
drm_dbg_kms(aux->drm_dev, "%s: transaction failed: %d\n",
aux->name, ret);
return ret;
}
switch (msg->reply & DP_AUX_NATIVE_REPLY_MASK) {
case DP_AUX_NATIVE_REPLY_ACK:
/*
* For I2C-over-AUX transactions this isn't enough, we
* need to check for the I2C ACK reply.
*/
break;
case DP_AUX_NATIVE_REPLY_NACK:
drm_dbg_kms(aux->drm_dev, "%s: native nack (result=%d, size=%zu)\n",
aux->name, ret, msg->size);
return -EREMOTEIO;
case DP_AUX_NATIVE_REPLY_DEFER:
drm_dbg_kms(aux->drm_dev, "%s: native defer\n", aux->name);
/*
* We could check for I2C bit rate capabilities and if
* available adjust this interval. We could also be
* more careful with DP-to-legacy adapters where a
* long legacy cable may force very low I2C bit rates.
*
* For now just defer for long enough to hopefully be
* safe for all use-cases.
*/
usleep_range(AUX_RETRY_INTERVAL, AUX_RETRY_INTERVAL + 100);
continue;
default:
drm_err(aux->drm_dev, "%s: invalid native reply %#04x\n",
aux->name, msg->reply);
return -EREMOTEIO;
}
switch (msg->reply & DP_AUX_I2C_REPLY_MASK) {
case DP_AUX_I2C_REPLY_ACK:
/*
* Both native ACK and I2C ACK replies received. We
* can assume the transfer was successful.
*/
if (ret != msg->size)
drm_dp_i2c_msg_write_status_update(msg);
return ret;
case DP_AUX_I2C_REPLY_NACK:
drm_dbg_kms(aux->drm_dev, "%s: I2C nack (result=%d, size=%zu)\n",
aux->name, ret, msg->size);
aux->i2c_nack_count++;
return -EREMOTEIO;
case DP_AUX_I2C_REPLY_DEFER:
drm_dbg_kms(aux->drm_dev, "%s: I2C defer\n", aux->name);
/* DP Compliance Test 4.2.2.5 Requirement:
* Must have at least 7 retries for I2C defers on the
* transaction to pass this test
*/
aux->i2c_defer_count++;
if (defer_i2c < 7)
defer_i2c++;
usleep_range(AUX_RETRY_INTERVAL, AUX_RETRY_INTERVAL + 100);
drm_dp_i2c_msg_write_status_update(msg);
continue;
default:
drm_err(aux->drm_dev, "%s: invalid I2C reply %#04x\n",
aux->name, msg->reply);
return -EREMOTEIO;
}
}
drm_dbg_kms(aux->drm_dev, "%s: Too many retries, giving up\n", aux->name);
return -EREMOTEIO;
}
static void drm_dp_i2c_msg_set_request(struct drm_dp_aux_msg *msg,
const struct i2c_msg *i2c_msg)
{
msg->request = (i2c_msg->flags & I2C_M_RD) ?
DP_AUX_I2C_READ : DP_AUX_I2C_WRITE;
if (!(i2c_msg->flags & I2C_M_STOP))
msg->request |= DP_AUX_I2C_MOT;
}
/*
* Keep retrying drm_dp_i2c_do_msg until all data has been transferred.
*
* Returns an error code on failure, or a recommended transfer size on success.
*/
static int drm_dp_i2c_drain_msg(struct drm_dp_aux *aux, struct drm_dp_aux_msg *orig_msg)
{
int err, ret = orig_msg->size;
struct drm_dp_aux_msg msg = *orig_msg;
while (msg.size > 0) {
err = drm_dp_i2c_do_msg(aux, &msg);
if (err <= 0)
return err == 0 ? -EPROTO : err;
if (err < msg.size && err < ret) {
drm_dbg_kms(aux->drm_dev,
"%s: Partial I2C reply: requested %zu bytes got %d bytes\n",
aux->name, msg.size, err);
ret = err;
}
msg.size -= err;
msg.buffer += err;
}
return ret;
}
/*
* Bizlink designed DP->DVI-D Dual Link adapters require the I2C over AUX
* packets to be as large as possible. If not, the I2C transactions never
* succeed. Hence the default is maximum.
*/
static int dp_aux_i2c_transfer_size __read_mostly = DP_AUX_MAX_PAYLOAD_BYTES;
module_param_unsafe(dp_aux_i2c_transfer_size, int, 0644);
MODULE_PARM_DESC(dp_aux_i2c_transfer_size,
"Number of bytes to transfer in a single I2C over DP AUX CH message, (1-16, default 16)");
static int drm_dp_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
int num)
{
struct drm_dp_aux *aux = adapter->algo_data;
unsigned int i, j;
unsigned transfer_size;
struct drm_dp_aux_msg msg;
int err = 0;
dp_aux_i2c_transfer_size = clamp(dp_aux_i2c_transfer_size, 1, DP_AUX_MAX_PAYLOAD_BYTES);
memset(&msg, 0, sizeof(msg));
for (i = 0; i < num; i++) {
msg.address = msgs[i].addr;
drm_dp_i2c_msg_set_request(&msg, &msgs[i]);
/* Send a bare address packet to start the transaction.
* Zero sized messages specify an address only (bare
* address) transaction.
*/
msg.buffer = NULL;
msg.size = 0;
err = drm_dp_i2c_do_msg(aux, &msg);
/*
		 * Reset msg.request in case it got
* changed into a WRITE_STATUS_UPDATE.
*/
drm_dp_i2c_msg_set_request(&msg, &msgs[i]);
if (err < 0)
break;
/* We want each transaction to be as large as possible, but
* we'll go to smaller sizes if the hardware gives us a
* short reply.
*/
transfer_size = dp_aux_i2c_transfer_size;
for (j = 0; j < msgs[i].len; j += msg.size) {
msg.buffer = msgs[i].buf + j;
msg.size = min(transfer_size, msgs[i].len - j);
err = drm_dp_i2c_drain_msg(aux, &msg);
/*
			 * Reset msg.request in case it got
* changed into a WRITE_STATUS_UPDATE.
*/
drm_dp_i2c_msg_set_request(&msg, &msgs[i]);
if (err < 0)
break;
transfer_size = err;
}
if (err < 0)
break;
}
if (err >= 0)
err = num;
/* Send a bare address packet to close out the transaction.
* Zero sized messages specify an address only (bare
* address) transaction.
*/
msg.request &= ~DP_AUX_I2C_MOT;
msg.buffer = NULL;
msg.size = 0;
(void)drm_dp_i2c_do_msg(aux, &msg);
return err;
}
static const struct i2c_algorithm drm_dp_i2c_algo = {
.functionality = drm_dp_i2c_functionality,
.master_xfer = drm_dp_i2c_xfer,
};
static struct drm_dp_aux *i2c_to_aux(struct i2c_adapter *i2c)
{
return container_of(i2c, struct drm_dp_aux, ddc);
}
static void lock_bus(struct i2c_adapter *i2c, unsigned int flags)
{
mutex_lock(&i2c_to_aux(i2c)->hw_mutex);
}
static int trylock_bus(struct i2c_adapter *i2c, unsigned int flags)
{
return mutex_trylock(&i2c_to_aux(i2c)->hw_mutex);
}
static void unlock_bus(struct i2c_adapter *i2c, unsigned int flags)
{
mutex_unlock(&i2c_to_aux(i2c)->hw_mutex);
}
static const struct i2c_lock_operations drm_dp_i2c_lock_ops = {
.lock_bus = lock_bus,
.trylock_bus = trylock_bus,
.unlock_bus = unlock_bus,
};
static int drm_dp_aux_get_crc(struct drm_dp_aux *aux, u8 *crc)
{
u8 buf, count;
int ret;
ret = drm_dp_dpcd_readb(aux, DP_TEST_SINK, &buf);
if (ret < 0)
return ret;
WARN_ON(!(buf & DP_TEST_SINK_START));
ret = drm_dp_dpcd_readb(aux, DP_TEST_SINK_MISC, &buf);
if (ret < 0)
return ret;
count = buf & DP_TEST_COUNT_MASK;
if (count == aux->crc_count)
return -EAGAIN; /* No CRC yet */
aux->crc_count = count;
/*
* At DP_TEST_CRC_R_CR, there's 6 bytes containing CRC data, 2 bytes
* per component (RGB or CrYCb).
*/
ret = drm_dp_dpcd_read(aux, DP_TEST_CRC_R_CR, crc, 6);
if (ret < 0)
return ret;
return 0;
}
static void drm_dp_aux_crc_work(struct work_struct *work)
{
struct drm_dp_aux *aux = container_of(work, struct drm_dp_aux,
crc_work);
struct drm_crtc *crtc;
u8 crc_bytes[6];
uint32_t crcs[3];
int ret;
if (WARN_ON(!aux->crtc))
return;
crtc = aux->crtc;
while (crtc->crc.opened) {
drm_crtc_wait_one_vblank(crtc);
if (!crtc->crc.opened)
break;
ret = drm_dp_aux_get_crc(aux, crc_bytes);
if (ret == -EAGAIN) {
usleep_range(1000, 2000);
ret = drm_dp_aux_get_crc(aux, crc_bytes);
}
if (ret == -EAGAIN) {
drm_dbg_kms(aux->drm_dev, "%s: Get CRC failed after retrying: %d\n",
aux->name, ret);
continue;
} else if (ret) {
drm_dbg_kms(aux->drm_dev, "%s: Failed to get a CRC: %d\n", aux->name, ret);
continue;
}
crcs[0] = crc_bytes[0] | crc_bytes[1] << 8;
crcs[1] = crc_bytes[2] | crc_bytes[3] << 8;
crcs[2] = crc_bytes[4] | crc_bytes[5] << 8;
drm_crtc_add_crc_entry(crtc, false, 0, crcs);
}
}
/**
* drm_dp_remote_aux_init() - minimally initialise a remote aux channel
* @aux: DisplayPort AUX channel
*
 * Used for remote AUX channels in general. Merely initializes the CRC work
 * struct.
*/
void drm_dp_remote_aux_init(struct drm_dp_aux *aux)
{
INIT_WORK(&aux->crc_work, drm_dp_aux_crc_work);
}
EXPORT_SYMBOL(drm_dp_remote_aux_init);
/**
* drm_dp_aux_init() - minimally initialise an aux channel
* @aux: DisplayPort AUX channel
*
* If you need to use the drm_dp_aux's i2c adapter prior to registering it with
* the outside world, call drm_dp_aux_init() first. For drivers which are
* grandparents to their AUX adapters (e.g. the AUX adapter is parented by a
* &drm_connector), you must still call drm_dp_aux_register() once the connector
* has been registered to allow userspace access to the auxiliary DP channel.
* Likewise, for such drivers you should also assign &drm_dp_aux.drm_dev as
* early as possible so that the &drm_device that corresponds to the AUX adapter
* may be mentioned in debugging output from the DRM DP helpers.
*
* For devices which use a separate platform device for their AUX adapters, this
* may be called as early as required by the driver.
*
*/
void drm_dp_aux_init(struct drm_dp_aux *aux)
{
mutex_init(&aux->hw_mutex);
mutex_init(&aux->cec.lock);
INIT_WORK(&aux->crc_work, drm_dp_aux_crc_work);
aux->ddc.algo = &drm_dp_i2c_algo;
aux->ddc.algo_data = aux;
aux->ddc.retries = 3;
aux->ddc.lock_ops = &drm_dp_i2c_lock_ops;
}
EXPORT_SYMBOL(drm_dp_aux_init);
/**
* drm_dp_aux_register() - initialise and register aux channel
* @aux: DisplayPort AUX channel
*
* Automatically calls drm_dp_aux_init() if this hasn't been done yet. This
* should only be called once the parent of @aux, &drm_dp_aux.dev, is
* initialized. For devices which are grandparents of their AUX channels,
* &drm_dp_aux.dev will typically be the &drm_connector &device which
* corresponds to @aux. For these devices, it's advised to call
* drm_dp_aux_register() in &drm_connector_funcs.late_register, and likewise to
* call drm_dp_aux_unregister() in &drm_connector_funcs.early_unregister.
* Functions which don't follow this will likely Oops when
* %CONFIG_DRM_DP_AUX_CHARDEV is enabled.
*
* For devices where the AUX channel is a device that exists independently of
* the &drm_device that uses it, such as SoCs and bridge devices, it is
* recommended to call drm_dp_aux_register() after a &drm_device has been
* assigned to &drm_dp_aux.drm_dev, and likewise to call
* drm_dp_aux_unregister() once the &drm_device should no longer be associated
* with the AUX channel (e.g. on bridge detach).
*
* Drivers which need to use the aux channel before either of the two points
* mentioned above need to call drm_dp_aux_init() in order to use the AUX
* channel before registration.
*
* Returns 0 on success or a negative error code on failure.
*/
int drm_dp_aux_register(struct drm_dp_aux *aux)
{
int ret;
WARN_ON_ONCE(!aux->drm_dev);
if (!aux->ddc.algo)
drm_dp_aux_init(aux);
aux->ddc.class = I2C_CLASS_DDC;
aux->ddc.owner = THIS_MODULE;
aux->ddc.dev.parent = aux->dev;
strscpy(aux->ddc.name, aux->name ? aux->name : dev_name(aux->dev),
sizeof(aux->ddc.name));
ret = drm_dp_aux_register_devnode(aux);
if (ret)
return ret;
ret = i2c_add_adapter(&aux->ddc);
if (ret) {
drm_dp_aux_unregister_devnode(aux);
return ret;
}
return 0;
}
EXPORT_SYMBOL(drm_dp_aux_register);
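/*
 * Illustrative ordering for a driver whose AUX adapter is (grand)parented by
 * its connector; the callbacks and field assignments below are a sketch, not
 * a complete driver:
 *
 *	// at probe time, before the connector is registered
 *	aux->drm_dev = drm;
 *	drm_dp_aux_init(aux);
 *
 *	// in &drm_connector_funcs.late_register
 *	aux->dev = connector->kdev;
 *	drm_dp_aux_register(aux);
 *
 *	// in &drm_connector_funcs.early_unregister
 *	drm_dp_aux_unregister(aux);
 */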
/**
* drm_dp_aux_unregister() - unregister an AUX adapter
* @aux: DisplayPort AUX channel
*/
void drm_dp_aux_unregister(struct drm_dp_aux *aux)
{
drm_dp_aux_unregister_devnode(aux);
i2c_del_adapter(&aux->ddc);
}
EXPORT_SYMBOL(drm_dp_aux_unregister);
#define PSR_SETUP_TIME(x) [DP_PSR_SETUP_TIME_ ## x >> DP_PSR_SETUP_TIME_SHIFT] = (x)
/**
* drm_dp_psr_setup_time() - PSR setup in time usec
* @psr_cap: PSR capabilities from DPCD
*
* Returns:
* PSR setup time for the panel in microseconds, negative
* error code on failure.
*/
int drm_dp_psr_setup_time(const u8 psr_cap[EDP_PSR_RECEIVER_CAP_SIZE])
{
static const u16 psr_setup_time_us[] = {
PSR_SETUP_TIME(330),
PSR_SETUP_TIME(275),
PSR_SETUP_TIME(220),
PSR_SETUP_TIME(165),
PSR_SETUP_TIME(110),
PSR_SETUP_TIME(55),
PSR_SETUP_TIME(0),
};
int i;
i = (psr_cap[1] & DP_PSR_SETUP_TIME_MASK) >> DP_PSR_SETUP_TIME_SHIFT;
if (i >= ARRAY_SIZE(psr_setup_time_us))
return -EINVAL;
return psr_setup_time_us[i];
}
EXPORT_SYMBOL(drm_dp_psr_setup_time);
#undef PSR_SETUP_TIME
/**
* drm_dp_start_crc() - start capture of frame CRCs
* @aux: DisplayPort AUX channel
* @crtc: CRTC displaying the frames whose CRCs are to be captured
*
* Returns 0 on success or a negative error code on failure.
*/
int drm_dp_start_crc(struct drm_dp_aux *aux, struct drm_crtc *crtc)
{
u8 buf;
int ret;
ret = drm_dp_dpcd_readb(aux, DP_TEST_SINK, &buf);
if (ret < 0)
return ret;
ret = drm_dp_dpcd_writeb(aux, DP_TEST_SINK, buf | DP_TEST_SINK_START);
if (ret < 0)
return ret;
aux->crc_count = 0;
aux->crtc = crtc;
schedule_work(&aux->crc_work);
return 0;
}
EXPORT_SYMBOL(drm_dp_start_crc);
/**
* drm_dp_stop_crc() - stop capture of frame CRCs
* @aux: DisplayPort AUX channel
*
* Returns 0 on success or a negative error code on failure.
*/
int drm_dp_stop_crc(struct drm_dp_aux *aux)
{
u8 buf;
int ret;
ret = drm_dp_dpcd_readb(aux, DP_TEST_SINK, &buf);
if (ret < 0)
return ret;
ret = drm_dp_dpcd_writeb(aux, DP_TEST_SINK, buf & ~DP_TEST_SINK_START);
if (ret < 0)
return ret;
flush_work(&aux->crc_work);
aux->crtc = NULL;
return 0;
}
EXPORT_SYMBOL(drm_dp_stop_crc);
struct dpcd_quirk {
u8 oui[3];
u8 device_id[6];
bool is_branch;
u32 quirks;
};
#define OUI(first, second, third) { (first), (second), (third) }
#define DEVICE_ID(first, second, third, fourth, fifth, sixth) \
{ (first), (second), (third), (fourth), (fifth), (sixth) }
#define DEVICE_ID_ANY DEVICE_ID(0, 0, 0, 0, 0, 0)
static const struct dpcd_quirk dpcd_quirk_list[] = {
/* Analogix 7737 needs reduced M and N at HBR2 link rates */
{ OUI(0x00, 0x22, 0xb9), DEVICE_ID_ANY, true, BIT(DP_DPCD_QUIRK_CONSTANT_N) },
/* LG LP140WF6-SPM1 eDP panel */
{ OUI(0x00, 0x22, 0xb9), DEVICE_ID('s', 'i', 'v', 'a', 'r', 'T'), false, BIT(DP_DPCD_QUIRK_CONSTANT_N) },
/* Apple panels need some additional handling to support PSR */
{ OUI(0x00, 0x10, 0xfa), DEVICE_ID_ANY, false, BIT(DP_DPCD_QUIRK_NO_PSR) },
/* CH7511 seems to leave SINK_COUNT zeroed */
{ OUI(0x00, 0x00, 0x00), DEVICE_ID('C', 'H', '7', '5', '1', '1'), false, BIT(DP_DPCD_QUIRK_NO_SINK_COUNT) },
/* Synaptics DP1.4 MST hubs can support DSC without virtual DPCD */
{ OUI(0x90, 0xCC, 0x24), DEVICE_ID_ANY, true, BIT(DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD) },
/* Apple MacBookPro 2017 15 inch eDP Retina panel reports too low DP_MAX_LINK_RATE */
{ OUI(0x00, 0x10, 0xfa), DEVICE_ID(101, 68, 21, 101, 98, 97), false, BIT(DP_DPCD_QUIRK_CAN_DO_MAX_LINK_RATE_3_24_GBPS) },
};
#undef OUI
/*
* Get a bit mask of DPCD quirks for the sink/branch device identified by
* ident. The quirk data is shared but it's up to the drivers to act on the
* data.
*
* For now, only the OUI (first three bytes) is used, but this may be extended
* to device identification string and hardware/firmware revisions later.
*/
static u32
drm_dp_get_quirks(const struct drm_dp_dpcd_ident *ident, bool is_branch)
{
const struct dpcd_quirk *quirk;
u32 quirks = 0;
int i;
u8 any_device[] = DEVICE_ID_ANY;
for (i = 0; i < ARRAY_SIZE(dpcd_quirk_list); i++) {
quirk = &dpcd_quirk_list[i];
if (quirk->is_branch != is_branch)
continue;
if (memcmp(quirk->oui, ident->oui, sizeof(ident->oui)) != 0)
continue;
if (memcmp(quirk->device_id, any_device, sizeof(any_device)) != 0 &&
memcmp(quirk->device_id, ident->device_id, sizeof(ident->device_id)) != 0)
continue;
quirks |= quirk->quirks;
}
return quirks;
}
#undef DEVICE_ID_ANY
#undef DEVICE_ID
/**
* drm_dp_read_desc - read sink/branch descriptor from DPCD
* @aux: DisplayPort AUX channel
* @desc: Device descriptor to fill from DPCD
* @is_branch: true for branch devices, false for sink devices
*
* Read DPCD 0x400 (sink) or 0x500 (branch) into @desc. Also debug log the
* identification.
*
* Returns 0 on success or a negative error code on failure.
*/
int drm_dp_read_desc(struct drm_dp_aux *aux, struct drm_dp_desc *desc,
bool is_branch)
{
struct drm_dp_dpcd_ident *ident = &desc->ident;
unsigned int offset = is_branch ? DP_BRANCH_OUI : DP_SINK_OUI;
int ret, dev_id_len;
ret = drm_dp_dpcd_read(aux, offset, ident, sizeof(*ident));
if (ret < 0)
return ret;
desc->quirks = drm_dp_get_quirks(ident, is_branch);
dev_id_len = strnlen(ident->device_id, sizeof(ident->device_id));
drm_dbg_kms(aux->drm_dev,
"%s: DP %s: OUI %*phD dev-ID %*pE HW-rev %d.%d SW-rev %d.%d quirks 0x%04x\n",
aux->name, is_branch ? "branch" : "sink",
(int)sizeof(ident->oui), ident->oui, dev_id_len,
ident->device_id, ident->hw_rev >> 4, ident->hw_rev & 0xf,
ident->sw_major_rev, ident->sw_minor_rev, desc->quirks);
return 0;
}
EXPORT_SYMBOL(drm_dp_read_desc);
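/*
 * Illustrative use: read the descriptor once at detect time and consult the
 * quirk bits with drm_dp_has_quirk() afterwards (dpcd assumed cached;
 * constant_n is a made-up local for this sketch):
 *
 *	struct drm_dp_desc desc;
 *
 *	if (drm_dp_read_desc(aux, &desc, drm_dp_is_branch(dpcd)) == 0 &&
 *	    drm_dp_has_quirk(&desc, DP_DPCD_QUIRK_CONSTANT_N))
 *		constant_n = true;	// use reduced M/N as the quirk requires
 */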
/**
* drm_dp_dsc_sink_max_slice_count() - Get the max slice count
* supported by the DSC sink.
* @dsc_dpcd: DSC capabilities from DPCD
 * @is_edp: true if it's eDP, false for DP
*
* Read the slice capabilities DPCD register from DSC sink to get
* the maximum slice count supported. This is used to populate
* the DSC parameters in the &struct drm_dsc_config by the driver.
* Driver creates an infoframe using these parameters to populate
 * &struct drm_dsc_pps_infoframe. These are sent to the sink in a DSC
 * infoframe packed with the helper function drm_dsc_pps_infoframe_pack()
*
* Returns:
 * Maximum slice count supported by the DSC sink, or 0 if invalid
*/
u8 drm_dp_dsc_sink_max_slice_count(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE],
bool is_edp)
{
u8 slice_cap1 = dsc_dpcd[DP_DSC_SLICE_CAP_1 - DP_DSC_SUPPORT];
if (is_edp) {
/* For eDP, register DSC_SLICE_CAPABILITIES_1 gives slice count */
if (slice_cap1 & DP_DSC_4_PER_DP_DSC_SINK)
return 4;
if (slice_cap1 & DP_DSC_2_PER_DP_DSC_SINK)
return 2;
if (slice_cap1 & DP_DSC_1_PER_DP_DSC_SINK)
return 1;
} else {
/* For DP, use values from DSC_SLICE_CAP_1 and DSC_SLICE_CAP2 */
u8 slice_cap2 = dsc_dpcd[DP_DSC_SLICE_CAP_2 - DP_DSC_SUPPORT];
if (slice_cap2 & DP_DSC_24_PER_DP_DSC_SINK)
return 24;
if (slice_cap2 & DP_DSC_20_PER_DP_DSC_SINK)
return 20;
if (slice_cap2 & DP_DSC_16_PER_DP_DSC_SINK)
return 16;
if (slice_cap1 & DP_DSC_12_PER_DP_DSC_SINK)
return 12;
if (slice_cap1 & DP_DSC_10_PER_DP_DSC_SINK)
return 10;
if (slice_cap1 & DP_DSC_8_PER_DP_DSC_SINK)
return 8;
if (slice_cap1 & DP_DSC_6_PER_DP_DSC_SINK)
return 6;
if (slice_cap1 & DP_DSC_4_PER_DP_DSC_SINK)
return 4;
if (slice_cap1 & DP_DSC_2_PER_DP_DSC_SINK)
return 2;
if (slice_cap1 & DP_DSC_1_PER_DP_DSC_SINK)
return 1;
}
return 0;
}
EXPORT_SYMBOL(drm_dp_dsc_sink_max_slice_count);
/**
* drm_dp_dsc_sink_line_buf_depth() - Get the line buffer depth in bits
* @dsc_dpcd: DSC capabilities from DPCD
*
* Read the DSC DPCD register to parse the line buffer depth in bits which is
* number of bits of precision within the decoder line buffer supported by
* the DSC sink. This is used to populate the DSC parameters in the
* &struct drm_dsc_config by the driver.
* Driver creates an infoframe using these parameters to populate
 * &struct drm_dsc_pps_infoframe. These are sent to the sink in a DSC
 * infoframe packed with the helper function drm_dsc_pps_infoframe_pack()
*
* Returns:
 * Line buffer depth supported by the DSC panel, or 0 if invalid
*/
u8 drm_dp_dsc_sink_line_buf_depth(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE])
{
u8 line_buf_depth = dsc_dpcd[DP_DSC_LINE_BUF_BIT_DEPTH - DP_DSC_SUPPORT];
switch (line_buf_depth & DP_DSC_LINE_BUF_BIT_DEPTH_MASK) {
case DP_DSC_LINE_BUF_BIT_DEPTH_9:
return 9;
case DP_DSC_LINE_BUF_BIT_DEPTH_10:
return 10;
case DP_DSC_LINE_BUF_BIT_DEPTH_11:
return 11;
case DP_DSC_LINE_BUF_BIT_DEPTH_12:
return 12;
case DP_DSC_LINE_BUF_BIT_DEPTH_13:
return 13;
case DP_DSC_LINE_BUF_BIT_DEPTH_14:
return 14;
case DP_DSC_LINE_BUF_BIT_DEPTH_15:
return 15;
case DP_DSC_LINE_BUF_BIT_DEPTH_16:
return 16;
case DP_DSC_LINE_BUF_BIT_DEPTH_8:
return 8;
}
return 0;
}
EXPORT_SYMBOL(drm_dp_dsc_sink_line_buf_depth);
/**
* drm_dp_dsc_sink_supported_input_bpcs() - Get all the input bits per component
* values supported by the DSC sink.
* @dsc_dpcd: DSC capabilities from DPCD
* @dsc_bpc: An array to be filled by this helper with supported
* input bpcs.
*
* Read the DSC DPCD from the sink device to parse the supported bits per
* component values. This is used to populate the DSC parameters
* in the &struct drm_dsc_config by the driver.
* Driver creates an infoframe using these parameters to populate
 * &struct drm_dsc_pps_infoframe. These are sent to the sink in a DSC
 * infoframe packed with the helper function drm_dsc_pps_infoframe_pack()
*
* Returns:
* Number of input BPC values parsed from the DPCD
*/
int drm_dp_dsc_sink_supported_input_bpcs(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE],
u8 dsc_bpc[3])
{
int num_bpc = 0;
u8 color_depth = dsc_dpcd[DP_DSC_DEC_COLOR_DEPTH_CAP - DP_DSC_SUPPORT];
if (color_depth & DP_DSC_12_BPC)
dsc_bpc[num_bpc++] = 12;
if (color_depth & DP_DSC_10_BPC)
dsc_bpc[num_bpc++] = 10;
if (color_depth & DP_DSC_8_BPC)
dsc_bpc[num_bpc++] = 8;
return num_bpc;
}
EXPORT_SYMBOL(drm_dp_dsc_sink_supported_input_bpcs);
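/*
 * Illustrative use of the DSC sink helpers above when filling a
 * &struct drm_dsc_config (variable names are made up for this sketch):
 *
 *	u8 dsc_bpc[3];
 *	int num_bpc = drm_dp_dsc_sink_supported_input_bpcs(dsc_dpcd, dsc_bpc);
 *	u8 max_slices = drm_dp_dsc_sink_max_slice_count(dsc_dpcd, false);
 *	u8 line_buf_depth = drm_dp_dsc_sink_line_buf_depth(dsc_dpcd);
 *
 * A driver would then pick the highest entry of dsc_bpc[] that the mode and
 * link budget allow, and reject DSC if any of the values above is 0.
 */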
static int drm_dp_read_lttpr_regs(struct drm_dp_aux *aux,
const u8 dpcd[DP_RECEIVER_CAP_SIZE], int address,
u8 *buf, int buf_size)
{
/*
* At least the DELL P2715Q monitor with a DPCD_REV < 0x14 returns
* corrupted values when reading from the 0xF0000- range with a block
* size bigger than 1.
*/
int block_size = dpcd[DP_DPCD_REV] < 0x14 ? 1 : buf_size;
int offset;
int ret;
for (offset = 0; offset < buf_size; offset += block_size) {
ret = drm_dp_dpcd_read(aux,
address + offset,
&buf[offset], block_size);
if (ret < 0)
return ret;
WARN_ON(ret != block_size);
}
return 0;
}
/**
* drm_dp_read_lttpr_common_caps - read the LTTPR common capabilities
* @aux: DisplayPort AUX channel
* @dpcd: DisplayPort configuration data
* @caps: buffer to return the capability info in
*
* Read capabilities common to all LTTPRs.
*
* Returns 0 on success or a negative error code on failure.
*/
int drm_dp_read_lttpr_common_caps(struct drm_dp_aux *aux,
const u8 dpcd[DP_RECEIVER_CAP_SIZE],
u8 caps[DP_LTTPR_COMMON_CAP_SIZE])
{
return drm_dp_read_lttpr_regs(aux, dpcd,
DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV,
caps, DP_LTTPR_COMMON_CAP_SIZE);
}
EXPORT_SYMBOL(drm_dp_read_lttpr_common_caps);
/**
* drm_dp_read_lttpr_phy_caps - read the capabilities for a given LTTPR PHY
* @aux: DisplayPort AUX channel
* @dpcd: DisplayPort configuration data
* @dp_phy: LTTPR PHY to read the capabilities for
* @caps: buffer to return the capability info in
*
* Read the capabilities for the given LTTPR PHY.
*
* Returns 0 on success or a negative error code on failure.
*/
int drm_dp_read_lttpr_phy_caps(struct drm_dp_aux *aux,
const u8 dpcd[DP_RECEIVER_CAP_SIZE],
enum drm_dp_phy dp_phy,
u8 caps[DP_LTTPR_PHY_CAP_SIZE])
{
return drm_dp_read_lttpr_regs(aux, dpcd,
DP_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER(dp_phy),
caps, DP_LTTPR_PHY_CAP_SIZE);
}
EXPORT_SYMBOL(drm_dp_read_lttpr_phy_caps);
static u8 dp_lttpr_common_cap(const u8 caps[DP_LTTPR_COMMON_CAP_SIZE], int r)
{
return caps[r - DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
}
/**
* drm_dp_lttpr_count - get the number of detected LTTPRs
* @caps: LTTPR common capabilities
*
* Get the number of detected LTTPRs from the LTTPR common capabilities info.
*
* Returns:
* -ERANGE if more than supported number (8) of LTTPRs are detected
* -EINVAL if the DP_PHY_REPEATER_CNT register contains an invalid value
* otherwise the number of detected LTTPRs
*/
int drm_dp_lttpr_count(const u8 caps[DP_LTTPR_COMMON_CAP_SIZE])
{
u8 count = dp_lttpr_common_cap(caps, DP_PHY_REPEATER_CNT);
switch (hweight8(count)) {
case 0:
return 0;
case 1:
return 8 - ilog2(count);
case 8:
return -ERANGE;
default:
return -EINVAL;
}
}
EXPORT_SYMBOL(drm_dp_lttpr_count);
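/*
 * Illustrative flow (dpcd assumed to have been read with
 * drm_dp_read_dpcd_caps(); max_rate and max_lanes are the driver's DPRX
 * limits in this sketch):
 *
 *	u8 lttpr_caps[DP_LTTPR_COMMON_CAP_SIZE];
 *
 *	if (drm_dp_read_lttpr_common_caps(aux, dpcd, lttpr_caps) == 0 &&
 *	    drm_dp_lttpr_count(lttpr_caps) > 0) {
 *		max_rate = min(max_rate, drm_dp_lttpr_max_link_rate(lttpr_caps));
 *		max_lanes = min(max_lanes, drm_dp_lttpr_max_lane_count(lttpr_caps));
 *	}
 */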
/**
* drm_dp_lttpr_max_link_rate - get the maximum link rate supported by all LTTPRs
* @caps: LTTPR common capabilities
*
* Returns the maximum link rate supported by all detected LTTPRs.
*/
int drm_dp_lttpr_max_link_rate(const u8 caps[DP_LTTPR_COMMON_CAP_SIZE])
{
u8 rate = dp_lttpr_common_cap(caps, DP_MAX_LINK_RATE_PHY_REPEATER);
return drm_dp_bw_code_to_link_rate(rate);
}
EXPORT_SYMBOL(drm_dp_lttpr_max_link_rate);
/**
* drm_dp_lttpr_max_lane_count - get the maximum lane count supported by all LTTPRs
* @caps: LTTPR common capabilities
*
* Returns the maximum lane count supported by all detected LTTPRs.
*/
int drm_dp_lttpr_max_lane_count(const u8 caps[DP_LTTPR_COMMON_CAP_SIZE])
{
u8 max_lanes = dp_lttpr_common_cap(caps, DP_MAX_LANE_COUNT_PHY_REPEATER);
return max_lanes & DP_MAX_LANE_COUNT_MASK;
}
EXPORT_SYMBOL(drm_dp_lttpr_max_lane_count);
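/*
 * Accessor for the per-PHY LTTPR capabilities used below. Minimal sketch: it
 * assumes the caps buffer was filled by drm_dp_read_lttpr_phy_caps() and is
 * therefore indexed relative to the LTTPR1 register block.
 */
static u8 dp_lttpr_phy_cap(const u8 caps[DP_LTTPR_PHY_CAP_SIZE], int r)
{
	return caps[r - DP_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER(DP_PHY_LTTPR1)];
}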
/**
* drm_dp_lttpr_voltage_swing_level_3_supported - check for LTTPR vswing3 support
* @caps: LTTPR PHY capabilities
*
* Returns true if the @caps for an LTTPR TX PHY indicate support for
* voltage swing level 3.
*/
bool
drm_dp_lttpr_voltage_swing_level_3_supported(const u8 caps[DP_LTTPR_PHY_CAP_SIZE])
{
u8 txcap = dp_lttpr_phy_cap(caps, DP_TRANSMITTER_CAPABILITY_PHY_REPEATER1);
return txcap & DP_VOLTAGE_SWING_LEVEL_3_SUPPORTED;
}
EXPORT_SYMBOL(drm_dp_lttpr_voltage_swing_level_3_supported);
/**
* drm_dp_lttpr_pre_emphasis_level_3_supported - check for LTTPR preemph3 support
* @caps: LTTPR PHY capabilities
*
* Returns true if the @caps for an LTTPR TX PHY indicate support for
* pre-emphasis level 3.
*/
bool
drm_dp_lttpr_pre_emphasis_level_3_supported(const u8 caps[DP_LTTPR_PHY_CAP_SIZE])
{
u8 txcap = dp_lttpr_phy_cap(caps, DP_TRANSMITTER_CAPABILITY_PHY_REPEATER1);
return txcap & DP_PRE_EMPHASIS_LEVEL_3_SUPPORTED;
}
EXPORT_SYMBOL(drm_dp_lttpr_pre_emphasis_level_3_supported);
/**
* drm_dp_get_phy_test_pattern() - get the requested pattern from the sink.
* @aux: DisplayPort AUX channel
* @data: DP phy compliance test parameters.
*
* Returns 0 on success or a negative error code on failure.
*/
int drm_dp_get_phy_test_pattern(struct drm_dp_aux *aux,
struct drm_dp_phy_test_params *data)
{
int err;
u8 rate, lanes;
err = drm_dp_dpcd_readb(aux, DP_TEST_LINK_RATE, &rate);
if (err < 0)
return err;
data->link_rate = drm_dp_bw_code_to_link_rate(rate);
err = drm_dp_dpcd_readb(aux, DP_TEST_LANE_COUNT, &lanes);
if (err < 0)
return err;
data->num_lanes = lanes & DP_MAX_LANE_COUNT_MASK;
if (lanes & DP_ENHANCED_FRAME_CAP)
data->enhanced_frame_cap = true;
err = drm_dp_dpcd_readb(aux, DP_PHY_TEST_PATTERN, &data->phy_pattern);
if (err < 0)
return err;
switch (data->phy_pattern) {
case DP_PHY_TEST_PATTERN_80BIT_CUSTOM:
err = drm_dp_dpcd_read(aux, DP_TEST_80BIT_CUSTOM_PATTERN_7_0,
&data->custom80, sizeof(data->custom80));
if (err < 0)
return err;
break;
case DP_PHY_TEST_PATTERN_CP2520:
err = drm_dp_dpcd_read(aux, DP_TEST_HBR2_SCRAMBLER_RESET,
&data->hbr2_reset,
sizeof(data->hbr2_reset));
if (err < 0)
return err;
}
return 0;
}
EXPORT_SYMBOL(drm_dp_get_phy_test_pattern);
/**
* drm_dp_set_phy_test_pattern() - set the pattern to the sink.
* @aux: DisplayPort AUX channel
* @data: DP phy compliance test parameters.
* @dp_rev: DP revision to use for compliance testing
*
* Returns 0 on success or a negative error code on failure.
*/
int drm_dp_set_phy_test_pattern(struct drm_dp_aux *aux,
struct drm_dp_phy_test_params *data, u8 dp_rev)
{
int err, i;
u8 test_pattern;
test_pattern = data->phy_pattern;
if (dp_rev < 0x12) {
test_pattern = (test_pattern << 2) &
DP_LINK_QUAL_PATTERN_11_MASK;
err = drm_dp_dpcd_writeb(aux, DP_TRAINING_PATTERN_SET,
test_pattern);
if (err < 0)
return err;
} else {
for (i = 0; i < data->num_lanes; i++) {
err = drm_dp_dpcd_writeb(aux,
DP_LINK_QUAL_LANE0_SET + i,
test_pattern);
if (err < 0)
return err;
}
}
return 0;
}
EXPORT_SYMBOL(drm_dp_set_phy_test_pattern);
static const char *dp_pixelformat_get_name(enum dp_pixelformat pixelformat)
{
if (pixelformat < 0 || pixelformat > DP_PIXELFORMAT_RESERVED)
return "Invalid";
switch (pixelformat) {
case DP_PIXELFORMAT_RGB:
return "RGB";
case DP_PIXELFORMAT_YUV444:
return "YUV444";
case DP_PIXELFORMAT_YUV422:
return "YUV422";
case DP_PIXELFORMAT_YUV420:
return "YUV420";
case DP_PIXELFORMAT_Y_ONLY:
return "Y_ONLY";
case DP_PIXELFORMAT_RAW:
return "RAW";
default:
return "Reserved";
}
}
static const char *dp_colorimetry_get_name(enum dp_pixelformat pixelformat,
enum dp_colorimetry colorimetry)
{
if (pixelformat < 0 || pixelformat > DP_PIXELFORMAT_RESERVED)
return "Invalid";
switch (colorimetry) {
case DP_COLORIMETRY_DEFAULT:
switch (pixelformat) {
case DP_PIXELFORMAT_RGB:
return "sRGB";
case DP_PIXELFORMAT_YUV444:
case DP_PIXELFORMAT_YUV422:
case DP_PIXELFORMAT_YUV420:
return "BT.601";
case DP_PIXELFORMAT_Y_ONLY:
return "DICOM PS3.14";
case DP_PIXELFORMAT_RAW:
return "Custom Color Profile";
default:
return "Reserved";
}
case DP_COLORIMETRY_RGB_WIDE_FIXED: /* and DP_COLORIMETRY_BT709_YCC */
switch (pixelformat) {
case DP_PIXELFORMAT_RGB:
return "Wide Fixed";
case DP_PIXELFORMAT_YUV444:
case DP_PIXELFORMAT_YUV422:
case DP_PIXELFORMAT_YUV420:
return "BT.709";
default:
return "Reserved";
}
case DP_COLORIMETRY_RGB_WIDE_FLOAT: /* and DP_COLORIMETRY_XVYCC_601 */
switch (pixelformat) {
case DP_PIXELFORMAT_RGB:
return "Wide Float";
case DP_PIXELFORMAT_YUV444:
case DP_PIXELFORMAT_YUV422:
case DP_PIXELFORMAT_YUV420:
return "xvYCC 601";
default:
return "Reserved";
}
case DP_COLORIMETRY_OPRGB: /* and DP_COLORIMETRY_XVYCC_709 */
switch (pixelformat) {
case DP_PIXELFORMAT_RGB:
return "OpRGB";
case DP_PIXELFORMAT_YUV444:
case DP_PIXELFORMAT_YUV422:
case DP_PIXELFORMAT_YUV420:
return "xvYCC 709";
default:
return "Reserved";
}
case DP_COLORIMETRY_DCI_P3_RGB: /* and DP_COLORIMETRY_SYCC_601 */
switch (pixelformat) {
case DP_PIXELFORMAT_RGB:
return "DCI-P3";
case DP_PIXELFORMAT_YUV444:
case DP_PIXELFORMAT_YUV422:
case DP_PIXELFORMAT_YUV420:
return "sYCC 601";
default:
return "Reserved";
}
case DP_COLORIMETRY_RGB_CUSTOM: /* and DP_COLORIMETRY_OPYCC_601 */
switch (pixelformat) {
case DP_PIXELFORMAT_RGB:
return "Custom Profile";
case DP_PIXELFORMAT_YUV444:
case DP_PIXELFORMAT_YUV422:
case DP_PIXELFORMAT_YUV420:
return "OpYCC 601";
default:
return "Reserved";
}
case DP_COLORIMETRY_BT2020_RGB: /* and DP_COLORIMETRY_BT2020_CYCC */
switch (pixelformat) {
case DP_PIXELFORMAT_RGB:
return "BT.2020 RGB";
case DP_PIXELFORMAT_YUV444:
case DP_PIXELFORMAT_YUV422:
case DP_PIXELFORMAT_YUV420:
return "BT.2020 CYCC";
default:
return "Reserved";
}
case DP_COLORIMETRY_BT2020_YCC:
switch (pixelformat) {
case DP_PIXELFORMAT_YUV444:
case DP_PIXELFORMAT_YUV422:
case DP_PIXELFORMAT_YUV420:
return "BT.2020 YCC";
default:
return "Reserved";
}
default:
return "Invalid";
}
}
static const char *dp_dynamic_range_get_name(enum dp_dynamic_range dynamic_range)
{
switch (dynamic_range) {
case DP_DYNAMIC_RANGE_VESA:
return "VESA range";
case DP_DYNAMIC_RANGE_CTA:
return "CTA range";
default:
return "Invalid";
}
}
static const char *dp_content_type_get_name(enum dp_content_type content_type)
{
switch (content_type) {
case DP_CONTENT_TYPE_NOT_DEFINED:
return "Not defined";
case DP_CONTENT_TYPE_GRAPHICS:
return "Graphics";
case DP_CONTENT_TYPE_PHOTO:
return "Photo";
case DP_CONTENT_TYPE_VIDEO:
return "Video";
case DP_CONTENT_TYPE_GAME:
return "Game";
default:
return "Reserved";
}
}
void drm_dp_vsc_sdp_log(const char *level, struct device *dev,
const struct drm_dp_vsc_sdp *vsc)
{
#define DP_SDP_LOG(fmt, ...) dev_printk(level, dev, fmt, ##__VA_ARGS__)
DP_SDP_LOG("DP SDP: %s, revision %u, length %u\n", "VSC",
vsc->revision, vsc->length);
DP_SDP_LOG(" pixelformat: %s\n",
dp_pixelformat_get_name(vsc->pixelformat));
DP_SDP_LOG(" colorimetry: %s\n",
dp_colorimetry_get_name(vsc->pixelformat, vsc->colorimetry));
DP_SDP_LOG(" bpc: %u\n", vsc->bpc);
DP_SDP_LOG(" dynamic range: %s\n",
dp_dynamic_range_get_name(vsc->dynamic_range));
DP_SDP_LOG(" content type: %s\n",
dp_content_type_get_name(vsc->content_type));
#undef DP_SDP_LOG
}
EXPORT_SYMBOL(drm_dp_vsc_sdp_log);
/**
* drm_dp_get_pcon_max_frl_bw() - maximum frl supported by PCON
* @dpcd: DisplayPort configuration data
* @port_cap: port capabilities
*
* Returns the maximum FRL bandwidth supported by the PCON in Gbps,
* or 0 if not supported.
*/
int drm_dp_get_pcon_max_frl_bw(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
const u8 port_cap[4])
{
int bw;
u8 buf;
buf = port_cap[2];
bw = buf & DP_PCON_MAX_FRL_BW;
switch (bw) {
case DP_PCON_MAX_9GBPS:
return 9;
case DP_PCON_MAX_18GBPS:
return 18;
case DP_PCON_MAX_24GBPS:
return 24;
case DP_PCON_MAX_32GBPS:
return 32;
case DP_PCON_MAX_40GBPS:
return 40;
case DP_PCON_MAX_48GBPS:
return 48;
case DP_PCON_MAX_0GBPS:
default:
return 0;
}
return 0;
}
EXPORT_SYMBOL(drm_dp_get_pcon_max_frl_bw);
/**
* drm_dp_pcon_frl_prepare() - Prepare PCON for FRL.
* @aux: DisplayPort AUX channel
* @enable_frl_ready_hpd: Configure DP_PCON_ENABLE_HPD_READY.
*
* Returns 0 if success, else returns negative error code.
*/
int drm_dp_pcon_frl_prepare(struct drm_dp_aux *aux, bool enable_frl_ready_hpd)
{
int ret;
u8 buf = DP_PCON_ENABLE_SOURCE_CTL_MODE |
DP_PCON_ENABLE_LINK_FRL_MODE;
if (enable_frl_ready_hpd)
buf |= DP_PCON_ENABLE_HPD_READY;
ret = drm_dp_dpcd_writeb(aux, DP_PCON_HDMI_LINK_CONFIG_1, buf);
return ret;
}
EXPORT_SYMBOL(drm_dp_pcon_frl_prepare);
/**
* drm_dp_pcon_is_frl_ready() - Is PCON ready for FRL
* @aux: DisplayPort AUX channel
*
* Returns true if success, else returns false.
*/
bool drm_dp_pcon_is_frl_ready(struct drm_dp_aux *aux)
{
int ret;
u8 buf;
ret = drm_dp_dpcd_readb(aux, DP_PCON_HDMI_TX_LINK_STATUS, &buf);
if (ret < 0)
return false;
if (buf & DP_PCON_FRL_READY)
return true;
return false;
}
EXPORT_SYMBOL(drm_dp_pcon_is_frl_ready);
/**
* drm_dp_pcon_frl_configure_1() - Set HDMI LINK Configuration-Step1
* @aux: DisplayPort AUX channel
* @max_frl_gbps: maximum frl bw to be configured between PCON and HDMI sink
* @frl_mode: FRL training mode; it can be either Concurrent or Sequential.
* In Concurrent mode, the FRL link bring-up can be done along with
* DP link training. In Sequential mode, the FRL link bring-up is done prior to
* the DP link training.
*
* Returns 0 if success, else returns negative error code.
*/
int drm_dp_pcon_frl_configure_1(struct drm_dp_aux *aux, int max_frl_gbps,
u8 frl_mode)
{
int ret;
u8 buf;
ret = drm_dp_dpcd_readb(aux, DP_PCON_HDMI_LINK_CONFIG_1, &buf);
if (ret < 0)
return ret;
if (frl_mode == DP_PCON_ENABLE_CONCURRENT_LINK)
buf |= DP_PCON_ENABLE_CONCURRENT_LINK;
else
buf &= ~DP_PCON_ENABLE_CONCURRENT_LINK;
switch (max_frl_gbps) {
case 9:
buf |= DP_PCON_ENABLE_MAX_BW_9GBPS;
break;
case 18:
buf |= DP_PCON_ENABLE_MAX_BW_18GBPS;
break;
case 24:
buf |= DP_PCON_ENABLE_MAX_BW_24GBPS;
break;
case 32:
buf |= DP_PCON_ENABLE_MAX_BW_32GBPS;
break;
case 40:
buf |= DP_PCON_ENABLE_MAX_BW_40GBPS;
break;
case 48:
buf |= DP_PCON_ENABLE_MAX_BW_48GBPS;
break;
case 0:
buf |= DP_PCON_ENABLE_MAX_BW_0GBPS;
break;
default:
return -EINVAL;
}
ret = drm_dp_dpcd_writeb(aux, DP_PCON_HDMI_LINK_CONFIG_1, buf);
if (ret < 0)
return ret;
return 0;
}
EXPORT_SYMBOL(drm_dp_pcon_frl_configure_1);
/**
* drm_dp_pcon_frl_configure_2() - Set HDMI Link configuration Step-2
* @aux: DisplayPort AUX channel
* @max_frl_mask: Max FRL BW to be tried by the PCON with the HDMI sink
* @frl_type: FRL training type, can be Extended or Normal.
* In Normal FRL training, the PCON tries each FRL BW from the max_frl_mask
* starting from the minimum, and stops when link training is successful. In Extended
* FRL training, all FRL BWs selected in the mask are trained by the PCON.
*
* Returns 0 if success, else returns negative error code.
*/
int drm_dp_pcon_frl_configure_2(struct drm_dp_aux *aux, int max_frl_mask,
u8 frl_type)
{
int ret;
u8 buf = max_frl_mask;
if (frl_type == DP_PCON_FRL_LINK_TRAIN_EXTENDED)
buf |= DP_PCON_FRL_LINK_TRAIN_EXTENDED;
else
buf &= ~DP_PCON_FRL_LINK_TRAIN_EXTENDED;
ret = drm_dp_dpcd_writeb(aux, DP_PCON_HDMI_LINK_CONFIG_2, buf);
if (ret < 0)
return ret;
return 0;
}
EXPORT_SYMBOL(drm_dp_pcon_frl_configure_2);
/**
* drm_dp_pcon_reset_frl_config() - Re-Set HDMI Link configuration.
* @aux: DisplayPort AUX channel
*
* Returns 0 if success, else returns negative error code.
*/
int drm_dp_pcon_reset_frl_config(struct drm_dp_aux *aux)
{
int ret;
ret = drm_dp_dpcd_writeb(aux, DP_PCON_HDMI_LINK_CONFIG_1, 0x0);
if (ret < 0)
return ret;
return 0;
}
EXPORT_SYMBOL(drm_dp_pcon_reset_frl_config);
/**
* drm_dp_pcon_frl_enable() - Enable HDMI link through FRL
* @aux: DisplayPort AUX channel
*
* Returns 0 if success, else returns negative error code.
*/
int drm_dp_pcon_frl_enable(struct drm_dp_aux *aux)
{
int ret;
u8 buf = 0;
ret = drm_dp_dpcd_readb(aux, DP_PCON_HDMI_LINK_CONFIG_1, &buf);
if (ret < 0)
return ret;
if (!(buf & DP_PCON_ENABLE_SOURCE_CTL_MODE)) {
drm_dbg_kms(aux->drm_dev, "%s: PCON in Autonomous mode, can't enable FRL\n",
aux->name);
return -EINVAL;
}
buf |= DP_PCON_ENABLE_HDMI_LINK;
ret = drm_dp_dpcd_writeb(aux, DP_PCON_HDMI_LINK_CONFIG_1, buf);
if (ret < 0)
return ret;
return 0;
}
EXPORT_SYMBOL(drm_dp_pcon_frl_enable);
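/*
 * Illustrative sketch, not part of the original file: a source driver would
 * typically chain the PCON FRL helpers above roughly in this order. Error
 * handling, the aux pointer and the max_frl_gbps/max_frl_mask values are
 * assumed to come from the caller, and the DPCD macro names are assumed from
 * the DP header.
 *
 *	ret = drm_dp_pcon_frl_prepare(aux, false);
 *	if (ret < 0)
 *		return ret;
 *
 *	if (!drm_dp_pcon_is_frl_ready(aux))
 *		return -ETIMEDOUT;
 *
 *	ret = drm_dp_pcon_frl_configure_1(aux, max_frl_gbps,
 *					  DP_PCON_ENABLE_SEQUENTIAL_LINK);
 *	if (ret < 0)
 *		return ret;
 *
 *	ret = drm_dp_pcon_frl_configure_2(aux, max_frl_mask,
 *					  DP_PCON_FRL_LINK_TRAIN_NORMAL);
 *	if (ret < 0)
 *		return ret;
 *
 *	return drm_dp_pcon_frl_enable(aux);
 */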
/**
* drm_dp_pcon_hdmi_link_active() - check if the PCON HDMI LINK status is active.
* @aux: DisplayPort AUX channel
*
* Returns true if link is active else returns false.
*/
bool drm_dp_pcon_hdmi_link_active(struct drm_dp_aux *aux)
{
u8 buf;
int ret;
ret = drm_dp_dpcd_readb(aux, DP_PCON_HDMI_TX_LINK_STATUS, &buf);
if (ret < 0)
return false;
return buf & DP_PCON_HDMI_TX_LINK_ACTIVE;
}
EXPORT_SYMBOL(drm_dp_pcon_hdmi_link_active);
/**
* drm_dp_pcon_hdmi_link_mode() - get the PCON HDMI LINK MODE
* @aux: DisplayPort AUX channel
* @frl_trained_mask: pointer to store bitmask of the trained bw configuration.
* Valid only if the MODE returned is FRL. For Normal Link training mode
* only one of the bits will be set, but in case of Extended mode, more than
* one bit can be set.
*
* Returns the link mode : TMDS or FRL on success, else returns negative error
* code.
*/
int drm_dp_pcon_hdmi_link_mode(struct drm_dp_aux *aux, u8 *frl_trained_mask)
{
u8 buf;
int mode;
int ret;
ret = drm_dp_dpcd_readb(aux, DP_PCON_HDMI_POST_FRL_STATUS, &buf);
if (ret < 0)
return ret;
mode = buf & DP_PCON_HDMI_LINK_MODE;
if (frl_trained_mask && DP_PCON_HDMI_MODE_FRL == mode)
*frl_trained_mask = (buf & DP_PCON_HDMI_FRL_TRAINED_BW) >> 1;
return mode;
}
EXPORT_SYMBOL(drm_dp_pcon_hdmi_link_mode);
/**
* drm_dp_pcon_hdmi_frl_link_error_count() - print the error count per lane
* during link failure between PCON and HDMI sink
* @aux: DisplayPort AUX channel
* @connector: DRM connector
*/
void drm_dp_pcon_hdmi_frl_link_error_count(struct drm_dp_aux *aux,
struct drm_connector *connector)
{
u8 buf, error_count;
int i, num_error;
struct drm_hdmi_info *hdmi = &connector->display_info.hdmi;
for (i = 0; i < hdmi->max_lanes; i++) {
if (drm_dp_dpcd_readb(aux, DP_PCON_HDMI_ERROR_STATUS_LN0 + i, &buf) < 0)
return;
error_count = buf & DP_PCON_HDMI_ERROR_COUNT_MASK;
switch (error_count) {
case DP_PCON_HDMI_ERROR_COUNT_HUNDRED_PLUS:
num_error = 100;
break;
case DP_PCON_HDMI_ERROR_COUNT_TEN_PLUS:
num_error = 10;
break;
case DP_PCON_HDMI_ERROR_COUNT_THREE_PLUS:
num_error = 3;
break;
default:
num_error = 0;
}
drm_err(aux->drm_dev, "%s: More than %d errors since the last read for lane %d",
aux->name, num_error, i);
}
}
EXPORT_SYMBOL(drm_dp_pcon_hdmi_frl_link_error_count);
/*
* drm_dp_pcon_enc_is_dsc_1_2 - Does the PCON encoder support DSC 1.2
* @pcon_dsc_dpcd: DSC capabilities of the PCON DSC Encoder
*
* Returns true if the PCON encoder supports DSC 1.2, else returns false.
*/
bool drm_dp_pcon_enc_is_dsc_1_2(const u8 pcon_dsc_dpcd[DP_PCON_DSC_ENCODER_CAP_SIZE])
{
u8 buf;
u8 major_v, minor_v;
buf = pcon_dsc_dpcd[DP_PCON_DSC_VERSION - DP_PCON_DSC_ENCODER];
major_v = (buf & DP_PCON_DSC_MAJOR_MASK) >> DP_PCON_DSC_MAJOR_SHIFT;
minor_v = (buf & DP_PCON_DSC_MINOR_MASK) >> DP_PCON_DSC_MINOR_SHIFT;
if (major_v == 1 && minor_v == 2)
return true;
return false;
}
EXPORT_SYMBOL(drm_dp_pcon_enc_is_dsc_1_2);
/*
* drm_dp_pcon_dsc_max_slices - Get max slices supported by PCON DSC Encoder
* @pcon_dsc_dpcd: DSC capabilities of the PCON DSC Encoder
*
* Returns maximum no. of slices supported by the PCON DSC Encoder.
*/
int drm_dp_pcon_dsc_max_slices(const u8 pcon_dsc_dpcd[DP_PCON_DSC_ENCODER_CAP_SIZE])
{
u8 slice_cap1, slice_cap2;
slice_cap1 = pcon_dsc_dpcd[DP_PCON_DSC_SLICE_CAP_1 - DP_PCON_DSC_ENCODER];
slice_cap2 = pcon_dsc_dpcd[DP_PCON_DSC_SLICE_CAP_2 - DP_PCON_DSC_ENCODER];
if (slice_cap2 & DP_PCON_DSC_24_PER_DSC_ENC)
return 24;
if (slice_cap2 & DP_PCON_DSC_20_PER_DSC_ENC)
return 20;
if (slice_cap2 & DP_PCON_DSC_16_PER_DSC_ENC)
return 16;
if (slice_cap1 & DP_PCON_DSC_12_PER_DSC_ENC)
return 12;
if (slice_cap1 & DP_PCON_DSC_10_PER_DSC_ENC)
return 10;
if (slice_cap1 & DP_PCON_DSC_8_PER_DSC_ENC)
return 8;
if (slice_cap1 & DP_PCON_DSC_6_PER_DSC_ENC)
return 6;
if (slice_cap1 & DP_PCON_DSC_4_PER_DSC_ENC)
return 4;
if (slice_cap1 & DP_PCON_DSC_2_PER_DSC_ENC)
return 2;
if (slice_cap1 & DP_PCON_DSC_1_PER_DSC_ENC)
return 1;
return 0;
}
EXPORT_SYMBOL(drm_dp_pcon_dsc_max_slices);
/*
* drm_dp_pcon_dsc_max_slice_width() - Get max slice width for Pcon DSC encoder
* @pcon_dsc_dpcd: DSC capabilities of the PCON DSC Encoder
*
* Returns the maximum slice width in pixels, i.e. the DPCD field value multiplied by 320.
*/
int drm_dp_pcon_dsc_max_slice_width(const u8 pcon_dsc_dpcd[DP_PCON_DSC_ENCODER_CAP_SIZE])
{
u8 buf;
buf = pcon_dsc_dpcd[DP_PCON_DSC_MAX_SLICE_WIDTH - DP_PCON_DSC_ENCODER];
return buf * DP_DSC_SLICE_WIDTH_MULTIPLIER;
}
EXPORT_SYMBOL(drm_dp_pcon_dsc_max_slice_width);
/*
* drm_dp_pcon_dsc_bpp_incr() - Get bits per pixel increment for PCON DSC encoder
* @pcon_dsc_dpcd: DSC capabilities of the PCON DSC Encoder
*
* Returns the bpp precision supported by the PCON encoder.
*/
int drm_dp_pcon_dsc_bpp_incr(const u8 pcon_dsc_dpcd[DP_PCON_DSC_ENCODER_CAP_SIZE])
{
u8 buf;
buf = pcon_dsc_dpcd[DP_PCON_DSC_BPP_INCR - DP_PCON_DSC_ENCODER];
switch (buf & DP_PCON_DSC_BPP_INCR_MASK) {
case DP_PCON_DSC_ONE_16TH_BPP:
return 16;
case DP_PCON_DSC_ONE_8TH_BPP:
return 8;
case DP_PCON_DSC_ONE_4TH_BPP:
return 4;
case DP_PCON_DSC_ONE_HALF_BPP:
return 2;
case DP_PCON_DSC_ONE_BPP:
return 1;
}
return 0;
}
EXPORT_SYMBOL(drm_dp_pcon_dsc_bpp_incr);
static
int drm_dp_pcon_configure_dsc_enc(struct drm_dp_aux *aux, u8 pps_buf_config)
{
u8 buf;
int ret;
ret = drm_dp_dpcd_readb(aux, DP_PROTOCOL_CONVERTER_CONTROL_2, &buf);
if (ret < 0)
return ret;
buf |= DP_PCON_ENABLE_DSC_ENCODER;
if (pps_buf_config <= DP_PCON_ENC_PPS_OVERRIDE_EN_BUFFER) {
buf &= ~DP_PCON_ENCODER_PPS_OVERRIDE_MASK;
buf |= pps_buf_config << 2;
}
ret = drm_dp_dpcd_writeb(aux, DP_PROTOCOL_CONVERTER_CONTROL_2, buf);
if (ret < 0)
return ret;
return 0;
}
/**
* drm_dp_pcon_pps_default() - Let the PCON fill in the default PPS parameters
* for DSC 1.2 between the PCON and the HDMI 2.1 sink
* @aux: DisplayPort AUX channel
*
* Returns 0 on success, else returns negative error code.
*/
int drm_dp_pcon_pps_default(struct drm_dp_aux *aux)
{
int ret;
ret = drm_dp_pcon_configure_dsc_enc(aux, DP_PCON_ENC_PPS_OVERRIDE_DISABLED);
if (ret < 0)
return ret;
return 0;
}
EXPORT_SYMBOL(drm_dp_pcon_pps_default);
/**
* drm_dp_pcon_pps_override_buf() - Configure PPS encoder override buffer for
* HDMI sink
* @aux: DisplayPort AUX channel
* @pps_buf: 128 bytes to be written into PPS buffer for HDMI sink by PCON.
*
* Returns 0 on success, else returns negative error code.
*/
int drm_dp_pcon_pps_override_buf(struct drm_dp_aux *aux, u8 pps_buf[128])
{
int ret;
ret = drm_dp_dpcd_write(aux, DP_PCON_HDMI_PPS_OVERRIDE_BASE, pps_buf, 128);
if (ret < 0)
return ret;
ret = drm_dp_pcon_configure_dsc_enc(aux, DP_PCON_ENC_PPS_OVERRIDE_EN_BUFFER);
if (ret < 0)
return ret;
return 0;
}
EXPORT_SYMBOL(drm_dp_pcon_pps_override_buf);
/*
* drm_dp_pcon_pps_override_param() - Write PPS parameters to DSC encoder
* override registers
* @aux: DisplayPort AUX channel
* @pps_param: 3 Parameters (2 Bytes each) : Slice Width, Slice Height,
* bits_per_pixel.
*
* Returns 0 on success, else returns negative error code.
*/
int drm_dp_pcon_pps_override_param(struct drm_dp_aux *aux, u8 pps_param[6])
{
int ret;
ret = drm_dp_dpcd_write(aux, DP_PCON_HDMI_PPS_OVRD_SLICE_HEIGHT, &pps_param[0], 2);
if (ret < 0)
return ret;
ret = drm_dp_dpcd_write(aux, DP_PCON_HDMI_PPS_OVRD_SLICE_WIDTH, &pps_param[2], 2);
if (ret < 0)
return ret;
ret = drm_dp_dpcd_write(aux, DP_PCON_HDMI_PPS_OVRD_BPP, &pps_param[4], 2);
if (ret < 0)
return ret;
ret = drm_dp_pcon_configure_dsc_enc(aux, DP_PCON_ENC_PPS_OVERRIDE_EN_BUFFER);
if (ret < 0)
return ret;
return 0;
}
EXPORT_SYMBOL(drm_dp_pcon_pps_override_param);
/*
* drm_dp_pcon_convert_rgb_to_ycbcr() - Configure the PCON to convert RGB to YCbCr
* @aux: DisplayPort AUX channel
* @color_spc: Color space(s) for which conversion is to be enabled, 0 to disable.
*
* Returns 0 on success, else returns negative error code.
*/
int drm_dp_pcon_convert_rgb_to_ycbcr(struct drm_dp_aux *aux, u8 color_spc)
{
int ret;
u8 buf;
ret = drm_dp_dpcd_readb(aux, DP_PROTOCOL_CONVERTER_CONTROL_2, &buf);
if (ret < 0)
return ret;
if (color_spc & DP_CONVERSION_RGB_YCBCR_MASK)
buf |= (color_spc & DP_CONVERSION_RGB_YCBCR_MASK);
else
buf &= ~DP_CONVERSION_RGB_YCBCR_MASK;
ret = drm_dp_dpcd_writeb(aux, DP_PROTOCOL_CONVERTER_CONTROL_2, buf);
if (ret < 0)
return ret;
return 0;
}
EXPORT_SYMBOL(drm_dp_pcon_convert_rgb_to_ycbcr);
/**
* drm_edp_backlight_set_level() - Set the backlight level of an eDP panel via AUX
* @aux: The DP AUX channel to use
* @bl: Backlight capability info from drm_edp_backlight_init()
* @level: The brightness level to set
*
* Sets the brightness level of an eDP panel's backlight. Note that the panel's backlight must
* already have been enabled by the driver by calling drm_edp_backlight_enable().
*
* Returns: %0 on success, negative error code on failure
*/
int drm_edp_backlight_set_level(struct drm_dp_aux *aux, const struct drm_edp_backlight_info *bl,
u16 level)
{
int ret;
u8 buf[2] = { 0 };
/* The panel uses the PWM for controlling brightness levels */
if (!bl->aux_set)
return 0;
if (bl->lsb_reg_used) {
buf[0] = (level & 0xff00) >> 8;
buf[1] = (level & 0x00ff);
} else {
buf[0] = level;
}
ret = drm_dp_dpcd_write(aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB, buf, sizeof(buf));
if (ret != sizeof(buf)) {
drm_err(aux->drm_dev,
"%s: Failed to write aux backlight level: %d\n",
aux->name, ret);
return ret < 0 ? ret : -EIO;
}
return 0;
}
EXPORT_SYMBOL(drm_edp_backlight_set_level);
static int
drm_edp_backlight_set_enable(struct drm_dp_aux *aux, const struct drm_edp_backlight_info *bl,
bool enable)
{
int ret;
u8 buf;
/* This panel uses the EDP_BL_PWR GPIO for enablement */
if (!bl->aux_enable)
return 0;
ret = drm_dp_dpcd_readb(aux, DP_EDP_DISPLAY_CONTROL_REGISTER, &buf);
if (ret != 1) {
drm_err(aux->drm_dev, "%s: Failed to read eDP display control register: %d\n",
aux->name, ret);
return ret < 0 ? ret : -EIO;
}
if (enable)
buf |= DP_EDP_BACKLIGHT_ENABLE;
else
buf &= ~DP_EDP_BACKLIGHT_ENABLE;
ret = drm_dp_dpcd_writeb(aux, DP_EDP_DISPLAY_CONTROL_REGISTER, buf);
if (ret != 1) {
drm_err(aux->drm_dev, "%s: Failed to write eDP display control register: %d\n",
aux->name, ret);
return ret < 0 ? ret : -EIO;
}
return 0;
}
/**
* drm_edp_backlight_enable() - Enable an eDP panel's backlight using DPCD
* @aux: The DP AUX channel to use
* @bl: Backlight capability info from drm_edp_backlight_init()
* @level: The initial backlight level to set via AUX, if there is one
*
* This function handles enabling DPCD backlight controls on a panel over DPCD, while additionally
* restoring any important backlight state such as the given backlight level, the brightness byte
* count, backlight frequency, etc.
*
* Note that certain panels do not support being enabled or disabled via DPCD, but instead require
* that the driver handle enabling/disabling the panel through implementation-specific means using
* the EDP_BL_PWR GPIO. For such panels, &drm_edp_backlight_info.aux_enable will be set to %false,
* this function becomes a no-op, and the driver is expected to handle powering the panel on using
* the EDP_BL_PWR GPIO.
*
* Returns: %0 on success, negative error code on failure.
*/
int drm_edp_backlight_enable(struct drm_dp_aux *aux, const struct drm_edp_backlight_info *bl,
const u16 level)
{
int ret;
u8 dpcd_buf;
if (bl->aux_set)
dpcd_buf = DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD;
else
dpcd_buf = DP_EDP_BACKLIGHT_CONTROL_MODE_PWM;
if (bl->pwmgen_bit_count) {
ret = drm_dp_dpcd_writeb(aux, DP_EDP_PWMGEN_BIT_COUNT, bl->pwmgen_bit_count);
if (ret != 1)
drm_dbg_kms(aux->drm_dev, "%s: Failed to write aux pwmgen bit count: %d\n",
aux->name, ret);
}
if (bl->pwm_freq_pre_divider) {
ret = drm_dp_dpcd_writeb(aux, DP_EDP_BACKLIGHT_FREQ_SET, bl->pwm_freq_pre_divider);
if (ret != 1)
drm_dbg_kms(aux->drm_dev,
"%s: Failed to write aux backlight frequency: %d\n",
aux->name, ret);
else
dpcd_buf |= DP_EDP_BACKLIGHT_FREQ_AUX_SET_ENABLE;
}
ret = drm_dp_dpcd_writeb(aux, DP_EDP_BACKLIGHT_MODE_SET_REGISTER, dpcd_buf);
if (ret != 1) {
drm_dbg_kms(aux->drm_dev, "%s: Failed to write aux backlight mode: %d\n",
aux->name, ret);
return ret < 0 ? ret : -EIO;
}
ret = drm_edp_backlight_set_level(aux, bl, level);
if (ret < 0)
return ret;
ret = drm_edp_backlight_set_enable(aux, bl, true);
if (ret < 0)
return ret;
return 0;
}
EXPORT_SYMBOL(drm_edp_backlight_enable);
/**
* drm_edp_backlight_disable() - Disable an eDP backlight using DPCD, if supported
* @aux: The DP AUX channel to use
* @bl: Backlight capability info from drm_edp_backlight_init()
*
* This function handles disabling DPCD backlight controls on a panel over AUX.
*
* Note that certain panels do not support being enabled or disabled via DPCD, but instead require
* that the driver handle enabling/disabling the panel through implementation-specific means using
* the EDP_BL_PWR GPIO. For such panels, &drm_edp_backlight_info.aux_enable will be set to %false,
* this function becomes a no-op, and the driver is expected to handle powering the panel off using
* the EDP_BL_PWR GPIO.
*
* Returns: %0 on success or no-op, negative error code on failure.
*/
int drm_edp_backlight_disable(struct drm_dp_aux *aux, const struct drm_edp_backlight_info *bl)
{
int ret;
ret = drm_edp_backlight_set_enable(aux, bl, false);
if (ret < 0)
return ret;
return 0;
}
EXPORT_SYMBOL(drm_edp_backlight_disable);
static inline int
drm_edp_backlight_probe_max(struct drm_dp_aux *aux, struct drm_edp_backlight_info *bl,
u16 driver_pwm_freq_hz, const u8 edp_dpcd[EDP_DISPLAY_CTL_CAP_SIZE])
{
int fxp, fxp_min, fxp_max, fxp_actual, f = 1;
int ret;
u8 pn, pn_min, pn_max;
if (!bl->aux_set)
return 0;
ret = drm_dp_dpcd_readb(aux, DP_EDP_PWMGEN_BIT_COUNT, &pn);
if (ret != 1) {
drm_dbg_kms(aux->drm_dev, "%s: Failed to read pwmgen bit count cap: %d\n",
aux->name, ret);
return -ENODEV;
}
pn &= DP_EDP_PWMGEN_BIT_COUNT_MASK;
bl->max = (1 << pn) - 1;
if (!driver_pwm_freq_hz)
return 0;
/*
* Set PWM Frequency divider to match desired frequency provided by the driver.
* The PWM Frequency is calculated as 27Mhz / (F x P).
* - Where F = PWM Frequency Pre-Divider value programmed by field 7:0 of the
* EDP_BACKLIGHT_FREQ_SET register (DPCD Address 00728h)
* - Where P = 2^Pn, where Pn is the value programmed by field 4:0 of the
* EDP_PWMGEN_BIT_COUNT register (DPCD Address 00724h)
*/
/* Find desired value of (F x P)
* Note that, if F x P is out of the supported range, the maximum or minimum value will
* be applied automatically, so there is no need to check for that here.
*/
fxp = DIV_ROUND_CLOSEST(1000 * DP_EDP_BACKLIGHT_FREQ_BASE_KHZ, driver_pwm_freq_hz);
/* Use highest possible value of Pn for more granularity of brightness adjustment while
* satisfying the conditions below.
* - Pn is in the range of Pn_min and Pn_max
* - F is in the range of 1 and 255
* - FxP is within 25% of desired value.
* Note: 25% is an arbitrary value and may need some tweaking.
*/
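/*
 * Worked example (illustrative, numbers assumed): for driver_pwm_freq_hz =
 * 20000 Hz the desired F x P is 27,000,000 / 20,000 = 1350. If the panel
 * reports Pn_max = 8, the loop below settles on Pn = 8 (P = 256) with F = 5,
 * i.e. F x P = 1280 and an actual PWM frequency of roughly 21.1 kHz, which
 * is within the 25% tolerance checked below.
 */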
ret = drm_dp_dpcd_readb(aux, DP_EDP_PWMGEN_BIT_COUNT_CAP_MIN, &pn_min);
if (ret != 1) {
drm_dbg_kms(aux->drm_dev, "%s: Failed to read pwmgen bit count cap min: %d\n",
aux->name, ret);
return 0;
}
ret = drm_dp_dpcd_readb(aux, DP_EDP_PWMGEN_BIT_COUNT_CAP_MAX, &pn_max);
if (ret != 1) {
drm_dbg_kms(aux->drm_dev, "%s: Failed to read pwmgen bit count cap max: %d\n",
aux->name, ret);
return 0;
}
pn_min &= DP_EDP_PWMGEN_BIT_COUNT_MASK;
pn_max &= DP_EDP_PWMGEN_BIT_COUNT_MASK;
/* Ensure frequency is within 25% of desired value */
fxp_min = DIV_ROUND_CLOSEST(fxp * 3, 4);
fxp_max = DIV_ROUND_CLOSEST(fxp * 5, 4);
if (fxp_min < (1 << pn_min) || (255 << pn_max) < fxp_max) {
drm_dbg_kms(aux->drm_dev,
"%s: Driver defined backlight frequency (%d) out of range\n",
aux->name, driver_pwm_freq_hz);
return 0;
}
for (pn = pn_max; pn >= pn_min; pn--) {
f = clamp(DIV_ROUND_CLOSEST(fxp, 1 << pn), 1, 255);
fxp_actual = f << pn;
if (fxp_min <= fxp_actual && fxp_actual <= fxp_max)
break;
}
ret = drm_dp_dpcd_writeb(aux, DP_EDP_PWMGEN_BIT_COUNT, pn);
if (ret != 1) {
drm_dbg_kms(aux->drm_dev, "%s: Failed to write aux pwmgen bit count: %d\n",
aux->name, ret);
return 0;
}
bl->pwmgen_bit_count = pn;
bl->max = (1 << pn) - 1;
if (edp_dpcd[2] & DP_EDP_BACKLIGHT_FREQ_AUX_SET_CAP) {
bl->pwm_freq_pre_divider = f;
drm_dbg_kms(aux->drm_dev, "%s: Using backlight frequency from driver (%dHz)\n",
aux->name, driver_pwm_freq_hz);
}
return 0;
}
static inline int
drm_edp_backlight_probe_state(struct drm_dp_aux *aux, struct drm_edp_backlight_info *bl,
u8 *current_mode)
{
int ret;
u8 buf[2];
u8 mode_reg;
ret = drm_dp_dpcd_readb(aux, DP_EDP_BACKLIGHT_MODE_SET_REGISTER, &mode_reg);
if (ret != 1) {
drm_dbg_kms(aux->drm_dev, "%s: Failed to read backlight mode: %d\n",
aux->name, ret);
return ret < 0 ? ret : -EIO;
}
*current_mode = (mode_reg & DP_EDP_BACKLIGHT_CONTROL_MODE_MASK);
if (!bl->aux_set)
return 0;
if (*current_mode == DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD) {
int size = 1 + bl->lsb_reg_used;
ret = drm_dp_dpcd_read(aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB, buf, size);
if (ret != size) {
drm_dbg_kms(aux->drm_dev, "%s: Failed to read backlight level: %d\n",
aux->name, ret);
return ret < 0 ? ret : -EIO;
}
if (bl->lsb_reg_used)
return (buf[0] << 8) | buf[1];
else
return buf[0];
}
/*
* If we're not in DPCD control mode yet, the programmed brightness value is meaningless and
* the driver should assume max brightness
*/
return bl->max;
}
/**
* drm_edp_backlight_init() - Probe a display panel's TCON using the standard VESA eDP backlight
* interface.
* @aux: The DP aux device to use for probing
* @bl: The &drm_edp_backlight_info struct to fill out with information on the backlight
* @driver_pwm_freq_hz: Optional PWM frequency from the driver in hz
* @edp_dpcd: A cached copy of the eDP DPCD
* @current_level: Where to store the probed brightness level, if any
* @current_mode: Where to store the currently set backlight control mode
*
* Initializes a &drm_edp_backlight_info struct by probing @aux for its backlight capabilities,
* along with also probing the current and maximum supported brightness levels.
*
* If @driver_pwm_freq_hz is non-zero, this will be used as the backlight frequency. Otherwise, the
* default frequency from the panel is used.
*
* Returns: %0 on success, negative error code on failure.
*/
int
drm_edp_backlight_init(struct drm_dp_aux *aux, struct drm_edp_backlight_info *bl,
u16 driver_pwm_freq_hz, const u8 edp_dpcd[EDP_DISPLAY_CTL_CAP_SIZE],
u16 *current_level, u8 *current_mode)
{
int ret;
if (edp_dpcd[1] & DP_EDP_BACKLIGHT_AUX_ENABLE_CAP)
bl->aux_enable = true;
if (edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_AUX_SET_CAP)
bl->aux_set = true;
if (edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT)
bl->lsb_reg_used = true;
/* Sanity check caps */
if (!bl->aux_set && !(edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_PWM_PIN_CAP)) {
drm_dbg_kms(aux->drm_dev,
"%s: Panel supports neither AUX or PWM brightness control? Aborting\n",
aux->name);
return -EINVAL;
}
ret = drm_edp_backlight_probe_max(aux, bl, driver_pwm_freq_hz, edp_dpcd);
if (ret < 0)
return ret;
ret = drm_edp_backlight_probe_state(aux, bl, current_mode);
if (ret < 0)
return ret;
*current_level = ret;
drm_dbg_kms(aux->drm_dev,
"%s: Found backlight: aux_set=%d aux_enable=%d mode=%d\n",
aux->name, bl->aux_set, bl->aux_enable, *current_mode);
if (bl->aux_set) {
drm_dbg_kms(aux->drm_dev,
"%s: Backlight caps: level=%d/%d pwm_freq_pre_divider=%d lsb_reg_used=%d\n",
aux->name, *current_level, bl->max, bl->pwm_freq_pre_divider,
bl->lsb_reg_used);
}
return 0;
}
EXPORT_SYMBOL(drm_edp_backlight_init);
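/*
 * Illustrative usage sketch, not part of the original file: a driver with a
 * cached edp_dpcd buffer would typically probe the backlight once and then
 * drive it through the helpers above. Variable names are assumptions.
 *
 *	struct drm_edp_backlight_info bl = {};
 *	u16 current_level;
 *	u8 current_mode;
 *
 *	ret = drm_edp_backlight_init(aux, &bl, 0, edp_dpcd,
 *				     &current_level, &current_mode);
 *	if (ret < 0)
 *		return ret;
 *
 *	drm_edp_backlight_enable(aux, &bl, current_level);
 *	drm_edp_backlight_set_level(aux, &bl, new_level);
 *	drm_edp_backlight_disable(aux, &bl);
 */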
#if IS_BUILTIN(CONFIG_BACKLIGHT_CLASS_DEVICE) || \
(IS_MODULE(CONFIG_DRM_KMS_HELPER) && IS_MODULE(CONFIG_BACKLIGHT_CLASS_DEVICE))
static int dp_aux_backlight_update_status(struct backlight_device *bd)
{
struct dp_aux_backlight *bl = bl_get_data(bd);
u16 brightness = backlight_get_brightness(bd);
int ret = 0;
if (!backlight_is_blank(bd)) {
if (!bl->enabled) {
drm_edp_backlight_enable(bl->aux, &bl->info, brightness);
bl->enabled = true;
return 0;
}
ret = drm_edp_backlight_set_level(bl->aux, &bl->info, brightness);
} else {
if (bl->enabled) {
drm_edp_backlight_disable(bl->aux, &bl->info);
bl->enabled = false;
}
}
return ret;
}
static const struct backlight_ops dp_aux_bl_ops = {
.update_status = dp_aux_backlight_update_status,
};
/**
* drm_panel_dp_aux_backlight - create and use DP AUX backlight
* @panel: DRM panel
* @aux: The DP AUX channel to use
*
* Use this function to create and handle backlight if your panel
* supports backlight control over DP AUX channel using DPCD
* registers as per VESA's standard backlight control interface.
*
* When the panel is enabled, the backlight will be enabled after a
* successful call to &drm_panel_funcs.enable().
*
* When the panel is disabled, the backlight will be disabled before the
* call to &drm_panel_funcs.disable().
*
* A typical implementation for a panel driver supporting backlight
* control over DP AUX will call this function at probe time.
* Backlight will then be handled transparently without requiring
* any intervention from the driver.
*
* drm_panel_dp_aux_backlight() must be called after the call to drm_panel_init().
*
* Return: 0 on success or a negative error code on failure.
*/
int drm_panel_dp_aux_backlight(struct drm_panel *panel, struct drm_dp_aux *aux)
{
struct dp_aux_backlight *bl;
struct backlight_properties props = { 0 };
u16 current_level;
u8 current_mode;
u8 edp_dpcd[EDP_DISPLAY_CTL_CAP_SIZE];
int ret;
if (!panel || !panel->dev || !aux)
return -EINVAL;
ret = drm_dp_dpcd_read(aux, DP_EDP_DPCD_REV, edp_dpcd,
EDP_DISPLAY_CTL_CAP_SIZE);
if (ret < 0)
return ret;
if (!drm_edp_backlight_supported(edp_dpcd)) {
DRM_DEV_INFO(panel->dev, "DP AUX backlight is not supported\n");
return 0;
}
bl = devm_kzalloc(panel->dev, sizeof(*bl), GFP_KERNEL);
if (!bl)
return -ENOMEM;
bl->aux = aux;
ret = drm_edp_backlight_init(aux, &bl->info, 0, edp_dpcd,
&current_level, &current_mode);
if (ret < 0)
return ret;
props.type = BACKLIGHT_RAW;
props.brightness = current_level;
props.max_brightness = bl->info.max;
bl->base = devm_backlight_device_register(panel->dev, "dp_aux_backlight",
panel->dev, bl,
&dp_aux_bl_ops, &props);
if (IS_ERR(bl->base))
return PTR_ERR(bl->base);
backlight_disable(bl->base);
panel->backlight = bl->base;
return 0;
}
EXPORT_SYMBOL(drm_panel_dp_aux_backlight);
#endif
| linux-master | drivers/gpu/drm/display/drm_dp_helper.c |
/*
* Copyright (c) 2015 NVIDIA Corporation. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sub license,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <drm/display/drm_scdc_helper.h>
#include <drm/drm_connector.h>
#include <drm/drm_device.h>
#include <drm/drm_print.h>
/**
* DOC: scdc helpers
*
* Status and Control Data Channel (SCDC) is a mechanism introduced by the
* HDMI 2.0 specification. It is a point-to-point protocol that allows the
* HDMI source and HDMI sink to exchange data. The same I2C interface that
* is used to access EDID serves as the transport mechanism for SCDC.
*
* Note: The SCDC status is going to be lost when the display is
* disconnected. This can happen physically when the user disconnects
* the cable, but also when a display is switched on (such as waking up
* a TV).
*
* This is further complicated by the fact that, upon a disconnection /
* reconnection, KMS won't change the mode on its own. This means that
* one can't just rely on setting the SCDC status on enable, but also
* has to track the connector status changes using interrupts and
* restore the SCDC status. The typical solution for this is to trigger an
* empty modeset in drm_connector_helper_funcs.detect_ctx(), like what vc4 does
* in vc4_hdmi_reset_link().
*/
#define SCDC_I2C_SLAVE_ADDRESS 0x54
/**
* drm_scdc_read - read a block of data from SCDC
* @adapter: I2C controller
* @offset: start offset of block to read
* @buffer: return location for the block to read
* @size: size of the block to read
*
* Reads a block of data from SCDC, starting at a given offset.
*
* Returns:
* 0 on success, negative error code on failure.
*/
ssize_t drm_scdc_read(struct i2c_adapter *adapter, u8 offset, void *buffer,
size_t size)
{
int ret;
struct i2c_msg msgs[2] = {
{
.addr = SCDC_I2C_SLAVE_ADDRESS,
.flags = 0,
.len = 1,
.buf = &offset,
}, {
.addr = SCDC_I2C_SLAVE_ADDRESS,
.flags = I2C_M_RD,
.len = size,
.buf = buffer,
}
};
ret = i2c_transfer(adapter, msgs, ARRAY_SIZE(msgs));
if (ret < 0)
return ret;
if (ret != ARRAY_SIZE(msgs))
return -EPROTO;
return 0;
}
EXPORT_SYMBOL(drm_scdc_read);
/**
* drm_scdc_write - write a block of data to SCDC
* @adapter: I2C controller
* @offset: start offset of block to write
* @buffer: block of data to write
* @size: size of the block to write
*
* Writes a block of data to SCDC, starting at a given offset.
*
* Returns:
* 0 on success, negative error code on failure.
*/
ssize_t drm_scdc_write(struct i2c_adapter *adapter, u8 offset,
const void *buffer, size_t size)
{
struct i2c_msg msg = {
.addr = SCDC_I2C_SLAVE_ADDRESS,
.flags = 0,
.len = 1 + size,
.buf = NULL,
};
void *data;
int err;
data = kmalloc(1 + size, GFP_KERNEL);
if (!data)
return -ENOMEM;
msg.buf = data;
memcpy(data, &offset, sizeof(offset));
memcpy(data + 1, buffer, size);
err = i2c_transfer(adapter, &msg, 1);
kfree(data);
if (err < 0)
return err;
if (err != 1)
return -EPROTO;
return 0;
}
EXPORT_SYMBOL(drm_scdc_write);
/**
* drm_scdc_get_scrambling_status - what is the status of scrambling?
* @connector: connector
*
* Reads the scrambler status over SCDC, and checks the
* scrambling status.
*
* Returns:
* True if the scrambling is enabled, false otherwise.
*/
bool drm_scdc_get_scrambling_status(struct drm_connector *connector)
{
u8 status;
int ret;
ret = drm_scdc_readb(connector->ddc, SCDC_SCRAMBLER_STATUS, &status);
if (ret < 0) {
drm_dbg_kms(connector->dev,
"[CONNECTOR:%d:%s] Failed to read scrambling status: %d\n",
connector->base.id, connector->name, ret);
return false;
}
return status & SCDC_SCRAMBLING_STATUS;
}
EXPORT_SYMBOL(drm_scdc_get_scrambling_status);
/**
* drm_scdc_set_scrambling - enable scrambling
* @connector: connector
* @enable: bool to indicate if scrambling is to be enabled/disabled
*
* Writes the TMDS config register over SCDC channel, and:
* enables scrambling when enable = 1
* disables scrambling when enable = 0
*
* Returns:
* True if scrambling is set/reset successfully, false otherwise.
*/
bool drm_scdc_set_scrambling(struct drm_connector *connector,
bool enable)
{
u8 config;
int ret;
ret = drm_scdc_readb(connector->ddc, SCDC_TMDS_CONFIG, &config);
if (ret < 0) {
drm_dbg_kms(connector->dev,
"[CONNECTOR:%d:%s] Failed to read TMDS config: %d\n",
connector->base.id, connector->name, ret);
return false;
}
if (enable)
config |= SCDC_SCRAMBLING_ENABLE;
else
config &= ~SCDC_SCRAMBLING_ENABLE;
ret = drm_scdc_writeb(connector->ddc, SCDC_TMDS_CONFIG, config);
if (ret < 0) {
drm_dbg_kms(connector->dev,
"[CONNECTOR:%d:%s] Failed to enable scrambling: %d\n",
connector->base.id, connector->name, ret);
return false;
}
return true;
}
EXPORT_SYMBOL(drm_scdc_set_scrambling);
/**
* drm_scdc_set_high_tmds_clock_ratio - set TMDS clock ratio
* @connector: connector
* @set: set or reset the high clock ratio
*
*
* TMDS clock ratio calculations go like this:
* TMDS character = 10 bit TMDS encoded value
*
* TMDS character rate = The rate at which TMDS characters are
* transmitted (Mcsc)
*
* TMDS bit rate = 10x TMDS character rate
*
* As per the spec:
* TMDS clock rate for pixel clock < 340 MHz = 1x the character
* rate = 1/10 of the TMDS bit rate
*
* TMDS clock rate for pixel clock > 340 MHz = 0.25x the character
* rate = 1/40 of the TMDS bit rate
*
* Writes to the TMDS config register over SCDC channel, and:
* sets TMDS clock ratio to 1/40 when set = 1
*
* sets TMDS clock ratio to 1/10 when set = 0
*
* Returns:
* True if write is successful, false otherwise.
*/
bool drm_scdc_set_high_tmds_clock_ratio(struct drm_connector *connector,
bool set)
{
u8 config;
int ret;
ret = drm_scdc_readb(connector->ddc, SCDC_TMDS_CONFIG, &config);
if (ret < 0) {
drm_dbg_kms(connector->dev,
"[CONNECTOR:%d:%s] Failed to read TMDS config: %d\n",
connector->base.id, connector->name, ret);
return false;
}
if (set)
config |= SCDC_TMDS_BIT_CLOCK_RATIO_BY_40;
else
config &= ~SCDC_TMDS_BIT_CLOCK_RATIO_BY_40;
ret = drm_scdc_writeb(connector->ddc, SCDC_TMDS_CONFIG, config);
if (ret < 0) {
drm_dbg_kms(connector->dev,
"[CONNECTOR:%d:%s] Failed to set TMDS clock ratio: %d\n",
connector->base.id, connector->name, ret);
return false;
}
/*
* The spec says that a source should wait a minimum of 1 ms and a maximum of
* 100 ms after writing the TMDS config for clock ratio. Let's allow a
* wait of up to 2ms here.
*/
usleep_range(1000, 2000);
return true;
}
EXPORT_SYMBOL(drm_scdc_set_high_tmds_clock_ratio);
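/*
 * Illustrative sketch, not part of the original file: an HDMI encoder driver
 * enabling a mode whose TMDS character rate exceeds 340 MHz would typically
 * flip the clock ratio and scrambling together, and clear both again for
 * lower rates. The tmds_rate value is assumed to come from the driver's mode
 * setup; checks for sink SCDC support are omitted.
 *
 *	if (tmds_rate > 340 * 1000 * 1000) {
 *		drm_scdc_set_high_tmds_clock_ratio(connector, true);
 *		drm_scdc_set_scrambling(connector, true);
 *	} else {
 *		drm_scdc_set_high_tmds_clock_ratio(connector, false);
 *		drm_scdc_set_scrambling(connector, false);
 *	}
 */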
| linux-master | drivers/gpu/drm/display/drm_scdc_helper.c |
// SPDX-License-Identifier: GPL-2.0
//
// Ingenic JZ47xx KMS driver
//
// Copyright (C) 2019, Paul Cercueil <[email protected]>
#include "ingenic-drm.h"
#include <linux/bitfield.h>
#include <linux/component.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/media-bus-format.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/regmap.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_bridge_connector.h>
#include <drm/drm_color_mgmt.h>
#include <drm/drm_crtc.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_encoder.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fbdev_generic.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
#include <drm/drm_plane.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
#define HWDESC_PALETTE 2
struct ingenic_dma_hwdesc {
u32 next;
u32 addr;
u32 id;
u32 cmd;
/* extended hw descriptor for jz4780 */
u32 offsize;
u32 pagewidth;
u32 cpos;
u32 dessize;
} __aligned(16);
struct ingenic_dma_hwdescs {
struct ingenic_dma_hwdesc hwdesc[3];
u16 palette[256] __aligned(16);
};
struct jz_soc_info {
bool needs_dev_clk;
bool has_osd;
bool has_alpha;
bool map_noncoherent;
bool use_extended_hwdesc;
bool plane_f0_not_working;
u32 max_burst;
unsigned int max_width, max_height;
const u32 *formats_f0, *formats_f1;
unsigned int num_formats_f0, num_formats_f1;
};
struct ingenic_drm_private_state {
struct drm_private_state base;
bool use_palette;
};
struct ingenic_drm {
struct drm_device drm;
/*
* f1 (aka. foreground1) is our primary plane, on top of which
* f0 (aka. foreground0) can be overlaid. Z-order is fixed in
* hardware and cannot be changed.
*/
struct drm_plane f0, f1, *ipu_plane;
struct drm_crtc crtc;
struct device *dev;
struct regmap *map;
struct clk *lcd_clk, *pix_clk;
const struct jz_soc_info *soc_info;
struct ingenic_dma_hwdescs *dma_hwdescs;
dma_addr_t dma_hwdescs_phys;
bool panel_is_sharp;
bool no_vblank;
/*
* clk_mutex is used to synchronize the pixel clock rate update with
* the VBLANK. When the pixel clock's parent clock needs to be updated,
* clock_nb's notifier function will lock the mutex, then wait until the
* next VBLANK. At that point, the parent clock's rate can be updated,
* and the mutex is then unlocked. If an atomic commit happens in the
* meantime, it will lock on the mutex, effectively waiting until the
* clock update process finishes. Finally, the pixel clock's rate will
* be recomputed when the mutex has been released, in the pending atomic
* commit, or a future one.
*/
struct mutex clk_mutex;
bool update_clk_rate;
struct notifier_block clock_nb;
struct drm_private_obj private_obj;
};
struct ingenic_drm_bridge {
struct drm_encoder encoder;
struct drm_bridge bridge, *next_bridge;
struct drm_bus_cfg bus_cfg;
};
static inline struct ingenic_drm_bridge *
to_ingenic_drm_bridge(struct drm_encoder *encoder)
{
return container_of(encoder, struct ingenic_drm_bridge, encoder);
}
static inline struct ingenic_drm_private_state *
to_ingenic_drm_priv_state(struct drm_private_state *state)
{
return container_of(state, struct ingenic_drm_private_state, base);
}
static struct ingenic_drm_private_state *
ingenic_drm_get_priv_state(struct ingenic_drm *priv, struct drm_atomic_state *state)
{
struct drm_private_state *priv_state;
priv_state = drm_atomic_get_private_obj_state(state, &priv->private_obj);
if (IS_ERR(priv_state))
return ERR_CAST(priv_state);
return to_ingenic_drm_priv_state(priv_state);
}
static struct ingenic_drm_private_state *
ingenic_drm_get_new_priv_state(struct ingenic_drm *priv, struct drm_atomic_state *state)
{
struct drm_private_state *priv_state;
priv_state = drm_atomic_get_new_private_obj_state(state, &priv->private_obj);
if (!priv_state)
return NULL;
return to_ingenic_drm_priv_state(priv_state);
}
static bool ingenic_drm_writeable_reg(struct device *dev, unsigned int reg)
{
switch (reg) {
case JZ_REG_LCD_IID:
case JZ_REG_LCD_SA0:
case JZ_REG_LCD_FID0:
case JZ_REG_LCD_CMD0:
case JZ_REG_LCD_SA1:
case JZ_REG_LCD_FID1:
case JZ_REG_LCD_CMD1:
return false;
default:
return true;
}
}
static const struct regmap_config ingenic_drm_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
.writeable_reg = ingenic_drm_writeable_reg,
};
static inline struct ingenic_drm *drm_device_get_priv(struct drm_device *drm)
{
return container_of(drm, struct ingenic_drm, drm);
}
static inline struct ingenic_drm *drm_crtc_get_priv(struct drm_crtc *crtc)
{
return container_of(crtc, struct ingenic_drm, crtc);
}
static inline struct ingenic_drm *drm_nb_get_priv(struct notifier_block *nb)
{
return container_of(nb, struct ingenic_drm, clock_nb);
}
static inline dma_addr_t dma_hwdesc_addr(const struct ingenic_drm *priv,
unsigned int idx)
{
u32 offset = offsetof(struct ingenic_dma_hwdescs, hwdesc[idx]);
return priv->dma_hwdescs_phys + offset;
}
static int ingenic_drm_update_pixclk(struct notifier_block *nb,
unsigned long action,
void *data)
{
struct ingenic_drm *priv = drm_nb_get_priv(nb);
switch (action) {
case PRE_RATE_CHANGE:
mutex_lock(&priv->clk_mutex);
priv->update_clk_rate = true;
drm_crtc_wait_one_vblank(&priv->crtc);
return NOTIFY_OK;
default:
mutex_unlock(&priv->clk_mutex);
return NOTIFY_OK;
}
}
static void ingenic_drm_bridge_atomic_enable(struct drm_bridge *bridge,
struct drm_bridge_state *old_bridge_state)
{
struct ingenic_drm *priv = drm_device_get_priv(bridge->dev);
regmap_write(priv->map, JZ_REG_LCD_STATE, 0);
regmap_update_bits(priv->map, JZ_REG_LCD_CTRL,
JZ_LCD_CTRL_ENABLE | JZ_LCD_CTRL_DISABLE,
JZ_LCD_CTRL_ENABLE);
}
static void ingenic_drm_crtc_atomic_enable(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct ingenic_drm *priv = drm_crtc_get_priv(crtc);
struct ingenic_drm_private_state *priv_state;
unsigned int next_id;
priv_state = ingenic_drm_get_priv_state(priv, state);
if (WARN_ON(IS_ERR(priv_state)))
return;
/* Set addresses of our DMA descriptor chains */
next_id = priv_state->use_palette ? HWDESC_PALETTE : 0;
regmap_write(priv->map, JZ_REG_LCD_DA0, dma_hwdesc_addr(priv, next_id));
regmap_write(priv->map, JZ_REG_LCD_DA1, dma_hwdesc_addr(priv, 1));
drm_crtc_vblank_on(crtc);
}
static void ingenic_drm_bridge_atomic_disable(struct drm_bridge *bridge,
struct drm_bridge_state *old_bridge_state)
{
struct ingenic_drm *priv = drm_device_get_priv(bridge->dev);
unsigned int var;
regmap_update_bits(priv->map, JZ_REG_LCD_CTRL,
JZ_LCD_CTRL_DISABLE, JZ_LCD_CTRL_DISABLE);
regmap_read_poll_timeout(priv->map, JZ_REG_LCD_STATE, var,
var & JZ_LCD_STATE_DISABLED,
1000, 0);
}
static void ingenic_drm_crtc_atomic_disable(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
drm_crtc_vblank_off(crtc);
}
static void ingenic_drm_crtc_update_timings(struct ingenic_drm *priv,
struct drm_display_mode *mode)
{
unsigned int vpe, vds, vde, vt, hpe, hds, hde, ht;
vpe = mode->crtc_vsync_end - mode->crtc_vsync_start;
vds = mode->crtc_vtotal - mode->crtc_vsync_start;
vde = vds + mode->crtc_vdisplay;
vt = vde + mode->crtc_vsync_start - mode->crtc_vdisplay;
hpe = mode->crtc_hsync_end - mode->crtc_hsync_start;
hds = mode->crtc_htotal - mode->crtc_hsync_start;
hde = hds + mode->crtc_hdisplay;
ht = hde + mode->crtc_hsync_start - mode->crtc_hdisplay;
regmap_write(priv->map, JZ_REG_LCD_VSYNC,
0 << JZ_LCD_VSYNC_VPS_OFFSET |
vpe << JZ_LCD_VSYNC_VPE_OFFSET);
regmap_write(priv->map, JZ_REG_LCD_HSYNC,
0 << JZ_LCD_HSYNC_HPS_OFFSET |
hpe << JZ_LCD_HSYNC_HPE_OFFSET);
regmap_write(priv->map, JZ_REG_LCD_VAT,
ht << JZ_LCD_VAT_HT_OFFSET |
vt << JZ_LCD_VAT_VT_OFFSET);
regmap_write(priv->map, JZ_REG_LCD_DAH,
hds << JZ_LCD_DAH_HDS_OFFSET |
hde << JZ_LCD_DAH_HDE_OFFSET);
regmap_write(priv->map, JZ_REG_LCD_DAV,
vds << JZ_LCD_DAV_VDS_OFFSET |
vde << JZ_LCD_DAV_VDE_OFFSET);
if (priv->panel_is_sharp) {
regmap_write(priv->map, JZ_REG_LCD_PS, hde << 16 | (hde + 1));
regmap_write(priv->map, JZ_REG_LCD_CLS, hde << 16 | (hde + 1));
regmap_write(priv->map, JZ_REG_LCD_SPL, hpe << 16 | (hpe + 1));
regmap_write(priv->map, JZ_REG_LCD_REV, mode->htotal << 16);
}
regmap_update_bits(priv->map, JZ_REG_LCD_CTRL,
JZ_LCD_CTRL_OFUP | JZ_LCD_CTRL_BURST_MASK,
JZ_LCD_CTRL_OFUP | priv->soc_info->max_burst);
/*
* IPU restart - specify how much time the LCDC will wait before
* transferring a new frame from the IPU. The value is the one
* suggested in the programming manual.
*/
regmap_write(priv->map, JZ_REG_LCD_IPUR, JZ_LCD_IPUR_IPUREN |
(ht * vpe / 3) << JZ_LCD_IPUR_IPUR_LSB);
}
static int ingenic_drm_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
crtc);
struct ingenic_drm *priv = drm_crtc_get_priv(crtc);
struct drm_plane_state *f1_state, *f0_state, *ipu_state = NULL;
if (crtc_state->gamma_lut &&
drm_color_lut_size(crtc_state->gamma_lut) != ARRAY_SIZE(priv->dma_hwdescs->palette)) {
dev_dbg(priv->dev, "Invalid palette size\n");
return -EINVAL;
}
if (drm_atomic_crtc_needs_modeset(crtc_state) && priv->soc_info->has_osd) {
f1_state = drm_atomic_get_plane_state(crtc_state->state,
&priv->f1);
if (IS_ERR(f1_state))
return PTR_ERR(f1_state);
f0_state = drm_atomic_get_plane_state(crtc_state->state,
&priv->f0);
if (IS_ERR(f0_state))
return PTR_ERR(f0_state);
if (IS_ENABLED(CONFIG_DRM_INGENIC_IPU) && priv->ipu_plane) {
ipu_state = drm_atomic_get_plane_state(crtc_state->state,
priv->ipu_plane);
if (IS_ERR(ipu_state))
return PTR_ERR(ipu_state);
/* IPU and F1 planes cannot be enabled at the same time. */
if (f1_state->fb && ipu_state->fb) {
dev_dbg(priv->dev, "Cannot enable both F1 and IPU\n");
return -EINVAL;
}
}
/* If all the planes are disabled, we won't get a VBLANK IRQ */
priv->no_vblank = !f1_state->fb && !f0_state->fb &&
!(ipu_state && ipu_state->fb);
}
return 0;
}
static enum drm_mode_status
ingenic_drm_crtc_mode_valid(struct drm_crtc *crtc, const struct drm_display_mode *mode)
{
struct ingenic_drm *priv = drm_crtc_get_priv(crtc);
long rate;
if (mode->hdisplay > priv->soc_info->max_width)
return MODE_BAD_HVALUE;
if (mode->vdisplay > priv->soc_info->max_height)
return MODE_BAD_VVALUE;
rate = clk_round_rate(priv->pix_clk, mode->clock * 1000);
if (rate < 0)
return MODE_CLOCK_RANGE;
return MODE_OK;
}
static void ingenic_drm_crtc_atomic_begin(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
crtc);
struct ingenic_drm *priv = drm_crtc_get_priv(crtc);
u32 ctrl = 0;
if (priv->soc_info->has_osd &&
drm_atomic_crtc_needs_modeset(crtc_state)) {
/*
* If IPU plane is enabled, enable IPU as source for the F1
* plane; otherwise use regular DMA.
*/
if (priv->ipu_plane && priv->ipu_plane->state->fb)
ctrl |= JZ_LCD_OSDCTRL_IPU;
regmap_update_bits(priv->map, JZ_REG_LCD_OSDCTRL,
JZ_LCD_OSDCTRL_IPU, ctrl);
}
}
static void ingenic_drm_crtc_atomic_flush(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct ingenic_drm *priv = drm_crtc_get_priv(crtc);
struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
crtc);
struct drm_pending_vblank_event *event = crtc_state->event;
if (drm_atomic_crtc_needs_modeset(crtc_state)) {
ingenic_drm_crtc_update_timings(priv, &crtc_state->adjusted_mode);
priv->update_clk_rate = true;
}
if (priv->update_clk_rate) {
mutex_lock(&priv->clk_mutex);
clk_set_rate(priv->pix_clk,
crtc_state->adjusted_mode.crtc_clock * 1000);
priv->update_clk_rate = false;
mutex_unlock(&priv->clk_mutex);
}
if (event) {
crtc_state->event = NULL;
spin_lock_irq(&crtc->dev->event_lock);
if (drm_crtc_vblank_get(crtc) == 0)
drm_crtc_arm_vblank_event(crtc, event);
else
drm_crtc_send_vblank_event(crtc, event);
spin_unlock_irq(&crtc->dev->event_lock);
}
}
static int ingenic_drm_plane_atomic_check(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state,
plane);
struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
plane);
struct ingenic_drm *priv = drm_device_get_priv(plane->dev);
struct ingenic_drm_private_state *priv_state;
struct drm_crtc_state *crtc_state;
struct drm_crtc *crtc = new_plane_state->crtc ?: old_plane_state->crtc;
int ret;
if (!crtc)
return 0;
if (priv->soc_info->plane_f0_not_working && plane == &priv->f0)
return -EINVAL;
crtc_state = drm_atomic_get_existing_crtc_state(state,
crtc);
if (WARN_ON(!crtc_state))
return -EINVAL;
priv_state = ingenic_drm_get_priv_state(priv, state);
if (IS_ERR(priv_state))
return PTR_ERR(priv_state);
ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state,
DRM_PLANE_NO_SCALING,
DRM_PLANE_NO_SCALING,
priv->soc_info->has_osd,
true);
if (ret)
return ret;
/*
* If OSD is not available, check that the width/height match.
* Note that state->src_* are in 16.16 fixed-point format.
*/
if (!priv->soc_info->has_osd &&
(new_plane_state->src_x != 0 ||
(new_plane_state->src_w >> 16) != new_plane_state->crtc_w ||
(new_plane_state->src_h >> 16) != new_plane_state->crtc_h))
return -EINVAL;
priv_state->use_palette = new_plane_state->fb &&
new_plane_state->fb->format->format == DRM_FORMAT_C8;
/*
* Require full modeset if enabling or disabling a plane, or changing
* its position, size or depth.
*/
if (priv->soc_info->has_osd &&
(!old_plane_state->fb || !new_plane_state->fb ||
old_plane_state->crtc_x != new_plane_state->crtc_x ||
old_plane_state->crtc_y != new_plane_state->crtc_y ||
old_plane_state->crtc_w != new_plane_state->crtc_w ||
old_plane_state->crtc_h != new_plane_state->crtc_h ||
old_plane_state->fb->format->format != new_plane_state->fb->format->format))
crtc_state->mode_changed = true;
if (priv->soc_info->map_noncoherent)
drm_atomic_helper_check_plane_damage(state, new_plane_state);
return 0;
}
static void ingenic_drm_plane_enable(struct ingenic_drm *priv,
struct drm_plane *plane)
{
unsigned int en_bit;
if (priv->soc_info->has_osd) {
if (plane != &priv->f0)
en_bit = JZ_LCD_OSDC_F1EN;
else
en_bit = JZ_LCD_OSDC_F0EN;
regmap_set_bits(priv->map, JZ_REG_LCD_OSDC, en_bit);
}
}
void ingenic_drm_plane_disable(struct device *dev, struct drm_plane *plane)
{
struct ingenic_drm *priv = dev_get_drvdata(dev);
unsigned int en_bit;
if (priv->soc_info->has_osd) {
if (plane != &priv->f0)
en_bit = JZ_LCD_OSDC_F1EN;
else
en_bit = JZ_LCD_OSDC_F0EN;
regmap_clear_bits(priv->map, JZ_REG_LCD_OSDC, en_bit);
}
}
static void ingenic_drm_plane_atomic_disable(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct ingenic_drm *priv = drm_device_get_priv(plane->dev);
ingenic_drm_plane_disable(priv->dev, plane);
}
void ingenic_drm_plane_config(struct device *dev,
struct drm_plane *plane, u32 fourcc)
{
struct ingenic_drm *priv = dev_get_drvdata(dev);
struct drm_plane_state *state = plane->state;
unsigned int xy_reg, size_reg;
unsigned int ctrl = 0;
ingenic_drm_plane_enable(priv, plane);
if (priv->soc_info->has_osd && plane != &priv->f0) {
switch (fourcc) {
case DRM_FORMAT_XRGB1555:
ctrl |= JZ_LCD_OSDCTRL_RGB555;
fallthrough;
case DRM_FORMAT_RGB565:
ctrl |= JZ_LCD_OSDCTRL_BPP_15_16;
break;
case DRM_FORMAT_RGB888:
ctrl |= JZ_LCD_OSDCTRL_BPP_24_COMP;
break;
case DRM_FORMAT_XRGB8888:
ctrl |= JZ_LCD_OSDCTRL_BPP_18_24;
break;
case DRM_FORMAT_XRGB2101010:
ctrl |= JZ_LCD_OSDCTRL_BPP_30;
break;
}
regmap_update_bits(priv->map, JZ_REG_LCD_OSDCTRL,
JZ_LCD_OSDCTRL_BPP_MASK, ctrl);
} else {
switch (fourcc) {
case DRM_FORMAT_C8:
ctrl |= JZ_LCD_CTRL_BPP_8;
break;
case DRM_FORMAT_XRGB1555:
ctrl |= JZ_LCD_CTRL_RGB555;
fallthrough;
case DRM_FORMAT_RGB565:
ctrl |= JZ_LCD_CTRL_BPP_15_16;
break;
case DRM_FORMAT_RGB888:
ctrl |= JZ_LCD_CTRL_BPP_24_COMP;
break;
case DRM_FORMAT_XRGB8888:
ctrl |= JZ_LCD_CTRL_BPP_18_24;
break;
case DRM_FORMAT_XRGB2101010:
ctrl |= JZ_LCD_CTRL_BPP_30;
break;
}
regmap_update_bits(priv->map, JZ_REG_LCD_CTRL,
JZ_LCD_CTRL_BPP_MASK, ctrl);
}
if (priv->soc_info->has_osd) {
if (plane != &priv->f0) {
xy_reg = JZ_REG_LCD_XYP1;
size_reg = JZ_REG_LCD_SIZE1;
} else {
xy_reg = JZ_REG_LCD_XYP0;
size_reg = JZ_REG_LCD_SIZE0;
}
regmap_write(priv->map, xy_reg,
state->crtc_x << JZ_LCD_XYP01_XPOS_LSB |
state->crtc_y << JZ_LCD_XYP01_YPOS_LSB);
regmap_write(priv->map, size_reg,
state->crtc_w << JZ_LCD_SIZE01_WIDTH_LSB |
state->crtc_h << JZ_LCD_SIZE01_HEIGHT_LSB);
}
}
bool ingenic_drm_map_noncoherent(const struct device *dev)
{
const struct ingenic_drm *priv = dev_get_drvdata(dev);
return priv->soc_info->map_noncoherent;
}
static void ingenic_drm_update_palette(struct ingenic_drm *priv,
const struct drm_color_lut *lut)
{
unsigned int i;
for (i = 0; i < ARRAY_SIZE(priv->dma_hwdescs->palette); i++) {
u16 color = drm_color_lut_extract(lut[i].red, 5) << 11
| drm_color_lut_extract(lut[i].green, 6) << 5
| drm_color_lut_extract(lut[i].blue, 5);
priv->dma_hwdescs->palette[i] = color;
}
}
static void ingenic_drm_plane_atomic_update(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct ingenic_drm *priv = drm_device_get_priv(plane->dev);
struct drm_plane_state *newstate = drm_atomic_get_new_plane_state(state, plane);
struct drm_plane_state *oldstate = drm_atomic_get_old_plane_state(state, plane);
unsigned int width, height, cpp, next_id, plane_id;
struct ingenic_drm_private_state *priv_state;
struct drm_crtc_state *crtc_state;
struct ingenic_dma_hwdesc *hwdesc;
dma_addr_t addr;
u32 fourcc;
if (newstate && newstate->fb) {
if (priv->soc_info->map_noncoherent)
drm_fb_dma_sync_non_coherent(&priv->drm, oldstate, newstate);
crtc_state = newstate->crtc->state;
plane_id = !!(priv->soc_info->has_osd && plane != &priv->f0);
addr = drm_fb_dma_get_gem_addr(newstate->fb, newstate, 0);
width = newstate->src_w >> 16;
height = newstate->src_h >> 16;
cpp = newstate->fb->format->cpp[0];
priv_state = ingenic_drm_get_new_priv_state(priv, state);
next_id = (priv_state && priv_state->use_palette) ? HWDESC_PALETTE : plane_id;
hwdesc = &priv->dma_hwdescs->hwdesc[plane_id];
hwdesc->addr = addr;
hwdesc->cmd = JZ_LCD_CMD_EOF_IRQ | (width * height * cpp / 4);
hwdesc->next = dma_hwdesc_addr(priv, next_id);
if (priv->soc_info->use_extended_hwdesc) {
hwdesc->cmd |= JZ_LCD_CMD_FRM_ENABLE;
/* Extended 8-byte descriptor */
hwdesc->cpos = 0;
hwdesc->offsize = 0;
hwdesc->pagewidth = 0;
switch (newstate->fb->format->format) {
case DRM_FORMAT_XRGB1555:
hwdesc->cpos |= JZ_LCD_CPOS_RGB555;
fallthrough;
case DRM_FORMAT_RGB565:
hwdesc->cpos |= JZ_LCD_CPOS_BPP_15_16;
break;
case DRM_FORMAT_XRGB8888:
hwdesc->cpos |= JZ_LCD_CPOS_BPP_18_24;
break;
}
hwdesc->cpos |= (JZ_LCD_CPOS_COEFFICIENT_1 <<
JZ_LCD_CPOS_COEFFICIENT_OFFSET);
hwdesc->dessize =
(0xff << JZ_LCD_DESSIZE_ALPHA_OFFSET) |
FIELD_PREP(JZ_LCD_DESSIZE_HEIGHT_MASK, height - 1) |
FIELD_PREP(JZ_LCD_DESSIZE_WIDTH_MASK, width - 1);
}
if (drm_atomic_crtc_needs_modeset(crtc_state)) {
fourcc = newstate->fb->format->format;
ingenic_drm_plane_config(priv->dev, plane, fourcc);
crtc_state->color_mgmt_changed = fourcc == DRM_FORMAT_C8;
}
if (crtc_state->color_mgmt_changed)
ingenic_drm_update_palette(priv, crtc_state->gamma_lut->data);
}
}
static void ingenic_drm_encoder_atomic_mode_set(struct drm_encoder *encoder,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
struct ingenic_drm *priv = drm_device_get_priv(encoder->dev);
struct drm_display_mode *mode = &crtc_state->adjusted_mode;
struct ingenic_drm_bridge *bridge = to_ingenic_drm_bridge(encoder);
unsigned int cfg, rgbcfg = 0;
priv->panel_is_sharp = bridge->bus_cfg.flags & DRM_BUS_FLAG_SHARP_SIGNALS;
if (priv->panel_is_sharp) {
cfg = JZ_LCD_CFG_MODE_SPECIAL_TFT_1 | JZ_LCD_CFG_REV_POLARITY;
} else {
cfg = JZ_LCD_CFG_PS_DISABLE | JZ_LCD_CFG_CLS_DISABLE
| JZ_LCD_CFG_SPL_DISABLE | JZ_LCD_CFG_REV_DISABLE;
}
if (priv->soc_info->use_extended_hwdesc)
cfg |= JZ_LCD_CFG_DESCRIPTOR_8;
if (mode->flags & DRM_MODE_FLAG_NHSYNC)
cfg |= JZ_LCD_CFG_HSYNC_ACTIVE_LOW;
if (mode->flags & DRM_MODE_FLAG_NVSYNC)
cfg |= JZ_LCD_CFG_VSYNC_ACTIVE_LOW;
if (bridge->bus_cfg.flags & DRM_BUS_FLAG_DE_LOW)
cfg |= JZ_LCD_CFG_DE_ACTIVE_LOW;
if (bridge->bus_cfg.flags & DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE)
cfg |= JZ_LCD_CFG_PCLK_FALLING_EDGE;
if (!priv->panel_is_sharp) {
if (conn_state->connector->connector_type == DRM_MODE_CONNECTOR_TV) {
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
cfg |= JZ_LCD_CFG_MODE_TV_OUT_I;
else
cfg |= JZ_LCD_CFG_MODE_TV_OUT_P;
} else {
switch (bridge->bus_cfg.format) {
case MEDIA_BUS_FMT_RGB565_1X16:
cfg |= JZ_LCD_CFG_MODE_GENERIC_16BIT;
break;
case MEDIA_BUS_FMT_RGB666_1X18:
cfg |= JZ_LCD_CFG_MODE_GENERIC_18BIT;
break;
case MEDIA_BUS_FMT_RGB888_1X24:
cfg |= JZ_LCD_CFG_MODE_GENERIC_24BIT;
break;
case MEDIA_BUS_FMT_RGB888_3X8_DELTA:
rgbcfg = JZ_LCD_RGBC_EVEN_GBR | JZ_LCD_RGBC_ODD_RGB;
fallthrough;
case MEDIA_BUS_FMT_RGB888_3X8:
cfg |= JZ_LCD_CFG_MODE_8BIT_SERIAL;
break;
default:
break;
}
}
}
regmap_write(priv->map, JZ_REG_LCD_CFG, cfg);
regmap_write(priv->map, JZ_REG_LCD_RGBC, rgbcfg);
}
static int ingenic_drm_bridge_attach(struct drm_bridge *bridge,
enum drm_bridge_attach_flags flags)
{
struct ingenic_drm_bridge *ib = to_ingenic_drm_bridge(bridge->encoder);
return drm_bridge_attach(bridge->encoder, ib->next_bridge,
&ib->bridge, flags);
}
static int ingenic_drm_bridge_atomic_check(struct drm_bridge *bridge,
struct drm_bridge_state *bridge_state,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
struct drm_display_mode *mode = &crtc_state->adjusted_mode;
struct ingenic_drm_bridge *ib = to_ingenic_drm_bridge(bridge->encoder);
ib->bus_cfg = bridge_state->output_bus_cfg;
if (conn_state->connector->connector_type == DRM_MODE_CONNECTOR_TV)
return 0;
switch (bridge_state->output_bus_cfg.format) {
case MEDIA_BUS_FMT_RGB888_3X8:
case MEDIA_BUS_FMT_RGB888_3X8_DELTA:
/*
* The LCD controller expects timing values in dot-clock ticks,
* which is 3x the timing values in pixels when using a 3x8-bit
* display; but it will count the display area size in pixels
* either way. Go figure.
*/
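			/*
			 * Illustrative example: with hdisplay = 800 and
			 * hsync_start = 840, the 40-pixel front porch becomes
			 * 120 dot-clock ticks (920 - 800) while the active
			 * width stays at 800.
			 */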
mode->crtc_clock = mode->clock * 3;
mode->crtc_hsync_start = mode->hsync_start * 3 - mode->hdisplay * 2;
mode->crtc_hsync_end = mode->hsync_end * 3 - mode->hdisplay * 2;
mode->crtc_hdisplay = mode->hdisplay;
mode->crtc_htotal = mode->htotal * 3 - mode->hdisplay * 2;
return 0;
case MEDIA_BUS_FMT_RGB565_1X16:
case MEDIA_BUS_FMT_RGB666_1X18:
case MEDIA_BUS_FMT_RGB888_1X24:
return 0;
default:
return -EINVAL;
}
}
static u32 *
ingenic_drm_bridge_atomic_get_input_bus_fmts(struct drm_bridge *bridge,
struct drm_bridge_state *bridge_state,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state,
u32 output_fmt,
unsigned int *num_input_fmts)
{
switch (output_fmt) {
case MEDIA_BUS_FMT_RGB888_1X24:
case MEDIA_BUS_FMT_RGB666_1X18:
case MEDIA_BUS_FMT_RGB565_1X16:
case MEDIA_BUS_FMT_RGB888_3X8:
case MEDIA_BUS_FMT_RGB888_3X8_DELTA:
break;
default:
*num_input_fmts = 0;
return NULL;
}
return drm_atomic_helper_bridge_propagate_bus_fmt(bridge, bridge_state,
crtc_state, conn_state,
output_fmt,
num_input_fmts);
}
static irqreturn_t ingenic_drm_irq_handler(int irq, void *arg)
{
struct ingenic_drm *priv = drm_device_get_priv(arg);
unsigned int state;
regmap_read(priv->map, JZ_REG_LCD_STATE, &state);
regmap_update_bits(priv->map, JZ_REG_LCD_STATE,
JZ_LCD_STATE_EOF_IRQ, 0);
if (state & JZ_LCD_STATE_EOF_IRQ)
drm_crtc_handle_vblank(&priv->crtc);
return IRQ_HANDLED;
}
static int ingenic_drm_enable_vblank(struct drm_crtc *crtc)
{
struct ingenic_drm *priv = drm_crtc_get_priv(crtc);
if (priv->no_vblank)
return -EINVAL;
regmap_update_bits(priv->map, JZ_REG_LCD_CTRL,
JZ_LCD_CTRL_EOF_IRQ, JZ_LCD_CTRL_EOF_IRQ);
return 0;
}
static void ingenic_drm_disable_vblank(struct drm_crtc *crtc)
{
struct ingenic_drm *priv = drm_crtc_get_priv(crtc);
regmap_update_bits(priv->map, JZ_REG_LCD_CTRL, JZ_LCD_CTRL_EOF_IRQ, 0);
}
static struct drm_framebuffer *
ingenic_drm_gem_fb_create(struct drm_device *drm, struct drm_file *file,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
struct ingenic_drm *priv = drm_device_get_priv(drm);
if (priv->soc_info->map_noncoherent)
return drm_gem_fb_create_with_dirty(drm, file, mode_cmd);
return drm_gem_fb_create(drm, file, mode_cmd);
}
static struct drm_gem_object *
ingenic_drm_gem_create_object(struct drm_device *drm, size_t size)
{
struct ingenic_drm *priv = drm_device_get_priv(drm);
struct drm_gem_dma_object *obj;
obj = kzalloc(sizeof(*obj), GFP_KERNEL);
if (!obj)
return ERR_PTR(-ENOMEM);
obj->map_noncoherent = priv->soc_info->map_noncoherent;
return &obj->base;
}
static struct drm_private_state *
ingenic_drm_duplicate_state(struct drm_private_obj *obj)
{
struct ingenic_drm_private_state *state = to_ingenic_drm_priv_state(obj->state);
state = kmemdup(state, sizeof(*state), GFP_KERNEL);
if (!state)
return NULL;
__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
return &state->base;
}
static void ingenic_drm_destroy_state(struct drm_private_obj *obj,
struct drm_private_state *state)
{
struct ingenic_drm_private_state *priv_state = to_ingenic_drm_priv_state(state);
kfree(priv_state);
}
DEFINE_DRM_GEM_DMA_FOPS(ingenic_drm_fops);
static const struct drm_driver ingenic_drm_driver_data = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
.name = "ingenic-drm",
.desc = "DRM module for Ingenic SoCs",
.date = "20200716",
.major = 1,
.minor = 1,
.patchlevel = 0,
.fops = &ingenic_drm_fops,
.gem_create_object = ingenic_drm_gem_create_object,
DRM_GEM_DMA_DRIVER_OPS,
};
static const struct drm_plane_funcs ingenic_drm_primary_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.reset = drm_atomic_helper_plane_reset,
.destroy = drm_plane_cleanup,
.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
};
static const struct drm_crtc_funcs ingenic_drm_crtc_funcs = {
.set_config = drm_atomic_helper_set_config,
.page_flip = drm_atomic_helper_page_flip,
.reset = drm_atomic_helper_crtc_reset,
.destroy = drm_crtc_cleanup,
.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
.enable_vblank = ingenic_drm_enable_vblank,
.disable_vblank = ingenic_drm_disable_vblank,
};
static const struct drm_plane_helper_funcs ingenic_drm_plane_helper_funcs = {
.atomic_update = ingenic_drm_plane_atomic_update,
.atomic_check = ingenic_drm_plane_atomic_check,
.atomic_disable = ingenic_drm_plane_atomic_disable,
};
static const struct drm_crtc_helper_funcs ingenic_drm_crtc_helper_funcs = {
.atomic_enable = ingenic_drm_crtc_atomic_enable,
.atomic_disable = ingenic_drm_crtc_atomic_disable,
.atomic_begin = ingenic_drm_crtc_atomic_begin,
.atomic_flush = ingenic_drm_crtc_atomic_flush,
.atomic_check = ingenic_drm_crtc_atomic_check,
.mode_valid = ingenic_drm_crtc_mode_valid,
};
static const struct drm_encoder_helper_funcs ingenic_drm_encoder_helper_funcs = {
.atomic_mode_set = ingenic_drm_encoder_atomic_mode_set,
};
static const struct drm_bridge_funcs ingenic_drm_bridge_funcs = {
.attach = ingenic_drm_bridge_attach,
.atomic_enable = ingenic_drm_bridge_atomic_enable,
.atomic_disable = ingenic_drm_bridge_atomic_disable,
.atomic_check = ingenic_drm_bridge_atomic_check,
.atomic_reset = drm_atomic_helper_bridge_reset,
.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
.atomic_get_input_bus_fmts = ingenic_drm_bridge_atomic_get_input_bus_fmts,
};
static const struct drm_mode_config_funcs ingenic_drm_mode_config_funcs = {
.fb_create = ingenic_drm_gem_fb_create,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
};
static struct drm_mode_config_helper_funcs ingenic_drm_mode_config_helpers = {
.atomic_commit_tail = drm_atomic_helper_commit_tail,
};
static const struct drm_private_state_funcs ingenic_drm_private_state_funcs = {
.atomic_duplicate_state = ingenic_drm_duplicate_state,
.atomic_destroy_state = ingenic_drm_destroy_state,
};
static void ingenic_drm_unbind_all(void *d)
{
struct ingenic_drm *priv = d;
component_unbind_all(priv->dev, &priv->drm);
}
static void __maybe_unused ingenic_drm_release_rmem(void *d)
{
of_reserved_mem_device_release(d);
}
static void ingenic_drm_configure_hwdesc(struct ingenic_drm *priv,
unsigned int hwdesc,
unsigned int next_hwdesc, u32 id)
{
struct ingenic_dma_hwdesc *desc = &priv->dma_hwdescs->hwdesc[hwdesc];
desc->next = dma_hwdesc_addr(priv, next_hwdesc);
desc->id = id;
}
static void ingenic_drm_configure_hwdesc_palette(struct ingenic_drm *priv)
{
struct ingenic_dma_hwdesc *desc;
ingenic_drm_configure_hwdesc(priv, HWDESC_PALETTE, 0, 0xc0);
desc = &priv->dma_hwdescs->hwdesc[HWDESC_PALETTE];
desc->addr = priv->dma_hwdescs_phys
+ offsetof(struct ingenic_dma_hwdescs, palette);
desc->cmd = JZ_LCD_CMD_ENABLE_PAL
| (sizeof(priv->dma_hwdescs->palette) / 4);
}
static void ingenic_drm_configure_hwdesc_plane(struct ingenic_drm *priv,
unsigned int plane)
{
ingenic_drm_configure_hwdesc(priv, plane, plane, 0xf0 | plane);
}
static void ingenic_drm_atomic_private_obj_fini(struct drm_device *drm, void *private_obj)
{
drm_atomic_private_obj_fini(private_obj);
}
static int ingenic_drm_bind(struct device *dev, bool has_components)
{
struct platform_device *pdev = to_platform_device(dev);
struct ingenic_drm_private_state *private_state;
const struct jz_soc_info *soc_info;
struct ingenic_drm *priv;
struct clk *parent_clk;
struct drm_plane *primary;
struct drm_bridge *bridge;
struct drm_panel *panel;
struct drm_connector *connector;
struct drm_encoder *encoder;
struct ingenic_drm_bridge *ib;
struct drm_device *drm;
void __iomem *base;
struct resource *res;
struct regmap_config regmap_config;
long parent_rate;
unsigned int i, clone_mask = 0;
int ret, irq;
u32 osdc = 0;
soc_info = of_device_get_match_data(dev);
if (!soc_info) {
dev_err(dev, "Missing platform data\n");
return -EINVAL;
}
if (IS_ENABLED(CONFIG_OF_RESERVED_MEM)) {
ret = of_reserved_mem_device_init(dev);
if (ret && ret != -ENODEV)
dev_warn(dev, "Failed to get reserved memory: %d\n", ret);
if (!ret) {
ret = devm_add_action_or_reset(dev, ingenic_drm_release_rmem, dev);
if (ret)
return ret;
}
}
priv = devm_drm_dev_alloc(dev, &ingenic_drm_driver_data,
struct ingenic_drm, drm);
if (IS_ERR(priv))
return PTR_ERR(priv);
priv->soc_info = soc_info;
priv->dev = dev;
drm = &priv->drm;
platform_set_drvdata(pdev, priv);
ret = drmm_mode_config_init(drm);
if (ret)
return ret;
drm->mode_config.min_width = 0;
drm->mode_config.min_height = 0;
drm->mode_config.max_width = soc_info->max_width;
drm->mode_config.max_height = 4095;
drm->mode_config.funcs = &ingenic_drm_mode_config_funcs;
drm->mode_config.helper_private = &ingenic_drm_mode_config_helpers;
base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(base)) {
dev_err(dev, "Failed to get memory resource\n");
return PTR_ERR(base);
}
regmap_config = ingenic_drm_regmap_config;
regmap_config.max_register = res->end - res->start;
priv->map = devm_regmap_init_mmio(dev, base,
					  &regmap_config);
if (IS_ERR(priv->map)) {
dev_err(dev, "Failed to create regmap\n");
return PTR_ERR(priv->map);
}
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
if (soc_info->needs_dev_clk) {
priv->lcd_clk = devm_clk_get(dev, "lcd");
if (IS_ERR(priv->lcd_clk)) {
dev_err(dev, "Failed to get lcd clock\n");
return PTR_ERR(priv->lcd_clk);
}
}
priv->pix_clk = devm_clk_get(dev, "lcd_pclk");
if (IS_ERR(priv->pix_clk)) {
dev_err(dev, "Failed to get pixel clock\n");
return PTR_ERR(priv->pix_clk);
}
priv->dma_hwdescs = dmam_alloc_coherent(dev,
sizeof(*priv->dma_hwdescs),
&priv->dma_hwdescs_phys,
GFP_KERNEL);
if (!priv->dma_hwdescs)
return -ENOMEM;
/* Configure DMA hwdesc for foreground0 plane */
ingenic_drm_configure_hwdesc_plane(priv, 0);
/* Configure DMA hwdesc for foreground1 plane */
ingenic_drm_configure_hwdesc_plane(priv, 1);
/* Configure DMA hwdesc for palette */
ingenic_drm_configure_hwdesc_palette(priv);
primary = priv->soc_info->has_osd ? &priv->f1 : &priv->f0;
drm_plane_helper_add(primary, &ingenic_drm_plane_helper_funcs);
ret = drm_universal_plane_init(drm, primary, 1,
&ingenic_drm_primary_plane_funcs,
priv->soc_info->formats_f1,
priv->soc_info->num_formats_f1,
NULL, DRM_PLANE_TYPE_PRIMARY, NULL);
if (ret) {
dev_err(dev, "Failed to register plane: %i\n", ret);
return ret;
}
if (soc_info->map_noncoherent)
drm_plane_enable_fb_damage_clips(&priv->f1);
drm_crtc_helper_add(&priv->crtc, &ingenic_drm_crtc_helper_funcs);
ret = drm_crtc_init_with_planes(drm, &priv->crtc, primary,
NULL, &ingenic_drm_crtc_funcs, NULL);
if (ret) {
dev_err(dev, "Failed to init CRTC: %i\n", ret);
return ret;
}
drm_crtc_enable_color_mgmt(&priv->crtc, 0, false,
ARRAY_SIZE(priv->dma_hwdescs->palette));
if (soc_info->has_osd) {
drm_plane_helper_add(&priv->f0,
&ingenic_drm_plane_helper_funcs);
ret = drm_universal_plane_init(drm, &priv->f0, 1,
&ingenic_drm_primary_plane_funcs,
priv->soc_info->formats_f0,
priv->soc_info->num_formats_f0,
NULL, DRM_PLANE_TYPE_OVERLAY,
NULL);
if (ret) {
dev_err(dev, "Failed to register overlay plane: %i\n",
ret);
return ret;
}
if (soc_info->map_noncoherent)
drm_plane_enable_fb_damage_clips(&priv->f0);
if (IS_ENABLED(CONFIG_DRM_INGENIC_IPU) && has_components) {
ret = component_bind_all(dev, drm);
if (ret) {
if (ret != -EPROBE_DEFER)
dev_err(dev, "Failed to bind components: %i\n", ret);
return ret;
}
ret = devm_add_action_or_reset(dev, ingenic_drm_unbind_all, priv);
if (ret)
return ret;
priv->ipu_plane = drm_plane_from_index(drm, 2);
if (!priv->ipu_plane) {
dev_err(dev, "Failed to retrieve IPU plane\n");
return -EINVAL;
}
}
}
for (i = 0; ; i++) {
ret = drm_of_find_panel_or_bridge(dev->of_node, 0, i, &panel, &bridge);
if (ret) {
if (ret == -ENODEV)
break; /* we're done */
if (ret != -EPROBE_DEFER)
dev_err(dev, "Failed to get bridge handle\n");
return ret;
}
if (panel)
bridge = devm_drm_panel_bridge_add_typed(dev, panel,
DRM_MODE_CONNECTOR_DPI);
ib = drmm_encoder_alloc(drm, struct ingenic_drm_bridge, encoder,
NULL, DRM_MODE_ENCODER_DPI, NULL);
if (IS_ERR(ib)) {
ret = PTR_ERR(ib);
dev_err(dev, "Failed to init encoder: %d\n", ret);
return ret;
}
encoder = &ib->encoder;
encoder->possible_crtcs = drm_crtc_mask(&priv->crtc);
drm_encoder_helper_add(encoder, &ingenic_drm_encoder_helper_funcs);
ib->bridge.funcs = &ingenic_drm_bridge_funcs;
ib->next_bridge = bridge;
ret = drm_bridge_attach(encoder, &ib->bridge, NULL,
DRM_BRIDGE_ATTACH_NO_CONNECTOR);
if (ret) {
dev_err(dev, "Unable to attach bridge\n");
return ret;
}
connector = drm_bridge_connector_init(drm, encoder);
if (IS_ERR(connector)) {
dev_err(dev, "Unable to init connector\n");
return PTR_ERR(connector);
}
drm_connector_attach_encoder(connector, encoder);
}
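	/*
	 * Allow any combination of encoders to be enabled at the same time by
	 * marking every encoder as a possible clone of every other one.
	 */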
drm_for_each_encoder(encoder, drm) {
clone_mask |= BIT(drm_encoder_index(encoder));
}
drm_for_each_encoder(encoder, drm) {
encoder->possible_clones = clone_mask;
}
ret = devm_request_irq(dev, irq, ingenic_drm_irq_handler, 0, drm->driver->name, drm);
if (ret) {
dev_err(dev, "Unable to install IRQ handler\n");
return ret;
}
ret = drm_vblank_init(drm, 1);
if (ret) {
dev_err(dev, "Failed calling drm_vblank_init()\n");
return ret;
}
drm_mode_config_reset(drm);
ret = clk_prepare_enable(priv->pix_clk);
if (ret) {
dev_err(dev, "Unable to start pixel clock\n");
return ret;
}
if (priv->lcd_clk) {
parent_clk = clk_get_parent(priv->lcd_clk);
parent_rate = clk_get_rate(parent_clk);
/* LCD Device clock must be 3x the pixel clock for STN panels,
* or 1.5x the pixel clock for TFT panels. To avoid having to
		 * check for the LCD device clock every time we do a mode change,
* we set the LCD device clock to the highest rate possible.
*/
ret = clk_set_rate(priv->lcd_clk, parent_rate);
if (ret) {
dev_err(dev, "Unable to set LCD clock rate\n");
goto err_pixclk_disable;
}
ret = clk_prepare_enable(priv->lcd_clk);
if (ret) {
dev_err(dev, "Unable to start lcd clock\n");
goto err_pixclk_disable;
}
}
/* Enable OSD if available */
if (soc_info->has_osd)
osdc |= JZ_LCD_OSDC_OSDEN;
if (soc_info->has_alpha)
osdc |= JZ_LCD_OSDC_ALPHAEN;
regmap_write(priv->map, JZ_REG_LCD_OSDC, osdc);
mutex_init(&priv->clk_mutex);
priv->clock_nb.notifier_call = ingenic_drm_update_pixclk;
parent_clk = clk_get_parent(priv->pix_clk);
ret = clk_notifier_register(parent_clk, &priv->clock_nb);
if (ret) {
dev_err(dev, "Unable to register clock notifier\n");
goto err_devclk_disable;
}
private_state = kzalloc(sizeof(*private_state), GFP_KERNEL);
if (!private_state) {
ret = -ENOMEM;
goto err_clk_notifier_unregister;
}
drm_atomic_private_obj_init(drm, &priv->private_obj, &private_state->base,
&ingenic_drm_private_state_funcs);
ret = drmm_add_action_or_reset(drm, ingenic_drm_atomic_private_obj_fini,
&priv->private_obj);
if (ret)
goto err_private_state_free;
ret = drm_dev_register(drm, 0);
if (ret) {
dev_err(dev, "Failed to register DRM driver\n");
goto err_clk_notifier_unregister;
}
drm_fbdev_generic_setup(drm, 32);
return 0;
err_private_state_free:
kfree(private_state);
err_clk_notifier_unregister:
clk_notifier_unregister(parent_clk, &priv->clock_nb);
err_devclk_disable:
if (priv->lcd_clk)
clk_disable_unprepare(priv->lcd_clk);
err_pixclk_disable:
clk_disable_unprepare(priv->pix_clk);
return ret;
}
static int ingenic_drm_bind_with_components(struct device *dev)
{
return ingenic_drm_bind(dev, true);
}
static void ingenic_drm_unbind(struct device *dev)
{
struct ingenic_drm *priv = dev_get_drvdata(dev);
struct clk *parent_clk = clk_get_parent(priv->pix_clk);
clk_notifier_unregister(parent_clk, &priv->clock_nb);
if (priv->lcd_clk)
clk_disable_unprepare(priv->lcd_clk);
clk_disable_unprepare(priv->pix_clk);
drm_dev_unregister(&priv->drm);
drm_atomic_helper_shutdown(&priv->drm);
}
static const struct component_master_ops ingenic_master_ops = {
.bind = ingenic_drm_bind_with_components,
.unbind = ingenic_drm_unbind,
};
static int ingenic_drm_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct component_match *match = NULL;
struct device_node *np;
if (!IS_ENABLED(CONFIG_DRM_INGENIC_IPU))
return ingenic_drm_bind(dev, false);
/* IPU is at port address 8 */
np = of_graph_get_remote_node(dev->of_node, 8, 0);
if (!np)
return ingenic_drm_bind(dev, false);
drm_of_component_match_add(dev, &match, component_compare_of, np);
of_node_put(np);
return component_master_add_with_match(dev, &ingenic_master_ops, match);
}
static int ingenic_drm_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
if (!IS_ENABLED(CONFIG_DRM_INGENIC_IPU))
ingenic_drm_unbind(dev);
else
component_master_del(dev, &ingenic_master_ops);
return 0;
}
static int ingenic_drm_suspend(struct device *dev)
{
struct ingenic_drm *priv = dev_get_drvdata(dev);
return drm_mode_config_helper_suspend(&priv->drm);
}
static int ingenic_drm_resume(struct device *dev)
{
struct ingenic_drm *priv = dev_get_drvdata(dev);
return drm_mode_config_helper_resume(&priv->drm);
}
static DEFINE_SIMPLE_DEV_PM_OPS(ingenic_drm_pm_ops,
ingenic_drm_suspend, ingenic_drm_resume);
static const u32 jz4740_formats[] = {
DRM_FORMAT_XRGB1555,
DRM_FORMAT_RGB565,
DRM_FORMAT_XRGB8888,
};
static const u32 jz4725b_formats_f1[] = {
DRM_FORMAT_XRGB1555,
DRM_FORMAT_RGB565,
DRM_FORMAT_XRGB8888,
};
static const u32 jz4725b_formats_f0[] = {
DRM_FORMAT_C8,
DRM_FORMAT_XRGB1555,
DRM_FORMAT_RGB565,
DRM_FORMAT_XRGB8888,
};
static const u32 jz4770_formats_f1[] = {
DRM_FORMAT_XRGB1555,
DRM_FORMAT_RGB565,
DRM_FORMAT_RGB888,
DRM_FORMAT_XRGB8888,
DRM_FORMAT_XRGB2101010,
};
static const u32 jz4770_formats_f0[] = {
DRM_FORMAT_C8,
DRM_FORMAT_XRGB1555,
DRM_FORMAT_RGB565,
DRM_FORMAT_RGB888,
DRM_FORMAT_XRGB8888,
DRM_FORMAT_XRGB2101010,
};
static const struct jz_soc_info jz4740_soc_info = {
.needs_dev_clk = true,
.has_osd = false,
.map_noncoherent = false,
.max_width = 800,
.max_height = 600,
.max_burst = JZ_LCD_CTRL_BURST_16,
.formats_f1 = jz4740_formats,
.num_formats_f1 = ARRAY_SIZE(jz4740_formats),
/* JZ4740 has only one plane */
};
static const struct jz_soc_info jz4725b_soc_info = {
.needs_dev_clk = false,
.has_osd = true,
.map_noncoherent = false,
.max_width = 800,
.max_height = 600,
.max_burst = JZ_LCD_CTRL_BURST_16,
.formats_f1 = jz4725b_formats_f1,
.num_formats_f1 = ARRAY_SIZE(jz4725b_formats_f1),
.formats_f0 = jz4725b_formats_f0,
.num_formats_f0 = ARRAY_SIZE(jz4725b_formats_f0),
};
static const struct jz_soc_info jz4760_soc_info = {
.needs_dev_clk = false,
.has_osd = true,
.map_noncoherent = false,
.max_width = 1280,
.max_height = 720,
.max_burst = JZ_LCD_CTRL_BURST_32,
.formats_f1 = jz4770_formats_f1,
.num_formats_f1 = ARRAY_SIZE(jz4770_formats_f1),
.formats_f0 = jz4770_formats_f0,
.num_formats_f0 = ARRAY_SIZE(jz4770_formats_f0),
};
static const struct jz_soc_info jz4760b_soc_info = {
.needs_dev_clk = false,
.has_osd = true,
.map_noncoherent = false,
.max_width = 1280,
.max_height = 720,
.max_burst = JZ_LCD_CTRL_BURST_64,
.formats_f1 = jz4770_formats_f1,
.num_formats_f1 = ARRAY_SIZE(jz4770_formats_f1),
.formats_f0 = jz4770_formats_f0,
.num_formats_f0 = ARRAY_SIZE(jz4770_formats_f0),
};
static const struct jz_soc_info jz4770_soc_info = {
.needs_dev_clk = false,
.has_osd = true,
.map_noncoherent = true,
.max_width = 1280,
.max_height = 720,
.max_burst = JZ_LCD_CTRL_BURST_64,
.formats_f1 = jz4770_formats_f1,
.num_formats_f1 = ARRAY_SIZE(jz4770_formats_f1),
.formats_f0 = jz4770_formats_f0,
.num_formats_f0 = ARRAY_SIZE(jz4770_formats_f0),
};
static const struct jz_soc_info jz4780_soc_info = {
.needs_dev_clk = true,
.has_osd = true,
.has_alpha = true,
.use_extended_hwdesc = true,
.plane_f0_not_working = true, /* REVISIT */
.max_width = 4096,
.max_height = 2048,
.max_burst = JZ_LCD_CTRL_BURST_64,
.formats_f1 = jz4770_formats_f1,
.num_formats_f1 = ARRAY_SIZE(jz4770_formats_f1),
.formats_f0 = jz4770_formats_f0,
.num_formats_f0 = ARRAY_SIZE(jz4770_formats_f0),
};
static const struct of_device_id ingenic_drm_of_match[] = {
{ .compatible = "ingenic,jz4740-lcd", .data = &jz4740_soc_info },
{ .compatible = "ingenic,jz4725b-lcd", .data = &jz4725b_soc_info },
{ .compatible = "ingenic,jz4760-lcd", .data = &jz4760_soc_info },
{ .compatible = "ingenic,jz4760b-lcd", .data = &jz4760b_soc_info },
{ .compatible = "ingenic,jz4770-lcd", .data = &jz4770_soc_info },
{ .compatible = "ingenic,jz4780-lcd", .data = &jz4780_soc_info },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, ingenic_drm_of_match);
static struct platform_driver ingenic_drm_driver = {
.driver = {
.name = "ingenic-drm",
.pm = pm_sleep_ptr(&ingenic_drm_pm_ops),
.of_match_table = of_match_ptr(ingenic_drm_of_match),
},
.probe = ingenic_drm_probe,
.remove = ingenic_drm_remove,
};
static int ingenic_drm_init(void)
{
int err;
if (drm_firmware_drivers_only())
return -ENODEV;
if (IS_ENABLED(CONFIG_DRM_INGENIC_IPU)) {
err = platform_driver_register(ingenic_ipu_driver_ptr);
if (err)
return err;
}
err = platform_driver_register(&ingenic_drm_driver);
if (IS_ENABLED(CONFIG_DRM_INGENIC_IPU) && err)
platform_driver_unregister(ingenic_ipu_driver_ptr);
return err;
}
module_init(ingenic_drm_init);
static void ingenic_drm_exit(void)
{
platform_driver_unregister(&ingenic_drm_driver);
if (IS_ENABLED(CONFIG_DRM_INGENIC_IPU))
platform_driver_unregister(ingenic_ipu_driver_ptr);
}
module_exit(ingenic_drm_exit);
MODULE_AUTHOR("Paul Cercueil <[email protected]>");
MODULE_DESCRIPTION("DRM driver for the Ingenic SoCs\n");
MODULE_LICENSE("GPL");
| linux-master | drivers/gpu/drm/ingenic/ingenic-drm-drv.c |
// SPDX-License-Identifier: GPL-2.0
//
// Ingenic JZ47xx IPU driver
//
// Copyright (C) 2020, Paul Cercueil <[email protected]>
// Copyright (C) 2020, Daniel Silsby <[email protected]>
#include "ingenic-drm.h"
#include "ingenic-ipu.h"
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/gcd.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/time.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_plane.h>
#include <drm/drm_property.h>
#include <drm/drm_vblank.h>
struct ingenic_ipu;
struct soc_info {
const u32 *formats;
size_t num_formats;
bool has_bicubic;
bool manual_restart;
void (*set_coefs)(struct ingenic_ipu *ipu, unsigned int reg,
unsigned int sharpness, bool downscale,
unsigned int weight, unsigned int offset);
};
struct ingenic_ipu_private_state {
struct drm_private_state base;
unsigned int num_w, num_h, denom_w, denom_h;
};
struct ingenic_ipu {
struct drm_plane plane;
struct drm_device *drm;
struct device *dev, *master;
struct regmap *map;
struct clk *clk;
const struct soc_info *soc_info;
bool clk_enabled;
dma_addr_t addr_y, addr_u, addr_v;
struct drm_property *sharpness_prop;
unsigned int sharpness;
struct drm_private_obj private_obj;
};
/* Signed 15.16 fixed-point math (for bicubic scaling coefficients) */
#define I2F(i) ((s32)(i) * 65536)
#define F2I(f) ((f) / 65536)
#define FMUL(fa, fb) ((s32)(((s64)(fa) * (s64)(fb)) / 65536))
#define SHARPNESS_INCR (I2F(-1) / 8)
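/*
 * Illustrative example of the 15.16 fixed-point macros: I2F(1) is 65536, so
 * 1.5 is represented as 98304, and FMUL(98304, 98304) yields
 * (98304 * 98304) / 65536 = 147456, i.e. 2.25. SHARPNESS_INCR thus equals
 * -0.125; a sharpness setting of 8 maps to a cubic coefficient of -1.0.
 */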
static inline struct ingenic_ipu *plane_to_ingenic_ipu(struct drm_plane *plane)
{
return container_of(plane, struct ingenic_ipu, plane);
}
static inline struct ingenic_ipu_private_state *
to_ingenic_ipu_priv_state(struct drm_private_state *state)
{
return container_of(state, struct ingenic_ipu_private_state, base);
}
static struct ingenic_ipu_private_state *
ingenic_ipu_get_priv_state(struct ingenic_ipu *priv, struct drm_atomic_state *state)
{
struct drm_private_state *priv_state;
priv_state = drm_atomic_get_private_obj_state(state, &priv->private_obj);
if (IS_ERR(priv_state))
return ERR_CAST(priv_state);
return to_ingenic_ipu_priv_state(priv_state);
}
static struct ingenic_ipu_private_state *
ingenic_ipu_get_new_priv_state(struct ingenic_ipu *priv, struct drm_atomic_state *state)
{
struct drm_private_state *priv_state;
priv_state = drm_atomic_get_new_private_obj_state(state, &priv->private_obj);
if (!priv_state)
return NULL;
return to_ingenic_ipu_priv_state(priv_state);
}
/*
* Apply conventional cubic convolution kernel. Both parameters
* and return value are 15.16 signed fixed-point.
*
* @f_a: Sharpness factor, typically in range [-4.0, -0.25].
* A larger magnitude increases perceived sharpness, but going past
* -2.0 might cause ringing artifacts to outweigh any improvement.
* Nice values on a 320x240 LCD are between -0.75 and -2.0.
*
* @f_x: Absolute distance in pixels from 'pixel 0' sample position
* along horizontal (or vertical) source axis. Range is [0, +2.0].
*
* returns: Weight of this pixel within 4-pixel sample group. Range is
* [-2.0, +2.0]. For moderate (i.e. > -3.0) sharpness factors,
* range is within [-1.0, +1.0].
*/
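/*
 * Sanity check (illustrative): with f_a = I2F(-1), cubic_conv() returns
 * I2F(1) at f_x = 0 and 0 at both f_x = I2F(1) and f_x = I2F(2), as expected
 * from an interpolating kernel.
 */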
static inline s32 cubic_conv(s32 f_a, s32 f_x)
{
const s32 f_1 = I2F(1);
const s32 f_2 = I2F(2);
const s32 f_3 = I2F(3);
const s32 f_4 = I2F(4);
const s32 f_x2 = FMUL(f_x, f_x);
const s32 f_x3 = FMUL(f_x, f_x2);
if (f_x <= f_1)
return FMUL((f_a + f_2), f_x3) - FMUL((f_a + f_3), f_x2) + f_1;
else if (f_x <= f_2)
return FMUL(f_a, (f_x3 - 5 * f_x2 + 8 * f_x - f_4));
else
return 0;
}
/*
* On entry, "weight" is a coefficient suitable for bilinear mode,
* which is converted to a set of four suitable for bicubic mode.
*
* "weight 512" means all of pixel 0;
* "weight 256" means half of pixel 0 and half of pixel 1;
* "weight 0" means all of pixel 1;
*
* "offset" is increment to next source pixel sample location.
*/
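/*
 * Illustrative example: weight = 384 means 3/4 of pixel 0 blended with 1/4 of
 * pixel 1; offset = 1 advances the source sample position by one pixel once
 * this output pixel has been produced.
 */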
static void jz4760_set_coefs(struct ingenic_ipu *ipu, unsigned int reg,
unsigned int sharpness, bool downscale,
unsigned int weight, unsigned int offset)
{
u32 val;
s32 w0, w1, w2, w3; /* Pixel weights at X (or Y) offsets -1,0,1,2 */
weight = clamp_val(weight, 0, 512);
if (sharpness < 2) {
/*
* When sharpness setting is 0, emulate nearest-neighbor.
* When sharpness setting is 1, emulate bilinear.
*/
if (sharpness == 0)
weight = weight >= 256 ? 512 : 0;
w0 = 0;
w1 = weight;
w2 = 512 - weight;
w3 = 0;
} else {
const s32 f_a = SHARPNESS_INCR * sharpness;
const s32 f_h = I2F(1) / 2; /* Round up 0.5 */
/*
* Note that always rounding towards +infinity here is intended.
* The resulting coefficients match a round-to-nearest-int
* double floating-point implementation.
*/
weight = 512 - weight;
w0 = F2I(f_h + 512 * cubic_conv(f_a, I2F(512 + weight) / 512));
w1 = F2I(f_h + 512 * cubic_conv(f_a, I2F(0 + weight) / 512));
w2 = F2I(f_h + 512 * cubic_conv(f_a, I2F(512 - weight) / 512));
w3 = F2I(f_h + 512 * cubic_conv(f_a, I2F(1024 - weight) / 512));
w0 = clamp_val(w0, -1024, 1023);
w1 = clamp_val(w1, -1024, 1023);
w2 = clamp_val(w2, -1024, 1023);
w3 = clamp_val(w3, -1024, 1023);
}
val = ((w1 & JZ4760_IPU_RSZ_COEF_MASK) << JZ4760_IPU_RSZ_COEF31_LSB) |
((w0 & JZ4760_IPU_RSZ_COEF_MASK) << JZ4760_IPU_RSZ_COEF20_LSB);
regmap_write(ipu->map, reg, val);
val = ((w3 & JZ4760_IPU_RSZ_COEF_MASK) << JZ4760_IPU_RSZ_COEF31_LSB) |
((w2 & JZ4760_IPU_RSZ_COEF_MASK) << JZ4760_IPU_RSZ_COEF20_LSB) |
((offset & JZ4760_IPU_RSZ_OFFSET_MASK) << JZ4760_IPU_RSZ_OFFSET_LSB);
regmap_write(ipu->map, reg, val);
}
static void jz4725b_set_coefs(struct ingenic_ipu *ipu, unsigned int reg,
unsigned int sharpness, bool downscale,
unsigned int weight, unsigned int offset)
{
u32 val = JZ4725B_IPU_RSZ_LUT_OUT_EN;
unsigned int i;
weight = clamp_val(weight, 0, 512);
if (sharpness == 0)
weight = weight >= 256 ? 512 : 0;
val |= (weight & JZ4725B_IPU_RSZ_LUT_COEF_MASK) << JZ4725B_IPU_RSZ_LUT_COEF_LSB;
if (downscale || !!offset)
val |= JZ4725B_IPU_RSZ_LUT_IN_EN;
regmap_write(ipu->map, reg, val);
if (downscale) {
for (i = 1; i < offset; i++)
regmap_write(ipu->map, reg, JZ4725B_IPU_RSZ_LUT_IN_EN);
}
}
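/*
 * Worked example (illustrative) for a 2:3 downscale (num = 2, denom = 3): the
 * loop below emits (weight 384, offset 1) then (weight 128, offset 2),
 * advancing three source pixels for every two output pixels.
 */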
static void ingenic_ipu_set_downscale_coefs(struct ingenic_ipu *ipu,
unsigned int reg,
unsigned int num,
unsigned int denom)
{
unsigned int i, offset, weight, weight_num = denom;
for (i = 0; i < num; i++) {
weight_num = num + (weight_num - num) % (num * 2);
weight = 512 - 512 * (weight_num - num) / (num * 2);
weight_num += denom * 2;
offset = (weight_num - num) / (num * 2);
ipu->soc_info->set_coefs(ipu, reg, ipu->sharpness,
true, weight, offset);
}
}
static void ingenic_ipu_set_integer_upscale_coefs(struct ingenic_ipu *ipu,
unsigned int reg,
unsigned int num)
{
/*
* Force nearest-neighbor scaling and use simple math when upscaling
* by an integer ratio. It looks better, and fixes a few problem cases.
*/
unsigned int i;
for (i = 0; i < num; i++)
ipu->soc_info->set_coefs(ipu, reg, 0, false, 512, i == num - 1);
}
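/*
 * Worked example (illustrative) for a 3:2 upscale (num = 3, denom = 2): the
 * loop below emits (weight 512, offset 0), (weight 171, offset 1) and
 * (weight 342, offset 1), advancing two source pixels for every three output
 * pixels.
 */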
static void ingenic_ipu_set_upscale_coefs(struct ingenic_ipu *ipu,
unsigned int reg,
unsigned int num,
unsigned int denom)
{
unsigned int i, offset, weight, weight_num = 0;
for (i = 0; i < num; i++) {
weight = 512 - 512 * weight_num / num;
weight_num += denom;
offset = weight_num >= num;
if (offset)
weight_num -= num;
ipu->soc_info->set_coefs(ipu, reg, ipu->sharpness,
false, weight, offset);
}
}
static void ingenic_ipu_set_coefs(struct ingenic_ipu *ipu, unsigned int reg,
unsigned int num, unsigned int denom)
{
/* Begin programming the LUT */
regmap_write(ipu->map, reg, -1);
if (denom > num)
ingenic_ipu_set_downscale_coefs(ipu, reg, num, denom);
else if (denom == 1)
ingenic_ipu_set_integer_upscale_coefs(ipu, reg, num);
else
ingenic_ipu_set_upscale_coefs(ipu, reg, num, denom);
}
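/*
 * Illustrative example: scaling a 320-pixel source to 800 pixels gives
 * num = 800, denom = 320; gcd() is 160, so the ratio reduces to 5:2 and fits
 * the 31-entry LUT. Ratios that would still need more than 31 entries after
 * reduction are rejected.
 */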
static int reduce_fraction(unsigned int *num, unsigned int *denom)
{
unsigned long d = gcd(*num, *denom);
/* The scaling table has only 31 entries */
if (*num > 31 * d)
return -EINVAL;
*num /= d;
*denom /= d;
return 0;
}
static inline bool osd_changed(struct drm_plane_state *state,
struct drm_plane_state *oldstate)
{
return state->src_x != oldstate->src_x ||
state->src_y != oldstate->src_y ||
state->src_w != oldstate->src_w ||
state->src_h != oldstate->src_h ||
state->crtc_x != oldstate->crtc_x ||
state->crtc_y != oldstate->crtc_y ||
state->crtc_w != oldstate->crtc_w ||
state->crtc_h != oldstate->crtc_h;
}
static void ingenic_ipu_plane_atomic_update(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct ingenic_ipu *ipu = plane_to_ingenic_ipu(plane);
struct drm_plane_state *newstate = drm_atomic_get_new_plane_state(state, plane);
struct drm_plane_state *oldstate = drm_atomic_get_old_plane_state(state, plane);
const struct drm_format_info *finfo;
u32 ctrl, stride = 0, coef_index = 0, format = 0;
bool needs_modeset, upscaling_w, upscaling_h;
struct ingenic_ipu_private_state *ipu_state;
int err;
if (!newstate || !newstate->fb)
return;
ipu_state = ingenic_ipu_get_new_priv_state(ipu, state);
if (WARN_ON(!ipu_state))
return;
finfo = drm_format_info(newstate->fb->format->format);
if (!ipu->clk_enabled) {
err = clk_enable(ipu->clk);
if (err) {
dev_err(ipu->dev, "Unable to enable clock: %d\n", err);
return;
}
ipu->clk_enabled = true;
}
/* Reset all the registers if needed */
needs_modeset = drm_atomic_crtc_needs_modeset(newstate->crtc->state);
if (needs_modeset) {
regmap_set_bits(ipu->map, JZ_REG_IPU_CTRL, JZ_IPU_CTRL_RST);
/* Enable the chip */
regmap_set_bits(ipu->map, JZ_REG_IPU_CTRL,
JZ_IPU_CTRL_CHIP_EN | JZ_IPU_CTRL_LCDC_SEL);
}
if (ingenic_drm_map_noncoherent(ipu->master))
drm_fb_dma_sync_non_coherent(ipu->drm, oldstate, newstate);
/* New addresses will be committed in vblank handler... */
ipu->addr_y = drm_fb_dma_get_gem_addr(newstate->fb, newstate, 0);
if (finfo->num_planes > 1)
ipu->addr_u = drm_fb_dma_get_gem_addr(newstate->fb, newstate,
1);
if (finfo->num_planes > 2)
ipu->addr_v = drm_fb_dma_get_gem_addr(newstate->fb, newstate,
2);
if (!needs_modeset)
return;
/* Or right here if we're doing a full modeset. */
regmap_write(ipu->map, JZ_REG_IPU_Y_ADDR, ipu->addr_y);
regmap_write(ipu->map, JZ_REG_IPU_U_ADDR, ipu->addr_u);
regmap_write(ipu->map, JZ_REG_IPU_V_ADDR, ipu->addr_v);
if (finfo->num_planes == 1)
regmap_set_bits(ipu->map, JZ_REG_IPU_CTRL, JZ_IPU_CTRL_SPKG_SEL);
ingenic_drm_plane_config(ipu->master, plane, DRM_FORMAT_XRGB8888);
/* Set the input height/width/strides */
if (finfo->num_planes > 2)
stride = ((newstate->src_w >> 16) * finfo->cpp[2] / finfo->hsub)
<< JZ_IPU_UV_STRIDE_V_LSB;
if (finfo->num_planes > 1)
stride |= ((newstate->src_w >> 16) * finfo->cpp[1] / finfo->hsub)
<< JZ_IPU_UV_STRIDE_U_LSB;
regmap_write(ipu->map, JZ_REG_IPU_UV_STRIDE, stride);
stride = ((newstate->src_w >> 16) * finfo->cpp[0]) << JZ_IPU_Y_STRIDE_Y_LSB;
regmap_write(ipu->map, JZ_REG_IPU_Y_STRIDE, stride);
regmap_write(ipu->map, JZ_REG_IPU_IN_GS,
(stride << JZ_IPU_IN_GS_W_LSB) |
((newstate->src_h >> 16) << JZ_IPU_IN_GS_H_LSB));
switch (finfo->format) {
case DRM_FORMAT_XRGB1555:
format = JZ_IPU_D_FMT_IN_FMT_RGB555 |
JZ_IPU_D_FMT_RGB_OUT_OFT_RGB;
break;
case DRM_FORMAT_XBGR1555:
format = JZ_IPU_D_FMT_IN_FMT_RGB555 |
JZ_IPU_D_FMT_RGB_OUT_OFT_BGR;
break;
case DRM_FORMAT_RGB565:
format = JZ_IPU_D_FMT_IN_FMT_RGB565 |
JZ_IPU_D_FMT_RGB_OUT_OFT_RGB;
break;
case DRM_FORMAT_BGR565:
format = JZ_IPU_D_FMT_IN_FMT_RGB565 |
JZ_IPU_D_FMT_RGB_OUT_OFT_BGR;
break;
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_XYUV8888:
format = JZ_IPU_D_FMT_IN_FMT_RGB888 |
JZ_IPU_D_FMT_RGB_OUT_OFT_RGB;
break;
case DRM_FORMAT_XBGR8888:
format = JZ_IPU_D_FMT_IN_FMT_RGB888 |
JZ_IPU_D_FMT_RGB_OUT_OFT_BGR;
break;
case DRM_FORMAT_YUYV:
format = JZ_IPU_D_FMT_IN_FMT_YUV422 |
JZ_IPU_D_FMT_YUV_VY1UY0;
break;
case DRM_FORMAT_YVYU:
format = JZ_IPU_D_FMT_IN_FMT_YUV422 |
JZ_IPU_D_FMT_YUV_UY1VY0;
break;
case DRM_FORMAT_UYVY:
format = JZ_IPU_D_FMT_IN_FMT_YUV422 |
JZ_IPU_D_FMT_YUV_Y1VY0U;
break;
case DRM_FORMAT_VYUY:
format = JZ_IPU_D_FMT_IN_FMT_YUV422 |
JZ_IPU_D_FMT_YUV_Y1UY0V;
break;
case DRM_FORMAT_YUV411:
format = JZ_IPU_D_FMT_IN_FMT_YUV411;
break;
case DRM_FORMAT_YUV420:
format = JZ_IPU_D_FMT_IN_FMT_YUV420;
break;
case DRM_FORMAT_YUV422:
format = JZ_IPU_D_FMT_IN_FMT_YUV422;
break;
case DRM_FORMAT_YUV444:
format = JZ_IPU_D_FMT_IN_FMT_YUV444;
break;
default:
WARN_ONCE(1, "Unsupported format");
break;
}
/* Fix output to RGB888 */
format |= JZ_IPU_D_FMT_OUT_FMT_RGB888;
/* Set pixel format */
regmap_write(ipu->map, JZ_REG_IPU_D_FMT, format);
/* Set the output height/width/stride */
regmap_write(ipu->map, JZ_REG_IPU_OUT_GS,
((newstate->crtc_w * 4) << JZ_IPU_OUT_GS_W_LSB)
| newstate->crtc_h << JZ_IPU_OUT_GS_H_LSB);
regmap_write(ipu->map, JZ_REG_IPU_OUT_STRIDE, newstate->crtc_w * 4);
if (finfo->is_yuv) {
regmap_set_bits(ipu->map, JZ_REG_IPU_CTRL, JZ_IPU_CTRL_CSC_EN);
/*
* Offsets for Chroma/Luma.
* y = source Y - LUMA,
* u = source Cb - CHROMA,
* v = source Cr - CHROMA
*/
regmap_write(ipu->map, JZ_REG_IPU_CSC_OFFSET,
128 << JZ_IPU_CSC_OFFSET_CHROMA_LSB |
0 << JZ_IPU_CSC_OFFSET_LUMA_LSB);
/*
* YUV422 to RGB conversion table.
* R = C0 / 0x400 * y + C1 / 0x400 * v
* G = C0 / 0x400 * y - C2 / 0x400 * u - C3 / 0x400 * v
* B = C0 / 0x400 * y + C4 / 0x400 * u
*/
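		/*
		 * These fixed coefficients appear to match the BT.601
		 * limited-range conversion matrix (1.164, 1.596, 0.392, 0.813,
		 * 2.017) scaled by 0x400.
		 */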
regmap_write(ipu->map, JZ_REG_IPU_CSC_C0_COEF, 0x4a8);
regmap_write(ipu->map, JZ_REG_IPU_CSC_C1_COEF, 0x662);
regmap_write(ipu->map, JZ_REG_IPU_CSC_C2_COEF, 0x191);
regmap_write(ipu->map, JZ_REG_IPU_CSC_C3_COEF, 0x341);
regmap_write(ipu->map, JZ_REG_IPU_CSC_C4_COEF, 0x811);
}
ctrl = 0;
/*
* Must set ZOOM_SEL before programming bicubic LUTs.
* If the IPU supports bicubic, we enable it unconditionally, since it
* can do anything bilinear can and more.
*/
if (ipu->soc_info->has_bicubic)
ctrl |= JZ_IPU_CTRL_ZOOM_SEL;
upscaling_w = ipu_state->num_w > ipu_state->denom_w;
if (upscaling_w)
ctrl |= JZ_IPU_CTRL_HSCALE;
if (ipu_state->num_w != 1 || ipu_state->denom_w != 1) {
if (!ipu->soc_info->has_bicubic && !upscaling_w)
coef_index |= (ipu_state->denom_w - 1) << 16;
else
coef_index |= (ipu_state->num_w - 1) << 16;
ctrl |= JZ_IPU_CTRL_HRSZ_EN;
}
upscaling_h = ipu_state->num_h > ipu_state->denom_h;
if (upscaling_h)
ctrl |= JZ_IPU_CTRL_VSCALE;
if (ipu_state->num_h != 1 || ipu_state->denom_h != 1) {
if (!ipu->soc_info->has_bicubic && !upscaling_h)
coef_index |= ipu_state->denom_h - 1;
else
coef_index |= ipu_state->num_h - 1;
ctrl |= JZ_IPU_CTRL_VRSZ_EN;
}
regmap_update_bits(ipu->map, JZ_REG_IPU_CTRL, JZ_IPU_CTRL_ZOOM_SEL |
JZ_IPU_CTRL_HRSZ_EN | JZ_IPU_CTRL_VRSZ_EN |
JZ_IPU_CTRL_HSCALE | JZ_IPU_CTRL_VSCALE, ctrl);
/* Set the LUT index register */
regmap_write(ipu->map, JZ_REG_IPU_RSZ_COEF_INDEX, coef_index);
if (ipu_state->num_w != 1 || ipu_state->denom_w != 1)
ingenic_ipu_set_coefs(ipu, JZ_REG_IPU_HRSZ_COEF_LUT,
ipu_state->num_w, ipu_state->denom_w);
if (ipu_state->num_h != 1 || ipu_state->denom_h != 1)
ingenic_ipu_set_coefs(ipu, JZ_REG_IPU_VRSZ_COEF_LUT,
ipu_state->num_h, ipu_state->denom_h);
/* Clear STATUS register */
regmap_write(ipu->map, JZ_REG_IPU_STATUS, 0);
/* Start IPU */
regmap_set_bits(ipu->map, JZ_REG_IPU_CTRL,
JZ_IPU_CTRL_RUN | JZ_IPU_CTRL_FM_IRQ_EN);
dev_dbg(ipu->dev, "Scaling %ux%u to %ux%u (%u:%u horiz, %u:%u vert)\n",
newstate->src_w >> 16, newstate->src_h >> 16,
newstate->crtc_w, newstate->crtc_h,
ipu_state->num_w, ipu_state->denom_w,
ipu_state->num_h, ipu_state->denom_h);
}
static int ingenic_ipu_plane_atomic_check(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state,
plane);
struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
plane);
unsigned int num_w, denom_w, num_h, denom_h, xres, yres, max_w, max_h;
struct ingenic_ipu *ipu = plane_to_ingenic_ipu(plane);
struct drm_crtc *crtc = new_plane_state->crtc ?: old_plane_state->crtc;
struct drm_crtc_state *crtc_state;
struct ingenic_ipu_private_state *ipu_state;
if (!crtc)
return 0;
crtc_state = drm_atomic_get_existing_crtc_state(state, crtc);
if (WARN_ON(!crtc_state))
return -EINVAL;
ipu_state = ingenic_ipu_get_priv_state(ipu, state);
if (IS_ERR(ipu_state))
return PTR_ERR(ipu_state);
/* Request a full modeset if we are enabling or disabling the IPU. */
if (!old_plane_state->crtc ^ !new_plane_state->crtc)
crtc_state->mode_changed = true;
if (!new_plane_state->crtc ||
!crtc_state->mode.hdisplay || !crtc_state->mode.vdisplay)
goto out_check_damage;
/* Plane must be fully visible */
if (new_plane_state->crtc_x < 0 || new_plane_state->crtc_y < 0 ||
new_plane_state->crtc_x + new_plane_state->crtc_w > crtc_state->mode.hdisplay ||
new_plane_state->crtc_y + new_plane_state->crtc_h > crtc_state->mode.vdisplay)
return -EINVAL;
/* Minimum size is 4x4 */
if ((new_plane_state->src_w >> 16) < 4 || (new_plane_state->src_h >> 16) < 4)
return -EINVAL;
/* Input and output lines must have an even number of pixels. */
if (((new_plane_state->src_w >> 16) & 1) || (new_plane_state->crtc_w & 1))
return -EINVAL;
if (!osd_changed(new_plane_state, old_plane_state))
goto out_check_damage;
crtc_state->mode_changed = true;
xres = new_plane_state->src_w >> 16;
yres = new_plane_state->src_h >> 16;
/*
	 * Increase the scaled image's theoretical width/height until we find a
	 * configuration that has valid scaling coefficients, up to 102% of the
	 * screen's resolution. This makes sure that we can scale from almost
	 * every resolution possible at the cost of a very small distortion.
* The CRTC_W / CRTC_H are not modified.
*/
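	/*
	 * Illustrative example: scaling a 320-pixel wide source into a
	 * 530-pixel window finds no ratio that fits the LUT until the
	 * theoretical width reaches 540, which reduces to 27:16; the CRTC
	 * window itself stays 530 pixels wide.
	 */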
max_w = crtc_state->mode.hdisplay * 102 / 100;
max_h = crtc_state->mode.vdisplay * 102 / 100;
for (denom_w = xres, num_w = new_plane_state->crtc_w; num_w <= max_w; num_w++)
if (!reduce_fraction(&num_w, &denom_w))
break;
if (num_w > max_w)
return -EINVAL;
for (denom_h = yres, num_h = new_plane_state->crtc_h; num_h <= max_h; num_h++)
if (!reduce_fraction(&num_h, &denom_h))
break;
if (num_h > max_h)
return -EINVAL;
ipu_state->num_w = num_w;
ipu_state->num_h = num_h;
ipu_state->denom_w = denom_w;
ipu_state->denom_h = denom_h;
out_check_damage:
if (ingenic_drm_map_noncoherent(ipu->master))
drm_atomic_helper_check_plane_damage(state, new_plane_state);
return 0;
}
static void ingenic_ipu_plane_atomic_disable(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct ingenic_ipu *ipu = plane_to_ingenic_ipu(plane);
regmap_set_bits(ipu->map, JZ_REG_IPU_CTRL, JZ_IPU_CTRL_STOP);
regmap_clear_bits(ipu->map, JZ_REG_IPU_CTRL, JZ_IPU_CTRL_CHIP_EN);
ingenic_drm_plane_disable(ipu->master, plane);
if (ipu->clk_enabled) {
clk_disable(ipu->clk);
ipu->clk_enabled = false;
}
}
static const struct drm_plane_helper_funcs ingenic_ipu_plane_helper_funcs = {
.atomic_update = ingenic_ipu_plane_atomic_update,
.atomic_check = ingenic_ipu_plane_atomic_check,
.atomic_disable = ingenic_ipu_plane_atomic_disable,
};
static int
ingenic_ipu_plane_atomic_get_property(struct drm_plane *plane,
const struct drm_plane_state *state,
struct drm_property *property, u64 *val)
{
struct ingenic_ipu *ipu = plane_to_ingenic_ipu(plane);
if (property != ipu->sharpness_prop)
return -EINVAL;
*val = ipu->sharpness;
return 0;
}
static int
ingenic_ipu_plane_atomic_set_property(struct drm_plane *plane,
struct drm_plane_state *state,
struct drm_property *property, u64 val)
{
struct ingenic_ipu *ipu = plane_to_ingenic_ipu(plane);
struct drm_crtc_state *crtc_state;
bool mode_changed;
if (property != ipu->sharpness_prop)
return -EINVAL;
mode_changed = val != ipu->sharpness;
ipu->sharpness = val;
if (state->crtc) {
crtc_state = drm_atomic_get_existing_crtc_state(state->state, state->crtc);
if (WARN_ON(!crtc_state))
return -EINVAL;
crtc_state->mode_changed |= mode_changed;
}
return 0;
}
static const struct drm_plane_funcs ingenic_ipu_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.reset = drm_atomic_helper_plane_reset,
.destroy = drm_plane_cleanup,
.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
.atomic_get_property = ingenic_ipu_plane_atomic_get_property,
.atomic_set_property = ingenic_ipu_plane_atomic_set_property,
};
static struct drm_private_state *
ingenic_ipu_duplicate_state(struct drm_private_obj *obj)
{
struct ingenic_ipu_private_state *state = to_ingenic_ipu_priv_state(obj->state);
state = kmemdup(state, sizeof(*state), GFP_KERNEL);
if (!state)
return NULL;
__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
return &state->base;
}
static void ingenic_ipu_destroy_state(struct drm_private_obj *obj,
struct drm_private_state *state)
{
struct ingenic_ipu_private_state *priv_state = to_ingenic_ipu_priv_state(state);
kfree(priv_state);
}
static const struct drm_private_state_funcs ingenic_ipu_private_state_funcs = {
.atomic_duplicate_state = ingenic_ipu_duplicate_state,
.atomic_destroy_state = ingenic_ipu_destroy_state,
};
static irqreturn_t ingenic_ipu_irq_handler(int irq, void *arg)
{
struct ingenic_ipu *ipu = arg;
struct drm_crtc *crtc = drm_crtc_from_index(ipu->drm, 0);
unsigned int dummy;
/* dummy read allows CPU to reconfigure IPU */
if (ipu->soc_info->manual_restart)
regmap_read(ipu->map, JZ_REG_IPU_STATUS, &dummy);
/* ACK interrupt */
regmap_write(ipu->map, JZ_REG_IPU_STATUS, 0);
/* Set previously cached addresses */
regmap_write(ipu->map, JZ_REG_IPU_Y_ADDR, ipu->addr_y);
regmap_write(ipu->map, JZ_REG_IPU_U_ADDR, ipu->addr_u);
regmap_write(ipu->map, JZ_REG_IPU_V_ADDR, ipu->addr_v);
/* Run IPU for the new frame */
if (ipu->soc_info->manual_restart)
regmap_set_bits(ipu->map, JZ_REG_IPU_CTRL, JZ_IPU_CTRL_RUN);
drm_crtc_handle_vblank(crtc);
return IRQ_HANDLED;
}
static const struct regmap_config ingenic_ipu_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
.max_register = JZ_REG_IPU_OUT_PHY_T_ADDR,
};
static int ingenic_ipu_bind(struct device *dev, struct device *master, void *d)
{
struct platform_device *pdev = to_platform_device(dev);
struct ingenic_ipu_private_state *private_state;
const struct soc_info *soc_info;
struct drm_device *drm = d;
struct drm_plane *plane;
struct ingenic_ipu *ipu;
void __iomem *base;
unsigned int sharpness_max;
int err, irq;
ipu = devm_kzalloc(dev, sizeof(*ipu), GFP_KERNEL);
if (!ipu)
return -ENOMEM;
soc_info = of_device_get_match_data(dev);
if (!soc_info) {
dev_err(dev, "Missing platform data\n");
return -EINVAL;
}
ipu->dev = dev;
ipu->drm = drm;
ipu->master = master;
ipu->soc_info = soc_info;
base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base)) {
dev_err(dev, "Failed to get memory resource\n");
return PTR_ERR(base);
}
ipu->map = devm_regmap_init_mmio(dev, base, &ingenic_ipu_regmap_config);
if (IS_ERR(ipu->map)) {
dev_err(dev, "Failed to create regmap\n");
return PTR_ERR(ipu->map);
}
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
ipu->clk = devm_clk_get(dev, "ipu");
if (IS_ERR(ipu->clk)) {
dev_err(dev, "Failed to get pixel clock\n");
return PTR_ERR(ipu->clk);
}
err = devm_request_irq(dev, irq, ingenic_ipu_irq_handler, 0,
dev_name(dev), ipu);
if (err) {
dev_err(dev, "Unable to request IRQ\n");
return err;
}
plane = &ipu->plane;
dev_set_drvdata(dev, plane);
drm_plane_helper_add(plane, &ingenic_ipu_plane_helper_funcs);
err = drm_universal_plane_init(drm, plane, 1, &ingenic_ipu_plane_funcs,
soc_info->formats, soc_info->num_formats,
NULL, DRM_PLANE_TYPE_OVERLAY, NULL);
if (err) {
dev_err(dev, "Failed to init plane: %i\n", err);
return err;
}
if (ingenic_drm_map_noncoherent(master))
drm_plane_enable_fb_damage_clips(plane);
/*
* Sharpness settings range is [0,32]
* 0 : nearest-neighbor
* 1 : bilinear
* 2 .. 32 : bicubic (translated to sharpness factor -0.25 .. -4.0)
*/
sharpness_max = soc_info->has_bicubic ? 32 : 1;
ipu->sharpness_prop = drm_property_create_range(drm, 0, "sharpness",
0, sharpness_max);
if (!ipu->sharpness_prop) {
dev_err(dev, "Unable to create sharpness property\n");
return -ENOMEM;
}
/* Default sharpness factor: -0.125 * 8 = -1.0 */
ipu->sharpness = soc_info->has_bicubic ? 8 : 1;
drm_object_attach_property(&plane->base, ipu->sharpness_prop,
ipu->sharpness);
err = clk_prepare(ipu->clk);
if (err) {
dev_err(dev, "Unable to prepare clock\n");
return err;
}
private_state = kzalloc(sizeof(*private_state), GFP_KERNEL);
if (!private_state) {
err = -ENOMEM;
goto err_clk_unprepare;
}
drm_atomic_private_obj_init(drm, &ipu->private_obj, &private_state->base,
&ingenic_ipu_private_state_funcs);
return 0;
err_clk_unprepare:
clk_unprepare(ipu->clk);
return err;
}
static void ingenic_ipu_unbind(struct device *dev,
struct device *master, void *d)
{
struct ingenic_ipu *ipu = dev_get_drvdata(dev);
drm_atomic_private_obj_fini(&ipu->private_obj);
clk_unprepare(ipu->clk);
}
static const struct component_ops ingenic_ipu_ops = {
.bind = ingenic_ipu_bind,
.unbind = ingenic_ipu_unbind,
};
static int ingenic_ipu_probe(struct platform_device *pdev)
{
return component_add(&pdev->dev, &ingenic_ipu_ops);
}
static int ingenic_ipu_remove(struct platform_device *pdev)
{
component_del(&pdev->dev, &ingenic_ipu_ops);
return 0;
}
static const u32 jz4725b_ipu_formats[] = {
/*
* While officially supported, packed YUV 4:2:2 formats can cause
* random hardware crashes on JZ4725B under certain circumstances.
* It seems to happen with some specific resize ratios.
* Until a proper workaround or fix is found, disable these formats.
DRM_FORMAT_YUYV,
DRM_FORMAT_YVYU,
DRM_FORMAT_UYVY,
DRM_FORMAT_VYUY,
*/
DRM_FORMAT_YUV411,
DRM_FORMAT_YUV420,
DRM_FORMAT_YUV422,
DRM_FORMAT_YUV444,
};
static const struct soc_info jz4725b_soc_info = {
.formats = jz4725b_ipu_formats,
.num_formats = ARRAY_SIZE(jz4725b_ipu_formats),
.has_bicubic = false,
.manual_restart = true,
.set_coefs = jz4725b_set_coefs,
};
static const u32 jz4760_ipu_formats[] = {
DRM_FORMAT_XRGB1555,
DRM_FORMAT_XBGR1555,
DRM_FORMAT_RGB565,
DRM_FORMAT_BGR565,
DRM_FORMAT_XRGB8888,
DRM_FORMAT_XBGR8888,
DRM_FORMAT_YUYV,
DRM_FORMAT_YVYU,
DRM_FORMAT_UYVY,
DRM_FORMAT_VYUY,
DRM_FORMAT_YUV411,
DRM_FORMAT_YUV420,
DRM_FORMAT_YUV422,
DRM_FORMAT_YUV444,
DRM_FORMAT_XYUV8888,
};
static const struct soc_info jz4760_soc_info = {
.formats = jz4760_ipu_formats,
.num_formats = ARRAY_SIZE(jz4760_ipu_formats),
.has_bicubic = true,
.manual_restart = false,
.set_coefs = jz4760_set_coefs,
};
static const struct of_device_id ingenic_ipu_of_match[] = {
{ .compatible = "ingenic,jz4725b-ipu", .data = &jz4725b_soc_info },
{ .compatible = "ingenic,jz4760-ipu", .data = &jz4760_soc_info },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, ingenic_ipu_of_match);
static struct platform_driver ingenic_ipu_driver = {
.driver = {
.name = "ingenic-ipu",
.of_match_table = ingenic_ipu_of_match,
},
.probe = ingenic_ipu_probe,
.remove = ingenic_ipu_remove,
};
struct platform_driver *ingenic_ipu_driver_ptr = &ingenic_ipu_driver;
| linux-master | drivers/gpu/drm/ingenic/ingenic-ipu.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2011-2013 Freescale Semiconductor, Inc.
* Copyright (C) 2019, 2020 Paul Boddie <[email protected]>
*
* Derived from dw_hdmi-imx.c with i.MX portions removed.
*/
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <drm/bridge/dw_hdmi.h>
#include <drm/drm_of.h>
#include <drm/drm_print.h>
static const struct dw_hdmi_mpll_config ingenic_mpll_cfg[] = {
{ 45250000, { { 0x01e0, 0x0000 }, { 0x21e1, 0x0000 }, { 0x41e2, 0x0000 } } },
{ 92500000, { { 0x0140, 0x0005 }, { 0x2141, 0x0005 }, { 0x4142, 0x0005 } } },
{ 148500000, { { 0x00a0, 0x000a }, { 0x20a1, 0x000a }, { 0x40a2, 0x000a } } },
{ 216000000, { { 0x00a0, 0x000a }, { 0x2001, 0x000f }, { 0x4002, 0x000f } } },
{ ~0UL, { { 0x0000, 0x0000 }, { 0x0000, 0x0000 }, { 0x0000, 0x0000 } } }
};
static const struct dw_hdmi_curr_ctrl ingenic_cur_ctr[] = {
/*pixelclk bpp8 bpp10 bpp12 */
{ 54000000, { 0x091c, 0x091c, 0x06dc } },
{ 58400000, { 0x091c, 0x06dc, 0x06dc } },
{ 72000000, { 0x06dc, 0x06dc, 0x091c } },
{ 74250000, { 0x06dc, 0x0b5c, 0x091c } },
{ 118800000, { 0x091c, 0x091c, 0x06dc } },
{ 216000000, { 0x06dc, 0x0b5c, 0x091c } },
{ ~0UL, { 0x0000, 0x0000, 0x0000 } },
};
/*
* Resistance term 133Ohm Cfg
* PREEMP config 0.00
* TX/CK level 10
*/
static const struct dw_hdmi_phy_config ingenic_phy_config[] = {
/*pixelclk symbol term vlev */
{ 216000000, 0x800d, 0x0005, 0x01ad},
{ ~0UL, 0x0000, 0x0000, 0x0000}
};
static enum drm_mode_status
ingenic_dw_hdmi_mode_valid(struct dw_hdmi *hdmi, void *data,
const struct drm_display_info *info,
const struct drm_display_mode *mode)
{
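	/* mode->clock is expressed in kHz: accept pixel clocks between 13.5 MHz and 216 MHz. */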
if (mode->clock < 13500)
return MODE_CLOCK_LOW;
/* FIXME: Hardware is capable of 270MHz, but setup data is missing. */
if (mode->clock > 216000)
return MODE_CLOCK_HIGH;
return MODE_OK;
}
static struct dw_hdmi_plat_data ingenic_dw_hdmi_plat_data = {
.mpll_cfg = ingenic_mpll_cfg,
.cur_ctr = ingenic_cur_ctr,
.phy_config = ingenic_phy_config,
.mode_valid = ingenic_dw_hdmi_mode_valid,
.output_port = 1,
};
static const struct of_device_id ingenic_dw_hdmi_dt_ids[] = {
{ .compatible = "ingenic,jz4780-dw-hdmi" },
{ /* Sentinel */ },
};
MODULE_DEVICE_TABLE(of, ingenic_dw_hdmi_dt_ids);
static void ingenic_dw_hdmi_cleanup(void *data)
{
struct dw_hdmi *hdmi = (struct dw_hdmi *)data;
dw_hdmi_remove(hdmi);
}
static int ingenic_dw_hdmi_probe(struct platform_device *pdev)
{
struct dw_hdmi *hdmi;
hdmi = dw_hdmi_probe(pdev, &ingenic_dw_hdmi_plat_data);
if (IS_ERR(hdmi))
return PTR_ERR(hdmi);
return devm_add_action_or_reset(&pdev->dev, ingenic_dw_hdmi_cleanup, hdmi);
}
static struct platform_driver ingenic_dw_hdmi_driver = {
.probe = ingenic_dw_hdmi_probe,
.driver = {
.name = "dw-hdmi-ingenic",
.of_match_table = ingenic_dw_hdmi_dt_ids,
},
};
module_platform_driver(ingenic_dw_hdmi_driver);
MODULE_DESCRIPTION("JZ4780 Specific DW-HDMI Driver Extension");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:dw-hdmi-ingenic");
| linux-master | drivers/gpu/drm/ingenic/ingenic-dw-hdmi.c |
// SPDX-License-Identifier: GPL-2.0
/*
* ZynqMP Display Controller Driver
*
* Copyright (C) 2017 - 2020 Xilinx, Inc.
*
* Authors:
* - Hyun Woo Kwon <[email protected]>
* - Laurent Pinchart <[email protected]>
*/
#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_plane.h>
#include <linux/clk.h>
#include <linux/dma/xilinx_dpdma.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include "zynqmp_disp.h"
#include "zynqmp_disp_regs.h"
#include "zynqmp_dp.h"
#include "zynqmp_dpsub.h"
/*
* Overview
* --------
*
* The display controller part of ZynqMP DP subsystem, made of the Audio/Video
* Buffer Manager, the Video Rendering Pipeline (blender) and the Audio Mixer.
*
* +------------------------------------------------------------+
* +--------+ | +----------------+ +-----------+ |
* | DPDMA | --->| | --> | Video | Video +-------------+ |
* | 4x vid | | | | | Rendering | -+--> | | | +------+
* | 2x aud | | | Audio/Video | --> | Pipeline | | | DisplayPort |---> | PHY0 |
* +--------+ | | Buffer Manager | +-----------+ | | Source | | +------+
* | | and STC | +-----------+ | | Controller | | +------+
* Live Video --->| | --> | Audio | Audio | |---> | PHY1 |
* | | | | Mixer | --+-> | | | +------+
* Live Audio --->| | --> | | || +-------------+ |
* | +----------------+ +-----------+ || |
* +---------------------------------------||-------------------+
* vv
* Blended Video and
* Mixed Audio to PL
*
* Only non-live input from the DPDMA and output to the DisplayPort Source
* Controller are currently supported. Interface with the programmable logic
* for live streams is not implemented.
*
* The display controller code creates planes for the DPDMA video and graphics
* layers, and a CRTC for the Video Rendering Pipeline.
*/
#define ZYNQMP_DISP_AV_BUF_NUM_VID_GFX_BUFFERS 4
#define ZYNQMP_DISP_AV_BUF_NUM_BUFFERS 6
#define ZYNQMP_DISP_MAX_NUM_SUB_PLANES 3
/**
* struct zynqmp_disp_format - Display subsystem format information
* @drm_fmt: DRM format (4CC)
* @buf_fmt: AV buffer format
* @swap: Flag to swap R & B for RGB formats, and U & V for YUV formats
* @sf: Scaling factors for color components
*/
struct zynqmp_disp_format {
u32 drm_fmt;
u32 buf_fmt;
bool swap;
const u32 *sf;
};
/**
* struct zynqmp_disp_layer_dma - DMA channel for one data plane of a layer
* @chan: DMA channel
* @xt: Interleaved DMA descriptor template
* @sgl: Data chunk for dma_interleaved_template
*/
struct zynqmp_disp_layer_dma {
struct dma_chan *chan;
struct dma_interleaved_template xt;
struct data_chunk sgl;
};
/**
* struct zynqmp_disp_layer_info - Static layer information
* @formats: Array of supported formats
* @num_formats: Number of formats in @formats array
* @num_channels: Number of DMA channels
*/
struct zynqmp_disp_layer_info {
const struct zynqmp_disp_format *formats;
unsigned int num_formats;
unsigned int num_channels;
};
/**
* struct zynqmp_disp_layer - Display layer
* @id: Layer ID
* @disp: Back pointer to struct zynqmp_disp
* @info: Static layer information
* @dmas: DMA channels
* @disp_fmt: Current format information
* @drm_fmt: Current DRM format information
* @mode: Current operation mode
*/
struct zynqmp_disp_layer {
enum zynqmp_dpsub_layer_id id;
struct zynqmp_disp *disp;
const struct zynqmp_disp_layer_info *info;
struct zynqmp_disp_layer_dma dmas[ZYNQMP_DISP_MAX_NUM_SUB_PLANES];
const struct zynqmp_disp_format *disp_fmt;
const struct drm_format_info *drm_fmt;
enum zynqmp_dpsub_layer_mode mode;
};
/**
* struct zynqmp_disp - Display controller
* @dev: Device structure
* @dpsub: Display subsystem
* @blend.base: Register I/O base address for the blender
* @avbuf.base: Register I/O base address for the audio/video buffer manager
* @audio.base: Registers I/O base address for the audio mixer
* @layers: Layers (planes)
*/
struct zynqmp_disp {
struct device *dev;
struct zynqmp_dpsub *dpsub;
struct {
void __iomem *base;
} blend;
struct {
void __iomem *base;
} avbuf;
struct {
void __iomem *base;
} audio;
struct zynqmp_disp_layer layers[ZYNQMP_DPSUB_NUM_LAYERS];
};
/* -----------------------------------------------------------------------------
* Audio/Video Buffer Manager
*/
static const u32 scaling_factors_444[] = {
ZYNQMP_DISP_AV_BUF_4BIT_SF,
ZYNQMP_DISP_AV_BUF_4BIT_SF,
ZYNQMP_DISP_AV_BUF_4BIT_SF,
};
static const u32 scaling_factors_555[] = {
ZYNQMP_DISP_AV_BUF_5BIT_SF,
ZYNQMP_DISP_AV_BUF_5BIT_SF,
ZYNQMP_DISP_AV_BUF_5BIT_SF,
};
static const u32 scaling_factors_565[] = {
ZYNQMP_DISP_AV_BUF_5BIT_SF,
ZYNQMP_DISP_AV_BUF_6BIT_SF,
ZYNQMP_DISP_AV_BUF_5BIT_SF,
};
static const u32 scaling_factors_888[] = {
ZYNQMP_DISP_AV_BUF_8BIT_SF,
ZYNQMP_DISP_AV_BUF_8BIT_SF,
ZYNQMP_DISP_AV_BUF_8BIT_SF,
};
static const u32 scaling_factors_101010[] = {
ZYNQMP_DISP_AV_BUF_10BIT_SF,
ZYNQMP_DISP_AV_BUF_10BIT_SF,
ZYNQMP_DISP_AV_BUF_10BIT_SF,
};
/* List of video layer formats */
static const struct zynqmp_disp_format avbuf_vid_fmts[] = {
{
.drm_fmt = DRM_FORMAT_VYUY,
.buf_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_VID_VYUY,
.swap = true,
.sf = scaling_factors_888,
}, {
.drm_fmt = DRM_FORMAT_UYVY,
.buf_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_VID_VYUY,
.swap = false,
.sf = scaling_factors_888,
}, {
.drm_fmt = DRM_FORMAT_YUYV,
.buf_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YUYV,
.swap = false,
.sf = scaling_factors_888,
}, {
.drm_fmt = DRM_FORMAT_YVYU,
.buf_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YUYV,
.swap = true,
.sf = scaling_factors_888,
}, {
.drm_fmt = DRM_FORMAT_YUV422,
.buf_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YV16,
.swap = false,
.sf = scaling_factors_888,
}, {
.drm_fmt = DRM_FORMAT_YVU422,
.buf_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YV16,
.swap = true,
.sf = scaling_factors_888,
}, {
.drm_fmt = DRM_FORMAT_YUV444,
.buf_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YV24,
.swap = false,
.sf = scaling_factors_888,
}, {
.drm_fmt = DRM_FORMAT_YVU444,
.buf_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YV24,
.swap = true,
.sf = scaling_factors_888,
}, {
.drm_fmt = DRM_FORMAT_NV16,
.buf_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YV16CI,
.swap = false,
.sf = scaling_factors_888,
}, {
.drm_fmt = DRM_FORMAT_NV61,
.buf_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YV16CI,
.swap = true,
.sf = scaling_factors_888,
}, {
.drm_fmt = DRM_FORMAT_BGR888,
.buf_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_VID_RGB888,
.swap = false,
.sf = scaling_factors_888,
}, {
.drm_fmt = DRM_FORMAT_RGB888,
.buf_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_VID_RGB888,
.swap = true,
.sf = scaling_factors_888,
}, {
.drm_fmt = DRM_FORMAT_XBGR8888,
.buf_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_VID_RGBA8880,
.swap = false,
.sf = scaling_factors_888,
}, {
.drm_fmt = DRM_FORMAT_XRGB8888,
.buf_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_VID_RGBA8880,
.swap = true,
.sf = scaling_factors_888,
}, {
.drm_fmt = DRM_FORMAT_XBGR2101010,
.buf_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_VID_RGB888_10,
.swap = false,
.sf = scaling_factors_101010,
}, {
.drm_fmt = DRM_FORMAT_XRGB2101010,
.buf_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_VID_RGB888_10,
.swap = true,
.sf = scaling_factors_101010,
}, {
.drm_fmt = DRM_FORMAT_YUV420,
.buf_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YV16_420,
.swap = false,
.sf = scaling_factors_888,
}, {
.drm_fmt = DRM_FORMAT_YVU420,
.buf_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YV16_420,
.swap = true,
.sf = scaling_factors_888,
}, {
.drm_fmt = DRM_FORMAT_NV12,
.buf_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YV16CI_420,
.swap = false,
.sf = scaling_factors_888,
}, {
.drm_fmt = DRM_FORMAT_NV21,
.buf_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YV16CI_420,
.swap = true,
.sf = scaling_factors_888,
},
};
/* List of graphics layer formats */
static const struct zynqmp_disp_format avbuf_gfx_fmts[] = {
{
.drm_fmt = DRM_FORMAT_ABGR8888,
.buf_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_GFX_RGBA8888,
.swap = false,
.sf = scaling_factors_888,
}, {
.drm_fmt = DRM_FORMAT_ARGB8888,
.buf_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_GFX_RGBA8888,
.swap = true,
.sf = scaling_factors_888,
}, {
.drm_fmt = DRM_FORMAT_RGBA8888,
.buf_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_GFX_ABGR8888,
.swap = false,
.sf = scaling_factors_888,
}, {
.drm_fmt = DRM_FORMAT_BGRA8888,
.buf_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_GFX_ABGR8888,
.swap = true,
.sf = scaling_factors_888,
}, {
.drm_fmt = DRM_FORMAT_BGR888,
.buf_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_GFX_RGB888,
.swap = false,
.sf = scaling_factors_888,
}, {
.drm_fmt = DRM_FORMAT_RGB888,
.buf_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_GFX_BGR888,
.swap = false,
.sf = scaling_factors_888,
}, {
.drm_fmt = DRM_FORMAT_RGBA5551,
.buf_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_GFX_RGBA5551,
.swap = false,
.sf = scaling_factors_555,
}, {
.drm_fmt = DRM_FORMAT_BGRA5551,
.buf_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_GFX_RGBA5551,
.swap = true,
.sf = scaling_factors_555,
}, {
.drm_fmt = DRM_FORMAT_RGBA4444,
.buf_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_GFX_RGBA4444,
.swap = false,
.sf = scaling_factors_444,
}, {
.drm_fmt = DRM_FORMAT_BGRA4444,
.buf_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_GFX_RGBA4444,
.swap = true,
.sf = scaling_factors_444,
}, {
.drm_fmt = DRM_FORMAT_RGB565,
.buf_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_GFX_RGB565,
.swap = false,
.sf = scaling_factors_565,
}, {
.drm_fmt = DRM_FORMAT_BGR565,
.buf_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_GFX_RGB565,
.swap = true,
.sf = scaling_factors_565,
},
};
static u32 zynqmp_disp_avbuf_read(struct zynqmp_disp *disp, int reg)
{
return readl(disp->avbuf.base + reg);
}
static void zynqmp_disp_avbuf_write(struct zynqmp_disp *disp, int reg, u32 val)
{
writel(val, disp->avbuf.base + reg);
}
static bool zynqmp_disp_layer_is_video(const struct zynqmp_disp_layer *layer)
{
return layer->id == ZYNQMP_DPSUB_LAYER_VID;
}
/**
* zynqmp_disp_avbuf_set_format - Set the input format for a layer
* @disp: Display controller
* @layer: The layer
* @fmt: The format information
*
* Set the video buffer manager format for @layer to @fmt.
*/
static void zynqmp_disp_avbuf_set_format(struct zynqmp_disp *disp,
struct zynqmp_disp_layer *layer,
const struct zynqmp_disp_format *fmt)
{
unsigned int i;
u32 val;
val = zynqmp_disp_avbuf_read(disp, ZYNQMP_DISP_AV_BUF_FMT);
val &= zynqmp_disp_layer_is_video(layer)
? ~ZYNQMP_DISP_AV_BUF_FMT_NL_VID_MASK
: ~ZYNQMP_DISP_AV_BUF_FMT_NL_GFX_MASK;
val |= fmt->buf_fmt;
zynqmp_disp_avbuf_write(disp, ZYNQMP_DISP_AV_BUF_FMT, val);
for (i = 0; i < ZYNQMP_DISP_AV_BUF_NUM_SF; i++) {
unsigned int reg = zynqmp_disp_layer_is_video(layer)
? ZYNQMP_DISP_AV_BUF_VID_COMP_SF(i)
: ZYNQMP_DISP_AV_BUF_GFX_COMP_SF(i);
zynqmp_disp_avbuf_write(disp, reg, fmt->sf[i]);
}
}
/**
* zynqmp_disp_avbuf_set_clocks_sources - Set the clocks sources
* @disp: Display controller
* @video_from_ps: True if the video clock originates from the PS
* @audio_from_ps: True if the audio clock originates from the PS
* @timings_internal: True if video timings are generated internally
*
* Set the source for the video and audio clocks, as well as for the video
* timings. Clocks can originate from the PS or PL, and timings can be
* generated internally or externally.
*/
static void
zynqmp_disp_avbuf_set_clocks_sources(struct zynqmp_disp *disp,
bool video_from_ps, bool audio_from_ps,
bool timings_internal)
{
u32 val = 0;
if (video_from_ps)
val |= ZYNQMP_DISP_AV_BUF_CLK_SRC_VID_FROM_PS;
if (audio_from_ps)
val |= ZYNQMP_DISP_AV_BUF_CLK_SRC_AUD_FROM_PS;
if (timings_internal)
val |= ZYNQMP_DISP_AV_BUF_CLK_SRC_VID_INTERNAL_TIMING;
zynqmp_disp_avbuf_write(disp, ZYNQMP_DISP_AV_BUF_CLK_SRC, val);
}
/**
* zynqmp_disp_avbuf_enable_channels - Enable buffer channels
* @disp: Display controller
*
* Enable all (video and audio) buffer channels.
*/
static void zynqmp_disp_avbuf_enable_channels(struct zynqmp_disp *disp)
{
unsigned int i;
u32 val;
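/*
 * Channels 0 to ZYNQMP_DISP_AV_BUF_NUM_VID_GFX_BUFFERS - 1 carry the video
 * and graphics planes and use the maximum video burst length; the remaining
 * channels up to ZYNQMP_DISP_AV_BUF_NUM_BUFFERS carry audio and use the
 * maximum audio burst length.
 */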
val = ZYNQMP_DISP_AV_BUF_CHBUF_EN |
(ZYNQMP_DISP_AV_BUF_CHBUF_BURST_LEN_MAX <<
ZYNQMP_DISP_AV_BUF_CHBUF_BURST_LEN_SHIFT);
for (i = 0; i < ZYNQMP_DISP_AV_BUF_NUM_VID_GFX_BUFFERS; i++)
zynqmp_disp_avbuf_write(disp, ZYNQMP_DISP_AV_BUF_CHBUF(i),
val);
val = ZYNQMP_DISP_AV_BUF_CHBUF_EN |
(ZYNQMP_DISP_AV_BUF_CHBUF_BURST_LEN_AUD_MAX <<
ZYNQMP_DISP_AV_BUF_CHBUF_BURST_LEN_SHIFT);
for (; i < ZYNQMP_DISP_AV_BUF_NUM_BUFFERS; i++)
zynqmp_disp_avbuf_write(disp, ZYNQMP_DISP_AV_BUF_CHBUF(i),
val);
}
/**
* zynqmp_disp_avbuf_disable_channels - Disable buffer channels
* @disp: Display controller
*
* Disable all (video and audio) buffer channels.
*/
static void zynqmp_disp_avbuf_disable_channels(struct zynqmp_disp *disp)
{
unsigned int i;
for (i = 0; i < ZYNQMP_DISP_AV_BUF_NUM_BUFFERS; i++)
zynqmp_disp_avbuf_write(disp, ZYNQMP_DISP_AV_BUF_CHBUF(i),
ZYNQMP_DISP_AV_BUF_CHBUF_FLUSH);
}
/**
* zynqmp_disp_avbuf_enable_audio - Enable audio
* @disp: Display controller
*
* Enable all audio buffers with a non-live (memory) source.
*/
static void zynqmp_disp_avbuf_enable_audio(struct zynqmp_disp *disp)
{
u32 val;
val = zynqmp_disp_avbuf_read(disp, ZYNQMP_DISP_AV_BUF_OUTPUT);
val &= ~ZYNQMP_DISP_AV_BUF_OUTPUT_AUD1_MASK;
val |= ZYNQMP_DISP_AV_BUF_OUTPUT_AUD1_MEM;
val |= ZYNQMP_DISP_AV_BUF_OUTPUT_AUD2_EN;
zynqmp_disp_avbuf_write(disp, ZYNQMP_DISP_AV_BUF_OUTPUT, val);
}
/**
* zynqmp_disp_avbuf_disable_audio - Disable audio
* @disp: Display controller
*
* Disable all audio buffers.
*/
static void zynqmp_disp_avbuf_disable_audio(struct zynqmp_disp *disp)
{
u32 val;
val = zynqmp_disp_avbuf_read(disp, ZYNQMP_DISP_AV_BUF_OUTPUT);
val &= ~ZYNQMP_DISP_AV_BUF_OUTPUT_AUD1_MASK;
val |= ZYNQMP_DISP_AV_BUF_OUTPUT_AUD1_DISABLE;
val &= ~ZYNQMP_DISP_AV_BUF_OUTPUT_AUD2_EN;
zynqmp_disp_avbuf_write(disp, ZYNQMP_DISP_AV_BUF_OUTPUT, val);
}
/**
* zynqmp_disp_avbuf_enable_video - Enable a video layer
* @disp: Display controller
* @layer: The layer
*
* Enable the video/graphics buffer for @layer.
*/
static void zynqmp_disp_avbuf_enable_video(struct zynqmp_disp *disp,
struct zynqmp_disp_layer *layer)
{
u32 val;
val = zynqmp_disp_avbuf_read(disp, ZYNQMP_DISP_AV_BUF_OUTPUT);
if (zynqmp_disp_layer_is_video(layer)) {
val &= ~ZYNQMP_DISP_AV_BUF_OUTPUT_VID1_MASK;
if (layer->mode == ZYNQMP_DPSUB_LAYER_NONLIVE)
val |= ZYNQMP_DISP_AV_BUF_OUTPUT_VID1_MEM;
else
val |= ZYNQMP_DISP_AV_BUF_OUTPUT_VID1_LIVE;
} else {
val &= ~ZYNQMP_DISP_AV_BUF_OUTPUT_VID2_MASK;
if (layer->mode == ZYNQMP_DPSUB_LAYER_NONLIVE)
val |= ZYNQMP_DISP_AV_BUF_OUTPUT_VID2_MEM;
else
val |= ZYNQMP_DISP_AV_BUF_OUTPUT_VID2_LIVE;
}
zynqmp_disp_avbuf_write(disp, ZYNQMP_DISP_AV_BUF_OUTPUT, val);
}
/**
* zynqmp_disp_avbuf_disable_video - Disable a video layer
* @disp: Display controller
* @layer: The layer
*
* Disable the video/graphics buffer for @layer.
*/
static void zynqmp_disp_avbuf_disable_video(struct zynqmp_disp *disp,
struct zynqmp_disp_layer *layer)
{
u32 val;
val = zynqmp_disp_avbuf_read(disp, ZYNQMP_DISP_AV_BUF_OUTPUT);
if (zynqmp_disp_layer_is_video(layer)) {
val &= ~ZYNQMP_DISP_AV_BUF_OUTPUT_VID1_MASK;
val |= ZYNQMP_DISP_AV_BUF_OUTPUT_VID1_NONE;
} else {
val &= ~ZYNQMP_DISP_AV_BUF_OUTPUT_VID2_MASK;
val |= ZYNQMP_DISP_AV_BUF_OUTPUT_VID2_DISABLE;
}
zynqmp_disp_avbuf_write(disp, ZYNQMP_DISP_AV_BUF_OUTPUT, val);
}
/**
* zynqmp_disp_avbuf_enable - Enable the video pipe
* @disp: Display controller
*
* De-assert the video pipe reset.
*/
static void zynqmp_disp_avbuf_enable(struct zynqmp_disp *disp)
{
zynqmp_disp_avbuf_write(disp, ZYNQMP_DISP_AV_BUF_SRST_REG, 0);
}
/**
* zynqmp_disp_avbuf_disable - Disable the video pipe
* @disp: Display controller
*
* Assert the video pipe reset.
*/
static void zynqmp_disp_avbuf_disable(struct zynqmp_disp *disp)
{
zynqmp_disp_avbuf_write(disp, ZYNQMP_DISP_AV_BUF_SRST_REG,
ZYNQMP_DISP_AV_BUF_SRST_REG_VID_RST);
}
/* -----------------------------------------------------------------------------
* Blender (Video Pipeline)
*/
static void zynqmp_disp_blend_write(struct zynqmp_disp *disp, int reg, u32 val)
{
writel(val, disp->blend.base + reg);
}
/*
* Colorspace conversion matrices.
*
* Hardcode RGB <-> YUV conversion to full-range SDTV for now.
*/
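/*
 * Note: judging by the identity matrix below, the CSC coefficients appear to
 * be fixed-point values with 0x1000 representing 1.0. This is inferred from
 * the tables, not from a documented register format.
 */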
static const u16 csc_zero_matrix[] = {
0x0, 0x0, 0x0,
0x0, 0x0, 0x0,
0x0, 0x0, 0x0
};
static const u16 csc_identity_matrix[] = {
0x1000, 0x0, 0x0,
0x0, 0x1000, 0x0,
0x0, 0x0, 0x1000
};
static const u32 csc_zero_offsets[] = {
0, 0, 0
};
static const u16 csc_rgb_to_sdtv_matrix[] = {
0x4c9, 0x864, 0x1d3,
0x7d4d, 0x7ab3, 0x800,
0x800, 0x794d, 0x7eb3
};
static const u32 csc_rgb_to_sdtv_offsets[] = {
0x0, 0x8000000, 0x8000000
};
static const u16 csc_sdtv_to_rgb_matrix[] = {
0x1000, 0x166f, 0x0,
0x1000, 0x7483, 0x7a7f,
0x1000, 0x0, 0x1c5a
};
static const u32 csc_sdtv_to_rgb_offsets[] = {
0x0, 0x1800, 0x1800
};
/**
* zynqmp_disp_blend_set_output_format - Set the output format of the blender
* @disp: Display controller
* @format: Output format
*
* Set the output format of the blender to @format.
*/
static void zynqmp_disp_blend_set_output_format(struct zynqmp_disp *disp,
enum zynqmp_dpsub_format format)
{
static const unsigned int blend_output_fmts[] = {
[ZYNQMP_DPSUB_FORMAT_RGB] = ZYNQMP_DISP_V_BLEND_OUTPUT_VID_FMT_RGB,
[ZYNQMP_DPSUB_FORMAT_YCRCB444] = ZYNQMP_DISP_V_BLEND_OUTPUT_VID_FMT_YCBCR444,
[ZYNQMP_DPSUB_FORMAT_YCRCB422] = ZYNQMP_DISP_V_BLEND_OUTPUT_VID_FMT_YCBCR422
| ZYNQMP_DISP_V_BLEND_OUTPUT_VID_FMT_EN_DOWNSAMPLE,
[ZYNQMP_DPSUB_FORMAT_YONLY] = ZYNQMP_DISP_V_BLEND_OUTPUT_VID_FMT_YONLY,
};
u32 fmt = blend_output_fmts[format];
const u16 *coeffs;
const u32 *offsets;
unsigned int i;
zynqmp_disp_blend_write(disp, ZYNQMP_DISP_V_BLEND_OUTPUT_VID_FMT, fmt);
if (fmt == ZYNQMP_DISP_V_BLEND_OUTPUT_VID_FMT_RGB) {
coeffs = csc_identity_matrix;
offsets = csc_zero_offsets;
} else {
coeffs = csc_rgb_to_sdtv_matrix;
offsets = csc_rgb_to_sdtv_offsets;
}
for (i = 0; i < ZYNQMP_DISP_V_BLEND_NUM_COEFF; i++)
zynqmp_disp_blend_write(disp,
ZYNQMP_DISP_V_BLEND_RGB2YCBCR_COEFF(i),
coeffs[i]);
for (i = 0; i < ZYNQMP_DISP_V_BLEND_NUM_OFFSET; i++)
zynqmp_disp_blend_write(disp,
ZYNQMP_DISP_V_BLEND_OUTCSC_OFFSET(i),
offsets[i]);
}
/**
* zynqmp_disp_blend_set_bg_color - Set the background color
* @disp: Display controller
* @rcr: Red/Cr color component
* @gy: Green/Y color component
* @bcb: Blue/Cb color component
*
* Set the background color to (@rcr, @gy, @bcb), corresponding to the R, G and
* B or Cr, Y and Cb components respectively depending on the selected output
* format.
*/
static void zynqmp_disp_blend_set_bg_color(struct zynqmp_disp *disp,
u32 rcr, u32 gy, u32 bcb)
{
zynqmp_disp_blend_write(disp, ZYNQMP_DISP_V_BLEND_BG_CLR_0, rcr);
zynqmp_disp_blend_write(disp, ZYNQMP_DISP_V_BLEND_BG_CLR_1, gy);
zynqmp_disp_blend_write(disp, ZYNQMP_DISP_V_BLEND_BG_CLR_2, bcb);
}
/**
* zynqmp_disp_blend_set_global_alpha - Configure global alpha blending
* @disp: Display controller
* @enable: True to enable global alpha blending
* @alpha: Global alpha value (ignored if @enable is false)
*/
void zynqmp_disp_blend_set_global_alpha(struct zynqmp_disp *disp,
bool enable, u32 alpha)
{
zynqmp_disp_blend_write(disp, ZYNQMP_DISP_V_BLEND_SET_GLOBAL_ALPHA,
ZYNQMP_DISP_V_BLEND_SET_GLOBAL_ALPHA_VALUE(alpha) |
(enable ? ZYNQMP_DISP_V_BLEND_SET_GLOBAL_ALPHA_EN : 0));
}
/**
* zynqmp_disp_blend_layer_set_csc - Configure colorspace conversion for layer
* @disp: Display controller
* @layer: The layer
* @coeffs: Colorspace conversion matrix
* @offsets: Colorspace conversion offsets
*
* Configure the input colorspace conversion matrix and offsets for the @layer.
* Columns of the matrix are automatically swapped based on the input format to
* handle RGB and YCrCb component permutations.
*/
static void zynqmp_disp_blend_layer_set_csc(struct zynqmp_disp *disp,
struct zynqmp_disp_layer *layer,
const u16 *coeffs,
const u32 *offsets)
{
unsigned int swap[3] = { 0, 1, 2 };
unsigned int reg;
unsigned int i;
if (layer->disp_fmt->swap) {
if (layer->drm_fmt->is_yuv) {
/* Swap U and V. */
swap[1] = 2;
swap[2] = 1;
} else {
/* Swap R and B. */
swap[0] = 2;
swap[2] = 0;
}
}
if (zynqmp_disp_layer_is_video(layer))
reg = ZYNQMP_DISP_V_BLEND_IN1CSC_COEFF(0);
else
reg = ZYNQMP_DISP_V_BLEND_IN2CSC_COEFF(0);
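/*
 * Write the 3x3 matrix one row per iteration. The coefficient registers are
 * 32-bit and consecutive, so each row advances reg by 3 * 4 bytes, and the
 * swap[] indices reorder the columns to match the component order of the
 * input format.
 */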
for (i = 0; i < ZYNQMP_DISP_V_BLEND_NUM_COEFF; i += 3, reg += 12) {
zynqmp_disp_blend_write(disp, reg + 0, coeffs[i + swap[0]]);
zynqmp_disp_blend_write(disp, reg + 4, coeffs[i + swap[1]]);
zynqmp_disp_blend_write(disp, reg + 8, coeffs[i + swap[2]]);
}
if (zynqmp_disp_layer_is_video(layer))
reg = ZYNQMP_DISP_V_BLEND_IN1CSC_OFFSET(0);
else
reg = ZYNQMP_DISP_V_BLEND_IN2CSC_OFFSET(0);
for (i = 0; i < ZYNQMP_DISP_V_BLEND_NUM_OFFSET; i++)
zynqmp_disp_blend_write(disp, reg + i * 4, offsets[i]);
}
/**
* zynqmp_disp_blend_layer_enable - Enable a layer
* @disp: Display controller
* @layer: The layer
*/
static void zynqmp_disp_blend_layer_enable(struct zynqmp_disp *disp,
struct zynqmp_disp_layer *layer)
{
const u16 *coeffs;
const u32 *offsets;
u32 val;
val = (layer->drm_fmt->is_yuv ?
0 : ZYNQMP_DISP_V_BLEND_LAYER_CONTROL_RGB) |
(layer->drm_fmt->hsub > 1 ?
ZYNQMP_DISP_V_BLEND_LAYER_CONTROL_EN_US : 0);
zynqmp_disp_blend_write(disp,
ZYNQMP_DISP_V_BLEND_LAYER_CONTROL(layer->id),
val);
if (layer->drm_fmt->is_yuv) {
coeffs = csc_sdtv_to_rgb_matrix;
offsets = csc_sdtv_to_rgb_offsets;
} else {
coeffs = csc_identity_matrix;
offsets = csc_zero_offsets;
}
zynqmp_disp_blend_layer_set_csc(disp, layer, coeffs, offsets);
}
/**
* zynqmp_disp_blend_layer_disable - Disable a layer
* @disp: Display controller
* @layer: The layer
*/
static void zynqmp_disp_blend_layer_disable(struct zynqmp_disp *disp,
struct zynqmp_disp_layer *layer)
{
zynqmp_disp_blend_write(disp,
ZYNQMP_DISP_V_BLEND_LAYER_CONTROL(layer->id),
0);
zynqmp_disp_blend_layer_set_csc(disp, layer, csc_zero_matrix,
csc_zero_offsets);
}
/* -----------------------------------------------------------------------------
* Audio Mixer
*/
static void zynqmp_disp_audio_write(struct zynqmp_disp *disp, int reg, u32 val)
{
writel(val, disp->audio.base + reg);
}
/**
* zynqmp_disp_audio_enable - Enable the audio mixer
* @disp: Display controller
*
* Enable the audio mixer by de-asserting the soft reset. The reset returns the
* audio state to its default values, so set the default mixer volume explicitly.
*/
static void zynqmp_disp_audio_enable(struct zynqmp_disp *disp)
{
/* Clear the audio soft reset register as it's a non-reset flop. */
zynqmp_disp_audio_write(disp, ZYNQMP_DISP_AUD_SOFT_RESET, 0);
zynqmp_disp_audio_write(disp, ZYNQMP_DISP_AUD_MIXER_VOLUME,
ZYNQMP_DISP_AUD_MIXER_VOLUME_NO_SCALE);
}
/**
* zynqmp_disp_audio_disable - Disable the audio mixer
* @disp: Display controller
*
* Disable the audio mixer by asserting its soft reset.
*/
static void zynqmp_disp_audio_disable(struct zynqmp_disp *disp)
{
zynqmp_disp_audio_write(disp, ZYNQMP_DISP_AUD_SOFT_RESET,
ZYNQMP_DISP_AUD_SOFT_RESET_AUD_SRST);
}
/* -----------------------------------------------------------------------------
* ZynqMP Display Layer & DRM Plane
*/
/**
* zynqmp_disp_layer_find_format - Find format information for a DRM format
* @layer: The layer
* @drm_fmt: DRM format to search
*
* Search display subsystem format information corresponding to the given DRM
* format @drm_fmt for the @layer, and return a pointer to the format
* descriptor.
*
* Return: A pointer to the format descriptor if found, NULL otherwise
*/
static const struct zynqmp_disp_format *
zynqmp_disp_layer_find_format(struct zynqmp_disp_layer *layer,
u32 drm_fmt)
{
unsigned int i;
for (i = 0; i < layer->info->num_formats; i++) {
if (layer->info->formats[i].drm_fmt == drm_fmt)
return &layer->info->formats[i];
}
return NULL;
}
/**
* zynqmp_disp_layer_drm_formats - Return the DRM formats supported by the layer
* @layer: The layer
* @num_formats: Pointer to the returned number of formats
*
* Return: A newly allocated u32 array that stores all the DRM formats
* supported by the layer, or NULL on allocation failure. The number of formats
* in the array is returned through the num_formats argument.
*/
u32 *zynqmp_disp_layer_drm_formats(struct zynqmp_disp_layer *layer,
unsigned int *num_formats)
{
unsigned int i;
u32 *formats;
formats = kcalloc(layer->info->num_formats, sizeof(*formats),
GFP_KERNEL);
if (!formats)
return NULL;
for (i = 0; i < layer->info->num_formats; ++i)
formats[i] = layer->info->formats[i].drm_fmt;
*num_formats = layer->info->num_formats;
return formats;
}
/**
* zynqmp_disp_layer_enable - Enable a layer
* @layer: The layer
* @mode: Operating mode of layer
*
* Enable the @layer in the audio/video buffer manager and the blender. DMA
* channels are started separately by zynqmp_disp_layer_update().
*/
void zynqmp_disp_layer_enable(struct zynqmp_disp_layer *layer,
enum zynqmp_dpsub_layer_mode mode)
{
layer->mode = mode;
zynqmp_disp_avbuf_enable_video(layer->disp, layer);
zynqmp_disp_blend_layer_enable(layer->disp, layer);
}
/**
* zynqmp_disp_layer_disable - Disable the layer
* @layer: The layer
*
* Disable the layer by stopping its DMA channels and disabling it in the
* audio/video buffer manager and the blender.
*/
void zynqmp_disp_layer_disable(struct zynqmp_disp_layer *layer)
{
unsigned int i;
if (layer->disp->dpsub->dma_enabled) {
for (i = 0; i < layer->drm_fmt->num_planes; i++)
dmaengine_terminate_sync(layer->dmas[i].chan);
}
zynqmp_disp_avbuf_disable_video(layer->disp, layer);
zynqmp_disp_blend_layer_disable(layer->disp, layer);
}
/**
* zynqmp_disp_layer_set_format - Set the layer format
* @layer: The layer
* @info: The format info
*
* Set the format for @layer to @info. The layer must be disabled.
*/
void zynqmp_disp_layer_set_format(struct zynqmp_disp_layer *layer,
const struct drm_format_info *info)
{
unsigned int i;
layer->disp_fmt = zynqmp_disp_layer_find_format(layer, info->format);
layer->drm_fmt = info;
zynqmp_disp_avbuf_set_format(layer->disp, layer, layer->disp_fmt);
if (!layer->disp->dpsub->dma_enabled)
return;
/*
* Set pconfig for each DMA channel to indicate they're part of a
* video group.
*/
for (i = 0; i < info->num_planes; i++) {
struct zynqmp_disp_layer_dma *dma = &layer->dmas[i];
struct xilinx_dpdma_peripheral_config pconfig = {
.video_group = true,
};
struct dma_slave_config config = {
.direction = DMA_MEM_TO_DEV,
.peripheral_config = &pconfig,
.peripheral_size = sizeof(pconfig),
};
dmaengine_slave_config(dma->chan, &config);
}
}
/**
* zynqmp_disp_layer_update - Update the layer framebuffer
* @layer: The layer
* @state: The plane state
*
* Update the framebuffer for the layer by issuing a new DMA engine transaction
* for the new framebuffer.
*
* Return: 0 on success, or a negative error code if preparing a DMA descriptor fails
*/
int zynqmp_disp_layer_update(struct zynqmp_disp_layer *layer,
struct drm_plane_state *state)
{
const struct drm_format_info *info = layer->drm_fmt;
unsigned int i;
if (!layer->disp->dpsub->dma_enabled)
return 0;
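/*
 * Program one interleaved DMA template per data plane. Chroma planes (i > 0)
 * of subsampled formats are scaled down by the format's hsub/vsub factors,
 * and the inter-chunk gap (icg) skips the padding between the end of the
 * active line and the framebuffer pitch.
 */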
for (i = 0; i < info->num_planes; i++) {
unsigned int width = state->crtc_w / (i ? info->hsub : 1);
unsigned int height = state->crtc_h / (i ? info->vsub : 1);
struct zynqmp_disp_layer_dma *dma = &layer->dmas[i];
struct dma_async_tx_descriptor *desc;
dma_addr_t dma_addr;
dma_addr = drm_fb_dma_get_gem_addr(state->fb, state, i);
dma->xt.numf = height;
dma->sgl.size = width * info->cpp[i];
dma->sgl.icg = state->fb->pitches[i] - dma->sgl.size;
dma->xt.src_start = dma_addr;
dma->xt.frame_size = 1;
dma->xt.dir = DMA_MEM_TO_DEV;
dma->xt.src_sgl = true;
dma->xt.dst_sgl = false;
desc = dmaengine_prep_interleaved_dma(dma->chan, &dma->xt,
DMA_CTRL_ACK |
DMA_PREP_REPEAT |
DMA_PREP_LOAD_EOT);
if (!desc) {
dev_err(layer->disp->dev,
"failed to prepare DMA descriptor\n");
return -ENOMEM;
}
dmaengine_submit(desc);
dma_async_issue_pending(dma->chan);
}
return 0;
}
/**
* zynqmp_disp_layer_release_dma - Release DMA channels for a layer
* @disp: Display controller
* @layer: The layer
*
* Release the DMA channels associated with @layer.
*/
static void zynqmp_disp_layer_release_dma(struct zynqmp_disp *disp,
struct zynqmp_disp_layer *layer)
{
unsigned int i;
if (!layer->info || !disp->dpsub->dma_enabled)
return;
for (i = 0; i < layer->info->num_channels; i++) {
struct zynqmp_disp_layer_dma *dma = &layer->dmas[i];
if (!dma->chan)
continue;
/* Make sure the channel is terminated before release. */
dmaengine_terminate_sync(dma->chan);
dma_release_channel(dma->chan);
}
}
/**
* zynqmp_disp_destroy_layers - Destroy all layers
* @disp: Display controller
*/
static void zynqmp_disp_destroy_layers(struct zynqmp_disp *disp)
{
unsigned int i;
for (i = 0; i < ARRAY_SIZE(disp->layers); i++)
zynqmp_disp_layer_release_dma(disp, &disp->layers[i]);
}
/**
* zynqmp_disp_layer_request_dma - Request DMA channels for a layer
* @disp: Display controller
* @layer: The layer
*
* Request all DMA engine channels needed by @layer.
*
* Return: 0 on success, or the DMA channel request error otherwise
*/
static int zynqmp_disp_layer_request_dma(struct zynqmp_disp *disp,
struct zynqmp_disp_layer *layer)
{
static const char * const dma_names[] = { "vid", "gfx" };
unsigned int i;
int ret;
if (!disp->dpsub->dma_enabled)
return 0;
for (i = 0; i < layer->info->num_channels; i++) {
struct zynqmp_disp_layer_dma *dma = &layer->dmas[i];
char dma_channel_name[16];
snprintf(dma_channel_name, sizeof(dma_channel_name),
"%s%u", dma_names[layer->id], i);
dma->chan = dma_request_chan(disp->dev, dma_channel_name);
if (IS_ERR(dma->chan)) {
ret = dev_err_probe(disp->dev, PTR_ERR(dma->chan),
"failed to request dma channel\n");
dma->chan = NULL;
return ret;
}
}
return 0;
}
/**
* zynqmp_disp_create_layers - Create and initialize all layers
* @disp: Display controller
*
* Return: 0 on success, or the DMA channel request error otherwise
*/
static int zynqmp_disp_create_layers(struct zynqmp_disp *disp)
{
static const struct zynqmp_disp_layer_info layer_info[] = {
[ZYNQMP_DPSUB_LAYER_VID] = {
.formats = avbuf_vid_fmts,
.num_formats = ARRAY_SIZE(avbuf_vid_fmts),
.num_channels = 3,
},
[ZYNQMP_DPSUB_LAYER_GFX] = {
.formats = avbuf_gfx_fmts,
.num_formats = ARRAY_SIZE(avbuf_gfx_fmts),
.num_channels = 1,
},
};
unsigned int i;
int ret;
for (i = 0; i < ARRAY_SIZE(disp->layers); i++) {
struct zynqmp_disp_layer *layer = &disp->layers[i];
layer->id = i;
layer->disp = disp;
layer->info = &layer_info[i];
ret = zynqmp_disp_layer_request_dma(disp, layer);
if (ret)
goto err;
disp->dpsub->layers[i] = layer;
}
return 0;
err:
zynqmp_disp_destroy_layers(disp);
return ret;
}
/* -----------------------------------------------------------------------------
* ZynqMP Display
*/
/**
* zynqmp_disp_enable - Enable the display controller
* @disp: Display controller
*/
void zynqmp_disp_enable(struct zynqmp_disp *disp)
{
zynqmp_disp_blend_set_output_format(disp, ZYNQMP_DPSUB_FORMAT_RGB);
zynqmp_disp_blend_set_bg_color(disp, 0, 0, 0);
zynqmp_disp_avbuf_enable(disp);
/* Choose clock source based on the DT clock handle. */
zynqmp_disp_avbuf_set_clocks_sources(disp, disp->dpsub->vid_clk_from_ps,
disp->dpsub->aud_clk_from_ps,
true);
zynqmp_disp_avbuf_enable_channels(disp);
zynqmp_disp_avbuf_enable_audio(disp);
zynqmp_disp_audio_enable(disp);
}
/**
* zynqmp_disp_disable - Disable the display controller
* @disp: Display controller
*/
void zynqmp_disp_disable(struct zynqmp_disp *disp)
{
zynqmp_disp_audio_disable(disp);
zynqmp_disp_avbuf_disable_audio(disp);
zynqmp_disp_avbuf_disable_channels(disp);
zynqmp_disp_avbuf_disable(disp);
}
/**
* zynqmp_disp_setup_clock - Configure the display controller pixel clock rate
* @disp: Display controller
* @mode_clock: The pixel clock rate, in Hz
*
* Return: 0 on success, or a negative error code otherwise
*/
int zynqmp_disp_setup_clock(struct zynqmp_disp *disp,
unsigned long mode_clock)
{
unsigned long rate;
long diff;
int ret;
ret = clk_set_rate(disp->dpsub->vid_clk, mode_clock);
if (ret) {
dev_err(disp->dev, "failed to set the video clock\n");
return ret;
}
rate = clk_get_rate(disp->dpsub->vid_clk);
diff = rate - mode_clock;
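/* Log at info level if the achieved rate deviates from the request by more than 5%. */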
if (abs(diff) > mode_clock / 20)
dev_info(disp->dev,
"requested pixel rate: %lu actual rate: %lu\n",
mode_clock, rate);
else
dev_dbg(disp->dev,
"requested pixel rate: %lu actual rate: %lu\n",
mode_clock, rate);
return 0;
}
/* -----------------------------------------------------------------------------
* Initialization & Cleanup
*/
int zynqmp_disp_probe(struct zynqmp_dpsub *dpsub)
{
struct platform_device *pdev = to_platform_device(dpsub->dev);
struct zynqmp_disp *disp;
int ret;
disp = kzalloc(sizeof(*disp), GFP_KERNEL);
if (!disp)
return -ENOMEM;
disp->dev = &pdev->dev;
disp->dpsub = dpsub;
disp->blend.base = devm_platform_ioremap_resource_byname(pdev, "blend");
if (IS_ERR(disp->blend.base)) {
ret = PTR_ERR(disp->blend.base);
goto error;
}
disp->avbuf.base = devm_platform_ioremap_resource_byname(pdev, "av_buf");
if (IS_ERR(disp->avbuf.base)) {
ret = PTR_ERR(disp->avbuf.base);
goto error;
}
disp->audio.base = devm_platform_ioremap_resource_byname(pdev, "aud");
if (IS_ERR(disp->audio.base)) {
ret = PTR_ERR(disp->audio.base);
goto error;
}
ret = zynqmp_disp_create_layers(disp);
if (ret)
goto error;
if (disp->dpsub->dma_enabled) {
struct zynqmp_disp_layer *layer;
layer = &disp->layers[ZYNQMP_DPSUB_LAYER_VID];
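/* copy_align is a power-of-two exponent; shift to get the byte alignment. */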
dpsub->dma_align = 1 << layer->dmas[0].chan->device->copy_align;
}
dpsub->disp = disp;
return 0;
error:
kfree(disp);
return ret;
}
void zynqmp_disp_remove(struct zynqmp_dpsub *dpsub)
{
struct zynqmp_disp *disp = dpsub->disp;
zynqmp_disp_destroy_layers(disp);
}
| linux-master | drivers/gpu/drm/xlnx/zynqmp_disp.c |
// SPDX-License-Identifier: GPL-2.0
/*
* ZynqMP DisplayPort Subsystem - KMS API
*
* Copyright (C) 2017 - 2021 Xilinx, Inc.
*
* Authors:
* - Hyun Woo Kwon <[email protected]>
* - Laurent Pinchart <[email protected]>
*/
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
#include <drm/drm_bridge.h>
#include <drm/drm_bridge_connector.h>
#include <drm/drm_connector.h>
#include <drm/drm_crtc.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_encoder.h>
#include <drm/drm_fbdev_dma.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_mode_config.h>
#include <drm/drm_plane.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
#include <drm/drm_vblank.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/pm_runtime.h>
#include <linux/spinlock.h>
#include "zynqmp_disp.h"
#include "zynqmp_dp.h"
#include "zynqmp_dpsub.h"
#include "zynqmp_kms.h"
static inline struct zynqmp_dpsub *to_zynqmp_dpsub(struct drm_device *drm)
{
return container_of(drm, struct zynqmp_dpsub_drm, dev)->dpsub;
}
/* -----------------------------------------------------------------------------
* DRM Planes
*/
static int zynqmp_dpsub_plane_atomic_check(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
plane);
struct drm_crtc_state *crtc_state;
if (!new_plane_state->crtc)
return 0;
crtc_state = drm_atomic_get_crtc_state(state, new_plane_state->crtc);
if (IS_ERR(crtc_state))
return PTR_ERR(crtc_state);
return drm_atomic_helper_check_plane_state(new_plane_state,
crtc_state,
DRM_PLANE_NO_SCALING,
DRM_PLANE_NO_SCALING,
false, false);
}
static void zynqmp_dpsub_plane_atomic_disable(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
plane);
struct zynqmp_dpsub *dpsub = to_zynqmp_dpsub(plane->dev);
struct zynqmp_disp_layer *layer = dpsub->layers[plane->index];
if (!old_state->fb)
return;
zynqmp_disp_layer_disable(layer);
if (plane->index == ZYNQMP_DPSUB_LAYER_GFX)
zynqmp_disp_blend_set_global_alpha(dpsub->disp, false,
plane->state->alpha >> 8);
}
static void zynqmp_dpsub_plane_atomic_update(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, plane);
struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, plane);
struct zynqmp_dpsub *dpsub = to_zynqmp_dpsub(plane->dev);
struct zynqmp_disp_layer *layer = dpsub->layers[plane->index];
bool format_changed = false;
if (!old_state->fb ||
old_state->fb->format->format != new_state->fb->format->format)
format_changed = true;
/*
* If the format has changed (including going from a previously
* disabled state to any format), reconfigure the format. Disable the
* plane first if needed.
*/
if (format_changed) {
if (old_state->fb)
zynqmp_disp_layer_disable(layer);
zynqmp_disp_layer_set_format(layer, new_state->fb->format);
}
zynqmp_disp_layer_update(layer, new_state);
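/*
 * The DRM alpha property is 16-bit (0-0xffff); the blender's global alpha
 * appears to be 8 bits wide, hence the >> 8.
 */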
if (plane->index == ZYNQMP_DPSUB_LAYER_GFX)
zynqmp_disp_blend_set_global_alpha(dpsub->disp, true,
plane->state->alpha >> 8);
/* Enable or re-enable the plane if the format has changed. */
if (format_changed)
zynqmp_disp_layer_enable(layer, ZYNQMP_DPSUB_LAYER_NONLIVE);
}
static const struct drm_plane_helper_funcs zynqmp_dpsub_plane_helper_funcs = {
.atomic_check = zynqmp_dpsub_plane_atomic_check,
.atomic_update = zynqmp_dpsub_plane_atomic_update,
.atomic_disable = zynqmp_dpsub_plane_atomic_disable,
};
static const struct drm_plane_funcs zynqmp_dpsub_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = drm_plane_cleanup,
.reset = drm_atomic_helper_plane_reset,
.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
};
static int zynqmp_dpsub_create_planes(struct zynqmp_dpsub *dpsub)
{
unsigned int i;
int ret;
for (i = 0; i < ARRAY_SIZE(dpsub->drm->planes); i++) {
struct zynqmp_disp_layer *layer = dpsub->layers[i];
struct drm_plane *plane = &dpsub->drm->planes[i];
enum drm_plane_type type;
unsigned int num_formats;
u32 *formats;
formats = zynqmp_disp_layer_drm_formats(layer, &num_formats);
if (!formats)
return -ENOMEM;
/* Graphics layer is primary, and video layer is overlay. */
type = i == ZYNQMP_DPSUB_LAYER_VID
? DRM_PLANE_TYPE_OVERLAY : DRM_PLANE_TYPE_PRIMARY;
ret = drm_universal_plane_init(&dpsub->drm->dev, plane, 0,
&zynqmp_dpsub_plane_funcs,
formats, num_formats,
NULL, type, NULL);
kfree(formats);
if (ret)
return ret;
drm_plane_helper_add(plane, &zynqmp_dpsub_plane_helper_funcs);
drm_plane_create_zpos_immutable_property(plane, i);
if (i == ZYNQMP_DPSUB_LAYER_GFX)
drm_plane_create_alpha_property(plane);
}
return 0;
}
/* -----------------------------------------------------------------------------
* DRM CRTC
*/
static inline struct zynqmp_dpsub *crtc_to_dpsub(struct drm_crtc *crtc)
{
return container_of(crtc, struct zynqmp_dpsub_drm, crtc)->dpsub;
}
static void zynqmp_dpsub_crtc_atomic_enable(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct zynqmp_dpsub *dpsub = crtc_to_dpsub(crtc);
struct drm_display_mode *adjusted_mode = &crtc->state->adjusted_mode;
int ret, vrefresh;
pm_runtime_get_sync(dpsub->dev);
zynqmp_disp_setup_clock(dpsub->disp, adjusted_mode->clock * 1000);
ret = clk_prepare_enable(dpsub->vid_clk);
if (ret) {
dev_err(dpsub->dev, "failed to enable a pixel clock\n");
pm_runtime_put_sync(dpsub->dev);
return;
}
zynqmp_disp_enable(dpsub->disp);
/* Delay of 3 vblank intervals for timing gen to be stable */
vrefresh = (adjusted_mode->clock * 1000) /
(adjusted_mode->vtotal * adjusted_mode->htotal);
msleep(3 * 1000 / vrefresh);
}
static void zynqmp_dpsub_crtc_atomic_disable(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct zynqmp_dpsub *dpsub = crtc_to_dpsub(crtc);
struct drm_plane_state *old_plane_state;
/*
* Disable the plane if active. The old plane state can be NULL in the
* .shutdown() path if the plane is already disabled; skip
* zynqmp_dpsub_plane_atomic_disable() in that case.
*/
old_plane_state = drm_atomic_get_old_plane_state(state, crtc->primary);
if (old_plane_state)
zynqmp_dpsub_plane_atomic_disable(crtc->primary, state);
zynqmp_disp_disable(dpsub->disp);
drm_crtc_vblank_off(crtc);
spin_lock_irq(&crtc->dev->event_lock);
if (crtc->state->event) {
drm_crtc_send_vblank_event(crtc, crtc->state->event);
crtc->state->event = NULL;
}
spin_unlock_irq(&crtc->dev->event_lock);
clk_disable_unprepare(dpsub->vid_clk);
pm_runtime_put_sync(dpsub->dev);
}
static int zynqmp_dpsub_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
return drm_atomic_add_affected_planes(state, crtc);
}
static void zynqmp_dpsub_crtc_atomic_begin(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
drm_crtc_vblank_on(crtc);
}
static void zynqmp_dpsub_crtc_atomic_flush(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
if (crtc->state->event) {
struct drm_pending_vblank_event *event;
/* Consume the flip_done event from atomic helper. */
event = crtc->state->event;
crtc->state->event = NULL;
event->pipe = drm_crtc_index(crtc);
WARN_ON(drm_crtc_vblank_get(crtc) != 0);
spin_lock_irq(&crtc->dev->event_lock);
drm_crtc_arm_vblank_event(crtc, event);
spin_unlock_irq(&crtc->dev->event_lock);
}
}
static const struct drm_crtc_helper_funcs zynqmp_dpsub_crtc_helper_funcs = {
.atomic_enable = zynqmp_dpsub_crtc_atomic_enable,
.atomic_disable = zynqmp_dpsub_crtc_atomic_disable,
.atomic_check = zynqmp_dpsub_crtc_atomic_check,
.atomic_begin = zynqmp_dpsub_crtc_atomic_begin,
.atomic_flush = zynqmp_dpsub_crtc_atomic_flush,
};
static int zynqmp_dpsub_crtc_enable_vblank(struct drm_crtc *crtc)
{
struct zynqmp_dpsub *dpsub = crtc_to_dpsub(crtc);
zynqmp_dp_enable_vblank(dpsub->dp);
return 0;
}
static void zynqmp_dpsub_crtc_disable_vblank(struct drm_crtc *crtc)
{
struct zynqmp_dpsub *dpsub = crtc_to_dpsub(crtc);
zynqmp_dp_disable_vblank(dpsub->dp);
}
static const struct drm_crtc_funcs zynqmp_dpsub_crtc_funcs = {
.destroy = drm_crtc_cleanup,
.set_config = drm_atomic_helper_set_config,
.page_flip = drm_atomic_helper_page_flip,
.reset = drm_atomic_helper_crtc_reset,
.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
.enable_vblank = zynqmp_dpsub_crtc_enable_vblank,
.disable_vblank = zynqmp_dpsub_crtc_disable_vblank,
};
static int zynqmp_dpsub_create_crtc(struct zynqmp_dpsub *dpsub)
{
struct drm_plane *plane = &dpsub->drm->planes[ZYNQMP_DPSUB_LAYER_GFX];
struct drm_crtc *crtc = &dpsub->drm->crtc;
int ret;
ret = drm_crtc_init_with_planes(&dpsub->drm->dev, crtc, plane,
NULL, &zynqmp_dpsub_crtc_funcs, NULL);
if (ret < 0)
return ret;
drm_crtc_helper_add(crtc, &zynqmp_dpsub_crtc_helper_funcs);
/* Start with vertical blanking interrupt reporting disabled. */
drm_crtc_vblank_off(crtc);
return 0;
}
static void zynqmp_dpsub_map_crtc_to_plane(struct zynqmp_dpsub *dpsub)
{
u32 possible_crtcs = drm_crtc_mask(&dpsub->drm->crtc);
unsigned int i;
for (i = 0; i < ARRAY_SIZE(dpsub->drm->planes); i++)
dpsub->drm->planes[i].possible_crtcs = possible_crtcs;
}
/**
* zynqmp_dpsub_drm_handle_vblank - Handle the vblank event
* @dpsub: DisplayPort subsystem
*
* This function handles the vblank interrupt, and sends an event to
* the CRTC object. This will be called by the DP vblank interrupt handler.
*/
void zynqmp_dpsub_drm_handle_vblank(struct zynqmp_dpsub *dpsub)
{
drm_crtc_handle_vblank(&dpsub->drm->crtc);
}
/* -----------------------------------------------------------------------------
* Dumb Buffer & Framebuffer Allocation
*/
static int zynqmp_dpsub_dumb_create(struct drm_file *file_priv,
struct drm_device *drm,
struct drm_mode_create_dumb *args)
{
struct zynqmp_dpsub *dpsub = to_zynqmp_dpsub(drm);
unsigned int pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
/* Enforce the alignment constraints of the DMA engine. */
args->pitch = ALIGN(pitch, dpsub->dma_align);
return drm_gem_dma_dumb_create_internal(file_priv, drm, args);
}
static struct drm_framebuffer *
zynqmp_dpsub_fb_create(struct drm_device *drm, struct drm_file *file_priv,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
struct zynqmp_dpsub *dpsub = to_zynqmp_dpsub(drm);
struct drm_mode_fb_cmd2 cmd = *mode_cmd;
unsigned int i;
/* Enforce the alignment constraints of the DMA engine. */
for (i = 0; i < ARRAY_SIZE(cmd.pitches); ++i)
cmd.pitches[i] = ALIGN(cmd.pitches[i], dpsub->dma_align);
return drm_gem_fb_create(drm, file_priv, &cmd);
}
static const struct drm_mode_config_funcs zynqmp_dpsub_mode_config_funcs = {
.fb_create = zynqmp_dpsub_fb_create,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
};
/* -----------------------------------------------------------------------------
* DRM/KMS Driver
*/
DEFINE_DRM_GEM_DMA_FOPS(zynqmp_dpsub_drm_fops);
static const struct drm_driver zynqmp_dpsub_drm_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM |
DRIVER_ATOMIC,
DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE(zynqmp_dpsub_dumb_create),
.fops = &zynqmp_dpsub_drm_fops,
.name = "zynqmp-dpsub",
.desc = "Xilinx DisplayPort Subsystem Driver",
.date = "20130509",
.major = 1,
.minor = 0,
};
static int zynqmp_dpsub_kms_init(struct zynqmp_dpsub *dpsub)
{
struct drm_encoder *encoder = &dpsub->drm->encoder;
struct drm_connector *connector;
int ret;
/* Create the planes and the CRTC. */
ret = zynqmp_dpsub_create_planes(dpsub);
if (ret)
return ret;
ret = zynqmp_dpsub_create_crtc(dpsub);
if (ret < 0)
return ret;
zynqmp_dpsub_map_crtc_to_plane(dpsub);
/* Create the encoder and attach the bridge. */
encoder->possible_crtcs |= drm_crtc_mask(&dpsub->drm->crtc);
drm_simple_encoder_init(&dpsub->drm->dev, encoder, DRM_MODE_ENCODER_NONE);
ret = drm_bridge_attach(encoder, dpsub->bridge, NULL,
DRM_BRIDGE_ATTACH_NO_CONNECTOR);
if (ret) {
dev_err(dpsub->dev, "failed to attach bridge to encoder\n");
return ret;
}
/* Create the connector for the chain of bridges. */
connector = drm_bridge_connector_init(&dpsub->drm->dev, encoder);
if (IS_ERR(connector)) {
dev_err(dpsub->dev, "failed to create connector\n");
return PTR_ERR(connector);
}
ret = drm_connector_attach_encoder(connector, encoder);
if (ret < 0) {
dev_err(dpsub->dev, "failed to attach connector to encoder\n");
return ret;
}
return 0;
}
static void zynqmp_dpsub_drm_release(struct drm_device *drm, void *res)
{
struct zynqmp_dpsub_drm *dpdrm = res;
zynqmp_dpsub_release(dpdrm->dpsub);
}
int zynqmp_dpsub_drm_init(struct zynqmp_dpsub *dpsub)
{
struct zynqmp_dpsub_drm *dpdrm;
struct drm_device *drm;
int ret;
/*
* Allocate the drm_device and immediately add a cleanup action to
* release the zynqmp_dpsub instance. If any of those operations fail,
* dpsub->drm will remain NULL, which tells the caller that it must
* cleanup manually.
*/
dpdrm = devm_drm_dev_alloc(dpsub->dev, &zynqmp_dpsub_drm_driver,
struct zynqmp_dpsub_drm, dev);
if (IS_ERR(dpdrm))
return PTR_ERR(dpdrm);
dpdrm->dpsub = dpsub;
drm = &dpdrm->dev;
ret = drmm_add_action(drm, zynqmp_dpsub_drm_release, dpdrm);
if (ret < 0)
return ret;
dpsub->drm = dpdrm;
/* Initialize mode config, vblank and the KMS poll helper. */
ret = drmm_mode_config_init(drm);
if (ret < 0)
return ret;
drm->mode_config.funcs = &zynqmp_dpsub_mode_config_funcs;
drm->mode_config.min_width = 0;
drm->mode_config.min_height = 0;
drm->mode_config.max_width = ZYNQMP_DISP_MAX_WIDTH;
drm->mode_config.max_height = ZYNQMP_DISP_MAX_HEIGHT;
ret = drm_vblank_init(drm, 1);
if (ret)
return ret;
drm_kms_helper_poll_init(drm);
ret = zynqmp_dpsub_kms_init(dpsub);
if (ret < 0)
goto err_poll_fini;
/* Reset all components and register the DRM device. */
drm_mode_config_reset(drm);
ret = drm_dev_register(drm, 0);
if (ret < 0)
goto err_poll_fini;
/* Initialize fbdev generic emulation. */
drm_fbdev_dma_setup(drm, 24);
return 0;
err_poll_fini:
drm_kms_helper_poll_fini(drm);
return ret;
}
void zynqmp_dpsub_drm_cleanup(struct zynqmp_dpsub *dpsub)
{
struct drm_device *drm = &dpsub->drm->dev;
drm_dev_unregister(drm);
drm_atomic_helper_shutdown(drm);
drm_kms_helper_poll_fini(drm);
}
| linux-master | drivers/gpu/drm/xlnx/zynqmp_kms.c |
// SPDX-License-Identifier: GPL-2.0
/*
* ZynqMP DisplayPort Driver
*
* Copyright (C) 2017 - 2020 Xilinx, Inc.
*
* Authors:
* - Hyun Woo Kwon <[email protected]>
* - Laurent Pinchart <[email protected]>
*/
#include <drm/display/drm_dp_helper.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_device.h>
#include <drm/drm_edid.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_modes.h>
#include <drm/drm_of.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/phy/phy.h>
#include <linux/reset.h>
#include <linux/slab.h>
#include "zynqmp_disp.h"
#include "zynqmp_dp.h"
#include "zynqmp_dpsub.h"
#include "zynqmp_kms.h"
static uint zynqmp_dp_aux_timeout_ms = 50;
module_param_named(aux_timeout_ms, zynqmp_dp_aux_timeout_ms, uint, 0444);
MODULE_PARM_DESC(aux_timeout_ms, "DP aux timeout value in msec (default: 50)");
/*
* Some sinks require a delay after a power-on request
*/
static uint zynqmp_dp_power_on_delay_ms = 4;
module_param_named(power_on_delay_ms, zynqmp_dp_power_on_delay_ms, uint, 0444);
MODULE_PARM_DESC(power_on_delay_ms, "DP power on delay in msec (default: 4)");
/* Link configuration registers */
#define ZYNQMP_DP_LINK_BW_SET 0x0
#define ZYNQMP_DP_LANE_COUNT_SET 0x4
#define ZYNQMP_DP_ENHANCED_FRAME_EN 0x8
#define ZYNQMP_DP_TRAINING_PATTERN_SET 0xc
#define ZYNQMP_DP_SCRAMBLING_DISABLE 0x14
#define ZYNQMP_DP_DOWNSPREAD_CTL 0x18
#define ZYNQMP_DP_SOFTWARE_RESET 0x1c
#define ZYNQMP_DP_SOFTWARE_RESET_STREAM1 BIT(0)
#define ZYNQMP_DP_SOFTWARE_RESET_STREAM2 BIT(1)
#define ZYNQMP_DP_SOFTWARE_RESET_STREAM3 BIT(2)
#define ZYNQMP_DP_SOFTWARE_RESET_STREAM4 BIT(3)
#define ZYNQMP_DP_SOFTWARE_RESET_AUX BIT(7)
#define ZYNQMP_DP_SOFTWARE_RESET_ALL (ZYNQMP_DP_SOFTWARE_RESET_STREAM1 | \
ZYNQMP_DP_SOFTWARE_RESET_STREAM2 | \
ZYNQMP_DP_SOFTWARE_RESET_STREAM3 | \
ZYNQMP_DP_SOFTWARE_RESET_STREAM4 | \
ZYNQMP_DP_SOFTWARE_RESET_AUX)
/* Core enable registers */
#define ZYNQMP_DP_TRANSMITTER_ENABLE 0x80
#define ZYNQMP_DP_MAIN_STREAM_ENABLE 0x84
#define ZYNQMP_DP_FORCE_SCRAMBLER_RESET 0xc0
#define ZYNQMP_DP_VERSION 0xf8
#define ZYNQMP_DP_VERSION_MAJOR_MASK GENMASK(31, 24)
#define ZYNQMP_DP_VERSION_MAJOR_SHIFT 24
#define ZYNQMP_DP_VERSION_MINOR_MASK GENMASK(23, 16)
#define ZYNQMP_DP_VERSION_MINOR_SHIFT 16
#define ZYNQMP_DP_VERSION_REVISION_MASK GENMASK(15, 12)
#define ZYNQMP_DP_VERSION_REVISION_SHIFT 12
#define ZYNQMP_DP_VERSION_PATCH_MASK GENMASK(11, 8)
#define ZYNQMP_DP_VERSION_PATCH_SHIFT 8
#define ZYNQMP_DP_VERSION_INTERNAL_MASK GENMASK(7, 0)
#define ZYNQMP_DP_VERSION_INTERNAL_SHIFT 0
/* Core ID registers */
#define ZYNQMP_DP_CORE_ID 0xfc
#define ZYNQMP_DP_CORE_ID_MAJOR_MASK GENMASK(31, 24)
#define ZYNQMP_DP_CORE_ID_MAJOR_SHIFT 24
#define ZYNQMP_DP_CORE_ID_MINOR_MASK GENMASK(23, 16)
#define ZYNQMP_DP_CORE_ID_MINOR_SHIFT 16
#define ZYNQMP_DP_CORE_ID_REVISION_MASK GENMASK(15, 8)
#define ZYNQMP_DP_CORE_ID_REVISION_SHIFT 8
#define ZYNQMP_DP_CORE_ID_DIRECTION GENMASK(1)
/* AUX channel interface registers */
#define ZYNQMP_DP_AUX_COMMAND 0x100
#define ZYNQMP_DP_AUX_COMMAND_CMD_SHIFT 8
#define ZYNQMP_DP_AUX_COMMAND_ADDRESS_ONLY BIT(12)
#define ZYNQMP_DP_AUX_COMMAND_BYTES_SHIFT 0
#define ZYNQMP_DP_AUX_WRITE_FIFO 0x104
#define ZYNQMP_DP_AUX_ADDRESS 0x108
#define ZYNQMP_DP_AUX_CLK_DIVIDER 0x10c
#define ZYNQMP_DP_AUX_CLK_DIVIDER_AUX_FILTER_SHIFT 8
#define ZYNQMP_DP_INTERRUPT_SIGNAL_STATE 0x130
#define ZYNQMP_DP_INTERRUPT_SIGNAL_STATE_HPD BIT(0)
#define ZYNQMP_DP_INTERRUPT_SIGNAL_STATE_REQUEST BIT(1)
#define ZYNQMP_DP_INTERRUPT_SIGNAL_STATE_REPLY BIT(2)
#define ZYNQMP_DP_INTERRUPT_SIGNAL_STATE_REPLY_TIMEOUT BIT(3)
#define ZYNQMP_DP_AUX_REPLY_DATA 0x134
#define ZYNQMP_DP_AUX_REPLY_CODE 0x138
#define ZYNQMP_DP_AUX_REPLY_CODE_AUX_ACK (0)
#define ZYNQMP_DP_AUX_REPLY_CODE_AUX_NACK BIT(0)
#define ZYNQMP_DP_AUX_REPLY_CODE_AUX_DEFER BIT(1)
#define ZYNQMP_DP_AUX_REPLY_CODE_I2C_ACK (0)
#define ZYNQMP_DP_AUX_REPLY_CODE_I2C_NACK BIT(2)
#define ZYNQMP_DP_AUX_REPLY_CODE_I2C_DEFER BIT(3)
#define ZYNQMP_DP_AUX_REPLY_COUNT 0x13c
#define ZYNQMP_DP_REPLY_DATA_COUNT 0x148
#define ZYNQMP_DP_REPLY_DATA_COUNT_MASK 0xff
#define ZYNQMP_DP_INT_STATUS 0x3a0
#define ZYNQMP_DP_INT_MASK 0x3a4
#define ZYNQMP_DP_INT_EN 0x3a8
#define ZYNQMP_DP_INT_DS 0x3ac
#define ZYNQMP_DP_INT_HPD_IRQ BIT(0)
#define ZYNQMP_DP_INT_HPD_EVENT BIT(1)
#define ZYNQMP_DP_INT_REPLY_RECEIVED BIT(2)
#define ZYNQMP_DP_INT_REPLY_TIMEOUT BIT(3)
#define ZYNQMP_DP_INT_HPD_PULSE_DET BIT(4)
#define ZYNQMP_DP_INT_EXT_PKT_TXD BIT(5)
#define ZYNQMP_DP_INT_LIV_ABUF_UNDRFLW BIT(12)
#define ZYNQMP_DP_INT_VBLANK_START BIT(13)
#define ZYNQMP_DP_INT_PIXEL1_MATCH BIT(14)
#define ZYNQMP_DP_INT_PIXEL0_MATCH BIT(15)
#define ZYNQMP_DP_INT_CHBUF_UNDERFLW_MASK 0x3f0000
#define ZYNQMP_DP_INT_CHBUF_OVERFLW_MASK 0xfc00000
#define ZYNQMP_DP_INT_CUST_TS_2 BIT(28)
#define ZYNQMP_DP_INT_CUST_TS BIT(29)
#define ZYNQMP_DP_INT_EXT_VSYNC_TS BIT(30)
#define ZYNQMP_DP_INT_VSYNC_TS BIT(31)
#define ZYNQMP_DP_INT_ALL (ZYNQMP_DP_INT_HPD_IRQ | \
ZYNQMP_DP_INT_HPD_EVENT | \
ZYNQMP_DP_INT_CHBUF_UNDERFLW_MASK | \
ZYNQMP_DP_INT_CHBUF_OVERFLW_MASK)
/* Main stream attribute registers */
#define ZYNQMP_DP_MAIN_STREAM_HTOTAL 0x180
#define ZYNQMP_DP_MAIN_STREAM_VTOTAL 0x184
#define ZYNQMP_DP_MAIN_STREAM_POLARITY 0x188
#define ZYNQMP_DP_MAIN_STREAM_POLARITY_HSYNC_SHIFT 0
#define ZYNQMP_DP_MAIN_STREAM_POLARITY_VSYNC_SHIFT 1
#define ZYNQMP_DP_MAIN_STREAM_HSWIDTH 0x18c
#define ZYNQMP_DP_MAIN_STREAM_VSWIDTH 0x190
#define ZYNQMP_DP_MAIN_STREAM_HRES 0x194
#define ZYNQMP_DP_MAIN_STREAM_VRES 0x198
#define ZYNQMP_DP_MAIN_STREAM_HSTART 0x19c
#define ZYNQMP_DP_MAIN_STREAM_VSTART 0x1a0
#define ZYNQMP_DP_MAIN_STREAM_MISC0 0x1a4
#define ZYNQMP_DP_MAIN_STREAM_MISC0_SYNC_LOCK BIT(0)
#define ZYNQMP_DP_MAIN_STREAM_MISC0_COMP_FORMAT_RGB (0 << 1)
#define ZYNQMP_DP_MAIN_STREAM_MISC0_COMP_FORMAT_YCRCB_422 (5 << 1)
#define ZYNQMP_DP_MAIN_STREAM_MISC0_COMP_FORMAT_YCRCB_444 (6 << 1)
#define ZYNQMP_DP_MAIN_STREAM_MISC0_COMP_FORMAT_MASK (7 << 1)
#define ZYNQMP_DP_MAIN_STREAM_MISC0_DYNAMIC_RANGE BIT(3)
#define ZYNQMP_DP_MAIN_STREAM_MISC0_YCBCR_COLR BIT(4)
#define ZYNQMP_DP_MAIN_STREAM_MISC0_BPC_6 (0 << 5)
#define ZYNQMP_DP_MAIN_STREAM_MISC0_BPC_8 (1 << 5)
#define ZYNQMP_DP_MAIN_STREAM_MISC0_BPC_10 (2 << 5)
#define ZYNQMP_DP_MAIN_STREAM_MISC0_BPC_12 (3 << 5)
#define ZYNQMP_DP_MAIN_STREAM_MISC0_BPC_16 (4 << 5)
#define ZYNQMP_DP_MAIN_STREAM_MISC0_BPC_MASK (7 << 5)
#define ZYNQMP_DP_MAIN_STREAM_MISC1 0x1a8
#define ZYNQMP_DP_MAIN_STREAM_MISC1_Y_ONLY_EN BIT(7)
#define ZYNQMP_DP_MAIN_STREAM_M_VID 0x1ac
#define ZYNQMP_DP_MSA_TRANSFER_UNIT_SIZE 0x1b0
#define ZYNQMP_DP_MSA_TRANSFER_UNIT_SIZE_TU_SIZE_DEF 64
#define ZYNQMP_DP_MAIN_STREAM_N_VID 0x1b4
#define ZYNQMP_DP_USER_PIX_WIDTH 0x1b8
#define ZYNQMP_DP_USER_DATA_COUNT_PER_LANE 0x1bc
#define ZYNQMP_DP_MIN_BYTES_PER_TU 0x1c4
#define ZYNQMP_DP_FRAC_BYTES_PER_TU 0x1c8
#define ZYNQMP_DP_INIT_WAIT 0x1cc
/* PHY configuration and status registers */
#define ZYNQMP_DP_PHY_RESET 0x200
#define ZYNQMP_DP_PHY_RESET_PHY_RESET BIT(0)
#define ZYNQMP_DP_PHY_RESET_GTTX_RESET BIT(1)
#define ZYNQMP_DP_PHY_RESET_PHY_PMA_RESET BIT(8)
#define ZYNQMP_DP_PHY_RESET_PHY_PCS_RESET BIT(9)
#define ZYNQMP_DP_PHY_RESET_ALL_RESET (ZYNQMP_DP_PHY_RESET_PHY_RESET | \
ZYNQMP_DP_PHY_RESET_GTTX_RESET | \
ZYNQMP_DP_PHY_RESET_PHY_PMA_RESET | \
ZYNQMP_DP_PHY_RESET_PHY_PCS_RESET)
#define ZYNQMP_DP_PHY_PREEMPHASIS_LANE_0 0x210
#define ZYNQMP_DP_PHY_PREEMPHASIS_LANE_1 0x214
#define ZYNQMP_DP_PHY_PREEMPHASIS_LANE_2 0x218
#define ZYNQMP_DP_PHY_PREEMPHASIS_LANE_3 0x21c
#define ZYNQMP_DP_PHY_VOLTAGE_DIFF_LANE_0 0x220
#define ZYNQMP_DP_PHY_VOLTAGE_DIFF_LANE_1 0x224
#define ZYNQMP_DP_PHY_VOLTAGE_DIFF_LANE_2 0x228
#define ZYNQMP_DP_PHY_VOLTAGE_DIFF_LANE_3 0x22c
#define ZYNQMP_DP_PHY_CLOCK_SELECT 0x234
#define ZYNQMP_DP_PHY_CLOCK_SELECT_1_62G 0x1
#define ZYNQMP_DP_PHY_CLOCK_SELECT_2_70G 0x3
#define ZYNQMP_DP_PHY_CLOCK_SELECT_5_40G 0x5
#define ZYNQMP_DP_TX_PHY_POWER_DOWN 0x238
#define ZYNQMP_DP_TX_PHY_POWER_DOWN_LANE_0 BIT(0)
#define ZYNQMP_DP_TX_PHY_POWER_DOWN_LANE_1 BIT(1)
#define ZYNQMP_DP_TX_PHY_POWER_DOWN_LANE_2 BIT(2)
#define ZYNQMP_DP_TX_PHY_POWER_DOWN_LANE_3 BIT(3)
#define ZYNQMP_DP_TX_PHY_POWER_DOWN_ALL 0xf
#define ZYNQMP_DP_PHY_PRECURSOR_LANE_0 0x23c
#define ZYNQMP_DP_PHY_PRECURSOR_LANE_1 0x240
#define ZYNQMP_DP_PHY_PRECURSOR_LANE_2 0x244
#define ZYNQMP_DP_PHY_PRECURSOR_LANE_3 0x248
#define ZYNQMP_DP_PHY_POSTCURSOR_LANE_0 0x24c
#define ZYNQMP_DP_PHY_POSTCURSOR_LANE_1 0x250
#define ZYNQMP_DP_PHY_POSTCURSOR_LANE_2 0x254
#define ZYNQMP_DP_PHY_POSTCURSOR_LANE_3 0x258
#define ZYNQMP_DP_SUB_TX_PHY_PRECURSOR_LANE_0 0x24c
#define ZYNQMP_DP_SUB_TX_PHY_PRECURSOR_LANE_1 0x250
#define ZYNQMP_DP_PHY_STATUS 0x280
#define ZYNQMP_DP_PHY_STATUS_PLL_LOCKED_SHIFT 4
#define ZYNQMP_DP_PHY_STATUS_FPGA_PLL_LOCKED BIT(6)
/* Audio registers */
#define ZYNQMP_DP_TX_AUDIO_CONTROL 0x300
#define ZYNQMP_DP_TX_AUDIO_CHANNELS 0x304
#define ZYNQMP_DP_TX_AUDIO_INFO_DATA 0x308
#define ZYNQMP_DP_TX_M_AUD 0x328
#define ZYNQMP_DP_TX_N_AUD 0x32c
#define ZYNQMP_DP_TX_AUDIO_EXT_DATA 0x330
#define ZYNQMP_DP_MAX_LANES 2
#define ZYNQMP_MAX_FREQ 3000000
#define DP_REDUCED_BIT_RATE 162000
#define DP_HIGH_BIT_RATE 270000
#define DP_HIGH_BIT_RATE2 540000
#define DP_MAX_TRAINING_TRIES 5
#define DP_V1_2 0x12
/**
* struct zynqmp_dp_link_config - Common link config between source and sink
* @max_rate: maximum link rate
* @max_lanes: maximum number of lanes
*/
struct zynqmp_dp_link_config {
int max_rate;
u8 max_lanes;
};
/**
* struct zynqmp_dp_mode - Configured mode of DisplayPort
* @bw_code: code for bandwidth(link rate)
* @lane_cnt: number of lanes
* @pclock: pixel clock frequency of current mode
* @fmt: format identifier string
*/
struct zynqmp_dp_mode {
u8 bw_code;
u8 lane_cnt;
int pclock;
const char *fmt;
};
/**
* struct zynqmp_dp_config - Configuration of DisplayPort from DTS
* @misc0: misc0 configuration (per DP v1.2 spec)
* @misc1: misc1 configuration (per DP v1.2 spec)
* @bpp: bits per pixel
*/
struct zynqmp_dp_config {
u8 misc0;
u8 misc1;
u8 bpp;
};
/**
* struct zynqmp_dp - Xilinx DisplayPort core
* @dev: device structure
* @dpsub: Display subsystem
* @iomem: device I/O memory for register access
* @reset: reset controller
* @irq: irq
* @bridge: DRM bridge for the DP encoder
* @next_bridge: The downstream bridge
* @config: IP core configuration from DTS
* @aux: aux channel
* @phy: PHY handles for DP lanes
* @num_lanes: number of enabled phy lanes
* @hpd_work: hot plug detection worker
* @status: connection status
* @enabled: flag to indicate if the device is enabled
* @dpcd: DP configuration data from currently connected sink device
* @link_config: common link configuration between IP core and sink device
* @mode: current mode between IP core and sink device
* @train_set: set of training data
*/
struct zynqmp_dp {
struct device *dev;
struct zynqmp_dpsub *dpsub;
void __iomem *iomem;
struct reset_control *reset;
int irq;
struct drm_bridge bridge;
struct drm_bridge *next_bridge;
struct zynqmp_dp_config config;
struct drm_dp_aux aux;
struct phy *phy[ZYNQMP_DP_MAX_LANES];
u8 num_lanes;
struct delayed_work hpd_work;
enum drm_connector_status status;
bool enabled;
u8 dpcd[DP_RECEIVER_CAP_SIZE];
struct zynqmp_dp_link_config link_config;
struct zynqmp_dp_mode mode;
u8 train_set[ZYNQMP_DP_MAX_LANES];
};
static inline struct zynqmp_dp *bridge_to_dp(struct drm_bridge *bridge)
{
return container_of(bridge, struct zynqmp_dp, bridge);
}
static void zynqmp_dp_write(struct zynqmp_dp *dp, int offset, u32 val)
{
writel(val, dp->iomem + offset);
}
static u32 zynqmp_dp_read(struct zynqmp_dp *dp, int offset)
{
return readl(dp->iomem + offset);
}
static void zynqmp_dp_clr(struct zynqmp_dp *dp, int offset, u32 clr)
{
zynqmp_dp_write(dp, offset, zynqmp_dp_read(dp, offset) & ~clr);
}
static void zynqmp_dp_set(struct zynqmp_dp *dp, int offset, u32 set)
{
zynqmp_dp_write(dp, offset, zynqmp_dp_read(dp, offset) | set);
}
/* -----------------------------------------------------------------------------
* PHY Handling
*/
#define RST_TIMEOUT_MS 1000
static int zynqmp_dp_reset(struct zynqmp_dp *dp, bool assert)
{
unsigned long timeout;
if (assert)
reset_control_assert(dp->reset);
else
reset_control_deassert(dp->reset);
/* Wait for the (de)assert to complete. */
timeout = jiffies + msecs_to_jiffies(RST_TIMEOUT_MS);
while (!time_after_eq(jiffies, timeout)) {
bool status = !!reset_control_status(dp->reset);
if (assert == status)
return 0;
cpu_relax();
}
dev_err(dp->dev, "reset %s timeout\n", assert ? "assert" : "deassert");
return -ETIMEDOUT;
}
/**
* zynqmp_dp_phy_init - Initialize the phy
* @dp: DisplayPort IP core structure
*
* Initialize the phy.
*
* Return: 0 if the phy instances are initialized correctly, or the error code
* returned from the callee functions.
*/
static int zynqmp_dp_phy_init(struct zynqmp_dp *dp)
{
int ret;
int i;
for (i = 0; i < dp->num_lanes; i++) {
ret = phy_init(dp->phy[i]);
if (ret) {
dev_err(dp->dev, "failed to init phy lane %d\n", i);
return ret;
}
}
zynqmp_dp_clr(dp, ZYNQMP_DP_PHY_RESET, ZYNQMP_DP_PHY_RESET_ALL_RESET);
/*
* Power on lanes in reverse order as only lane 0 waits for the PLL to
* lock.
*/
for (i = dp->num_lanes - 1; i >= 0; i--) {
ret = phy_power_on(dp->phy[i]);
if (ret) {
dev_err(dp->dev, "failed to power on phy lane %d\n", i);
return ret;
}
}
return 0;
}
/**
* zynqmp_dp_phy_exit - Exit the phy
* @dp: DisplayPort IP core structure
*
* Exit the phy.
*/
static void zynqmp_dp_phy_exit(struct zynqmp_dp *dp)
{
unsigned int i;
int ret;
for (i = 0; i < dp->num_lanes; i++) {
ret = phy_power_off(dp->phy[i]);
if (ret)
dev_err(dp->dev, "failed to power off phy(%d) %d\n", i,
ret);
}
for (i = 0; i < dp->num_lanes; i++) {
ret = phy_exit(dp->phy[i]);
if (ret)
dev_err(dp->dev, "failed to exit phy(%d) %d\n", i, ret);
}
}
/**
* zynqmp_dp_phy_probe - Probe the PHYs
* @dp: DisplayPort IP core structure
*
 * Probe PHYs for all lanes. Fewer PHYs may be available than the number of
 * lanes, which is not considered an error as long as at least one PHY is
 * found. The caller can check dp->num_lanes to see how many PHYs were found.
*
* Return:
* * 0 - Success
* * -ENXIO - No PHY found
* * -EPROBE_DEFER - Probe deferral requested
* * Other negative value - PHY retrieval failure
*/
static int zynqmp_dp_phy_probe(struct zynqmp_dp *dp)
{
unsigned int i;
for (i = 0; i < ZYNQMP_DP_MAX_LANES; i++) {
char phy_name[16];
struct phy *phy;
snprintf(phy_name, sizeof(phy_name), "dp-phy%d", i);
phy = devm_phy_get(dp->dev, phy_name);
if (IS_ERR(phy)) {
switch (PTR_ERR(phy)) {
case -ENODEV:
if (dp->num_lanes)
return 0;
dev_err(dp->dev, "no PHY found\n");
return -ENXIO;
case -EPROBE_DEFER:
return -EPROBE_DEFER;
default:
dev_err(dp->dev, "failed to get PHY lane %u\n",
i);
return PTR_ERR(phy);
}
}
dp->phy[i] = phy;
dp->num_lanes++;
}
return 0;
}
/**
* zynqmp_dp_phy_ready - Check if PHY is ready
* @dp: DisplayPort IP core structure
*
 * Check if the PHY is ready. If it is not, poll every 1ms, up to 100 times.
 * This amount of delay was suggested by the IP designer.
*
* Return: 0 if PHY is ready, or -ENODEV if PHY is not ready.
*/
static int zynqmp_dp_phy_ready(struct zynqmp_dp *dp)
{
u32 i, reg, ready;
ready = (1 << dp->num_lanes) - 1;
/* Wait for 100 * 1ms. This should be enough time for PHY to be ready */
for (i = 0; ; i++) {
reg = zynqmp_dp_read(dp, ZYNQMP_DP_PHY_STATUS);
if ((reg & ready) == ready)
return 0;
if (i == 100) {
dev_err(dp->dev, "PHY isn't ready\n");
return -ENODEV;
}
usleep_range(1000, 1100);
}
return 0;
}
/* -----------------------------------------------------------------------------
* DisplayPort Link Training
*/
/**
* zynqmp_dp_max_rate - Calculate and return available max pixel clock
* @link_rate: link rate (Kilo-bytes / sec)
* @lane_num: number of lanes
* @bpp: bits per pixel
*
* Return: max pixel clock (KHz) supported by current link config.
*/
static inline int zynqmp_dp_max_rate(int link_rate, u8 lane_num, u8 bpp)
{
return link_rate * lane_num * 8 / bpp;
}
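/*
 * Illustrative example (not part of the original source): for an HBR link,
 * where drm_dp_bw_code_to_link_rate(DP_LINK_BW_2_7) == 270000 kB/s, with
 * 2 lanes and 24 bpp, zynqmp_dp_max_rate(270000, 2, 24) == 180000, i.e.
 * pixel clocks up to 180 MHz fit in the link.
 */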
/**
* zynqmp_dp_mode_configure - Configure the link values
* @dp: DisplayPort IP core structure
* @pclock: pixel clock for requested display mode
* @current_bw: current link rate
*
 * Find the link configuration values, rate and lane count, for the requested
 * pixel clock @pclock. The @pclock is stored in the mode to be used in other
* functions later. The returned rate is downshifted from the current rate
* @current_bw.
*
* Return: Current link rate code, or -EINVAL.
*/
static int zynqmp_dp_mode_configure(struct zynqmp_dp *dp, int pclock,
u8 current_bw)
{
int max_rate = dp->link_config.max_rate;
u8 bw_code;
u8 max_lanes = dp->link_config.max_lanes;
u8 max_link_rate_code = drm_dp_link_rate_to_bw_code(max_rate);
u8 bpp = dp->config.bpp;
u8 lane_cnt;
/* Downshift from current bandwidth */
switch (current_bw) {
case DP_LINK_BW_5_4:
bw_code = DP_LINK_BW_2_7;
break;
case DP_LINK_BW_2_7:
bw_code = DP_LINK_BW_1_62;
break;
case DP_LINK_BW_1_62:
dev_err(dp->dev, "can't downshift. already lowest link rate\n");
return -EINVAL;
default:
/* If not given, start with max supported */
bw_code = max_link_rate_code;
break;
}
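        /*
         * Illustrative example (not from the original source): with a
         * 148500 kHz pixel clock, 24 bpp and an HBR2-capable 4-lane sink,
         * the loop below first tries 1 lane: 540000 * 1 * 8 / 24 = 180000
         * >= 148500, so the link is configured for 1 lane at 5.4 Gbps.
         * More lanes are only used when a single lane cannot carry the
         * requested pixel clock.
         */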
for (lane_cnt = 1; lane_cnt <= max_lanes; lane_cnt <<= 1) {
int bw;
u32 rate;
bw = drm_dp_bw_code_to_link_rate(bw_code);
rate = zynqmp_dp_max_rate(bw, lane_cnt, bpp);
if (pclock <= rate) {
dp->mode.bw_code = bw_code;
dp->mode.lane_cnt = lane_cnt;
dp->mode.pclock = pclock;
return dp->mode.bw_code;
}
}
dev_err(dp->dev, "failed to configure link values\n");
return -EINVAL;
}
/**
* zynqmp_dp_adjust_train - Adjust train values
* @dp: DisplayPort IP core structure
* @link_status: link status from sink which contains requested training values
*/
static void zynqmp_dp_adjust_train(struct zynqmp_dp *dp,
u8 link_status[DP_LINK_STATUS_SIZE])
{
u8 *train_set = dp->train_set;
u8 voltage = 0, preemphasis = 0;
u8 i;
for (i = 0; i < dp->mode.lane_cnt; i++) {
u8 v = drm_dp_get_adjust_request_voltage(link_status, i);
u8 p = drm_dp_get_adjust_request_pre_emphasis(link_status, i);
if (v > voltage)
voltage = v;
if (p > preemphasis)
preemphasis = p;
}
if (voltage >= DP_TRAIN_VOLTAGE_SWING_LEVEL_3)
voltage |= DP_TRAIN_MAX_SWING_REACHED;
if (preemphasis >= DP_TRAIN_PRE_EMPH_LEVEL_2)
preemphasis |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
for (i = 0; i < dp->mode.lane_cnt; i++)
train_set[i] = voltage | preemphasis;
}
/**
* zynqmp_dp_update_vs_emph - Update the training values
* @dp: DisplayPort IP core structure
*
 * Update the training values based on the request from the sink. The mapped
 * values are predefined, and the values (vs, pe, pc) come from the device manual.
*
* Return: 0 if vs and emph are updated successfully, or the error code returned
* by drm_dp_dpcd_write().
*/
static int zynqmp_dp_update_vs_emph(struct zynqmp_dp *dp)
{
unsigned int i;
int ret;
ret = drm_dp_dpcd_write(&dp->aux, DP_TRAINING_LANE0_SET, dp->train_set,
dp->mode.lane_cnt);
if (ret < 0)
return ret;
for (i = 0; i < dp->mode.lane_cnt; i++) {
u32 reg = ZYNQMP_DP_SUB_TX_PHY_PRECURSOR_LANE_0 + i * 4;
union phy_configure_opts opts = { 0 };
u8 train = dp->train_set[i];
opts.dp.voltage[0] = (train & DP_TRAIN_VOLTAGE_SWING_MASK)
>> DP_TRAIN_VOLTAGE_SWING_SHIFT;
opts.dp.pre[0] = (train & DP_TRAIN_PRE_EMPHASIS_MASK)
>> DP_TRAIN_PRE_EMPHASIS_SHIFT;
phy_configure(dp->phy[i], &opts);
zynqmp_dp_write(dp, reg, 0x2);
}
return 0;
}
/**
* zynqmp_dp_link_train_cr - Train clock recovery
* @dp: DisplayPort IP core structure
*
* Return: 0 if clock recovery train is done successfully, or corresponding
* error code.
*/
static int zynqmp_dp_link_train_cr(struct zynqmp_dp *dp)
{
u8 link_status[DP_LINK_STATUS_SIZE];
u8 lane_cnt = dp->mode.lane_cnt;
u8 vs = 0, tries = 0;
u16 max_tries, i;
bool cr_done;
int ret;
zynqmp_dp_write(dp, ZYNQMP_DP_TRAINING_PATTERN_SET,
DP_TRAINING_PATTERN_1);
ret = drm_dp_dpcd_writeb(&dp->aux, DP_TRAINING_PATTERN_SET,
DP_TRAINING_PATTERN_1 |
DP_LINK_SCRAMBLING_DISABLE);
if (ret < 0)
return ret;
/*
 * 256 loops should be the maximum number of iterations for 4 lanes and 4
 * values. So this loop should exit before 512 iterations.
*/
for (max_tries = 0; max_tries < 512; max_tries++) {
ret = zynqmp_dp_update_vs_emph(dp);
if (ret)
return ret;
drm_dp_link_train_clock_recovery_delay(&dp->aux, dp->dpcd);
ret = drm_dp_dpcd_read_link_status(&dp->aux, link_status);
if (ret < 0)
return ret;
cr_done = drm_dp_clock_recovery_ok(link_status, lane_cnt);
if (cr_done)
break;
for (i = 0; i < lane_cnt; i++)
if (!(dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED))
break;
if (i == lane_cnt)
break;
if ((dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == vs)
tries++;
else
tries = 0;
if (tries == DP_MAX_TRAINING_TRIES)
break;
vs = dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
zynqmp_dp_adjust_train(dp, link_status);
}
if (!cr_done)
return -EIO;
return 0;
}
/**
* zynqmp_dp_link_train_ce - Train channel equalization
* @dp: DisplayPort IP core structure
*
* Return: 0 if channel equalization train is done successfully, or
* corresponding error code.
*/
static int zynqmp_dp_link_train_ce(struct zynqmp_dp *dp)
{
u8 link_status[DP_LINK_STATUS_SIZE];
u8 lane_cnt = dp->mode.lane_cnt;
u32 pat, tries;
int ret;
bool ce_done;
if (dp->dpcd[DP_DPCD_REV] >= DP_V1_2 &&
dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED)
pat = DP_TRAINING_PATTERN_3;
else
pat = DP_TRAINING_PATTERN_2;
zynqmp_dp_write(dp, ZYNQMP_DP_TRAINING_PATTERN_SET, pat);
ret = drm_dp_dpcd_writeb(&dp->aux, DP_TRAINING_PATTERN_SET,
pat | DP_LINK_SCRAMBLING_DISABLE);
if (ret < 0)
return ret;
for (tries = 0; tries < DP_MAX_TRAINING_TRIES; tries++) {
ret = zynqmp_dp_update_vs_emph(dp);
if (ret)
return ret;
drm_dp_link_train_channel_eq_delay(&dp->aux, dp->dpcd);
ret = drm_dp_dpcd_read_link_status(&dp->aux, link_status);
if (ret < 0)
return ret;
ce_done = drm_dp_channel_eq_ok(link_status, lane_cnt);
if (ce_done)
break;
zynqmp_dp_adjust_train(dp, link_status);
}
if (!ce_done)
return -EIO;
return 0;
}
/**
* zynqmp_dp_train - Train the link
* @dp: DisplayPort IP core structure
*
* Return: 0 if all trains are done successfully, or corresponding error code.
*/
static int zynqmp_dp_train(struct zynqmp_dp *dp)
{
u32 reg;
u8 bw_code = dp->mode.bw_code;
u8 lane_cnt = dp->mode.lane_cnt;
u8 aux_lane_cnt = lane_cnt;
bool enhanced;
int ret;
zynqmp_dp_write(dp, ZYNQMP_DP_LANE_COUNT_SET, lane_cnt);
enhanced = drm_dp_enhanced_frame_cap(dp->dpcd);
if (enhanced) {
zynqmp_dp_write(dp, ZYNQMP_DP_ENHANCED_FRAME_EN, 1);
aux_lane_cnt |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
}
if (dp->dpcd[3] & 0x1) {
zynqmp_dp_write(dp, ZYNQMP_DP_DOWNSPREAD_CTL, 1);
drm_dp_dpcd_writeb(&dp->aux, DP_DOWNSPREAD_CTRL,
DP_SPREAD_AMP_0_5);
} else {
zynqmp_dp_write(dp, ZYNQMP_DP_DOWNSPREAD_CTL, 0);
drm_dp_dpcd_writeb(&dp->aux, DP_DOWNSPREAD_CTRL, 0);
}
ret = drm_dp_dpcd_writeb(&dp->aux, DP_LANE_COUNT_SET, aux_lane_cnt);
if (ret < 0) {
dev_err(dp->dev, "failed to set lane count\n");
return ret;
}
ret = drm_dp_dpcd_writeb(&dp->aux, DP_MAIN_LINK_CHANNEL_CODING_SET,
DP_SET_ANSI_8B10B);
if (ret < 0) {
dev_err(dp->dev, "failed to set ANSI 8B/10B encoding\n");
return ret;
}
ret = drm_dp_dpcd_writeb(&dp->aux, DP_LINK_BW_SET, bw_code);
if (ret < 0) {
dev_err(dp->dev, "failed to set DP bandwidth\n");
return ret;
}
zynqmp_dp_write(dp, ZYNQMP_DP_LINK_BW_SET, bw_code);
switch (bw_code) {
case DP_LINK_BW_1_62:
reg = ZYNQMP_DP_PHY_CLOCK_SELECT_1_62G;
break;
case DP_LINK_BW_2_7:
reg = ZYNQMP_DP_PHY_CLOCK_SELECT_2_70G;
break;
case DP_LINK_BW_5_4:
default:
reg = ZYNQMP_DP_PHY_CLOCK_SELECT_5_40G;
break;
}
zynqmp_dp_write(dp, ZYNQMP_DP_PHY_CLOCK_SELECT, reg);
ret = zynqmp_dp_phy_ready(dp);
if (ret < 0)
return ret;
zynqmp_dp_write(dp, ZYNQMP_DP_SCRAMBLING_DISABLE, 1);
memset(dp->train_set, 0, sizeof(dp->train_set));
ret = zynqmp_dp_link_train_cr(dp);
if (ret)
return ret;
ret = zynqmp_dp_link_train_ce(dp);
if (ret)
return ret;
ret = drm_dp_dpcd_writeb(&dp->aux, DP_TRAINING_PATTERN_SET,
DP_TRAINING_PATTERN_DISABLE);
if (ret < 0) {
dev_err(dp->dev, "failed to disable training pattern\n");
return ret;
}
zynqmp_dp_write(dp, ZYNQMP_DP_TRAINING_PATTERN_SET,
DP_TRAINING_PATTERN_DISABLE);
zynqmp_dp_write(dp, ZYNQMP_DP_SCRAMBLING_DISABLE, 0);
return 0;
}
/**
* zynqmp_dp_train_loop - Downshift the link rate during training
* @dp: DisplayPort IP core structure
*
* Train the link by downshifting the link rate if training is not successful.
*/
static void zynqmp_dp_train_loop(struct zynqmp_dp *dp)
{
struct zynqmp_dp_mode *mode = &dp->mode;
u8 bw = mode->bw_code;
int ret;
do {
if (dp->status == connector_status_disconnected ||
!dp->enabled)
return;
ret = zynqmp_dp_train(dp);
if (!ret)
return;
ret = zynqmp_dp_mode_configure(dp, mode->pclock, bw);
if (ret < 0)
goto err_out;
bw = ret;
} while (bw >= DP_LINK_BW_1_62);
err_out:
dev_err(dp->dev, "failed to train the DP link\n");
}
/* -----------------------------------------------------------------------------
* DisplayPort AUX
*/
#define AUX_READ_BIT 0x1
/**
* zynqmp_dp_aux_cmd_submit - Submit aux command
* @dp: DisplayPort IP core structure
* @cmd: aux command
* @addr: aux address
* @buf: buffer for command data
* @bytes: number of bytes for @buf
* @reply: reply code to be returned
*
* Submit an aux command. All aux related commands, native or i2c aux
* read/write, are submitted through this function. The function is mapped to
 * the transfer function of struct drm_dp_aux. This function involves multiple
 * register reads/writes, so synchronization is needed, and it is handled by
 * drm_dp_helper using @hw_mutex. The calling thread sleeps if there's no
 * immediate reply to the command submission. The reply code is
* returned at @reply if @reply != NULL.
*
* Return: 0 if the command is submitted properly, or corresponding error code:
* -EBUSY when there is any request already being processed
 * -ETIMEDOUT when waiting for the reply times out
 * -EIO when fewer bytes than requested are received
*/
static int zynqmp_dp_aux_cmd_submit(struct zynqmp_dp *dp, u32 cmd, u16 addr,
u8 *buf, u8 bytes, u8 *reply)
{
bool is_read = (cmd & AUX_READ_BIT) ? true : false;
u32 reg, i;
reg = zynqmp_dp_read(dp, ZYNQMP_DP_INTERRUPT_SIGNAL_STATE);
if (reg & ZYNQMP_DP_INTERRUPT_SIGNAL_STATE_REQUEST)
return -EBUSY;
zynqmp_dp_write(dp, ZYNQMP_DP_AUX_ADDRESS, addr);
if (!is_read)
for (i = 0; i < bytes; i++)
zynqmp_dp_write(dp, ZYNQMP_DP_AUX_WRITE_FIFO,
buf[i]);
reg = cmd << ZYNQMP_DP_AUX_COMMAND_CMD_SHIFT;
if (!buf || !bytes)
reg |= ZYNQMP_DP_AUX_COMMAND_ADDRESS_ONLY;
else
reg |= (bytes - 1) << ZYNQMP_DP_AUX_COMMAND_BYTES_SHIFT;
zynqmp_dp_write(dp, ZYNQMP_DP_AUX_COMMAND, reg);
        /* Wait up to 2ms for the reply to be delivered */
for (i = 0; ; i++) {
reg = zynqmp_dp_read(dp, ZYNQMP_DP_INTERRUPT_SIGNAL_STATE);
if (reg & ZYNQMP_DP_INTERRUPT_SIGNAL_STATE_REPLY)
break;
if (reg & ZYNQMP_DP_INTERRUPT_SIGNAL_STATE_REPLY_TIMEOUT ||
i == 2)
return -ETIMEDOUT;
usleep_range(1000, 1100);
}
reg = zynqmp_dp_read(dp, ZYNQMP_DP_AUX_REPLY_CODE);
if (reply)
*reply = reg;
if (is_read &&
(reg == ZYNQMP_DP_AUX_REPLY_CODE_AUX_ACK ||
reg == ZYNQMP_DP_AUX_REPLY_CODE_I2C_ACK)) {
reg = zynqmp_dp_read(dp, ZYNQMP_DP_REPLY_DATA_COUNT);
if ((reg & ZYNQMP_DP_REPLY_DATA_COUNT_MASK) != bytes)
return -EIO;
for (i = 0; i < bytes; i++)
buf[i] = zynqmp_dp_read(dp, ZYNQMP_DP_AUX_REPLY_DATA);
}
return 0;
}
static ssize_t
zynqmp_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
struct zynqmp_dp *dp = container_of(aux, struct zynqmp_dp, aux);
int ret;
unsigned int i, iter;
/* Number of loops = timeout in msec / aux delay (400 usec) */
iter = zynqmp_dp_aux_timeout_ms * 1000 / 400;
iter = iter ? iter : 1;
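        /*
         * Illustrative (not from the original source): a 50 ms timeout
         * would give 50 * 1000 / 400 = 125 attempts.
         */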
for (i = 0; i < iter; i++) {
ret = zynqmp_dp_aux_cmd_submit(dp, msg->request, msg->address,
msg->buffer, msg->size,
&msg->reply);
if (!ret) {
dev_dbg(dp->dev, "aux %d retries\n", i);
return msg->size;
}
if (dp->status == connector_status_disconnected) {
dev_dbg(dp->dev, "no connected aux device\n");
return -ENODEV;
}
usleep_range(400, 500);
}
dev_dbg(dp->dev, "failed to do aux transfer (%d)\n", ret);
return ret;
}
/**
* zynqmp_dp_aux_init - Initialize and register the DP AUX
* @dp: DisplayPort IP core structure
*
* Program the AUX clock divider and filter and register the DP AUX adapter.
*
* Return: 0 on success, error value otherwise
*/
static int zynqmp_dp_aux_init(struct zynqmp_dp *dp)
{
unsigned long rate;
unsigned int w;
/*
* The AUX_SIGNAL_WIDTH_FILTER is the number of APB clock cycles
* corresponding to the AUX pulse. Allowable values are 8, 16, 24, 32,
 * 40 and 48. The AUX pulse width must be between 0.4µs and 0.6µs:
 * compute the width corresponding to 0.4µs, rounded up to a multiple of 8,
 * and make sure it stays below 0.6µs and within the allowable values.
*/
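        /*
         * Worked example (illustrative, not part of the original source):
         * with a 100 MHz APB clock, w = DIV_ROUND_UP(400000000, 80000000) * 8
         * = 40 cycles, i.e. a 0.4µs pulse. 40 <= 48 and 40 <= 6 * 100 / 10
         * = 60, so the check passes and the register is programmed with a
         * filter width of 40 and a divider of 100.
         */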
rate = clk_get_rate(dp->dpsub->apb_clk);
w = DIV_ROUND_UP(4 * rate, 1000 * 1000 * 10 * 8) * 8;
if (w > 6 * rate / (1000 * 1000 * 10) || w > 48) {
dev_err(dp->dev, "aclk frequency too high\n");
return -EINVAL;
}
zynqmp_dp_write(dp, ZYNQMP_DP_AUX_CLK_DIVIDER,
(w << ZYNQMP_DP_AUX_CLK_DIVIDER_AUX_FILTER_SHIFT) |
(rate / (1000 * 1000)));
dp->aux.name = "ZynqMP DP AUX";
dp->aux.dev = dp->dev;
dp->aux.drm_dev = dp->bridge.dev;
dp->aux.transfer = zynqmp_dp_aux_transfer;
return drm_dp_aux_register(&dp->aux);
}
/**
* zynqmp_dp_aux_cleanup - Cleanup the DP AUX
* @dp: DisplayPort IP core structure
*
* Unregister the DP AUX adapter.
*/
static void zynqmp_dp_aux_cleanup(struct zynqmp_dp *dp)
{
drm_dp_aux_unregister(&dp->aux);
}
/* -----------------------------------------------------------------------------
* DisplayPort Generic Support
*/
/**
* zynqmp_dp_update_misc - Write the misc registers
* @dp: DisplayPort IP core structure
*
* The misc register values are stored in the structure, and this
* function applies the values into the registers.
*/
static void zynqmp_dp_update_misc(struct zynqmp_dp *dp)
{
zynqmp_dp_write(dp, ZYNQMP_DP_MAIN_STREAM_MISC0, dp->config.misc0);
zynqmp_dp_write(dp, ZYNQMP_DP_MAIN_STREAM_MISC1, dp->config.misc1);
}
/**
* zynqmp_dp_set_format - Set the input format
* @dp: DisplayPort IP core structure
* @info: Display info
* @format: input format
* @bpc: bits per component
*
* Update misc register values based on input @format and @bpc.
*
* Return: 0 on success, or -EINVAL.
*/
static int zynqmp_dp_set_format(struct zynqmp_dp *dp,
const struct drm_display_info *info,
enum zynqmp_dpsub_format format,
unsigned int bpc)
{
struct zynqmp_dp_config *config = &dp->config;
unsigned int num_colors;
config->misc0 &= ~ZYNQMP_DP_MAIN_STREAM_MISC0_COMP_FORMAT_MASK;
config->misc1 &= ~ZYNQMP_DP_MAIN_STREAM_MISC1_Y_ONLY_EN;
switch (format) {
case ZYNQMP_DPSUB_FORMAT_RGB:
config->misc0 |= ZYNQMP_DP_MAIN_STREAM_MISC0_COMP_FORMAT_RGB;
num_colors = 3;
break;
case ZYNQMP_DPSUB_FORMAT_YCRCB444:
config->misc0 |= ZYNQMP_DP_MAIN_STREAM_MISC0_COMP_FORMAT_YCRCB_444;
num_colors = 3;
break;
case ZYNQMP_DPSUB_FORMAT_YCRCB422:
config->misc0 |= ZYNQMP_DP_MAIN_STREAM_MISC0_COMP_FORMAT_YCRCB_422;
num_colors = 2;
break;
case ZYNQMP_DPSUB_FORMAT_YONLY:
config->misc1 |= ZYNQMP_DP_MAIN_STREAM_MISC1_Y_ONLY_EN;
num_colors = 1;
break;
default:
dev_err(dp->dev, "Invalid colormetry in DT\n");
return -EINVAL;
}
if (info && info->bpc && bpc > info->bpc) {
dev_warn(dp->dev,
"downgrading requested %ubpc to display limit %ubpc\n",
bpc, info->bpc);
bpc = info->bpc;
}
config->misc0 &= ~ZYNQMP_DP_MAIN_STREAM_MISC0_BPC_MASK;
switch (bpc) {
case 6:
config->misc0 |= ZYNQMP_DP_MAIN_STREAM_MISC0_BPC_6;
break;
case 8:
config->misc0 |= ZYNQMP_DP_MAIN_STREAM_MISC0_BPC_8;
break;
case 10:
config->misc0 |= ZYNQMP_DP_MAIN_STREAM_MISC0_BPC_10;
break;
case 12:
config->misc0 |= ZYNQMP_DP_MAIN_STREAM_MISC0_BPC_12;
break;
case 16:
config->misc0 |= ZYNQMP_DP_MAIN_STREAM_MISC0_BPC_16;
break;
default:
dev_warn(dp->dev, "Not supported bpc (%u). fall back to 8bpc\n",
bpc);
config->misc0 |= ZYNQMP_DP_MAIN_STREAM_MISC0_BPC_8;
bpc = 8;
break;
}
/* Update the current bpp based on the format. */
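        /*
         * For example (illustrative): RGB at 8 bpc gives 24 bpp, YCbCr 4:2:2
         * at 10 bpc gives 20 bpp, and Y-only at 8 bpc gives 8 bpp.
         */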
config->bpp = bpc * num_colors;
return 0;
}
/**
* zynqmp_dp_encoder_mode_set_transfer_unit - Set the transfer unit values
* @dp: DisplayPort IP core structure
* @mode: requested display mode
*
* Set the transfer unit, and calculate all transfer unit size related values.
* Calculation is based on DP and IP core specification.
*/
static void
zynqmp_dp_encoder_mode_set_transfer_unit(struct zynqmp_dp *dp,
const struct drm_display_mode *mode)
{
u32 tu = ZYNQMP_DP_MSA_TRANSFER_UNIT_SIZE_TU_SIZE_DEF;
u32 bw, vid_kbytes, avg_bytes_per_tu, init_wait;
/* Use the max transfer unit size (default) */
zynqmp_dp_write(dp, ZYNQMP_DP_MSA_TRANSFER_UNIT_SIZE, tu);
vid_kbytes = mode->clock * (dp->config.bpp / 8);
bw = drm_dp_bw_code_to_link_rate(dp->mode.bw_code);
avg_bytes_per_tu = vid_kbytes * tu / (dp->mode.lane_cnt * bw / 1000);
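        /*
         * Worked example (illustrative, not from the original source): for a
         * 148500 kHz mode at 24 bpp on a 2-lane HBR2 link, vid_kbytes =
         * 445500, bw = 540000 and avg_bytes_per_tu = 445500 * 64 / 1080 =
         * 26400, so the registers below get 26 bytes and a fraction of 400,
         * and init_wait becomes 64 - 26 = 38.
         */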
zynqmp_dp_write(dp, ZYNQMP_DP_MIN_BYTES_PER_TU,
avg_bytes_per_tu / 1000);
zynqmp_dp_write(dp, ZYNQMP_DP_FRAC_BYTES_PER_TU,
avg_bytes_per_tu % 1000);
/* Configure the initial wait cycle based on transfer unit size */
if (tu < (avg_bytes_per_tu / 1000))
init_wait = 0;
else if ((avg_bytes_per_tu / 1000) <= 4)
init_wait = tu;
else
init_wait = tu - avg_bytes_per_tu / 1000;
zynqmp_dp_write(dp, ZYNQMP_DP_INIT_WAIT, init_wait);
}
/**
* zynqmp_dp_encoder_mode_set_stream - Configure the main stream
* @dp: DisplayPort IP core structure
* @mode: requested display mode
*
* Configure the main stream based on the requested mode @mode. Calculation is
* based on IP core specification.
*/
static void zynqmp_dp_encoder_mode_set_stream(struct zynqmp_dp *dp,
const struct drm_display_mode *mode)
{
u8 lane_cnt = dp->mode.lane_cnt;
u32 reg, wpl;
unsigned int rate;
zynqmp_dp_write(dp, ZYNQMP_DP_MAIN_STREAM_HTOTAL, mode->htotal);
zynqmp_dp_write(dp, ZYNQMP_DP_MAIN_STREAM_VTOTAL, mode->vtotal);
zynqmp_dp_write(dp, ZYNQMP_DP_MAIN_STREAM_POLARITY,
(!!(mode->flags & DRM_MODE_FLAG_PVSYNC) <<
ZYNQMP_DP_MAIN_STREAM_POLARITY_VSYNC_SHIFT) |
(!!(mode->flags & DRM_MODE_FLAG_PHSYNC) <<
ZYNQMP_DP_MAIN_STREAM_POLARITY_HSYNC_SHIFT));
zynqmp_dp_write(dp, ZYNQMP_DP_MAIN_STREAM_HSWIDTH,
mode->hsync_end - mode->hsync_start);
zynqmp_dp_write(dp, ZYNQMP_DP_MAIN_STREAM_VSWIDTH,
mode->vsync_end - mode->vsync_start);
zynqmp_dp_write(dp, ZYNQMP_DP_MAIN_STREAM_HRES, mode->hdisplay);
zynqmp_dp_write(dp, ZYNQMP_DP_MAIN_STREAM_VRES, mode->vdisplay);
zynqmp_dp_write(dp, ZYNQMP_DP_MAIN_STREAM_HSTART,
mode->htotal - mode->hsync_start);
zynqmp_dp_write(dp, ZYNQMP_DP_MAIN_STREAM_VSTART,
mode->vtotal - mode->vsync_start);
/* In synchronous mode, set the dividers */
if (dp->config.misc0 & ZYNQMP_DP_MAIN_STREAM_MISC0_SYNC_LOCK) {
reg = drm_dp_bw_code_to_link_rate(dp->mode.bw_code);
zynqmp_dp_write(dp, ZYNQMP_DP_MAIN_STREAM_N_VID, reg);
zynqmp_dp_write(dp, ZYNQMP_DP_MAIN_STREAM_M_VID, mode->clock);
rate = zynqmp_dpsub_get_audio_clk_rate(dp->dpsub);
if (rate) {
dev_dbg(dp->dev, "Audio rate: %d\n", rate / 512);
zynqmp_dp_write(dp, ZYNQMP_DP_TX_N_AUD, reg);
zynqmp_dp_write(dp, ZYNQMP_DP_TX_M_AUD, rate / 1000);
}
}
        /* Only 2-channel audio is supported for now */
if (zynqmp_dpsub_audio_enabled(dp->dpsub))
zynqmp_dp_write(dp, ZYNQMP_DP_TX_AUDIO_CHANNELS, 1);
zynqmp_dp_write(dp, ZYNQMP_DP_USER_PIX_WIDTH, 1);
/* Translate to the native 16 bit datapath based on IP core spec */
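        /*
         * For example (illustrative): a 1920-pixel-wide mode at 24 bpp gives
         * wpl = (1920 * 24 + 15) / 16 = 2880 words, and with 2 lanes the
         * register below is programmed with 2880 + 0 - 2 = 2878.
         */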
wpl = (mode->hdisplay * dp->config.bpp + 15) / 16;
reg = wpl + wpl % lane_cnt - lane_cnt;
zynqmp_dp_write(dp, ZYNQMP_DP_USER_DATA_COUNT_PER_LANE, reg);
}
/* -----------------------------------------------------------------------------
* DISP Configuration
*/
static void zynqmp_dp_disp_enable(struct zynqmp_dp *dp,
struct drm_bridge_state *old_bridge_state)
{
enum zynqmp_dpsub_layer_id layer_id;
struct zynqmp_disp_layer *layer;
const struct drm_format_info *info;
if (dp->dpsub->connected_ports & BIT(ZYNQMP_DPSUB_PORT_LIVE_VIDEO))
layer_id = ZYNQMP_DPSUB_LAYER_VID;
else if (dp->dpsub->connected_ports & BIT(ZYNQMP_DPSUB_PORT_LIVE_GFX))
layer_id = ZYNQMP_DPSUB_LAYER_GFX;
else
return;
layer = dp->dpsub->layers[layer_id];
/* TODO: Make the format configurable. */
info = drm_format_info(DRM_FORMAT_YUV422);
zynqmp_disp_layer_set_format(layer, info);
zynqmp_disp_layer_enable(layer, ZYNQMP_DPSUB_LAYER_LIVE);
if (layer_id == ZYNQMP_DPSUB_LAYER_GFX)
zynqmp_disp_blend_set_global_alpha(dp->dpsub->disp, true, 255);
else
zynqmp_disp_blend_set_global_alpha(dp->dpsub->disp, false, 0);
zynqmp_disp_enable(dp->dpsub->disp);
}
static void zynqmp_dp_disp_disable(struct zynqmp_dp *dp,
struct drm_bridge_state *old_bridge_state)
{
struct zynqmp_disp_layer *layer;
if (dp->dpsub->connected_ports & BIT(ZYNQMP_DPSUB_PORT_LIVE_VIDEO))
layer = dp->dpsub->layers[ZYNQMP_DPSUB_LAYER_VID];
else if (dp->dpsub->connected_ports & BIT(ZYNQMP_DPSUB_PORT_LIVE_GFX))
layer = dp->dpsub->layers[ZYNQMP_DPSUB_LAYER_GFX];
else
return;
zynqmp_disp_disable(dp->dpsub->disp);
zynqmp_disp_layer_disable(layer);
}
/* -----------------------------------------------------------------------------
* DRM Bridge
*/
static int zynqmp_dp_bridge_attach(struct drm_bridge *bridge,
enum drm_bridge_attach_flags flags)
{
struct zynqmp_dp *dp = bridge_to_dp(bridge);
int ret;
/* Initialize and register the AUX adapter. */
ret = zynqmp_dp_aux_init(dp);
if (ret) {
dev_err(dp->dev, "failed to initialize DP aux\n");
return ret;
}
if (dp->next_bridge) {
ret = drm_bridge_attach(bridge->encoder, dp->next_bridge,
bridge, flags);
if (ret < 0)
goto error;
}
/* Now that initialisation is complete, enable interrupts. */
zynqmp_dp_write(dp, ZYNQMP_DP_INT_EN, ZYNQMP_DP_INT_ALL);
return 0;
error:
zynqmp_dp_aux_cleanup(dp);
return ret;
}
static void zynqmp_dp_bridge_detach(struct drm_bridge *bridge)
{
struct zynqmp_dp *dp = bridge_to_dp(bridge);
zynqmp_dp_aux_cleanup(dp);
}
static enum drm_mode_status
zynqmp_dp_bridge_mode_valid(struct drm_bridge *bridge,
const struct drm_display_info *info,
const struct drm_display_mode *mode)
{
struct zynqmp_dp *dp = bridge_to_dp(bridge);
int rate;
if (mode->clock > ZYNQMP_MAX_FREQ) {
dev_dbg(dp->dev, "filtered mode %s for high pixel rate\n",
mode->name);
drm_mode_debug_printmodeline(mode);
return MODE_CLOCK_HIGH;
}
/* Check with link rate and lane count */
rate = zynqmp_dp_max_rate(dp->link_config.max_rate,
dp->link_config.max_lanes, dp->config.bpp);
if (mode->clock > rate) {
dev_dbg(dp->dev, "filtered mode %s for high pixel rate\n",
mode->name);
drm_mode_debug_printmodeline(mode);
return MODE_CLOCK_HIGH;
}
return MODE_OK;
}
static void zynqmp_dp_bridge_atomic_enable(struct drm_bridge *bridge,
struct drm_bridge_state *old_bridge_state)
{
struct zynqmp_dp *dp = bridge_to_dp(bridge);
struct drm_atomic_state *state = old_bridge_state->base.state;
const struct drm_crtc_state *crtc_state;
const struct drm_display_mode *adjusted_mode;
const struct drm_display_mode *mode;
struct drm_connector *connector;
struct drm_crtc *crtc;
unsigned int i;
int rate;
int ret;
pm_runtime_get_sync(dp->dev);
zynqmp_dp_disp_enable(dp, old_bridge_state);
/*
* Retrieve the CRTC mode and adjusted mode. This requires a little
* dance to go from the bridge to the encoder, to the connector and to
* the CRTC.
*/
connector = drm_atomic_get_new_connector_for_encoder(state,
bridge->encoder);
crtc = drm_atomic_get_new_connector_state(state, connector)->crtc;
crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
adjusted_mode = &crtc_state->adjusted_mode;
mode = &crtc_state->mode;
zynqmp_dp_set_format(dp, &connector->display_info,
ZYNQMP_DPSUB_FORMAT_RGB, 8);
/* Check again as bpp or format might have been changed */
rate = zynqmp_dp_max_rate(dp->link_config.max_rate,
dp->link_config.max_lanes, dp->config.bpp);
if (mode->clock > rate) {
dev_err(dp->dev, "mode %s has too high pixel rate\n",
mode->name);
drm_mode_debug_printmodeline(mode);
}
/* Configure the mode */
ret = zynqmp_dp_mode_configure(dp, adjusted_mode->clock, 0);
if (ret < 0) {
pm_runtime_put_sync(dp->dev);
return;
}
zynqmp_dp_encoder_mode_set_transfer_unit(dp, adjusted_mode);
zynqmp_dp_encoder_mode_set_stream(dp, adjusted_mode);
/* Enable the encoder */
dp->enabled = true;
zynqmp_dp_update_misc(dp);
if (zynqmp_dpsub_audio_enabled(dp->dpsub))
zynqmp_dp_write(dp, ZYNQMP_DP_TX_AUDIO_CONTROL, 1);
zynqmp_dp_write(dp, ZYNQMP_DP_TX_PHY_POWER_DOWN, 0);
if (dp->status == connector_status_connected) {
for (i = 0; i < 3; i++) {
ret = drm_dp_dpcd_writeb(&dp->aux, DP_SET_POWER,
DP_SET_POWER_D0);
if (ret == 1)
break;
usleep_range(300, 500);
}
/* Some monitors take time to wake up properly */
msleep(zynqmp_dp_power_on_delay_ms);
}
if (ret != 1)
dev_dbg(dp->dev, "DP aux failed\n");
else
zynqmp_dp_train_loop(dp);
zynqmp_dp_write(dp, ZYNQMP_DP_SOFTWARE_RESET,
ZYNQMP_DP_SOFTWARE_RESET_ALL);
zynqmp_dp_write(dp, ZYNQMP_DP_MAIN_STREAM_ENABLE, 1);
}
static void zynqmp_dp_bridge_atomic_disable(struct drm_bridge *bridge,
struct drm_bridge_state *old_bridge_state)
{
struct zynqmp_dp *dp = bridge_to_dp(bridge);
dp->enabled = false;
cancel_delayed_work(&dp->hpd_work);
zynqmp_dp_write(dp, ZYNQMP_DP_MAIN_STREAM_ENABLE, 0);
drm_dp_dpcd_writeb(&dp->aux, DP_SET_POWER, DP_SET_POWER_D3);
zynqmp_dp_write(dp, ZYNQMP_DP_TX_PHY_POWER_DOWN,
ZYNQMP_DP_TX_PHY_POWER_DOWN_ALL);
if (zynqmp_dpsub_audio_enabled(dp->dpsub))
zynqmp_dp_write(dp, ZYNQMP_DP_TX_AUDIO_CONTROL, 0);
zynqmp_dp_disp_disable(dp, old_bridge_state);
pm_runtime_put_sync(dp->dev);
}
#define ZYNQMP_DP_MIN_H_BACKPORCH 20
static int zynqmp_dp_bridge_atomic_check(struct drm_bridge *bridge,
struct drm_bridge_state *bridge_state,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
struct zynqmp_dp *dp = bridge_to_dp(bridge);
struct drm_display_mode *mode = &crtc_state->mode;
struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
int diff = mode->htotal - mode->hsync_end;
/*
* ZynqMP DP requires horizontal backporch to be greater than 12.
* This limitation may not be compatible with the sink device.
*/
if (diff < ZYNQMP_DP_MIN_H_BACKPORCH) {
int vrefresh = (adjusted_mode->clock * 1000) /
(adjusted_mode->vtotal * adjusted_mode->htotal);
dev_dbg(dp->dev, "hbackporch adjusted: %d to %d",
diff, ZYNQMP_DP_MIN_H_BACKPORCH - diff);
diff = ZYNQMP_DP_MIN_H_BACKPORCH - diff;
adjusted_mode->htotal += diff;
adjusted_mode->clock = adjusted_mode->vtotal *
adjusted_mode->htotal * vrefresh / 1000;
}
return 0;
}
static enum drm_connector_status zynqmp_dp_bridge_detect(struct drm_bridge *bridge)
{
struct zynqmp_dp *dp = bridge_to_dp(bridge);
struct zynqmp_dp_link_config *link_config = &dp->link_config;
u32 state, i;
int ret;
/*
         * This is a heuristic. With some monitors it takes some time (e.g. 100
         * to 500 msec) for the HPD signal to be asserted.
*/
for (i = 0; i < 10; i++) {
state = zynqmp_dp_read(dp, ZYNQMP_DP_INTERRUPT_SIGNAL_STATE);
if (state & ZYNQMP_DP_INTERRUPT_SIGNAL_STATE_HPD)
break;
msleep(100);
}
if (state & ZYNQMP_DP_INTERRUPT_SIGNAL_STATE_HPD) {
ret = drm_dp_dpcd_read(&dp->aux, 0x0, dp->dpcd,
sizeof(dp->dpcd));
if (ret < 0) {
dev_dbg(dp->dev, "DPCD read failed");
goto disconnected;
}
link_config->max_rate = min_t(int,
drm_dp_max_link_rate(dp->dpcd),
DP_HIGH_BIT_RATE2);
link_config->max_lanes = min_t(u8,
drm_dp_max_lane_count(dp->dpcd),
dp->num_lanes);
dp->status = connector_status_connected;
return connector_status_connected;
}
disconnected:
dp->status = connector_status_disconnected;
return connector_status_disconnected;
}
static struct edid *zynqmp_dp_bridge_get_edid(struct drm_bridge *bridge,
struct drm_connector *connector)
{
struct zynqmp_dp *dp = bridge_to_dp(bridge);
return drm_get_edid(connector, &dp->aux.ddc);
}
static const struct drm_bridge_funcs zynqmp_dp_bridge_funcs = {
.attach = zynqmp_dp_bridge_attach,
.detach = zynqmp_dp_bridge_detach,
.mode_valid = zynqmp_dp_bridge_mode_valid,
.atomic_enable = zynqmp_dp_bridge_atomic_enable,
.atomic_disable = zynqmp_dp_bridge_atomic_disable,
.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
.atomic_reset = drm_atomic_helper_bridge_reset,
.atomic_check = zynqmp_dp_bridge_atomic_check,
.detect = zynqmp_dp_bridge_detect,
.get_edid = zynqmp_dp_bridge_get_edid,
};
/* -----------------------------------------------------------------------------
* Interrupt Handling
*/
/**
* zynqmp_dp_enable_vblank - Enable vblank
* @dp: DisplayPort IP core structure
*
* Enable vblank interrupt
*/
void zynqmp_dp_enable_vblank(struct zynqmp_dp *dp)
{
zynqmp_dp_write(dp, ZYNQMP_DP_INT_EN, ZYNQMP_DP_INT_VBLANK_START);
}
/**
* zynqmp_dp_disable_vblank - Disable vblank
* @dp: DisplayPort IP core structure
*
* Disable vblank interrupt
*/
void zynqmp_dp_disable_vblank(struct zynqmp_dp *dp)
{
zynqmp_dp_write(dp, ZYNQMP_DP_INT_DS, ZYNQMP_DP_INT_VBLANK_START);
}
static void zynqmp_dp_hpd_work_func(struct work_struct *work)
{
struct zynqmp_dp *dp = container_of(work, struct zynqmp_dp,
hpd_work.work);
enum drm_connector_status status;
status = zynqmp_dp_bridge_detect(&dp->bridge);
drm_bridge_hpd_notify(&dp->bridge, status);
}
static irqreturn_t zynqmp_dp_irq_handler(int irq, void *data)
{
struct zynqmp_dp *dp = (struct zynqmp_dp *)data;
u32 status, mask;
status = zynqmp_dp_read(dp, ZYNQMP_DP_INT_STATUS);
mask = zynqmp_dp_read(dp, ZYNQMP_DP_INT_MASK);
if (!(status & ~mask))
return IRQ_NONE;
        /* dbg for diagnostics; there is not much the driver can do */
if (status & ZYNQMP_DP_INT_CHBUF_UNDERFLW_MASK)
dev_dbg_ratelimited(dp->dev, "underflow interrupt\n");
if (status & ZYNQMP_DP_INT_CHBUF_OVERFLW_MASK)
dev_dbg_ratelimited(dp->dev, "overflow interrupt\n");
zynqmp_dp_write(dp, ZYNQMP_DP_INT_STATUS, status);
if (status & ZYNQMP_DP_INT_VBLANK_START)
zynqmp_dpsub_drm_handle_vblank(dp->dpsub);
if (status & ZYNQMP_DP_INT_HPD_EVENT)
schedule_delayed_work(&dp->hpd_work, 0);
if (status & ZYNQMP_DP_INT_HPD_IRQ) {
int ret;
u8 status[DP_LINK_STATUS_SIZE + 2];
ret = drm_dp_dpcd_read(&dp->aux, DP_SINK_COUNT, status,
DP_LINK_STATUS_SIZE + 2);
if (ret < 0)
goto handled;
if (status[4] & DP_LINK_STATUS_UPDATED ||
!drm_dp_clock_recovery_ok(&status[2], dp->mode.lane_cnt) ||
!drm_dp_channel_eq_ok(&status[2], dp->mode.lane_cnt)) {
zynqmp_dp_train_loop(dp);
}
}
handled:
return IRQ_HANDLED;
}
/* -----------------------------------------------------------------------------
* Initialization & Cleanup
*/
int zynqmp_dp_probe(struct zynqmp_dpsub *dpsub)
{
struct platform_device *pdev = to_platform_device(dpsub->dev);
struct drm_bridge *bridge;
struct zynqmp_dp *dp;
struct resource *res;
int ret;
dp = kzalloc(sizeof(*dp), GFP_KERNEL);
if (!dp)
return -ENOMEM;
dp->dev = &pdev->dev;
dp->dpsub = dpsub;
dp->status = connector_status_disconnected;
INIT_DELAYED_WORK(&dp->hpd_work, zynqmp_dp_hpd_work_func);
/* Acquire all resources (IOMEM, IRQ and PHYs). */
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dp");
dp->iomem = devm_ioremap_resource(dp->dev, res);
if (IS_ERR(dp->iomem)) {
ret = PTR_ERR(dp->iomem);
goto err_free;
}
dp->irq = platform_get_irq(pdev, 0);
if (dp->irq < 0) {
ret = dp->irq;
goto err_free;
}
dp->reset = devm_reset_control_get(dp->dev, NULL);
if (IS_ERR(dp->reset)) {
if (PTR_ERR(dp->reset) != -EPROBE_DEFER)
dev_err(dp->dev, "failed to get reset: %ld\n",
PTR_ERR(dp->reset));
ret = PTR_ERR(dp->reset);
goto err_free;
}
ret = zynqmp_dp_reset(dp, false);
if (ret < 0)
goto err_free;
ret = zynqmp_dp_phy_probe(dp);
if (ret)
goto err_reset;
/* Initialize the bridge. */
bridge = &dp->bridge;
bridge->funcs = &zynqmp_dp_bridge_funcs;
bridge->ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID
| DRM_BRIDGE_OP_HPD;
bridge->type = DRM_MODE_CONNECTOR_DisplayPort;
dpsub->bridge = bridge;
/*
* Acquire the next bridge in the chain. Ignore errors caused by port@5
* not being connected for backward-compatibility with older DTs.
*/
ret = drm_of_find_panel_or_bridge(dp->dev->of_node, 5, 0, NULL,
&dp->next_bridge);
if (ret < 0 && ret != -ENODEV)
goto err_reset;
/* Initialize the hardware. */
dp->config.misc0 &= ~ZYNQMP_DP_MAIN_STREAM_MISC0_SYNC_LOCK;
zynqmp_dp_set_format(dp, NULL, ZYNQMP_DPSUB_FORMAT_RGB, 8);
zynqmp_dp_write(dp, ZYNQMP_DP_TX_PHY_POWER_DOWN,
ZYNQMP_DP_TX_PHY_POWER_DOWN_ALL);
zynqmp_dp_set(dp, ZYNQMP_DP_PHY_RESET, ZYNQMP_DP_PHY_RESET_ALL_RESET);
zynqmp_dp_write(dp, ZYNQMP_DP_FORCE_SCRAMBLER_RESET, 1);
zynqmp_dp_write(dp, ZYNQMP_DP_TRANSMITTER_ENABLE, 0);
zynqmp_dp_write(dp, ZYNQMP_DP_INT_DS, 0xffffffff);
ret = zynqmp_dp_phy_init(dp);
if (ret)
goto err_reset;
zynqmp_dp_write(dp, ZYNQMP_DP_TRANSMITTER_ENABLE, 1);
/*
* Now that the hardware is initialized and won't generate spurious
* interrupts, request the IRQ.
*/
ret = devm_request_threaded_irq(dp->dev, dp->irq, NULL,
zynqmp_dp_irq_handler, IRQF_ONESHOT,
dev_name(dp->dev), dp);
if (ret < 0)
goto err_phy_exit;
dpsub->dp = dp;
dev_dbg(dp->dev, "ZynqMP DisplayPort Tx probed with %u lanes\n",
dp->num_lanes);
return 0;
err_phy_exit:
zynqmp_dp_phy_exit(dp);
err_reset:
zynqmp_dp_reset(dp, true);
err_free:
kfree(dp);
return ret;
}
void zynqmp_dp_remove(struct zynqmp_dpsub *dpsub)
{
struct zynqmp_dp *dp = dpsub->dp;
zynqmp_dp_write(dp, ZYNQMP_DP_INT_DS, ZYNQMP_DP_INT_ALL);
disable_irq(dp->irq);
cancel_delayed_work_sync(&dp->hpd_work);
zynqmp_dp_write(dp, ZYNQMP_DP_TRANSMITTER_ENABLE, 0);
zynqmp_dp_write(dp, ZYNQMP_DP_INT_DS, 0xffffffff);
zynqmp_dp_phy_exit(dp);
zynqmp_dp_reset(dp, true);
}
| linux-master | drivers/gpu/drm/xlnx/zynqmp_dp.c |
// SPDX-License-Identifier: GPL-2.0
/*
* ZynqMP DisplayPort Subsystem Driver
*
* Copyright (C) 2017 - 2020 Xilinx, Inc.
*
* Authors:
* - Hyun Woo Kwon <[email protected]>
* - Laurent Pinchart <[email protected]>
*/
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/of_graph.h>
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_modeset_helper.h>
#include <drm/drm_module.h>
#include "zynqmp_disp.h"
#include "zynqmp_dp.h"
#include "zynqmp_dpsub.h"
#include "zynqmp_kms.h"
/* -----------------------------------------------------------------------------
* Power Management
*/
static int __maybe_unused zynqmp_dpsub_suspend(struct device *dev)
{
struct zynqmp_dpsub *dpsub = dev_get_drvdata(dev);
if (!dpsub->drm)
return 0;
return drm_mode_config_helper_suspend(&dpsub->drm->dev);
}
static int __maybe_unused zynqmp_dpsub_resume(struct device *dev)
{
struct zynqmp_dpsub *dpsub = dev_get_drvdata(dev);
if (!dpsub->drm)
return 0;
return drm_mode_config_helper_resume(&dpsub->drm->dev);
}
static const struct dev_pm_ops zynqmp_dpsub_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(zynqmp_dpsub_suspend, zynqmp_dpsub_resume)
};
/* -----------------------------------------------------------------------------
* DPSUB Configuration
*/
/**
* zynqmp_dpsub_audio_enabled - If the audio is enabled
* @dpsub: DisplayPort subsystem
*
 * Report whether audio is enabled, based on the presence of the audio clock.
 *
 * Return: true if audio is enabled, false otherwise.
*/
bool zynqmp_dpsub_audio_enabled(struct zynqmp_dpsub *dpsub)
{
return !!dpsub->aud_clk;
}
/**
* zynqmp_dpsub_get_audio_clk_rate - Get the current audio clock rate
* @dpsub: DisplayPort subsystem
*
* Return: the current audio clock rate.
*/
unsigned int zynqmp_dpsub_get_audio_clk_rate(struct zynqmp_dpsub *dpsub)
{
        if (!zynqmp_dpsub_audio_enabled(dpsub))
                return 0;
return clk_get_rate(dpsub->aud_clk);
}
/* -----------------------------------------------------------------------------
* Probe & Remove
*/
static int zynqmp_dpsub_init_clocks(struct zynqmp_dpsub *dpsub)
{
int ret;
dpsub->apb_clk = devm_clk_get(dpsub->dev, "dp_apb_clk");
if (IS_ERR(dpsub->apb_clk))
return PTR_ERR(dpsub->apb_clk);
ret = clk_prepare_enable(dpsub->apb_clk);
if (ret) {
dev_err(dpsub->dev, "failed to enable the APB clock\n");
return ret;
}
/*
* Try the live PL video clock, and fall back to the PS clock if the
* live PL video clock isn't valid.
*/
dpsub->vid_clk = devm_clk_get(dpsub->dev, "dp_live_video_in_clk");
if (!IS_ERR(dpsub->vid_clk))
dpsub->vid_clk_from_ps = false;
else if (PTR_ERR(dpsub->vid_clk) == -EPROBE_DEFER)
return PTR_ERR(dpsub->vid_clk);
if (IS_ERR_OR_NULL(dpsub->vid_clk)) {
dpsub->vid_clk = devm_clk_get(dpsub->dev, "dp_vtc_pixel_clk_in");
if (IS_ERR(dpsub->vid_clk)) {
dev_err(dpsub->dev, "failed to init any video clock\n");
return PTR_ERR(dpsub->vid_clk);
}
dpsub->vid_clk_from_ps = true;
}
/*
* Try the live PL audio clock, and fall back to the PS clock if the
         * live PL audio clock isn't valid. A missing audio clock disables audio
         * but isn't an error.
*/
dpsub->aud_clk = devm_clk_get(dpsub->dev, "dp_live_audio_aclk");
if (!IS_ERR(dpsub->aud_clk)) {
dpsub->aud_clk_from_ps = false;
return 0;
}
dpsub->aud_clk = devm_clk_get(dpsub->dev, "dp_aud_clk");
if (!IS_ERR(dpsub->aud_clk)) {
dpsub->aud_clk_from_ps = true;
return 0;
}
dev_info(dpsub->dev, "audio disabled due to missing clock\n");
return 0;
}
static int zynqmp_dpsub_parse_dt(struct zynqmp_dpsub *dpsub)
{
struct device_node *np;
unsigned int i;
/*
* For backward compatibility with old device trees that don't contain
* ports, consider that only the DP output port is connected if no
         * ports child node exists.
*/
np = of_get_child_by_name(dpsub->dev->of_node, "ports");
of_node_put(np);
if (!np) {
dev_warn(dpsub->dev, "missing ports, update DT bindings\n");
dpsub->connected_ports = BIT(ZYNQMP_DPSUB_PORT_OUT_DP);
dpsub->dma_enabled = true;
return 0;
}
/* Check which ports are connected. */
for (i = 0; i < ZYNQMP_DPSUB_NUM_PORTS; ++i) {
struct device_node *np;
np = of_graph_get_remote_node(dpsub->dev->of_node, i, -1);
if (np) {
dpsub->connected_ports |= BIT(i);
of_node_put(np);
}
}
/* Sanity checks. */
if ((dpsub->connected_ports & BIT(ZYNQMP_DPSUB_PORT_LIVE_VIDEO)) &&
(dpsub->connected_ports & BIT(ZYNQMP_DPSUB_PORT_LIVE_GFX))) {
dev_err(dpsub->dev, "only one live video input is supported\n");
return -EINVAL;
}
if ((dpsub->connected_ports & BIT(ZYNQMP_DPSUB_PORT_LIVE_VIDEO)) ||
(dpsub->connected_ports & BIT(ZYNQMP_DPSUB_PORT_LIVE_GFX))) {
if (dpsub->vid_clk_from_ps) {
dev_err(dpsub->dev,
"live video input requires PL clock\n");
return -EINVAL;
}
} else {
dpsub->dma_enabled = true;
}
if (dpsub->connected_ports & BIT(ZYNQMP_DPSUB_PORT_LIVE_AUDIO))
dev_warn(dpsub->dev, "live audio unsupported, ignoring\n");
if ((dpsub->connected_ports & BIT(ZYNQMP_DPSUB_PORT_OUT_VIDEO)) ||
(dpsub->connected_ports & BIT(ZYNQMP_DPSUB_PORT_OUT_AUDIO)))
dev_warn(dpsub->dev, "output to PL unsupported, ignoring\n");
if (!(dpsub->connected_ports & BIT(ZYNQMP_DPSUB_PORT_OUT_DP))) {
dev_err(dpsub->dev, "DP output port not connected\n");
return -EINVAL;
}
return 0;
}
void zynqmp_dpsub_release(struct zynqmp_dpsub *dpsub)
{
kfree(dpsub->disp);
kfree(dpsub->dp);
kfree(dpsub);
}
static int zynqmp_dpsub_probe(struct platform_device *pdev)
{
struct zynqmp_dpsub *dpsub;
int ret;
/* Allocate private data. */
dpsub = kzalloc(sizeof(*dpsub), GFP_KERNEL);
if (!dpsub)
return -ENOMEM;
dpsub->dev = &pdev->dev;
platform_set_drvdata(pdev, dpsub);
ret = dma_set_mask(dpsub->dev, DMA_BIT_MASK(ZYNQMP_DISP_MAX_DMA_BIT));
if (ret)
return ret;
/* Try the reserved memory. Proceed if there's none. */
of_reserved_mem_device_init(&pdev->dev);
ret = zynqmp_dpsub_init_clocks(dpsub);
if (ret < 0)
goto err_mem;
ret = zynqmp_dpsub_parse_dt(dpsub);
if (ret < 0)
goto err_mem;
pm_runtime_enable(&pdev->dev);
/*
* DP should be probed first so that the zynqmp_disp can set the output
* format accordingly.
*/
ret = zynqmp_dp_probe(dpsub);
if (ret)
goto err_pm;
ret = zynqmp_disp_probe(dpsub);
if (ret)
goto err_dp;
if (dpsub->dma_enabled) {
ret = zynqmp_dpsub_drm_init(dpsub);
if (ret)
goto err_disp;
} else {
drm_bridge_add(dpsub->bridge);
}
dev_info(&pdev->dev, "ZynqMP DisplayPort Subsystem driver probed");
return 0;
err_disp:
zynqmp_disp_remove(dpsub);
err_dp:
zynqmp_dp_remove(dpsub);
err_pm:
pm_runtime_disable(&pdev->dev);
clk_disable_unprepare(dpsub->apb_clk);
err_mem:
of_reserved_mem_device_release(&pdev->dev);
if (!dpsub->drm)
zynqmp_dpsub_release(dpsub);
return ret;
}
static void zynqmp_dpsub_remove(struct platform_device *pdev)
{
struct zynqmp_dpsub *dpsub = platform_get_drvdata(pdev);
if (dpsub->drm)
zynqmp_dpsub_drm_cleanup(dpsub);
else
drm_bridge_remove(dpsub->bridge);
zynqmp_disp_remove(dpsub);
zynqmp_dp_remove(dpsub);
pm_runtime_disable(&pdev->dev);
clk_disable_unprepare(dpsub->apb_clk);
of_reserved_mem_device_release(&pdev->dev);
if (!dpsub->drm)
zynqmp_dpsub_release(dpsub);
}
static void zynqmp_dpsub_shutdown(struct platform_device *pdev)
{
struct zynqmp_dpsub *dpsub = platform_get_drvdata(pdev);
if (!dpsub->drm)
return;
drm_atomic_helper_shutdown(&dpsub->drm->dev);
}
static const struct of_device_id zynqmp_dpsub_of_match[] = {
{ .compatible = "xlnx,zynqmp-dpsub-1.7", },
{ /* end of table */ },
};
MODULE_DEVICE_TABLE(of, zynqmp_dpsub_of_match);
static struct platform_driver zynqmp_dpsub_driver = {
.probe = zynqmp_dpsub_probe,
.remove_new = zynqmp_dpsub_remove,
.shutdown = zynqmp_dpsub_shutdown,
.driver = {
.name = "zynqmp-dpsub",
.pm = &zynqmp_dpsub_pm_ops,
.of_match_table = zynqmp_dpsub_of_match,
},
};
drm_module_platform_driver(zynqmp_dpsub_driver);
MODULE_AUTHOR("Xilinx, Inc.");
MODULE_DESCRIPTION("ZynqMP DP Subsystem Driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/gpu/drm/xlnx/zynqmp_dpsub.c |
// SPDX-License-Identifier: GPL-2.0
/*
* (C) COPYRIGHT 2016 ARM Limited. All rights reserved.
* Author: Brian Starkey <[email protected]>
*
* ARM Mali DP Writeback connector implementation
*/
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_writeback.h>
#include "malidp_drv.h"
#include "malidp_hw.h"
#include "malidp_mw.h"
#define to_mw_state(_state) (struct malidp_mw_connector_state *)(_state)
struct malidp_mw_connector_state {
struct drm_connector_state base;
dma_addr_t addrs[2];
s32 pitches[2];
u8 format;
u8 n_planes;
bool rgb2yuv_initialized;
const s16 *rgb2yuv_coeffs;
};
static int malidp_mw_connector_get_modes(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
return drm_add_modes_noedid(connector, dev->mode_config.max_width,
dev->mode_config.max_height);
}
static enum drm_mode_status
malidp_mw_connector_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct drm_device *dev = connector->dev;
struct drm_mode_config *mode_config = &dev->mode_config;
int w = mode->hdisplay, h = mode->vdisplay;
if ((w < mode_config->min_width) || (w > mode_config->max_width))
return MODE_BAD_HVALUE;
if ((h < mode_config->min_height) || (h > mode_config->max_height))
return MODE_BAD_VVALUE;
return MODE_OK;
}
static const struct drm_connector_helper_funcs malidp_mw_connector_helper_funcs = {
.get_modes = malidp_mw_connector_get_modes,
.mode_valid = malidp_mw_connector_mode_valid,
};
static void malidp_mw_connector_reset(struct drm_connector *connector)
{
struct malidp_mw_connector_state *mw_state =
kzalloc(sizeof(*mw_state), GFP_KERNEL);
if (connector->state)
__drm_atomic_helper_connector_destroy_state(connector->state);
kfree(connector->state);
__drm_atomic_helper_connector_reset(connector, &mw_state->base);
}
static enum drm_connector_status
malidp_mw_connector_detect(struct drm_connector *connector, bool force)
{
return connector_status_connected;
}
static void malidp_mw_connector_destroy(struct drm_connector *connector)
{
drm_connector_cleanup(connector);
}
static struct drm_connector_state *
malidp_mw_connector_duplicate_state(struct drm_connector *connector)
{
struct malidp_mw_connector_state *mw_state, *mw_current_state;
if (WARN_ON(!connector->state))
return NULL;
mw_state = kzalloc(sizeof(*mw_state), GFP_KERNEL);
if (!mw_state)
return NULL;
mw_current_state = to_mw_state(connector->state);
mw_state->rgb2yuv_coeffs = mw_current_state->rgb2yuv_coeffs;
mw_state->rgb2yuv_initialized = mw_current_state->rgb2yuv_initialized;
__drm_atomic_helper_connector_duplicate_state(connector, &mw_state->base);
return &mw_state->base;
}
static const struct drm_connector_funcs malidp_mw_connector_funcs = {
.reset = malidp_mw_connector_reset,
.detect = malidp_mw_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = malidp_mw_connector_destroy,
.atomic_duplicate_state = malidp_mw_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
static const s16 rgb2yuv_coeffs_bt709_limited[MALIDP_COLORADJ_NUM_COEFFS] = {
47, 157, 16,
-26, -87, 112,
112, -102, -10,
16, 128, 128
};
static int
malidp_mw_encoder_atomic_check(struct drm_encoder *encoder,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
struct malidp_mw_connector_state *mw_state = to_mw_state(conn_state);
struct malidp_drm *malidp = drm_to_malidp(encoder->dev);
struct drm_framebuffer *fb;
int i, n_planes;
if (!conn_state->writeback_job)
return 0;
fb = conn_state->writeback_job->fb;
if ((fb->width != crtc_state->mode.hdisplay) ||
(fb->height != crtc_state->mode.vdisplay)) {
DRM_DEBUG_KMS("Invalid framebuffer size %ux%u\n",
fb->width, fb->height);
return -EINVAL;
}
if (fb->modifier) {
DRM_DEBUG_KMS("Writeback framebuffer does not support modifiers\n");
return -EINVAL;
}
mw_state->format =
malidp_hw_get_format_id(&malidp->dev->hw->map, SE_MEMWRITE,
fb->format->format, !!fb->modifier);
if (mw_state->format == MALIDP_INVALID_FORMAT_ID) {
DRM_DEBUG_KMS("Invalid pixel format %p4cc\n",
&fb->format->format);
return -EINVAL;
}
n_planes = fb->format->num_planes;
for (i = 0; i < n_planes; i++) {
struct drm_gem_dma_object *obj = drm_fb_dma_get_gem_obj(fb, i);
/* memory write buffers are never rotated */
u8 alignment = malidp_hw_get_pitch_align(malidp->dev, 0);
if (fb->pitches[i] & (alignment - 1)) {
DRM_DEBUG_KMS("Invalid pitch %u for plane %d\n",
fb->pitches[i], i);
return -EINVAL;
}
mw_state->pitches[i] = fb->pitches[i];
mw_state->addrs[i] = obj->dma_addr + fb->offsets[i];
}
mw_state->n_planes = n_planes;
if (fb->format->is_yuv)
mw_state->rgb2yuv_coeffs = rgb2yuv_coeffs_bt709_limited;
return 0;
}
static const struct drm_encoder_helper_funcs malidp_mw_encoder_helper_funcs = {
.atomic_check = malidp_mw_encoder_atomic_check,
};
static u32 *get_writeback_formats(struct malidp_drm *malidp, int *n_formats)
{
const struct malidp_hw_regmap *map = &malidp->dev->hw->map;
u32 *formats;
int n, i;
formats = kcalloc(map->n_pixel_formats, sizeof(*formats),
GFP_KERNEL);
if (!formats)
return NULL;
for (n = 0, i = 0; i < map->n_pixel_formats; i++) {
if (map->pixel_formats[i].layer & SE_MEMWRITE)
formats[n++] = map->pixel_formats[i].format;
}
*n_formats = n;
return formats;
}
int malidp_mw_connector_init(struct drm_device *drm)
{
struct malidp_drm *malidp = drm_to_malidp(drm);
u32 *formats;
int ret, n_formats;
if (!malidp->dev->hw->enable_memwrite)
return 0;
drm_connector_helper_add(&malidp->mw_connector.base,
&malidp_mw_connector_helper_funcs);
formats = get_writeback_formats(malidp, &n_formats);
if (!formats)
return -ENOMEM;
ret = drm_writeback_connector_init(drm, &malidp->mw_connector,
&malidp_mw_connector_funcs,
&malidp_mw_encoder_helper_funcs,
formats, n_formats,
1 << drm_crtc_index(&malidp->crtc));
kfree(formats);
if (ret)
return ret;
return 0;
}
void malidp_mw_atomic_commit(struct drm_device *drm,
struct drm_atomic_state *old_state)
{
struct malidp_drm *malidp = drm_to_malidp(drm);
struct drm_writeback_connector *mw_conn = &malidp->mw_connector;
struct drm_connector_state *conn_state = mw_conn->base.state;
struct malidp_hw_device *hwdev = malidp->dev;
struct malidp_mw_connector_state *mw_state;
if (!conn_state)
return;
mw_state = to_mw_state(conn_state);
if (conn_state->writeback_job) {
struct drm_framebuffer *fb = conn_state->writeback_job->fb;
DRM_DEV_DEBUG_DRIVER(drm->dev,
"Enable memwrite %ux%u:%d %pad fmt: %u\n",
fb->width, fb->height,
mw_state->pitches[0],
&mw_state->addrs[0],
mw_state->format);
drm_writeback_queue_job(mw_conn, conn_state);
hwdev->hw->enable_memwrite(hwdev, mw_state->addrs,
mw_state->pitches, mw_state->n_planes,
fb->width, fb->height, mw_state->format,
!mw_state->rgb2yuv_initialized ?
mw_state->rgb2yuv_coeffs : NULL);
mw_state->rgb2yuv_initialized = !!mw_state->rgb2yuv_coeffs;
} else {
DRM_DEV_DEBUG_DRIVER(drm->dev, "Disable memwrite\n");
hwdev->hw->disable_memwrite(hwdev);
}
}
| linux-master | drivers/gpu/drm/arm/malidp_mw.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* (C) COPYRIGHT 2016 ARM Limited. All rights reserved.
* Author: Liviu Dudau <[email protected]>
*
* ARM Mali DP500/DP550/DP650 driver (crtc operations)
*/
#include <linux/clk.h>
#include <linux/pm_runtime.h>
#include <video/videomode.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
#include "malidp_drv.h"
#include "malidp_hw.h"
static enum drm_mode_status malidp_crtc_mode_valid(struct drm_crtc *crtc,
const struct drm_display_mode *mode)
{
struct malidp_drm *malidp = crtc_to_malidp_device(crtc);
struct malidp_hw_device *hwdev = malidp->dev;
/*
* check that the hardware can drive the required clock rate,
* but skip the check if the clock is meant to be disabled (req_rate = 0)
*/
long rate, req_rate = mode->crtc_clock * 1000;
if (req_rate) {
rate = clk_round_rate(hwdev->pxlclk, req_rate);
if (rate != req_rate) {
DRM_DEBUG_DRIVER("pxlclk doesn't support %ld Hz\n",
req_rate);
return MODE_NOCLOCK;
}
}
return MODE_OK;
}
static void malidp_crtc_atomic_enable(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct malidp_drm *malidp = crtc_to_malidp_device(crtc);
struct malidp_hw_device *hwdev = malidp->dev;
struct videomode vm;
int err = pm_runtime_get_sync(crtc->dev->dev);
if (err < 0) {
DRM_DEBUG_DRIVER("Failed to enable runtime power management: %d\n", err);
return;
}
drm_display_mode_to_videomode(&crtc->state->adjusted_mode, &vm);
clk_prepare_enable(hwdev->pxlclk);
/* We rely on firmware to set mclk to a sensible level. */
clk_set_rate(hwdev->pxlclk, crtc->state->adjusted_mode.crtc_clock * 1000);
hwdev->hw->modeset(hwdev, &vm);
hwdev->hw->leave_config_mode(hwdev);
drm_crtc_vblank_on(crtc);
}
static void malidp_crtc_atomic_disable(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct drm_crtc_state *old_state = drm_atomic_get_old_crtc_state(state,
crtc);
struct malidp_drm *malidp = crtc_to_malidp_device(crtc);
struct malidp_hw_device *hwdev = malidp->dev;
int err;
/* always disable planes on the CRTC that is being turned off */
drm_atomic_helper_disable_planes_on_crtc(old_state, false);
drm_crtc_vblank_off(crtc);
hwdev->hw->enter_config_mode(hwdev);
clk_disable_unprepare(hwdev->pxlclk);
err = pm_runtime_put(crtc->dev->dev);
if (err < 0) {
DRM_DEBUG_DRIVER("Failed to disable runtime power management: %d\n", err);
}
}
static const struct gamma_curve_segment {
u16 start;
u16 end;
} segments[MALIDP_COEFFTAB_NUM_COEFFS] = {
/* sector 0 */
{ 0, 0 }, { 1, 1 }, { 2, 2 }, { 3, 3 },
{ 4, 4 }, { 5, 5 }, { 6, 6 }, { 7, 7 },
{ 8, 8 }, { 9, 9 }, { 10, 10 }, { 11, 11 },
{ 12, 12 }, { 13, 13 }, { 14, 14 }, { 15, 15 },
/* sector 1 */
{ 16, 19 }, { 20, 23 }, { 24, 27 }, { 28, 31 },
/* sector 2 */
{ 32, 39 }, { 40, 47 }, { 48, 55 }, { 56, 63 },
/* sector 3 */
{ 64, 79 }, { 80, 95 }, { 96, 111 }, { 112, 127 },
/* sector 4 */
{ 128, 159 }, { 160, 191 }, { 192, 223 }, { 224, 255 },
/* sector 5 */
{ 256, 319 }, { 320, 383 }, { 384, 447 }, { 448, 511 },
/* sector 6 */
{ 512, 639 }, { 640, 767 }, { 768, 895 }, { 896, 1023 },
{ 1024, 1151 }, { 1152, 1279 }, { 1280, 1407 }, { 1408, 1535 },
{ 1536, 1663 }, { 1664, 1791 }, { 1792, 1919 }, { 1920, 2047 },
{ 2048, 2175 }, { 2176, 2303 }, { 2304, 2431 }, { 2432, 2559 },
{ 2560, 2687 }, { 2688, 2815 }, { 2816, 2943 }, { 2944, 3071 },
{ 3072, 3199 }, { 3200, 3327 }, { 3328, 3455 }, { 3456, 3583 },
{ 3584, 3711 }, { 3712, 3839 }, { 3840, 3967 }, { 3968, 4095 },
};
#define DE_COEFTAB_DATA(a, b) ((((a) & 0xfff) << 16) | (((b) & 0xfff)))
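/*
 * Illustrative example (not part of the original source): with a 4096-entry
 * identity LUT (lut[i].green == i << 4), the segment { 64, 79 } in
 * malidp_generate_gamma_table() below yields out_start = 64, out_end = 79 and
 * delta_in = 15, so a = (79 - 64) * 256 / 15 = 256 (a slope of 1.0 in 1/256
 * steps) and b = 64, packed together by DE_COEFTAB_DATA().
 */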
static void malidp_generate_gamma_table(struct drm_property_blob *lut_blob,
u32 coeffs[MALIDP_COEFFTAB_NUM_COEFFS])
{
struct drm_color_lut *lut = (struct drm_color_lut *)lut_blob->data;
int i;
for (i = 0; i < MALIDP_COEFFTAB_NUM_COEFFS; ++i) {
u32 a, b, delta_in, out_start, out_end;
delta_in = segments[i].end - segments[i].start;
/* DP has 12-bit internal precision for its LUTs. */
out_start = drm_color_lut_extract(lut[segments[i].start].green,
12);
out_end = drm_color_lut_extract(lut[segments[i].end].green, 12);
a = (delta_in == 0) ? 0 : ((out_end - out_start) * 256) / delta_in;
b = out_start;
coeffs[i] = DE_COEFTAB_DATA(a, b);
}
}
/*
* Check if there is a new gamma LUT and if it is of an acceptable size. Also,
* reject any LUTs that use distinct red, green, and blue curves.
*/
static int malidp_crtc_atomic_check_gamma(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
struct malidp_crtc_state *mc = to_malidp_crtc_state(state);
struct drm_color_lut *lut;
size_t lut_size;
int i;
if (!state->color_mgmt_changed || !state->gamma_lut)
return 0;
if (crtc->state->gamma_lut &&
(crtc->state->gamma_lut->base.id == state->gamma_lut->base.id))
return 0;
if (state->gamma_lut->length % sizeof(struct drm_color_lut))
return -EINVAL;
lut_size = state->gamma_lut->length / sizeof(struct drm_color_lut);
if (lut_size != MALIDP_GAMMA_LUT_SIZE)
return -EINVAL;
lut = (struct drm_color_lut *)state->gamma_lut->data;
for (i = 0; i < lut_size; ++i)
if (!((lut[i].red == lut[i].green) &&
(lut[i].red == lut[i].blue)))
return -EINVAL;
if (!state->mode_changed) {
int ret;
state->mode_changed = true;
/*
* Kerneldoc for drm_atomic_helper_check_modeset mandates that
* it be invoked when the driver sets ->mode_changed. Since
* changing the gamma LUT doesn't depend on any external
* resources, it is safe to call it only once.
*/
ret = drm_atomic_helper_check_modeset(crtc->dev, state->state);
if (ret)
return ret;
}
malidp_generate_gamma_table(state->gamma_lut, mc->gamma_coeffs);
return 0;
}
/*
* Check if there is a new CTM and if it contains valid input. Valid here means
 * that the number is inside the representable range for a Q3.12 number;
 * the fractional part of the input is simply truncated.
*
* The COLORADJ registers can be changed atomically.
*/
static int malidp_crtc_atomic_check_ctm(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
struct malidp_crtc_state *mc = to_malidp_crtc_state(state);
struct drm_color_ctm *ctm;
int i;
if (!state->color_mgmt_changed)
return 0;
if (!state->ctm)
return 0;
if (crtc->state->ctm && (crtc->state->ctm->base.id ==
state->ctm->base.id))
return 0;
/*
* The size of the ctm is checked in
* drm_atomic_replace_property_blob_from_id.
*/
ctm = (struct drm_color_ctm *)state->ctm->data;
for (i = 0; i < ARRAY_SIZE(ctm->matrix); ++i) {
/* Convert from S31.32 to Q3.12. */
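                /*
                 * Illustrative example (not from the original source): 1.0 in
                 * S31.32 is 1ULL << 32; shifting right by 20 and masking gives
                 * 0x1000, i.e. 1.0 in the 12-bit fractional fixed-point format
                 * written to the COLORADJ coefficients.
                 */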
s64 val = ctm->matrix[i];
u32 mag = ((((u64)val) & ~BIT_ULL(63)) >> 20) &
GENMASK_ULL(14, 0);
/*
* Convert to 2s complement and check the destination's top bit
* for overflow. NB: Can't check before converting or it'd
* incorrectly reject the case:
* sign == 1
* mag == 0x2000
*/
if (val & BIT_ULL(63))
mag = ~mag + 1;
if (!!(val & BIT_ULL(63)) != !!(mag & BIT(14)))
return -EINVAL;
mc->coloradj_coeffs[i] = mag;
}
return 0;
}
static int malidp_crtc_atomic_check_scaling(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
struct malidp_drm *malidp = crtc_to_malidp_device(crtc);
struct malidp_hw_device *hwdev = malidp->dev;
struct malidp_crtc_state *cs = to_malidp_crtc_state(state);
struct malidp_se_config *s = &cs->scaler_config;
struct drm_plane *plane;
struct videomode vm;
const struct drm_plane_state *pstate;
u32 h_upscale_factor = 0; /* U16.16 */
u32 v_upscale_factor = 0; /* U16.16 */
u8 scaling = cs->scaled_planes_mask;
int ret;
if (!scaling) {
s->scale_enable = false;
goto mclk_calc;
}
/* The scaling engine can only handle one plane at a time. */
if (scaling & (scaling - 1))
return -EINVAL;
drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) {
struct malidp_plane *mp = to_malidp_plane(plane);
u32 phase;
if (!(mp->layer->id & scaling))
continue;
/*
* Convert crtc_[w|h] to U32.32, then divide by U16.16 src_[w|h]
* to get the U16.16 result.
*/
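		/*
		 * For example, upscaling a 960 pixel wide source (src_w =
		 * 960 << 16) to crtc_w = 1920 gives an upscale factor of
		 * (1920ULL << 32) / (960 << 16) = 0x20000, i.e. 2.0 in
		 * U16.16, which is just enough to enable the enhancer.
		 */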
h_upscale_factor = div_u64((u64)pstate->crtc_w << 32,
pstate->src_w);
v_upscale_factor = div_u64((u64)pstate->crtc_h << 32,
pstate->src_h);
s->enhancer_enable = ((h_upscale_factor >> 16) >= 2 ||
(v_upscale_factor >> 16) >= 2);
if (pstate->rotation & MALIDP_ROTATED_MASK) {
s->input_w = pstate->src_h >> 16;
s->input_h = pstate->src_w >> 16;
} else {
s->input_w = pstate->src_w >> 16;
s->input_h = pstate->src_h >> 16;
}
s->output_w = pstate->crtc_w;
s->output_h = pstate->crtc_h;
#define SE_N_PHASE 4
#define SE_SHIFT_N_PHASE 12
/* Calculate initial_phase and delta_phase for horizontal. */
phase = s->input_w;
s->h_init_phase =
((phase << SE_N_PHASE) / s->output_w + 1) / 2;
phase = s->input_w;
phase <<= (SE_SHIFT_N_PHASE + SE_N_PHASE);
s->h_delta_phase = phase / s->output_w;
/* Same for vertical. */
phase = s->input_h;
s->v_init_phase =
((phase << SE_N_PHASE) / s->output_h + 1) / 2;
phase = s->input_h;
phase <<= (SE_SHIFT_N_PHASE + SE_N_PHASE);
s->v_delta_phase = phase / s->output_h;
#undef SE_N_PHASE
#undef SE_SHIFT_N_PHASE
s->plane_src_id = mp->layer->id;
}
s->scale_enable = true;
s->hcoeff = malidp_se_select_coeffs(h_upscale_factor);
s->vcoeff = malidp_se_select_coeffs(v_upscale_factor);
mclk_calc:
drm_display_mode_to_videomode(&state->adjusted_mode, &vm);
ret = hwdev->hw->se_calc_mclk(hwdev, s, &vm);
if (ret < 0)
return -EINVAL;
return 0;
}
static int malidp_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
crtc);
struct malidp_drm *malidp = crtc_to_malidp_device(crtc);
struct malidp_hw_device *hwdev = malidp->dev;
struct drm_plane *plane;
const struct drm_plane_state *pstate;
u32 rot_mem_free, rot_mem_usable;
int rotated_planes = 0;
int ret;
/*
* check if there is enough rotation memory available for planes
	 * that need 90° or 270° rotation, or planes that are compressed.
	 * Each plane has set its required memory size in the ->plane_check()
	 * callback; here we only make sure that the sums are less than the
* total usable memory.
*
* The rotation memory allocation algorithm (for each plane):
	 *  a. If no more rotated or compressed planes exist, all remaining
	 *     rotation memory in the bank is available for use by the plane.
	 *  b. If other rotated or compressed planes exist, and the plane's
	 *     layer ID is DE_VIDEO1, it can use all the memory from the first
	 *     bank if a secondary rotation memory bank is available, otherwise
	 *     it can use up to half of the available memory.
	 *  c. If other rotated or compressed planes exist, and the plane's
	 *     layer ID is not DE_VIDEO1, it can use half of the available
	 *     memory.
*
* Note: this algorithm assumes that the order in which the planes are
	 * checked always has the DE_VIDEO1 plane first in the list if it is
	 * rotated. Because that is how we create the planes in the first
	 * place, this holds under the current DRM version, but if the order
	 * in which drm_atomic_crtc_state_for_each_plane() iterates over
	 * planes ever changes, we need to pre-sort the planes before
	 * validation.
*/
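	/*
	 * For example, with two rotated planes on a device that has two
	 * 64 KiB rotation memory banks, DE_VIDEO1 is checked first and
	 * claims the whole first bank (rule b), and the remaining plane
	 * then gets everything that is left, i.e. the second 64 KiB bank
	 * (rule a).
	 */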
/* first count the number of rotated planes */
drm_atomic_crtc_state_for_each_plane_state(plane, pstate, crtc_state) {
struct drm_framebuffer *fb = pstate->fb;
if ((pstate->rotation & MALIDP_ROTATED_MASK) || fb->modifier)
rotated_planes++;
}
rot_mem_free = hwdev->rotation_memory[0];
/*
* if we have more than 1 plane using rotation memory, use the second
* block of rotation memory as well
*/
if (rotated_planes > 1)
rot_mem_free += hwdev->rotation_memory[1];
/* now validate the rotation memory requirements */
drm_atomic_crtc_state_for_each_plane_state(plane, pstate, crtc_state) {
struct malidp_plane *mp = to_malidp_plane(plane);
struct malidp_plane_state *ms = to_malidp_plane_state(pstate);
struct drm_framebuffer *fb = pstate->fb;
if ((pstate->rotation & MALIDP_ROTATED_MASK) || fb->modifier) {
/* process current plane */
rotated_planes--;
if (!rotated_planes) {
/* no more rotated planes, we can use what's left */
rot_mem_usable = rot_mem_free;
} else {
if ((mp->layer->id != DE_VIDEO1) ||
(hwdev->rotation_memory[1] == 0))
rot_mem_usable = rot_mem_free / 2;
else
rot_mem_usable = hwdev->rotation_memory[0];
}
rot_mem_free -= rot_mem_usable;
if (ms->rotmem_size > rot_mem_usable)
return -EINVAL;
}
}
/* If only the writeback routing has changed, we don't need a modeset */
if (crtc_state->connectors_changed) {
u32 old_mask = crtc->state->connector_mask;
u32 new_mask = crtc_state->connector_mask;
if ((old_mask ^ new_mask) ==
(1 << drm_connector_index(&malidp->mw_connector.base)))
crtc_state->connectors_changed = false;
}
ret = malidp_crtc_atomic_check_gamma(crtc, crtc_state);
ret = ret ? ret : malidp_crtc_atomic_check_ctm(crtc, crtc_state);
ret = ret ? ret : malidp_crtc_atomic_check_scaling(crtc, crtc_state);
return ret;
}
static const struct drm_crtc_helper_funcs malidp_crtc_helper_funcs = {
.mode_valid = malidp_crtc_mode_valid,
.atomic_check = malidp_crtc_atomic_check,
.atomic_enable = malidp_crtc_atomic_enable,
.atomic_disable = malidp_crtc_atomic_disable,
};
static struct drm_crtc_state *malidp_crtc_duplicate_state(struct drm_crtc *crtc)
{
struct malidp_crtc_state *state, *old_state;
if (WARN_ON(!crtc->state))
return NULL;
old_state = to_malidp_crtc_state(crtc->state);
state = kmalloc(sizeof(*state), GFP_KERNEL);
if (!state)
return NULL;
__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
memcpy(state->gamma_coeffs, old_state->gamma_coeffs,
sizeof(state->gamma_coeffs));
memcpy(state->coloradj_coeffs, old_state->coloradj_coeffs,
sizeof(state->coloradj_coeffs));
memcpy(&state->scaler_config, &old_state->scaler_config,
sizeof(state->scaler_config));
state->scaled_planes_mask = 0;
return &state->base;
}
static void malidp_crtc_destroy_state(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
struct malidp_crtc_state *mali_state = NULL;
if (state) {
mali_state = to_malidp_crtc_state(state);
__drm_atomic_helper_crtc_destroy_state(state);
}
kfree(mali_state);
}
static void malidp_crtc_reset(struct drm_crtc *crtc)
{
struct malidp_crtc_state *state =
kzalloc(sizeof(*state), GFP_KERNEL);
if (crtc->state)
malidp_crtc_destroy_state(crtc, crtc->state);
if (state)
__drm_atomic_helper_crtc_reset(crtc, &state->base);
else
__drm_atomic_helper_crtc_reset(crtc, NULL);
}
static int malidp_crtc_enable_vblank(struct drm_crtc *crtc)
{
struct malidp_drm *malidp = crtc_to_malidp_device(crtc);
struct malidp_hw_device *hwdev = malidp->dev;
malidp_hw_enable_irq(hwdev, MALIDP_DE_BLOCK,
hwdev->hw->map.de_irq_map.vsync_irq);
return 0;
}
static void malidp_crtc_disable_vblank(struct drm_crtc *crtc)
{
struct malidp_drm *malidp = crtc_to_malidp_device(crtc);
struct malidp_hw_device *hwdev = malidp->dev;
malidp_hw_disable_irq(hwdev, MALIDP_DE_BLOCK,
hwdev->hw->map.de_irq_map.vsync_irq);
}
static const struct drm_crtc_funcs malidp_crtc_funcs = {
.set_config = drm_atomic_helper_set_config,
.page_flip = drm_atomic_helper_page_flip,
.reset = malidp_crtc_reset,
.atomic_duplicate_state = malidp_crtc_duplicate_state,
.atomic_destroy_state = malidp_crtc_destroy_state,
.enable_vblank = malidp_crtc_enable_vblank,
.disable_vblank = malidp_crtc_disable_vblank,
};
int malidp_crtc_init(struct drm_device *drm)
{
struct malidp_drm *malidp = drm_to_malidp(drm);
struct drm_plane *primary = NULL, *plane;
int ret;
ret = malidp_de_planes_init(drm);
if (ret < 0) {
DRM_ERROR("Failed to initialise planes\n");
return ret;
}
drm_for_each_plane(plane, drm) {
if (plane->type == DRM_PLANE_TYPE_PRIMARY) {
primary = plane;
break;
}
}
if (!primary) {
DRM_ERROR("no primary plane found\n");
return -EINVAL;
}
ret = drmm_crtc_init_with_planes(drm, &malidp->crtc, primary, NULL,
&malidp_crtc_funcs, NULL);
if (ret)
return ret;
drm_crtc_helper_add(&malidp->crtc, &malidp_crtc_helper_funcs);
drm_mode_crtc_set_gamma_size(&malidp->crtc, MALIDP_GAMMA_LUT_SIZE);
/* No inverse-gamma: it is per-plane. */
drm_crtc_enable_color_mgmt(&malidp->crtc, 0, true, MALIDP_GAMMA_LUT_SIZE);
malidp_se_set_enh_coeffs(malidp->dev);
return 0;
}
| linux-master | drivers/gpu/drm/arm/malidp_crtc.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* (C) COPYRIGHT 2016 ARM Limited. All rights reserved.
* Author: Liviu Dudau <[email protected]>
*
* ARM Mali DP500/DP550/DP650 hardware manipulation routines. This is where
* the difference between various versions of the hardware is being dealt with
* in an attempt to provide to the rest of the driver code a unified view
*/
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/types.h>
#include <linux/io.h>
#include <video/videomode.h>
#include <video/display_timing.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_vblank.h>
#include <drm/drm_print.h>
#include "malidp_drv.h"
#include "malidp_hw.h"
#include "malidp_mw.h"
enum {
MW_NOT_ENABLED = 0, /* SE writeback not enabled */
MW_ONESHOT, /* SE in one-shot mode for writeback */
MW_START, /* SE started writeback */
MW_RESTART, /* SE will start another writeback after this one */
MW_STOP, /* SE needs to stop after this writeback */
};
static const struct malidp_format_id malidp500_de_formats[] = {
/* fourcc, layers supporting the format, internal id */
{ DRM_FORMAT_ARGB2101010, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2 | SE_MEMWRITE, 0 },
{ DRM_FORMAT_ABGR2101010, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2 | SE_MEMWRITE, 1 },
{ DRM_FORMAT_ARGB8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 2 },
{ DRM_FORMAT_ABGR8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 3 },
{ DRM_FORMAT_XRGB8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2 | SE_MEMWRITE, 4 },
{ DRM_FORMAT_XBGR8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2 | SE_MEMWRITE, 5 },
{ DRM_FORMAT_RGB888, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 6 },
{ DRM_FORMAT_BGR888, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 7 },
{ DRM_FORMAT_RGBA5551, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 8 },
{ DRM_FORMAT_ABGR1555, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 9 },
{ DRM_FORMAT_RGB565, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 10 },
{ DRM_FORMAT_BGR565, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 11 },
{ DRM_FORMAT_UYVY, DE_VIDEO1, 12 },
{ DRM_FORMAT_YUYV, DE_VIDEO1, 13 },
{ DRM_FORMAT_NV12, DE_VIDEO1 | SE_MEMWRITE, 14 },
{ DRM_FORMAT_YUV420, DE_VIDEO1, 15 },
{ DRM_FORMAT_XYUV8888, DE_VIDEO1, 16 },
/* These are supported with AFBC only */
{ DRM_FORMAT_YUV420_8BIT, DE_VIDEO1, 14 },
{ DRM_FORMAT_VUY888, DE_VIDEO1, 16 },
{ DRM_FORMAT_VUY101010, DE_VIDEO1, 17 },
{ DRM_FORMAT_YUV420_10BIT, DE_VIDEO1, 18 }
};
#define MALIDP_ID(__group, __format) \
((((__group) & 0x7) << 3) | ((__format) & 0x7))
#define AFBC_YUV_422_FORMAT_ID MALIDP_ID(5, 1)
#define MALIDP_COMMON_FORMATS \
/* fourcc, layers supporting the format, internal id */ \
{ DRM_FORMAT_ARGB2101010, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | SE_MEMWRITE, MALIDP_ID(0, 0) }, \
{ DRM_FORMAT_ABGR2101010, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | SE_MEMWRITE, MALIDP_ID(0, 1) }, \
{ DRM_FORMAT_RGBA1010102, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | SE_MEMWRITE, MALIDP_ID(0, 2) }, \
{ DRM_FORMAT_BGRA1010102, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | SE_MEMWRITE, MALIDP_ID(0, 3) }, \
{ DRM_FORMAT_ARGB8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | DE_SMART, MALIDP_ID(1, 0) }, \
{ DRM_FORMAT_ABGR8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | DE_SMART, MALIDP_ID(1, 1) }, \
{ DRM_FORMAT_RGBA8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | DE_SMART, MALIDP_ID(1, 2) }, \
{ DRM_FORMAT_BGRA8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | DE_SMART, MALIDP_ID(1, 3) }, \
{ DRM_FORMAT_XRGB8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | DE_SMART | SE_MEMWRITE, MALIDP_ID(2, 0) }, \
{ DRM_FORMAT_XBGR8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | DE_SMART | SE_MEMWRITE, MALIDP_ID(2, 1) }, \
{ DRM_FORMAT_RGBX8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | DE_SMART | SE_MEMWRITE, MALIDP_ID(2, 2) }, \
{ DRM_FORMAT_BGRX8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | DE_SMART | SE_MEMWRITE, MALIDP_ID(2, 3) }, \
{ DRM_FORMAT_RGB888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | SE_MEMWRITE, MALIDP_ID(3, 0) }, \
{ DRM_FORMAT_BGR888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | SE_MEMWRITE, MALIDP_ID(3, 1) }, \
{ DRM_FORMAT_RGBA5551, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(4, 0) }, \
{ DRM_FORMAT_ABGR1555, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(4, 1) }, \
{ DRM_FORMAT_RGB565, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(4, 2) }, \
{ DRM_FORMAT_BGR565, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(4, 3) }, \
/* This is only supported with linear modifier */ \
{ DRM_FORMAT_XYUV8888, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(5, 0) },\
/* This is only supported with AFBC modifier */ \
{ DRM_FORMAT_VUY888, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(5, 0) }, \
{ DRM_FORMAT_YUYV, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(5, 2) }, \
/* This is only supported with linear modifier */ \
{ DRM_FORMAT_UYVY, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(5, 3) }, \
{ DRM_FORMAT_NV12, DE_VIDEO1 | DE_VIDEO2 | SE_MEMWRITE, MALIDP_ID(5, 6) }, \
/* This is only supported with AFBC modifier */ \
{ DRM_FORMAT_YUV420_8BIT, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(5, 6) }, \
{ DRM_FORMAT_YUV420, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(5, 7) }, \
/* This is only supported with linear modifier */ \
{ DRM_FORMAT_XVYU2101010, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(6, 0)}, \
/* This is only supported with AFBC modifier */ \
{ DRM_FORMAT_VUY101010, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(6, 0)}, \
{ DRM_FORMAT_X0L2, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(6, 6)}, \
/* This is only supported with AFBC modifier */ \
{ DRM_FORMAT_YUV420_10BIT, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(6, 7)}, \
{ DRM_FORMAT_P010, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(6, 7)}
static const struct malidp_format_id malidp550_de_formats[] = {
MALIDP_COMMON_FORMATS,
};
static const struct malidp_format_id malidp650_de_formats[] = {
MALIDP_COMMON_FORMATS,
{ DRM_FORMAT_X0L0, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(5, 4)},
};
static const struct malidp_layer malidp500_layers[] = {
/* id, base address, fb pointer address base, stride offset,
* yuv2rgb matrix offset, mmu control register offset, rotation_features
*/
{ DE_VIDEO1, MALIDP500_DE_LV_BASE, MALIDP500_DE_LV_PTR_BASE,
MALIDP_DE_LV_STRIDE0, MALIDP500_LV_YUV2RGB, 0, ROTATE_ANY,
MALIDP500_DE_LV_AD_CTRL },
{ DE_GRAPHICS1, MALIDP500_DE_LG1_BASE, MALIDP500_DE_LG1_PTR_BASE,
MALIDP_DE_LG_STRIDE, 0, 0, ROTATE_ANY,
MALIDP500_DE_LG1_AD_CTRL },
{ DE_GRAPHICS2, MALIDP500_DE_LG2_BASE, MALIDP500_DE_LG2_PTR_BASE,
MALIDP_DE_LG_STRIDE, 0, 0, ROTATE_ANY,
MALIDP500_DE_LG2_AD_CTRL },
};
static const struct malidp_layer malidp550_layers[] = {
/* id, base address, fb pointer address base, stride offset,
* yuv2rgb matrix offset, mmu control register offset, rotation_features
*/
{ DE_VIDEO1, MALIDP550_DE_LV1_BASE, MALIDP550_DE_LV1_PTR_BASE,
MALIDP_DE_LV_STRIDE0, MALIDP550_LV_YUV2RGB, 0, ROTATE_ANY,
MALIDP550_DE_LV1_AD_CTRL },
{ DE_GRAPHICS1, MALIDP550_DE_LG_BASE, MALIDP550_DE_LG_PTR_BASE,
MALIDP_DE_LG_STRIDE, 0, 0, ROTATE_ANY,
MALIDP550_DE_LG_AD_CTRL },
{ DE_VIDEO2, MALIDP550_DE_LV2_BASE, MALIDP550_DE_LV2_PTR_BASE,
MALIDP_DE_LV_STRIDE0, MALIDP550_LV_YUV2RGB, 0, ROTATE_ANY,
MALIDP550_DE_LV2_AD_CTRL },
{ DE_SMART, MALIDP550_DE_LS_BASE, MALIDP550_DE_LS_PTR_BASE,
MALIDP550_DE_LS_R1_STRIDE, 0, 0, ROTATE_NONE, 0 },
};
static const struct malidp_layer malidp650_layers[] = {
/* id, base address, fb pointer address base, stride offset,
* yuv2rgb matrix offset, mmu control register offset,
* rotation_features
*/
{ DE_VIDEO1, MALIDP550_DE_LV1_BASE, MALIDP550_DE_LV1_PTR_BASE,
MALIDP_DE_LV_STRIDE0, MALIDP550_LV_YUV2RGB,
MALIDP650_DE_LV_MMU_CTRL, ROTATE_ANY,
MALIDP550_DE_LV1_AD_CTRL },
{ DE_GRAPHICS1, MALIDP550_DE_LG_BASE, MALIDP550_DE_LG_PTR_BASE,
MALIDP_DE_LG_STRIDE, 0, MALIDP650_DE_LG_MMU_CTRL,
ROTATE_COMPRESSED, MALIDP550_DE_LG_AD_CTRL },
{ DE_VIDEO2, MALIDP550_DE_LV2_BASE, MALIDP550_DE_LV2_PTR_BASE,
MALIDP_DE_LV_STRIDE0, MALIDP550_LV_YUV2RGB,
MALIDP650_DE_LV_MMU_CTRL, ROTATE_ANY,
MALIDP550_DE_LV2_AD_CTRL },
{ DE_SMART, MALIDP550_DE_LS_BASE, MALIDP550_DE_LS_PTR_BASE,
MALIDP550_DE_LS_R1_STRIDE, 0, MALIDP650_DE_LS_MMU_CTRL,
ROTATE_NONE, 0 },
};
const u64 malidp_format_modifiers[] = {
/* All RGB formats (except XRGB, RGBX, XBGR, BGRX) */
DRM_FORMAT_MOD_ARM_AFBC(AFBC_SIZE_16X16 | AFBC_YTR | AFBC_SPARSE),
DRM_FORMAT_MOD_ARM_AFBC(AFBC_SIZE_16X16 | AFBC_YTR),
/* All RGB formats > 16bpp (except XRGB, RGBX, XBGR, BGRX) */
DRM_FORMAT_MOD_ARM_AFBC(AFBC_SIZE_16X16 | AFBC_YTR | AFBC_SPARSE | AFBC_SPLIT),
/* All 8 or 10 bit YUV 444 formats. */
/* In DP550, 10 bit YUV 420 format also supported */
DRM_FORMAT_MOD_ARM_AFBC(AFBC_SIZE_16X16 | AFBC_SPARSE | AFBC_SPLIT),
/* YUV 420, 422 P1 8 bit and YUV 444 8 bit/10 bit formats */
DRM_FORMAT_MOD_ARM_AFBC(AFBC_SIZE_16X16 | AFBC_SPARSE),
DRM_FORMAT_MOD_ARM_AFBC(AFBC_SIZE_16X16),
/* YUV 420, 422 P1 8, 10 bit formats */
DRM_FORMAT_MOD_ARM_AFBC(AFBC_SIZE_16X16 | AFBC_CBR | AFBC_SPARSE),
DRM_FORMAT_MOD_ARM_AFBC(AFBC_SIZE_16X16 | AFBC_CBR),
/* All formats */
DRM_FORMAT_MOD_LINEAR,
DRM_FORMAT_MOD_INVALID
};
#define SE_N_SCALING_COEFFS 96
static const u16 dp500_se_scaling_coeffs[][SE_N_SCALING_COEFFS] = {
[MALIDP_UPSCALING_COEFFS - 1] = {
0x0000, 0x0001, 0x0007, 0x0011, 0x001e, 0x002e, 0x003f, 0x0052,
0x0064, 0x0073, 0x007d, 0x0080, 0x007a, 0x006c, 0x0053, 0x002f,
0x0000, 0x3fc6, 0x3f83, 0x3f39, 0x3eea, 0x3e9b, 0x3e4f, 0x3e0a,
0x3dd4, 0x3db0, 0x3da2, 0x3db1, 0x3dde, 0x3e2f, 0x3ea5, 0x3f40,
0x0000, 0x00e5, 0x01ee, 0x0315, 0x0456, 0x05aa, 0x0709, 0x086c,
0x09c9, 0x0b15, 0x0c4a, 0x0d5d, 0x0e4a, 0x0f06, 0x0f91, 0x0fe5,
0x1000, 0x0fe5, 0x0f91, 0x0f06, 0x0e4a, 0x0d5d, 0x0c4a, 0x0b15,
0x09c9, 0x086c, 0x0709, 0x05aa, 0x0456, 0x0315, 0x01ee, 0x00e5,
0x0000, 0x3f40, 0x3ea5, 0x3e2f, 0x3dde, 0x3db1, 0x3da2, 0x3db0,
0x3dd4, 0x3e0a, 0x3e4f, 0x3e9b, 0x3eea, 0x3f39, 0x3f83, 0x3fc6,
0x0000, 0x002f, 0x0053, 0x006c, 0x007a, 0x0080, 0x007d, 0x0073,
0x0064, 0x0052, 0x003f, 0x002e, 0x001e, 0x0011, 0x0007, 0x0001
},
[MALIDP_DOWNSCALING_1_5_COEFFS - 1] = {
0x0059, 0x004f, 0x0041, 0x002e, 0x0016, 0x3ffb, 0x3fd9, 0x3fb4,
0x3f8c, 0x3f62, 0x3f36, 0x3f09, 0x3edd, 0x3eb3, 0x3e8d, 0x3e6c,
0x3e52, 0x3e3f, 0x3e35, 0x3e37, 0x3e46, 0x3e61, 0x3e8c, 0x3ec5,
0x3f0f, 0x3f68, 0x3fd1, 0x004a, 0x00d3, 0x0169, 0x020b, 0x02b8,
0x036e, 0x042d, 0x04f2, 0x05b9, 0x0681, 0x0745, 0x0803, 0x08ba,
0x0965, 0x0a03, 0x0a91, 0x0b0d, 0x0b75, 0x0bc6, 0x0c00, 0x0c20,
0x0c28, 0x0c20, 0x0c00, 0x0bc6, 0x0b75, 0x0b0d, 0x0a91, 0x0a03,
0x0965, 0x08ba, 0x0803, 0x0745, 0x0681, 0x05b9, 0x04f2, 0x042d,
0x036e, 0x02b8, 0x020b, 0x0169, 0x00d3, 0x004a, 0x3fd1, 0x3f68,
0x3f0f, 0x3ec5, 0x3e8c, 0x3e61, 0x3e46, 0x3e37, 0x3e35, 0x3e3f,
0x3e52, 0x3e6c, 0x3e8d, 0x3eb3, 0x3edd, 0x3f09, 0x3f36, 0x3f62,
0x3f8c, 0x3fb4, 0x3fd9, 0x3ffb, 0x0016, 0x002e, 0x0041, 0x004f
},
[MALIDP_DOWNSCALING_2_COEFFS - 1] = {
0x3f19, 0x3f03, 0x3ef0, 0x3edf, 0x3ed0, 0x3ec5, 0x3ebd, 0x3eb9,
0x3eb9, 0x3ebf, 0x3eca, 0x3ed9, 0x3eef, 0x3f0a, 0x3f2c, 0x3f52,
0x3f7f, 0x3fb0, 0x3fe8, 0x0026, 0x006a, 0x00b4, 0x0103, 0x0158,
0x01b1, 0x020d, 0x026c, 0x02cd, 0x032f, 0x0392, 0x03f4, 0x0455,
0x04b4, 0x051e, 0x0585, 0x05eb, 0x064c, 0x06a8, 0x06fe, 0x074e,
0x0796, 0x07d5, 0x080c, 0x0839, 0x085c, 0x0875, 0x0882, 0x0887,
0x0881, 0x0887, 0x0882, 0x0875, 0x085c, 0x0839, 0x080c, 0x07d5,
0x0796, 0x074e, 0x06fe, 0x06a8, 0x064c, 0x05eb, 0x0585, 0x051e,
0x04b4, 0x0455, 0x03f4, 0x0392, 0x032f, 0x02cd, 0x026c, 0x020d,
0x01b1, 0x0158, 0x0103, 0x00b4, 0x006a, 0x0026, 0x3fe8, 0x3fb0,
0x3f7f, 0x3f52, 0x3f2c, 0x3f0a, 0x3eef, 0x3ed9, 0x3eca, 0x3ebf,
0x3eb9, 0x3eb9, 0x3ebd, 0x3ec5, 0x3ed0, 0x3edf, 0x3ef0, 0x3f03
},
[MALIDP_DOWNSCALING_2_75_COEFFS - 1] = {
0x3f51, 0x3f60, 0x3f71, 0x3f84, 0x3f98, 0x3faf, 0x3fc8, 0x3fe3,
0x0000, 0x001f, 0x0040, 0x0064, 0x008a, 0x00b1, 0x00da, 0x0106,
0x0133, 0x0160, 0x018e, 0x01bd, 0x01ec, 0x021d, 0x024e, 0x0280,
0x02b2, 0x02e4, 0x0317, 0x0349, 0x037c, 0x03ad, 0x03df, 0x0410,
0x0440, 0x0468, 0x048f, 0x04b3, 0x04d6, 0x04f8, 0x0516, 0x0533,
0x054e, 0x0566, 0x057c, 0x0590, 0x05a0, 0x05ae, 0x05ba, 0x05c3,
0x05c9, 0x05c3, 0x05ba, 0x05ae, 0x05a0, 0x0590, 0x057c, 0x0566,
0x054e, 0x0533, 0x0516, 0x04f8, 0x04d6, 0x04b3, 0x048f, 0x0468,
0x0440, 0x0410, 0x03df, 0x03ad, 0x037c, 0x0349, 0x0317, 0x02e4,
0x02b2, 0x0280, 0x024e, 0x021d, 0x01ec, 0x01bd, 0x018e, 0x0160,
0x0133, 0x0106, 0x00da, 0x00b1, 0x008a, 0x0064, 0x0040, 0x001f,
0x0000, 0x3fe3, 0x3fc8, 0x3faf, 0x3f98, 0x3f84, 0x3f71, 0x3f60
},
[MALIDP_DOWNSCALING_4_COEFFS - 1] = {
0x0094, 0x00a9, 0x00be, 0x00d4, 0x00ea, 0x0101, 0x0118, 0x012f,
0x0148, 0x0160, 0x017a, 0x0193, 0x01ae, 0x01c8, 0x01e4, 0x01ff,
0x021c, 0x0233, 0x024a, 0x0261, 0x0278, 0x028f, 0x02a6, 0x02bd,
0x02d4, 0x02eb, 0x0302, 0x0319, 0x032f, 0x0346, 0x035d, 0x0374,
0x038a, 0x0397, 0x03a3, 0x03af, 0x03bb, 0x03c6, 0x03d1, 0x03db,
0x03e4, 0x03ed, 0x03f6, 0x03fe, 0x0406, 0x040d, 0x0414, 0x041a,
0x0420, 0x041a, 0x0414, 0x040d, 0x0406, 0x03fe, 0x03f6, 0x03ed,
0x03e4, 0x03db, 0x03d1, 0x03c6, 0x03bb, 0x03af, 0x03a3, 0x0397,
0x038a, 0x0374, 0x035d, 0x0346, 0x032f, 0x0319, 0x0302, 0x02eb,
0x02d4, 0x02bd, 0x02a6, 0x028f, 0x0278, 0x0261, 0x024a, 0x0233,
0x021c, 0x01ff, 0x01e4, 0x01c8, 0x01ae, 0x0193, 0x017a, 0x0160,
0x0148, 0x012f, 0x0118, 0x0101, 0x00ea, 0x00d4, 0x00be, 0x00a9
},
};
#define MALIDP_DE_DEFAULT_PREFETCH_START 5
static int malidp500_query_hw(struct malidp_hw_device *hwdev)
{
u32 conf = malidp_hw_read(hwdev, MALIDP500_CONFIG_ID);
/* bit 4 of the CONFIG_ID register holds the line size multiplier */
u8 ln_size_mult = conf & 0x10 ? 2 : 1;
hwdev->min_line_size = 2;
hwdev->max_line_size = SZ_2K * ln_size_mult;
hwdev->rotation_memory[0] = SZ_1K * 64 * ln_size_mult;
hwdev->rotation_memory[1] = 0; /* no second rotation memory bank */
return 0;
}
static void malidp500_enter_config_mode(struct malidp_hw_device *hwdev)
{
u32 status, count = 100;
malidp_hw_setbits(hwdev, MALIDP500_DC_CONFIG_REQ, MALIDP500_DC_CONTROL);
while (count) {
status = malidp_hw_read(hwdev, hwdev->hw->map.dc_base + MALIDP_REG_STATUS);
if ((status & MALIDP500_DC_CONFIG_REQ) == MALIDP500_DC_CONFIG_REQ)
break;
/*
* entering config mode can take as long as the rendering
* of a full frame, hence the long sleep here
*/
usleep_range(1000, 10000);
count--;
}
WARN(count == 0, "timeout while entering config mode");
}
static void malidp500_leave_config_mode(struct malidp_hw_device *hwdev)
{
u32 status, count = 100;
malidp_hw_clearbits(hwdev, MALIDP_CFG_VALID, MALIDP500_CONFIG_VALID);
malidp_hw_clearbits(hwdev, MALIDP500_DC_CONFIG_REQ, MALIDP500_DC_CONTROL);
while (count) {
status = malidp_hw_read(hwdev, hwdev->hw->map.dc_base + MALIDP_REG_STATUS);
if ((status & MALIDP500_DC_CONFIG_REQ) == 0)
break;
usleep_range(100, 1000);
count--;
}
WARN(count == 0, "timeout while leaving config mode");
}
static bool malidp500_in_config_mode(struct malidp_hw_device *hwdev)
{
u32 status;
status = malidp_hw_read(hwdev, hwdev->hw->map.dc_base + MALIDP_REG_STATUS);
if ((status & MALIDP500_DC_CONFIG_REQ) == MALIDP500_DC_CONFIG_REQ)
return true;
return false;
}
static void malidp500_set_config_valid(struct malidp_hw_device *hwdev, u8 value)
{
if (value)
malidp_hw_setbits(hwdev, MALIDP_CFG_VALID, MALIDP500_CONFIG_VALID);
else
malidp_hw_clearbits(hwdev, MALIDP_CFG_VALID, MALIDP500_CONFIG_VALID);
}
static void malidp500_modeset(struct malidp_hw_device *hwdev, struct videomode *mode)
{
u32 val = 0;
malidp_hw_write(hwdev, hwdev->output_color_depth,
hwdev->hw->map.out_depth_base);
malidp_hw_clearbits(hwdev, MALIDP500_DC_CLEAR_MASK, MALIDP500_DC_CONTROL);
if (mode->flags & DISPLAY_FLAGS_HSYNC_HIGH)
val |= MALIDP500_HSYNCPOL;
if (mode->flags & DISPLAY_FLAGS_VSYNC_HIGH)
val |= MALIDP500_VSYNCPOL;
val |= MALIDP_DE_DEFAULT_PREFETCH_START;
malidp_hw_setbits(hwdev, val, MALIDP500_DC_CONTROL);
/*
* Mali-DP500 encodes the background color like this:
* - red @ MALIDP500_BGND_COLOR[12:0]
* - green @ MALIDP500_BGND_COLOR[27:16]
* - blue @ (MALIDP500_BGND_COLOR + 4)[12:0]
*/
val = ((MALIDP_BGND_COLOR_G & 0xfff) << 16) |
(MALIDP_BGND_COLOR_R & 0xfff);
malidp_hw_write(hwdev, val, MALIDP500_BGND_COLOR);
malidp_hw_write(hwdev, MALIDP_BGND_COLOR_B, MALIDP500_BGND_COLOR + 4);
val = MALIDP_DE_H_FRONTPORCH(mode->hfront_porch) |
MALIDP_DE_H_BACKPORCH(mode->hback_porch);
malidp_hw_write(hwdev, val, MALIDP500_TIMINGS_BASE + MALIDP_DE_H_TIMINGS);
val = MALIDP500_DE_V_FRONTPORCH(mode->vfront_porch) |
MALIDP_DE_V_BACKPORCH(mode->vback_porch);
malidp_hw_write(hwdev, val, MALIDP500_TIMINGS_BASE + MALIDP_DE_V_TIMINGS);
val = MALIDP_DE_H_SYNCWIDTH(mode->hsync_len) |
MALIDP_DE_V_SYNCWIDTH(mode->vsync_len);
malidp_hw_write(hwdev, val, MALIDP500_TIMINGS_BASE + MALIDP_DE_SYNC_WIDTH);
val = MALIDP_DE_H_ACTIVE(mode->hactive) | MALIDP_DE_V_ACTIVE(mode->vactive);
malidp_hw_write(hwdev, val, MALIDP500_TIMINGS_BASE + MALIDP_DE_HV_ACTIVE);
if (mode->flags & DISPLAY_FLAGS_INTERLACED)
malidp_hw_setbits(hwdev, MALIDP_DISP_FUNC_ILACED, MALIDP_DE_DISPLAY_FUNC);
else
malidp_hw_clearbits(hwdev, MALIDP_DISP_FUNC_ILACED, MALIDP_DE_DISPLAY_FUNC);
/*
	 * Program the RQoS register to avoid a flicker issue at high
	 * resolutions on the LS1028A.
*/
if (hwdev->arqos_value) {
val = hwdev->arqos_value;
malidp_hw_setbits(hwdev, val, MALIDP500_RQOS_QUALITY);
}
}
int malidp_format_get_bpp(u32 fmt)
{
const struct drm_format_info *info = drm_format_info(fmt);
int bpp = info->cpp[0] * 8;
if (bpp == 0) {
switch (fmt) {
case DRM_FORMAT_VUY101010:
bpp = 30;
break;
case DRM_FORMAT_YUV420_10BIT:
bpp = 15;
break;
case DRM_FORMAT_YUV420_8BIT:
bpp = 12;
break;
default:
bpp = 0;
}
}
return bpp;
}
static int malidp500_rotmem_required(struct malidp_hw_device *hwdev, u16 w,
u16 h, u32 fmt, bool has_modifier)
{
/*
* Each layer needs enough rotation memory to fit 8 lines
* worth of pixel data. Required size is then:
* size = rotated_width * (bpp / 8) * 8;
*/
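	/*
	 * For example, a 1920 pixel wide ARGB8888 plane (32 bpp) needs
	 * 1920 * 32 = 61440 bytes (60 KiB) of rotation memory, which fits
	 * in a 64 KiB rotation memory bank.
	 */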
int bpp = malidp_format_get_bpp(fmt);
return w * bpp;
}
static void malidp500_se_write_pp_coefftab(struct malidp_hw_device *hwdev,
u32 direction,
u16 addr,
u8 coeffs_id)
{
int i;
u16 scaling_control = MALIDP500_SE_CONTROL + MALIDP_SE_SCALING_CONTROL;
malidp_hw_write(hwdev,
direction | (addr & MALIDP_SE_COEFFTAB_ADDR_MASK),
scaling_control + MALIDP_SE_COEFFTAB_ADDR);
for (i = 0; i < ARRAY_SIZE(dp500_se_scaling_coeffs); ++i)
malidp_hw_write(hwdev, MALIDP_SE_SET_COEFFTAB_DATA(
dp500_se_scaling_coeffs[coeffs_id][i]),
scaling_control + MALIDP_SE_COEFFTAB_DATA);
}
static int malidp500_se_set_scaling_coeffs(struct malidp_hw_device *hwdev,
struct malidp_se_config *se_config,
struct malidp_se_config *old_config)
{
/* Get array indices into dp500_se_scaling_coeffs. */
u8 h = (u8)se_config->hcoeff - 1;
u8 v = (u8)se_config->vcoeff - 1;
if (WARN_ON(h >= ARRAY_SIZE(dp500_se_scaling_coeffs) ||
v >= ARRAY_SIZE(dp500_se_scaling_coeffs)))
return -EINVAL;
if ((h == v) && (se_config->hcoeff != old_config->hcoeff ||
se_config->vcoeff != old_config->vcoeff)) {
malidp500_se_write_pp_coefftab(hwdev,
(MALIDP_SE_V_COEFFTAB |
MALIDP_SE_H_COEFFTAB),
0, v);
} else {
if (se_config->vcoeff != old_config->vcoeff)
malidp500_se_write_pp_coefftab(hwdev,
MALIDP_SE_V_COEFFTAB,
0, v);
if (se_config->hcoeff != old_config->hcoeff)
malidp500_se_write_pp_coefftab(hwdev,
MALIDP_SE_H_COEFFTAB,
0, h);
}
return 0;
}
static long malidp500_se_calc_mclk(struct malidp_hw_device *hwdev,
struct malidp_se_config *se_config,
struct videomode *vm)
{
unsigned long mclk;
unsigned long pxlclk = vm->pixelclock; /* Hz */
unsigned long htotal = vm->hactive + vm->hfront_porch +
vm->hback_porch + vm->hsync_len;
unsigned long input_size = se_config->input_w * se_config->input_h;
unsigned long a = 10;
long ret;
/*
* mclk = max(a, 1.5) * pxlclk
*
	 * To avoid floating-point calculation, use 15 instead of 1.5 and
	 * divide by 10 to get mclk.
*/
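	/*
	 * Illustrative example: downscaling a 1920x1080 source to an output
	 * height of 540 lines with htotal = 2200 gives
	 * a = 15 * (1920 * 1080) / (2200 * 540) = 26, so the requested
	 * mclk is 26 * pxlclk / 10 = 2.6 * pxlclk.
	 */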
if (se_config->scale_enable) {
a = 15 * input_size / (htotal * se_config->output_h);
if (a < 15)
a = 15;
}
mclk = a * pxlclk / 10;
ret = clk_get_rate(hwdev->mclk);
if (ret < mclk) {
DRM_DEBUG_DRIVER("mclk requirement of %lu kHz can't be met.\n",
mclk / 1000);
return -EINVAL;
}
return ret;
}
static int malidp500_enable_memwrite(struct malidp_hw_device *hwdev,
dma_addr_t *addrs, s32 *pitches,
int num_planes, u16 w, u16 h, u32 fmt_id,
const s16 *rgb2yuv_coeffs)
{
u32 base = MALIDP500_SE_MEMWRITE_BASE;
u32 de_base = malidp_get_block_base(hwdev, MALIDP_DE_BLOCK);
/* enable the scaling engine block */
malidp_hw_setbits(hwdev, MALIDP_SCALE_ENGINE_EN, de_base + MALIDP_DE_DISPLAY_FUNC);
/* restart the writeback if already enabled */
if (hwdev->mw_state != MW_NOT_ENABLED)
hwdev->mw_state = MW_RESTART;
else
hwdev->mw_state = MW_START;
malidp_hw_write(hwdev, fmt_id, base + MALIDP_MW_FORMAT);
switch (num_planes) {
case 2:
malidp_hw_write(hwdev, lower_32_bits(addrs[1]), base + MALIDP_MW_P2_PTR_LOW);
malidp_hw_write(hwdev, upper_32_bits(addrs[1]), base + MALIDP_MW_P2_PTR_HIGH);
malidp_hw_write(hwdev, pitches[1], base + MALIDP_MW_P2_STRIDE);
fallthrough;
case 1:
malidp_hw_write(hwdev, lower_32_bits(addrs[0]), base + MALIDP_MW_P1_PTR_LOW);
malidp_hw_write(hwdev, upper_32_bits(addrs[0]), base + MALIDP_MW_P1_PTR_HIGH);
malidp_hw_write(hwdev, pitches[0], base + MALIDP_MW_P1_STRIDE);
break;
default:
WARN(1, "Invalid number of planes");
}
malidp_hw_write(hwdev, MALIDP_DE_H_ACTIVE(w) | MALIDP_DE_V_ACTIVE(h),
MALIDP500_SE_MEMWRITE_OUT_SIZE);
if (rgb2yuv_coeffs) {
int i;
for (i = 0; i < MALIDP_COLORADJ_NUM_COEFFS; i++) {
malidp_hw_write(hwdev, rgb2yuv_coeffs[i],
MALIDP500_SE_RGB_YUV_COEFFS + i * 4);
}
}
malidp_hw_setbits(hwdev, MALIDP_SE_MEMWRITE_EN, MALIDP500_SE_CONTROL);
return 0;
}
static void malidp500_disable_memwrite(struct malidp_hw_device *hwdev)
{
u32 base = malidp_get_block_base(hwdev, MALIDP_DE_BLOCK);
if (hwdev->mw_state == MW_START || hwdev->mw_state == MW_RESTART)
hwdev->mw_state = MW_STOP;
malidp_hw_clearbits(hwdev, MALIDP_SE_MEMWRITE_EN, MALIDP500_SE_CONTROL);
malidp_hw_clearbits(hwdev, MALIDP_SCALE_ENGINE_EN, base + MALIDP_DE_DISPLAY_FUNC);
}
static int malidp550_query_hw(struct malidp_hw_device *hwdev)
{
u32 conf = malidp_hw_read(hwdev, MALIDP550_CONFIG_ID);
u8 ln_size = (conf >> 4) & 0x3, rsize;
hwdev->min_line_size = 2;
switch (ln_size) {
case 0:
hwdev->max_line_size = SZ_2K;
/* two banks of 64KB for rotation memory */
rsize = 64;
break;
case 1:
hwdev->max_line_size = SZ_4K;
/* two banks of 128KB for rotation memory */
rsize = 128;
break;
case 2:
hwdev->max_line_size = 1280;
/* two banks of 40KB for rotation memory */
rsize = 40;
break;
case 3:
/* reserved value */
hwdev->max_line_size = 0;
return -EINVAL;
}
hwdev->rotation_memory[0] = hwdev->rotation_memory[1] = rsize * SZ_1K;
return 0;
}
static void malidp550_enter_config_mode(struct malidp_hw_device *hwdev)
{
u32 status, count = 100;
malidp_hw_setbits(hwdev, MALIDP550_DC_CONFIG_REQ, MALIDP550_DC_CONTROL);
while (count) {
status = malidp_hw_read(hwdev, hwdev->hw->map.dc_base + MALIDP_REG_STATUS);
if ((status & MALIDP550_DC_CONFIG_REQ) == MALIDP550_DC_CONFIG_REQ)
break;
/*
* entering config mode can take as long as the rendering
* of a full frame, hence the long sleep here
*/
usleep_range(1000, 10000);
count--;
}
WARN(count == 0, "timeout while entering config mode");
}
static void malidp550_leave_config_mode(struct malidp_hw_device *hwdev)
{
u32 status, count = 100;
malidp_hw_clearbits(hwdev, MALIDP_CFG_VALID, MALIDP550_CONFIG_VALID);
malidp_hw_clearbits(hwdev, MALIDP550_DC_CONFIG_REQ, MALIDP550_DC_CONTROL);
while (count) {
status = malidp_hw_read(hwdev, hwdev->hw->map.dc_base + MALIDP_REG_STATUS);
if ((status & MALIDP550_DC_CONFIG_REQ) == 0)
break;
usleep_range(100, 1000);
count--;
}
WARN(count == 0, "timeout while leaving config mode");
}
static bool malidp550_in_config_mode(struct malidp_hw_device *hwdev)
{
u32 status;
status = malidp_hw_read(hwdev, hwdev->hw->map.dc_base + MALIDP_REG_STATUS);
if ((status & MALIDP550_DC_CONFIG_REQ) == MALIDP550_DC_CONFIG_REQ)
return true;
return false;
}
static void malidp550_set_config_valid(struct malidp_hw_device *hwdev, u8 value)
{
if (value)
malidp_hw_setbits(hwdev, MALIDP_CFG_VALID, MALIDP550_CONFIG_VALID);
else
malidp_hw_clearbits(hwdev, MALIDP_CFG_VALID, MALIDP550_CONFIG_VALID);
}
static void malidp550_modeset(struct malidp_hw_device *hwdev, struct videomode *mode)
{
u32 val = MALIDP_DE_DEFAULT_PREFETCH_START;
malidp_hw_write(hwdev, hwdev->output_color_depth,
hwdev->hw->map.out_depth_base);
malidp_hw_write(hwdev, val, MALIDP550_DE_CONTROL);
/*
* Mali-DP550 and Mali-DP650 encode the background color like this:
* - red @ MALIDP550_DE_BGND_COLOR[23:16]
* - green @ MALIDP550_DE_BGND_COLOR[15:8]
* - blue @ MALIDP550_DE_BGND_COLOR[7:0]
*
* We need to truncate the least significant 4 bits from the default
* MALIDP_BGND_COLOR_x values
*/
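	/*
	 * For example, a 12-bit default component value of 0x845 would be
	 * programmed as (0x845 >> 4) & 0xff = 0x84.
	 */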
val = (((MALIDP_BGND_COLOR_R >> 4) & 0xff) << 16) |
(((MALIDP_BGND_COLOR_G >> 4) & 0xff) << 8) |
((MALIDP_BGND_COLOR_B >> 4) & 0xff);
malidp_hw_write(hwdev, val, MALIDP550_DE_BGND_COLOR);
val = MALIDP_DE_H_FRONTPORCH(mode->hfront_porch) |
MALIDP_DE_H_BACKPORCH(mode->hback_porch);
malidp_hw_write(hwdev, val, MALIDP550_TIMINGS_BASE + MALIDP_DE_H_TIMINGS);
val = MALIDP550_DE_V_FRONTPORCH(mode->vfront_porch) |
MALIDP_DE_V_BACKPORCH(mode->vback_porch);
malidp_hw_write(hwdev, val, MALIDP550_TIMINGS_BASE + MALIDP_DE_V_TIMINGS);
val = MALIDP_DE_H_SYNCWIDTH(mode->hsync_len) |
MALIDP_DE_V_SYNCWIDTH(mode->vsync_len);
if (mode->flags & DISPLAY_FLAGS_HSYNC_HIGH)
val |= MALIDP550_HSYNCPOL;
if (mode->flags & DISPLAY_FLAGS_VSYNC_HIGH)
val |= MALIDP550_VSYNCPOL;
malidp_hw_write(hwdev, val, MALIDP550_TIMINGS_BASE + MALIDP_DE_SYNC_WIDTH);
val = MALIDP_DE_H_ACTIVE(mode->hactive) | MALIDP_DE_V_ACTIVE(mode->vactive);
malidp_hw_write(hwdev, val, MALIDP550_TIMINGS_BASE + MALIDP_DE_HV_ACTIVE);
if (mode->flags & DISPLAY_FLAGS_INTERLACED)
malidp_hw_setbits(hwdev, MALIDP_DISP_FUNC_ILACED, MALIDP_DE_DISPLAY_FUNC);
else
malidp_hw_clearbits(hwdev, MALIDP_DISP_FUNC_ILACED, MALIDP_DE_DISPLAY_FUNC);
}
static int malidpx50_get_bytes_per_column(u32 fmt)
{
u32 bytes_per_column;
switch (fmt) {
/* 8 lines at 4 bytes per pixel */
case DRM_FORMAT_ARGB2101010:
case DRM_FORMAT_ABGR2101010:
case DRM_FORMAT_RGBA1010102:
case DRM_FORMAT_BGRA1010102:
case DRM_FORMAT_ARGB8888:
case DRM_FORMAT_ABGR8888:
case DRM_FORMAT_RGBA8888:
case DRM_FORMAT_BGRA8888:
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_XBGR8888:
case DRM_FORMAT_RGBX8888:
case DRM_FORMAT_BGRX8888:
case DRM_FORMAT_RGB888:
case DRM_FORMAT_BGR888:
/* 16 lines at 2 bytes per pixel */
case DRM_FORMAT_RGBA5551:
case DRM_FORMAT_ABGR1555:
case DRM_FORMAT_RGB565:
case DRM_FORMAT_BGR565:
case DRM_FORMAT_UYVY:
case DRM_FORMAT_YUYV:
case DRM_FORMAT_X0L0:
bytes_per_column = 32;
break;
/* 16 lines at 1.5 bytes per pixel */
case DRM_FORMAT_NV12:
case DRM_FORMAT_YUV420:
/* 8 lines at 3 bytes per pixel */
case DRM_FORMAT_VUY888:
/* 16 lines at 12 bits per pixel */
case DRM_FORMAT_YUV420_8BIT:
/* 8 lines at 3 bytes per pixel */
case DRM_FORMAT_P010:
bytes_per_column = 24;
break;
/* 8 lines at 30 bits per pixel */
case DRM_FORMAT_VUY101010:
/* 16 lines at 15 bits per pixel */
case DRM_FORMAT_YUV420_10BIT:
bytes_per_column = 30;
break;
default:
return -EINVAL;
}
return bytes_per_column;
}
static int malidp550_rotmem_required(struct malidp_hw_device *hwdev, u16 w,
u16 h, u32 fmt, bool has_modifier)
{
int bytes_per_column = 0;
switch (fmt) {
/* 8 lines at 15 bits per pixel */
case DRM_FORMAT_YUV420_10BIT:
bytes_per_column = 15;
break;
/* Uncompressed YUV 420 10 bit single plane cannot be rotated */
case DRM_FORMAT_X0L2:
if (has_modifier)
bytes_per_column = 8;
else
return -EINVAL;
break;
default:
bytes_per_column = malidpx50_get_bytes_per_column(fmt);
}
if (bytes_per_column == -EINVAL)
return bytes_per_column;
return w * bytes_per_column;
}
static int malidp650_rotmem_required(struct malidp_hw_device *hwdev, u16 w,
u16 h, u32 fmt, bool has_modifier)
{
int bytes_per_column = 0;
switch (fmt) {
/* 16 lines at 2 bytes per pixel */
case DRM_FORMAT_X0L2:
bytes_per_column = 32;
break;
default:
bytes_per_column = malidpx50_get_bytes_per_column(fmt);
}
if (bytes_per_column == -EINVAL)
return bytes_per_column;
return w * bytes_per_column;
}
static int malidp550_se_set_scaling_coeffs(struct malidp_hw_device *hwdev,
struct malidp_se_config *se_config,
struct malidp_se_config *old_config)
{
u32 mask = MALIDP550_SE_CTL_VCSEL(MALIDP550_SE_CTL_SEL_MASK) |
MALIDP550_SE_CTL_HCSEL(MALIDP550_SE_CTL_SEL_MASK);
u32 new_value = MALIDP550_SE_CTL_VCSEL(se_config->vcoeff) |
MALIDP550_SE_CTL_HCSEL(se_config->hcoeff);
malidp_hw_clearbits(hwdev, mask, MALIDP550_SE_CONTROL);
malidp_hw_setbits(hwdev, new_value, MALIDP550_SE_CONTROL);
return 0;
}
static long malidp550_se_calc_mclk(struct malidp_hw_device *hwdev,
struct malidp_se_config *se_config,
struct videomode *vm)
{
unsigned long mclk;
unsigned long pxlclk = vm->pixelclock;
unsigned long htotal = vm->hactive + vm->hfront_porch +
vm->hback_porch + vm->hsync_len;
unsigned long numerator = 1, denominator = 1;
long ret;
if (se_config->scale_enable) {
numerator = max(se_config->input_w, se_config->output_w) *
se_config->input_h;
numerator += se_config->output_w *
(se_config->output_h -
min(se_config->input_h, se_config->output_h));
denominator = (htotal - 2) * se_config->output_h;
}
/* mclk can't be slower than pxlclk. */
if (numerator < denominator)
numerator = denominator = 1;
mclk = (pxlclk * numerator) / denominator;
ret = clk_get_rate(hwdev->mclk);
if (ret < mclk) {
DRM_DEBUG_DRIVER("mclk requirement of %lu kHz can't be met.\n",
mclk / 1000);
return -EINVAL;
}
return ret;
}
static int malidp550_enable_memwrite(struct malidp_hw_device *hwdev,
dma_addr_t *addrs, s32 *pitches,
int num_planes, u16 w, u16 h, u32 fmt_id,
const s16 *rgb2yuv_coeffs)
{
u32 base = MALIDP550_SE_MEMWRITE_BASE;
u32 de_base = malidp_get_block_base(hwdev, MALIDP_DE_BLOCK);
/* enable the scaling engine block */
malidp_hw_setbits(hwdev, MALIDP_SCALE_ENGINE_EN, de_base + MALIDP_DE_DISPLAY_FUNC);
hwdev->mw_state = MW_ONESHOT;
malidp_hw_write(hwdev, fmt_id, base + MALIDP_MW_FORMAT);
switch (num_planes) {
case 2:
malidp_hw_write(hwdev, lower_32_bits(addrs[1]), base + MALIDP_MW_P2_PTR_LOW);
malidp_hw_write(hwdev, upper_32_bits(addrs[1]), base + MALIDP_MW_P2_PTR_HIGH);
malidp_hw_write(hwdev, pitches[1], base + MALIDP_MW_P2_STRIDE);
fallthrough;
case 1:
malidp_hw_write(hwdev, lower_32_bits(addrs[0]), base + MALIDP_MW_P1_PTR_LOW);
malidp_hw_write(hwdev, upper_32_bits(addrs[0]), base + MALIDP_MW_P1_PTR_HIGH);
malidp_hw_write(hwdev, pitches[0], base + MALIDP_MW_P1_STRIDE);
break;
default:
WARN(1, "Invalid number of planes");
}
malidp_hw_write(hwdev, MALIDP_DE_H_ACTIVE(w) | MALIDP_DE_V_ACTIVE(h),
MALIDP550_SE_MEMWRITE_OUT_SIZE);
malidp_hw_setbits(hwdev, MALIDP550_SE_MEMWRITE_ONESHOT | MALIDP_SE_MEMWRITE_EN,
MALIDP550_SE_CONTROL);
if (rgb2yuv_coeffs) {
int i;
for (i = 0; i < MALIDP_COLORADJ_NUM_COEFFS; i++) {
malidp_hw_write(hwdev, rgb2yuv_coeffs[i],
MALIDP550_SE_RGB_YUV_COEFFS + i * 4);
}
}
return 0;
}
static void malidp550_disable_memwrite(struct malidp_hw_device *hwdev)
{
u32 base = malidp_get_block_base(hwdev, MALIDP_DE_BLOCK);
malidp_hw_clearbits(hwdev, MALIDP550_SE_MEMWRITE_ONESHOT | MALIDP_SE_MEMWRITE_EN,
MALIDP550_SE_CONTROL);
malidp_hw_clearbits(hwdev, MALIDP_SCALE_ENGINE_EN, base + MALIDP_DE_DISPLAY_FUNC);
}
static int malidp650_query_hw(struct malidp_hw_device *hwdev)
{
u32 conf = malidp_hw_read(hwdev, MALIDP550_CONFIG_ID);
u8 ln_size = (conf >> 4) & 0x3, rsize;
hwdev->min_line_size = 4;
switch (ln_size) {
case 0:
case 2:
/* reserved values */
hwdev->max_line_size = 0;
return -EINVAL;
case 1:
hwdev->max_line_size = SZ_4K;
/* two banks of 128KB for rotation memory */
rsize = 128;
break;
case 3:
hwdev->max_line_size = 2560;
/* two banks of 80KB for rotation memory */
rsize = 80;
}
hwdev->rotation_memory[0] = hwdev->rotation_memory[1] = rsize * SZ_1K;
return 0;
}
const struct malidp_hw malidp_device[MALIDP_MAX_DEVICES] = {
[MALIDP_500] = {
.map = {
.coeffs_base = MALIDP500_COEFFS_BASE,
.se_base = MALIDP500_SE_BASE,
.dc_base = MALIDP500_DC_BASE,
.out_depth_base = MALIDP500_OUTPUT_DEPTH,
.features = 0, /* no CLEARIRQ register */
.n_layers = ARRAY_SIZE(malidp500_layers),
.layers = malidp500_layers,
.de_irq_map = {
.irq_mask = MALIDP_DE_IRQ_UNDERRUN |
MALIDP500_DE_IRQ_AXI_ERR |
MALIDP500_DE_IRQ_VSYNC |
MALIDP500_DE_IRQ_GLOBAL,
.vsync_irq = MALIDP500_DE_IRQ_VSYNC,
.err_mask = MALIDP_DE_IRQ_UNDERRUN |
MALIDP500_DE_IRQ_AXI_ERR |
MALIDP500_DE_IRQ_SATURATION,
},
.se_irq_map = {
.irq_mask = MALIDP500_SE_IRQ_CONF_MODE |
MALIDP500_SE_IRQ_CONF_VALID |
MALIDP500_SE_IRQ_GLOBAL,
.vsync_irq = MALIDP500_SE_IRQ_CONF_VALID,
.err_mask = MALIDP500_SE_IRQ_INIT_BUSY |
MALIDP500_SE_IRQ_AXI_ERROR |
MALIDP500_SE_IRQ_OVERRUN,
},
.dc_irq_map = {
.irq_mask = MALIDP500_DE_IRQ_CONF_VALID,
.vsync_irq = MALIDP500_DE_IRQ_CONF_VALID,
},
.pixel_formats = malidp500_de_formats,
.n_pixel_formats = ARRAY_SIZE(malidp500_de_formats),
.bus_align_bytes = 8,
},
.query_hw = malidp500_query_hw,
.enter_config_mode = malidp500_enter_config_mode,
.leave_config_mode = malidp500_leave_config_mode,
.in_config_mode = malidp500_in_config_mode,
.set_config_valid = malidp500_set_config_valid,
.modeset = malidp500_modeset,
.rotmem_required = malidp500_rotmem_required,
.se_set_scaling_coeffs = malidp500_se_set_scaling_coeffs,
.se_calc_mclk = malidp500_se_calc_mclk,
.enable_memwrite = malidp500_enable_memwrite,
.disable_memwrite = malidp500_disable_memwrite,
.features = MALIDP_DEVICE_LV_HAS_3_STRIDES,
},
[MALIDP_550] = {
.map = {
.coeffs_base = MALIDP550_COEFFS_BASE,
.se_base = MALIDP550_SE_BASE,
.dc_base = MALIDP550_DC_BASE,
.out_depth_base = MALIDP550_DE_OUTPUT_DEPTH,
.features = MALIDP_REGMAP_HAS_CLEARIRQ |
MALIDP_DEVICE_AFBC_SUPPORT_SPLIT |
MALIDP_DEVICE_AFBC_YUV_420_10_SUPPORT_SPLIT |
MALIDP_DEVICE_AFBC_YUYV_USE_422_P2,
.n_layers = ARRAY_SIZE(malidp550_layers),
.layers = malidp550_layers,
.de_irq_map = {
.irq_mask = MALIDP_DE_IRQ_UNDERRUN |
MALIDP550_DE_IRQ_VSYNC,
.vsync_irq = MALIDP550_DE_IRQ_VSYNC,
.err_mask = MALIDP_DE_IRQ_UNDERRUN |
MALIDP550_DE_IRQ_SATURATION |
MALIDP550_DE_IRQ_AXI_ERR,
},
.se_irq_map = {
.irq_mask = MALIDP550_SE_IRQ_EOW,
.vsync_irq = MALIDP550_SE_IRQ_EOW,
.err_mask = MALIDP550_SE_IRQ_AXI_ERR |
MALIDP550_SE_IRQ_OVR |
MALIDP550_SE_IRQ_IBSY,
},
.dc_irq_map = {
.irq_mask = MALIDP550_DC_IRQ_CONF_VALID |
MALIDP550_DC_IRQ_SE,
.vsync_irq = MALIDP550_DC_IRQ_CONF_VALID,
},
.pixel_formats = malidp550_de_formats,
.n_pixel_formats = ARRAY_SIZE(malidp550_de_formats),
.bus_align_bytes = 8,
},
.query_hw = malidp550_query_hw,
.enter_config_mode = malidp550_enter_config_mode,
.leave_config_mode = malidp550_leave_config_mode,
.in_config_mode = malidp550_in_config_mode,
.set_config_valid = malidp550_set_config_valid,
.modeset = malidp550_modeset,
.rotmem_required = malidp550_rotmem_required,
.se_set_scaling_coeffs = malidp550_se_set_scaling_coeffs,
.se_calc_mclk = malidp550_se_calc_mclk,
.enable_memwrite = malidp550_enable_memwrite,
.disable_memwrite = malidp550_disable_memwrite,
.features = 0,
},
[MALIDP_650] = {
.map = {
.coeffs_base = MALIDP550_COEFFS_BASE,
.se_base = MALIDP550_SE_BASE,
.dc_base = MALIDP550_DC_BASE,
.out_depth_base = MALIDP550_DE_OUTPUT_DEPTH,
.features = MALIDP_REGMAP_HAS_CLEARIRQ |
MALIDP_DEVICE_AFBC_SUPPORT_SPLIT |
MALIDP_DEVICE_AFBC_YUYV_USE_422_P2,
.n_layers = ARRAY_SIZE(malidp650_layers),
.layers = malidp650_layers,
.de_irq_map = {
.irq_mask = MALIDP_DE_IRQ_UNDERRUN |
MALIDP650_DE_IRQ_DRIFT |
MALIDP550_DE_IRQ_VSYNC,
.vsync_irq = MALIDP550_DE_IRQ_VSYNC,
.err_mask = MALIDP_DE_IRQ_UNDERRUN |
MALIDP650_DE_IRQ_DRIFT |
MALIDP550_DE_IRQ_SATURATION |
MALIDP550_DE_IRQ_AXI_ERR |
MALIDP650_DE_IRQ_ACEV1 |
MALIDP650_DE_IRQ_ACEV2 |
MALIDP650_DE_IRQ_ACEG |
MALIDP650_DE_IRQ_AXIEP,
},
.se_irq_map = {
.irq_mask = MALIDP550_SE_IRQ_EOW,
.vsync_irq = MALIDP550_SE_IRQ_EOW,
.err_mask = MALIDP550_SE_IRQ_AXI_ERR |
MALIDP550_SE_IRQ_OVR |
MALIDP550_SE_IRQ_IBSY,
},
.dc_irq_map = {
.irq_mask = MALIDP550_DC_IRQ_CONF_VALID |
MALIDP550_DC_IRQ_SE,
.vsync_irq = MALIDP550_DC_IRQ_CONF_VALID,
},
.pixel_formats = malidp650_de_formats,
.n_pixel_formats = ARRAY_SIZE(malidp650_de_formats),
.bus_align_bytes = 16,
},
.query_hw = malidp650_query_hw,
.enter_config_mode = malidp550_enter_config_mode,
.leave_config_mode = malidp550_leave_config_mode,
.in_config_mode = malidp550_in_config_mode,
.set_config_valid = malidp550_set_config_valid,
.modeset = malidp550_modeset,
.rotmem_required = malidp650_rotmem_required,
.se_set_scaling_coeffs = malidp550_se_set_scaling_coeffs,
.se_calc_mclk = malidp550_se_calc_mclk,
.enable_memwrite = malidp550_enable_memwrite,
.disable_memwrite = malidp550_disable_memwrite,
.features = 0,
},
};
u8 malidp_hw_get_format_id(const struct malidp_hw_regmap *map,
u8 layer_id, u32 format, bool has_modifier)
{
unsigned int i;
for (i = 0; i < map->n_pixel_formats; i++) {
if (((map->pixel_formats[i].layer & layer_id) == layer_id) &&
(map->pixel_formats[i].format == format)) {
/*
			 * On some DP550 and DP650 variants, DRM_FORMAT_YUYV with the
			 * AFBC modifier is handled by a different h/w format id than
			 * plain (linear) DRM_FORMAT_YUYV.
*/
if (format == DRM_FORMAT_YUYV &&
(has_modifier) &&
(map->features & MALIDP_DEVICE_AFBC_YUYV_USE_422_P2))
return AFBC_YUV_422_FORMAT_ID;
else
return map->pixel_formats[i].id;
}
}
return MALIDP_INVALID_FORMAT_ID;
}
bool malidp_hw_format_is_linear_only(u32 format)
{
switch (format) {
case DRM_FORMAT_ARGB2101010:
case DRM_FORMAT_RGBA1010102:
case DRM_FORMAT_BGRA1010102:
case DRM_FORMAT_ARGB8888:
case DRM_FORMAT_RGBA8888:
case DRM_FORMAT_BGRA8888:
case DRM_FORMAT_XBGR8888:
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_RGBX8888:
case DRM_FORMAT_BGRX8888:
case DRM_FORMAT_RGB888:
case DRM_FORMAT_RGB565:
case DRM_FORMAT_ARGB1555:
case DRM_FORMAT_RGBA5551:
case DRM_FORMAT_BGRA5551:
case DRM_FORMAT_UYVY:
case DRM_FORMAT_XYUV8888:
case DRM_FORMAT_XVYU2101010:
case DRM_FORMAT_X0L2:
case DRM_FORMAT_X0L0:
return true;
default:
return false;
}
}
bool malidp_hw_format_is_afbc_only(u32 format)
{
switch (format) {
case DRM_FORMAT_VUY888:
case DRM_FORMAT_VUY101010:
case DRM_FORMAT_YUV420_8BIT:
case DRM_FORMAT_YUV420_10BIT:
return true;
default:
return false;
}
}
static void malidp_hw_clear_irq(struct malidp_hw_device *hwdev, u8 block, u32 irq)
{
u32 base = malidp_get_block_base(hwdev, block);
if (hwdev->hw->map.features & MALIDP_REGMAP_HAS_CLEARIRQ)
malidp_hw_write(hwdev, irq, base + MALIDP_REG_CLEARIRQ);
else
malidp_hw_write(hwdev, irq, base + MALIDP_REG_STATUS);
}
static irqreturn_t malidp_de_irq(int irq, void *arg)
{
struct drm_device *drm = arg;
struct malidp_drm *malidp = drm_to_malidp(drm);
struct malidp_hw_device *hwdev;
struct malidp_hw *hw;
const struct malidp_irq_map *de;
u32 status, mask, dc_status;
irqreturn_t ret = IRQ_NONE;
hwdev = malidp->dev;
hw = hwdev->hw;
de = &hw->map.de_irq_map;
/*
* if we are suspended it is likely that we were invoked because
	 * we share an interrupt line with some other driver; don't try
* to read the hardware registers
*/
if (hwdev->pm_suspended)
return IRQ_NONE;
/* first handle the config valid IRQ */
dc_status = malidp_hw_read(hwdev, hw->map.dc_base + MALIDP_REG_STATUS);
if (dc_status & hw->map.dc_irq_map.vsync_irq) {
malidp_hw_clear_irq(hwdev, MALIDP_DC_BLOCK, dc_status);
/* do we have a page flip event? */
if (malidp->event != NULL) {
spin_lock(&drm->event_lock);
drm_crtc_send_vblank_event(&malidp->crtc, malidp->event);
malidp->event = NULL;
spin_unlock(&drm->event_lock);
}
atomic_set(&malidp->config_valid, MALIDP_CONFIG_VALID_DONE);
ret = IRQ_WAKE_THREAD;
}
status = malidp_hw_read(hwdev, MALIDP_REG_STATUS);
if (!(status & de->irq_mask))
return ret;
mask = malidp_hw_read(hwdev, MALIDP_REG_MASKIRQ);
/* keep the status of the enabled interrupts, plus the error bits */
status &= (mask | de->err_mask);
if ((status & de->vsync_irq) && malidp->crtc.enabled)
drm_crtc_handle_vblank(&malidp->crtc);
#ifdef CONFIG_DEBUG_FS
if (status & de->err_mask) {
malidp_error(malidp, &malidp->de_errors, status,
drm_crtc_vblank_count(&malidp->crtc));
}
#endif
malidp_hw_clear_irq(hwdev, MALIDP_DE_BLOCK, status);
return (ret == IRQ_NONE) ? IRQ_HANDLED : ret;
}
static irqreturn_t malidp_de_irq_thread_handler(int irq, void *arg)
{
struct drm_device *drm = arg;
struct malidp_drm *malidp = drm_to_malidp(drm);
wake_up(&malidp->wq);
return IRQ_HANDLED;
}
void malidp_de_irq_hw_init(struct malidp_hw_device *hwdev)
{
/* ensure interrupts are disabled */
malidp_hw_disable_irq(hwdev, MALIDP_DE_BLOCK, 0xffffffff);
malidp_hw_clear_irq(hwdev, MALIDP_DE_BLOCK, 0xffffffff);
malidp_hw_disable_irq(hwdev, MALIDP_DC_BLOCK, 0xffffffff);
malidp_hw_clear_irq(hwdev, MALIDP_DC_BLOCK, 0xffffffff);
/* first enable the DC block IRQs */
malidp_hw_enable_irq(hwdev, MALIDP_DC_BLOCK,
hwdev->hw->map.dc_irq_map.irq_mask);
/* now enable the DE block IRQs */
malidp_hw_enable_irq(hwdev, MALIDP_DE_BLOCK,
hwdev->hw->map.de_irq_map.irq_mask);
}
int malidp_de_irq_init(struct drm_device *drm, int irq)
{
struct malidp_drm *malidp = drm_to_malidp(drm);
struct malidp_hw_device *hwdev = malidp->dev;
int ret;
/* ensure interrupts are disabled */
malidp_hw_disable_irq(hwdev, MALIDP_DE_BLOCK, 0xffffffff);
malidp_hw_clear_irq(hwdev, MALIDP_DE_BLOCK, 0xffffffff);
malidp_hw_disable_irq(hwdev, MALIDP_DC_BLOCK, 0xffffffff);
malidp_hw_clear_irq(hwdev, MALIDP_DC_BLOCK, 0xffffffff);
ret = devm_request_threaded_irq(drm->dev, irq, malidp_de_irq,
malidp_de_irq_thread_handler,
IRQF_SHARED, "malidp-de", drm);
if (ret < 0) {
DRM_ERROR("failed to install DE IRQ handler\n");
return ret;
}
malidp_de_irq_hw_init(hwdev);
return 0;
}
void malidp_de_irq_fini(struct malidp_hw_device *hwdev)
{
malidp_hw_disable_irq(hwdev, MALIDP_DE_BLOCK,
hwdev->hw->map.de_irq_map.irq_mask);
malidp_hw_disable_irq(hwdev, MALIDP_DC_BLOCK,
hwdev->hw->map.dc_irq_map.irq_mask);
}
static irqreturn_t malidp_se_irq(int irq, void *arg)
{
struct drm_device *drm = arg;
struct malidp_drm *malidp = drm_to_malidp(drm);
struct malidp_hw_device *hwdev = malidp->dev;
struct malidp_hw *hw = hwdev->hw;
const struct malidp_irq_map *se = &hw->map.se_irq_map;
u32 status, mask;
/*
* if we are suspended it is likely that we were invoked because
	 * we share an interrupt line with some other driver; don't try
* to read the hardware registers
*/
if (hwdev->pm_suspended)
return IRQ_NONE;
status = malidp_hw_read(hwdev, hw->map.se_base + MALIDP_REG_STATUS);
if (!(status & (se->irq_mask | se->err_mask)))
return IRQ_NONE;
#ifdef CONFIG_DEBUG_FS
if (status & se->err_mask)
malidp_error(malidp, &malidp->se_errors, status,
drm_crtc_vblank_count(&malidp->crtc));
#endif
mask = malidp_hw_read(hwdev, hw->map.se_base + MALIDP_REG_MASKIRQ);
status &= mask;
if (status & se->vsync_irq) {
switch (hwdev->mw_state) {
case MW_ONESHOT:
drm_writeback_signal_completion(&malidp->mw_connector, 0);
break;
case MW_STOP:
drm_writeback_signal_completion(&malidp->mw_connector, 0);
/* disable writeback after stop */
hwdev->mw_state = MW_NOT_ENABLED;
break;
case MW_RESTART:
drm_writeback_signal_completion(&malidp->mw_connector, 0);
fallthrough; /* to a new start */
case MW_START:
/* writeback started, need to emulate one-shot mode */
hw->disable_memwrite(hwdev);
/*
			 * only set the config_valid HW bit if there is no other
			 * update in progress, or if we raced ahead of the DE IRQ
			 * handler and the config_valid flag will not be updated
			 * until later
*/
status = malidp_hw_read(hwdev, hw->map.dc_base + MALIDP_REG_STATUS);
if ((atomic_read(&malidp->config_valid) != MALIDP_CONFIG_START) ||
(status & hw->map.dc_irq_map.vsync_irq))
hw->set_config_valid(hwdev, 1);
break;
}
}
malidp_hw_clear_irq(hwdev, MALIDP_SE_BLOCK, status);
return IRQ_HANDLED;
}
void malidp_se_irq_hw_init(struct malidp_hw_device *hwdev)
{
/* ensure interrupts are disabled */
malidp_hw_disable_irq(hwdev, MALIDP_SE_BLOCK, 0xffffffff);
malidp_hw_clear_irq(hwdev, MALIDP_SE_BLOCK, 0xffffffff);
malidp_hw_enable_irq(hwdev, MALIDP_SE_BLOCK,
hwdev->hw->map.se_irq_map.irq_mask);
}
static irqreturn_t malidp_se_irq_thread_handler(int irq, void *arg)
{
return IRQ_HANDLED;
}
int malidp_se_irq_init(struct drm_device *drm, int irq)
{
struct malidp_drm *malidp = drm_to_malidp(drm);
struct malidp_hw_device *hwdev = malidp->dev;
int ret;
/* ensure interrupts are disabled */
malidp_hw_disable_irq(hwdev, MALIDP_SE_BLOCK, 0xffffffff);
malidp_hw_clear_irq(hwdev, MALIDP_SE_BLOCK, 0xffffffff);
ret = devm_request_threaded_irq(drm->dev, irq, malidp_se_irq,
malidp_se_irq_thread_handler,
IRQF_SHARED, "malidp-se", drm);
if (ret < 0) {
DRM_ERROR("failed to install SE IRQ handler\n");
return ret;
}
hwdev->mw_state = MW_NOT_ENABLED;
malidp_se_irq_hw_init(hwdev);
return 0;
}
void malidp_se_irq_fini(struct malidp_hw_device *hwdev)
{
malidp_hw_disable_irq(hwdev, MALIDP_SE_BLOCK,
hwdev->hw->map.se_irq_map.irq_mask);
}
| linux-master | drivers/gpu/drm/arm/malidp_hw.c |
/*
* Copyright (C) 2013-2015 ARM Limited
* Author: Liviu Dudau <[email protected]>
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive
* for more details.
*
* Implementation of a CRTC class for the HDLCD driver.
*/
#include <linux/clk.h>
#include <linux/of_graph.h>
#include <linux/platform_data/simplefb.h>
#include <video/videomode.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_of.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
#include "hdlcd_drv.h"
#include "hdlcd_regs.h"
/*
* The HDLCD controller is a dumb RGB streamer that gets connected to
 * a single HDMI transmitter or, in the case of the ARM Models, it gets
 * emulated by the software that does the actual rendering.
*/
static void hdlcd_crtc_cleanup(struct drm_crtc *crtc)
{
struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc);
/* stop the controller on cleanup */
hdlcd_write(hdlcd, HDLCD_REG_COMMAND, 0);
drm_crtc_cleanup(crtc);
}
static int hdlcd_crtc_enable_vblank(struct drm_crtc *crtc)
{
struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc);
unsigned int mask = hdlcd_read(hdlcd, HDLCD_REG_INT_MASK);
hdlcd_write(hdlcd, HDLCD_REG_INT_MASK, mask | HDLCD_INTERRUPT_VSYNC);
return 0;
}
static void hdlcd_crtc_disable_vblank(struct drm_crtc *crtc)
{
struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc);
unsigned int mask = hdlcd_read(hdlcd, HDLCD_REG_INT_MASK);
hdlcd_write(hdlcd, HDLCD_REG_INT_MASK, mask & ~HDLCD_INTERRUPT_VSYNC);
}
static const struct drm_crtc_funcs hdlcd_crtc_funcs = {
.destroy = hdlcd_crtc_cleanup,
.set_config = drm_atomic_helper_set_config,
.page_flip = drm_atomic_helper_page_flip,
.reset = drm_atomic_helper_crtc_reset,
.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
.enable_vblank = hdlcd_crtc_enable_vblank,
.disable_vblank = hdlcd_crtc_disable_vblank,
};
static struct simplefb_format supported_formats[] = SIMPLEFB_FORMATS;
/*
* Setup the HDLCD registers for decoding the pixels out of the framebuffer
*/
static int hdlcd_set_pxl_fmt(struct drm_crtc *crtc)
{
unsigned int btpp;
struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc);
const struct drm_framebuffer *fb = crtc->primary->state->fb;
uint32_t pixel_format;
struct simplefb_format *format = NULL;
int i;
pixel_format = fb->format->format;
for (i = 0; i < ARRAY_SIZE(supported_formats); i++) {
if (supported_formats[i].fourcc == pixel_format)
format = &supported_formats[i];
}
if (WARN_ON(!format))
return 0;
/* HDLCD uses 'bytes per pixel', zero means 1 byte */
btpp = (format->bits_per_pixel + 7) / 8;
hdlcd_write(hdlcd, HDLCD_REG_PIXEL_FORMAT, (btpp - 1) << 3);
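	/*
	 * For example, a 32 bpp format gives btpp = 4, so the value written
	 * above is (4 - 1) << 3 = 0x18.
	 */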
/*
* The format of the HDLCD_REG_<color>_SELECT register is:
* - bits[23:16] - default value for that color component
* - bits[11:8] - number of bits to extract for each color component
* - bits[4:0] - index of the lowest bit to extract
*
* The default color value is used when bits[11:8] are zero, when the
* pixel is outside the visible frame area or when there is a
* buffer underrun.
*/
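	/*
	 * For example, with XRGB8888 (red at offset 16, length 8) the value
	 * programmed into HDLCD_REG_RED_SELECT below is (8 << 8) | 16 =
	 * 0x0810, plus 0x00ff0000 when underrun display is enabled.
	 */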
hdlcd_write(hdlcd, HDLCD_REG_RED_SELECT, format->red.offset |
#ifdef CONFIG_DRM_HDLCD_SHOW_UNDERRUN
0x00ff0000 | /* show underruns in red */
#endif
((format->red.length & 0xf) << 8));
hdlcd_write(hdlcd, HDLCD_REG_GREEN_SELECT, format->green.offset |
((format->green.length & 0xf) << 8));
hdlcd_write(hdlcd, HDLCD_REG_BLUE_SELECT, format->blue.offset |
((format->blue.length & 0xf) << 8));
return 0;
}
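/*
* Worked example (illustrative, values assumed from the usual simplefb
* description of DRM_FORMAT_XRGB8888: 32 bpp, red {16, 8}, green {8, 8},
* blue {0, 8}): the function above would program
* btpp = (32 + 7) / 8 = 4, HDLCD_REG_PIXEL_FORMAT = (4 - 1) << 3 = 0x18,
* HDLCD_REG_RED_SELECT = 16 | (8 << 8) = 0x0810 (or 0x00ff0810 when
* CONFIG_DRM_HDLCD_SHOW_UNDERRUN paints underruns red),
* HDLCD_REG_GREEN_SELECT = 8 | (8 << 8) = 0x0808 and
* HDLCD_REG_BLUE_SELECT = 0 | (8 << 8) = 0x0800.
*/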
static void hdlcd_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc);
struct drm_display_mode *m = &crtc->state->adjusted_mode;
struct videomode vm;
unsigned int polarities, err;
vm.vfront_porch = m->crtc_vsync_start - m->crtc_vdisplay;
vm.vback_porch = m->crtc_vtotal - m->crtc_vsync_end;
vm.vsync_len = m->crtc_vsync_end - m->crtc_vsync_start;
vm.hfront_porch = m->crtc_hsync_start - m->crtc_hdisplay;
vm.hback_porch = m->crtc_htotal - m->crtc_hsync_end;
vm.hsync_len = m->crtc_hsync_end - m->crtc_hsync_start;
polarities = HDLCD_POLARITY_DATAEN | HDLCD_POLARITY_DATA;
if (m->flags & DRM_MODE_FLAG_PHSYNC)
polarities |= HDLCD_POLARITY_HSYNC;
if (m->flags & DRM_MODE_FLAG_PVSYNC)
polarities |= HDLCD_POLARITY_VSYNC;
/* Allow max number of outstanding requests and largest burst size */
hdlcd_write(hdlcd, HDLCD_REG_BUS_OPTIONS,
HDLCD_BUS_MAX_OUTSTAND | HDLCD_BUS_BURST_16);
hdlcd_write(hdlcd, HDLCD_REG_V_DATA, m->crtc_vdisplay - 1);
hdlcd_write(hdlcd, HDLCD_REG_V_BACK_PORCH, vm.vback_porch - 1);
hdlcd_write(hdlcd, HDLCD_REG_V_FRONT_PORCH, vm.vfront_porch - 1);
hdlcd_write(hdlcd, HDLCD_REG_V_SYNC, vm.vsync_len - 1);
hdlcd_write(hdlcd, HDLCD_REG_H_DATA, m->crtc_hdisplay - 1);
hdlcd_write(hdlcd, HDLCD_REG_H_BACK_PORCH, vm.hback_porch - 1);
hdlcd_write(hdlcd, HDLCD_REG_H_FRONT_PORCH, vm.hfront_porch - 1);
hdlcd_write(hdlcd, HDLCD_REG_H_SYNC, vm.hsync_len - 1);
hdlcd_write(hdlcd, HDLCD_REG_POLARITIES, polarities);
err = hdlcd_set_pxl_fmt(crtc);
if (err)
return;
clk_set_rate(hdlcd->clk, m->crtc_clock * 1000);
}
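/*
* Worked example (illustrative, using the standard CEA 1080p60 timings:
* clock 148500 kHz, hdisplay 1920, hsync 2008-2052, htotal 2200,
* vdisplay 1080, vsync 1084-1089, vtotal 1125): the code above derives
* hfront/hsync/hback = 88/44/148 and vfront/vsync/vback = 4/5/36, writes
* each timing register with the value minus one (e.g. HDLCD_REG_H_DATA =
* 1919, HDLCD_REG_V_SYNC = 4) and then requests a 148.5 MHz pixel clock.
*/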
static void hdlcd_crtc_atomic_enable(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc);
clk_prepare_enable(hdlcd->clk);
hdlcd_crtc_mode_set_nofb(crtc);
hdlcd_write(hdlcd, HDLCD_REG_COMMAND, 1);
drm_crtc_vblank_on(crtc);
}
static void hdlcd_crtc_atomic_disable(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc);
drm_crtc_vblank_off(crtc);
hdlcd_write(hdlcd, HDLCD_REG_COMMAND, 0);
clk_disable_unprepare(hdlcd->clk);
}
static enum drm_mode_status hdlcd_crtc_mode_valid(struct drm_crtc *crtc,
const struct drm_display_mode *mode)
{
struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc);
long rate, clk_rate = mode->clock * 1000;
rate = clk_round_rate(hdlcd->clk, clk_rate);
/* 0.1% seems a close enough tolerance for the TDA19988 on Juno */
if (abs(rate - clk_rate) * 1000 > clk_rate) {
/* clock required by mode not supported by hardware */
return MODE_NOCLOCK;
}
return MODE_OK;
}
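/*
* Worked example: for a 148.5 MHz pixel clock the check above accepts a
* rounded rate within clk_rate / 1000 = 148500 Hz, so a clk_round_rate()
* result of 148.4 MHz passes (100 kHz off) while 148.35 MHz is rejected
* with MODE_NOCLOCK (150 kHz off).
*/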
static void hdlcd_crtc_atomic_begin(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct drm_pending_vblank_event *event = crtc->state->event;
if (event) {
crtc->state->event = NULL;
spin_lock_irq(&crtc->dev->event_lock);
if (drm_crtc_vblank_get(crtc) == 0)
drm_crtc_arm_vblank_event(crtc, event);
else
drm_crtc_send_vblank_event(crtc, event);
spin_unlock_irq(&crtc->dev->event_lock);
}
}
static const struct drm_crtc_helper_funcs hdlcd_crtc_helper_funcs = {
.mode_valid = hdlcd_crtc_mode_valid,
.atomic_begin = hdlcd_crtc_atomic_begin,
.atomic_enable = hdlcd_crtc_atomic_enable,
.atomic_disable = hdlcd_crtc_atomic_disable,
};
static int hdlcd_plane_atomic_check(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
plane);
int i;
struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;
u32 src_h = new_plane_state->src_h >> 16;
/* only the HDLCD_REG_FB_LINE_COUNT register has a limit */
if (src_h >= HDLCD_MAX_YRES) {
DRM_DEBUG_KMS("Invalid source width: %d\n", src_h);
return -EINVAL;
}
for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
/* we cannot disable the plane while the CRTC is active */
if (!new_plane_state->fb && crtc_state->active)
return -EINVAL;
return drm_atomic_helper_check_plane_state(new_plane_state,
crtc_state,
DRM_PLANE_NO_SCALING,
DRM_PLANE_NO_SCALING,
false, true);
}
return 0;
}
static void hdlcd_plane_atomic_update(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
plane);
struct drm_framebuffer *fb = new_plane_state->fb;
struct hdlcd_drm_private *hdlcd;
u32 dest_h;
dma_addr_t scanout_start;
if (!fb)
return;
dest_h = drm_rect_height(&new_plane_state->dst);
scanout_start = drm_fb_dma_get_gem_addr(fb, new_plane_state, 0);
hdlcd = drm_to_hdlcd_priv(plane->dev);
hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_LENGTH, fb->pitches[0]);
hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_PITCH, fb->pitches[0]);
hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_COUNT, dest_h - 1);
hdlcd_write(hdlcd, HDLCD_REG_FB_BASE, scanout_start);
}
static const struct drm_plane_helper_funcs hdlcd_plane_helper_funcs = {
.atomic_check = hdlcd_plane_atomic_check,
.atomic_update = hdlcd_plane_atomic_update,
};
static const struct drm_plane_funcs hdlcd_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.reset = drm_atomic_helper_plane_reset,
.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
};
static struct drm_plane *hdlcd_plane_init(struct drm_device *drm)
{
struct hdlcd_drm_private *hdlcd = drm_to_hdlcd_priv(drm);
struct drm_plane *plane = NULL;
u32 formats[ARRAY_SIZE(supported_formats)], i;
for (i = 0; i < ARRAY_SIZE(supported_formats); i++)
formats[i] = supported_formats[i].fourcc;
plane = drmm_universal_plane_alloc(drm, struct drm_plane, dev, 0xff,
&hdlcd_plane_funcs,
formats, ARRAY_SIZE(formats),
NULL, DRM_PLANE_TYPE_PRIMARY, NULL);
if (IS_ERR(plane))
return plane;
drm_plane_helper_add(plane, &hdlcd_plane_helper_funcs);
hdlcd->plane = plane;
return plane;
}
int hdlcd_setup_crtc(struct drm_device *drm)
{
struct hdlcd_drm_private *hdlcd = drm_to_hdlcd_priv(drm);
struct drm_plane *primary;
int ret;
primary = hdlcd_plane_init(drm);
if (IS_ERR(primary))
return PTR_ERR(primary);
ret = drm_crtc_init_with_planes(drm, &hdlcd->crtc, primary, NULL,
&hdlcd_crtc_funcs, NULL);
if (ret)
return ret;
drm_crtc_helper_add(&hdlcd->crtc, &hdlcd_crtc_helper_funcs);
return 0;
}
| linux-master | drivers/gpu/drm/arm/hdlcd_crtc.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* (C) COPYRIGHT 2016 ARM Limited. All rights reserved.
* Author: Liviu Dudau <[email protected]>
*
* ARM Mali DP plane manipulation routines.
*/
#include <linux/iommu.h>
#include <linux/platform_device.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_print.h>
#include "malidp_hw.h"
#include "malidp_drv.h"
/* Layer specific register offsets */
#define MALIDP_LAYER_FORMAT 0x000
#define LAYER_FORMAT_MASK 0x3f
#define MALIDP_LAYER_CONTROL 0x004
#define LAYER_ENABLE (1 << 0)
#define LAYER_FLOWCFG_MASK 7
#define LAYER_FLOWCFG(x) (((x) & LAYER_FLOWCFG_MASK) << 1)
#define LAYER_FLOWCFG_SCALE_SE 3
#define LAYER_ROT_OFFSET 8
#define LAYER_H_FLIP (1 << 10)
#define LAYER_V_FLIP (1 << 11)
#define LAYER_ROT_MASK (0xf << 8)
#define LAYER_COMP_MASK (0x3 << 12)
#define LAYER_COMP_PIXEL (0x3 << 12)
#define LAYER_COMP_PLANE (0x2 << 12)
#define LAYER_PMUL_ENABLE (0x1 << 14)
#define LAYER_ALPHA_OFFSET (16)
#define LAYER_ALPHA_MASK (0xff)
#define LAYER_ALPHA(x) (((x) & LAYER_ALPHA_MASK) << LAYER_ALPHA_OFFSET)
#define MALIDP_LAYER_COMPOSE 0x008
#define MALIDP_LAYER_SIZE 0x00c
#define LAYER_H_VAL(x) (((x) & 0x1fff) << 0)
#define LAYER_V_VAL(x) (((x) & 0x1fff) << 16)
#define MALIDP_LAYER_COMP_SIZE 0x010
#define MALIDP_LAYER_OFFSET 0x014
#define MALIDP550_LS_ENABLE 0x01c
#define MALIDP550_LS_R1_IN_SIZE 0x020
#define MODIFIERS_COUNT_MAX 15
/*
* This 4-entry look-up-table is used to determine the full 8-bit alpha value
* for formats with 1- or 2-bit alpha channels.
* We set it to give 100%/0% opacity for 1-bit formats and 100%/66%/33%/0%
* opacity for 2-bit formats.
*/
#define MALIDP_ALPHA_LUT 0xffaa5500
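/*
* The constant above packs one look-up-table entry per byte: 0xff, 0xaa,
* 0x55 and 0x00, i.e. roughly 100%, 66%, 33% and 0% opacity. 1-bit alpha
* formats only ever use the fully opaque and fully transparent entries.
*/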
/* page sizes the MMU prefetcher can support */
#define MALIDP_MMU_PREFETCH_PARTIAL_PGSIZES (SZ_4K | SZ_64K)
#define MALIDP_MMU_PREFETCH_FULL_PGSIZES (SZ_1M | SZ_2M)
/* readahead for partial-frame prefetch */
#define MALIDP_MMU_PREFETCH_READAHEAD 8
/*
* Replicate what the default ->reset hook does: free the state pointer and
* allocate a new empty object. We just need enough space to store
* a malidp_plane_state instead of a drm_plane_state.
*/
static void malidp_plane_reset(struct drm_plane *plane)
{
struct malidp_plane_state *state = to_malidp_plane_state(plane->state);
if (state)
__drm_atomic_helper_plane_destroy_state(&state->base);
kfree(state);
plane->state = NULL;
state = kzalloc(sizeof(*state), GFP_KERNEL);
if (state)
__drm_atomic_helper_plane_reset(plane, &state->base);
}
static struct
drm_plane_state *malidp_duplicate_plane_state(struct drm_plane *plane)
{
struct malidp_plane_state *state, *m_state;
if (!plane->state)
return NULL;
state = kmalloc(sizeof(*state), GFP_KERNEL);
if (!state)
return NULL;
m_state = to_malidp_plane_state(plane->state);
__drm_atomic_helper_plane_duplicate_state(plane, &state->base);
state->rotmem_size = m_state->rotmem_size;
state->format = m_state->format;
state->n_planes = m_state->n_planes;
state->mmu_prefetch_mode = m_state->mmu_prefetch_mode;
state->mmu_prefetch_pgsize = m_state->mmu_prefetch_pgsize;
return &state->base;
}
static void malidp_destroy_plane_state(struct drm_plane *plane,
struct drm_plane_state *state)
{
struct malidp_plane_state *m_state = to_malidp_plane_state(state);
__drm_atomic_helper_plane_destroy_state(state);
kfree(m_state);
}
static const char * const prefetch_mode_names[] = {
[MALIDP_PREFETCH_MODE_NONE] = "MMU_PREFETCH_NONE",
[MALIDP_PREFETCH_MODE_PARTIAL] = "MMU_PREFETCH_PARTIAL",
[MALIDP_PREFETCH_MODE_FULL] = "MMU_PREFETCH_FULL",
};
static void malidp_plane_atomic_print_state(struct drm_printer *p,
const struct drm_plane_state *state)
{
struct malidp_plane_state *ms = to_malidp_plane_state(state);
drm_printf(p, "\trotmem_size=%u\n", ms->rotmem_size);
drm_printf(p, "\tformat_id=%u\n", ms->format);
drm_printf(p, "\tn_planes=%u\n", ms->n_planes);
drm_printf(p, "\tmmu_prefetch_mode=%s\n",
prefetch_mode_names[ms->mmu_prefetch_mode]);
drm_printf(p, "\tmmu_prefetch_pgsize=%d\n", ms->mmu_prefetch_pgsize);
}
bool malidp_format_mod_supported(struct drm_device *drm,
u32 format, u64 modifier)
{
const struct drm_format_info *info;
const u64 *modifiers;
struct malidp_drm *malidp = drm_to_malidp(drm);
const struct malidp_hw_regmap *map = &malidp->dev->hw->map;
if (WARN_ON(modifier == DRM_FORMAT_MOD_INVALID))
return false;
/* Some pixel formats are supported without any modifier */
if (modifier == DRM_FORMAT_MOD_LINEAR) {
/*
* However, some pixel formats are only supported when an AFBC
* modifier is present, so reject those here.
*/
return !malidp_hw_format_is_afbc_only(format);
}
if (!fourcc_mod_is_vendor(modifier, ARM)) {
DRM_ERROR("Unknown modifier (not Arm)\n");
return false;
}
if (modifier &
~DRM_FORMAT_MOD_ARM_AFBC(AFBC_MOD_VALID_BITS)) {
DRM_DEBUG_KMS("Unsupported modifiers\n");
return false;
}
modifiers = malidp_format_modifiers;
/* SPLIT buffers must use SPARSE layout */
if (WARN_ON_ONCE((modifier & AFBC_SPLIT) && !(modifier & AFBC_SPARSE)))
return false;
/* CBR only applies to YUV formats, where YTR should always be 0 */
if (WARN_ON_ONCE((modifier & AFBC_CBR) && (modifier & AFBC_YTR)))
return false;
while (*modifiers != DRM_FORMAT_MOD_INVALID) {
if (*modifiers == modifier)
break;
modifiers++;
}
/* return false, if the modifier was not found */
if (*modifiers == DRM_FORMAT_MOD_INVALID) {
DRM_DEBUG_KMS("Unsupported modifier\n");
return false;
}
info = drm_format_info(format);
if (info->num_planes != 1) {
DRM_DEBUG_KMS("AFBC buffers expect one plane\n");
return false;
}
if (malidp_hw_format_is_linear_only(format)) {
DRM_DEBUG_KMS("Given format (0x%x) is supported in linear mode only\n",
format);
return false;
}
/*
* RGB formats need to provide YTR modifier and YUV formats should not
* provide YTR modifier.
*/
if (!(info->is_yuv) != !!(modifier & AFBC_FORMAT_MOD_YTR)) {
DRM_DEBUG_KMS("AFBC_FORMAT_MOD_YTR is %s for %s formats\n",
info->is_yuv ? "disallowed" : "mandatory",
info->is_yuv ? "YUV" : "RGB");
return false;
}
if (modifier & AFBC_SPLIT) {
if (!info->is_yuv) {
if (info->cpp[0] <= 2) {
DRM_DEBUG_KMS("RGB formats <= 16bpp are not supported with SPLIT\n");
return false;
}
}
if ((info->hsub != 1) || (info->vsub != 1)) {
if (!(format == DRM_FORMAT_YUV420_10BIT &&
(map->features & MALIDP_DEVICE_AFBC_YUV_420_10_SUPPORT_SPLIT))) {
DRM_DEBUG_KMS("Formats which are sub-sampled should never be split\n");
return false;
}
}
}
if (modifier & AFBC_CBR) {
if ((info->hsub == 1) || (info->vsub == 1)) {
DRM_DEBUG_KMS("Formats which are not sub-sampled should not have CBR set\n");
return false;
}
}
return true;
}
static bool malidp_format_mod_supported_per_plane(struct drm_plane *plane,
u32 format, u64 modifier)
{
return malidp_format_mod_supported(plane->dev, format, modifier);
}
static const struct drm_plane_funcs malidp_de_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.reset = malidp_plane_reset,
.atomic_duplicate_state = malidp_duplicate_plane_state,
.atomic_destroy_state = malidp_destroy_plane_state,
.atomic_print_state = malidp_plane_atomic_print_state,
.format_mod_supported = malidp_format_mod_supported_per_plane,
};
static int malidp_se_check_scaling(struct malidp_plane *mp,
struct drm_plane_state *state)
{
struct drm_crtc_state *crtc_state =
drm_atomic_get_existing_crtc_state(state->state, state->crtc);
struct malidp_crtc_state *mc;
u32 src_w, src_h;
int ret;
if (!crtc_state)
return -EINVAL;
mc = to_malidp_crtc_state(crtc_state);
ret = drm_atomic_helper_check_plane_state(state, crtc_state,
0, INT_MAX, true, true);
if (ret)
return ret;
if (state->rotation & MALIDP_ROTATED_MASK) {
src_w = state->src_h >> 16;
src_h = state->src_w >> 16;
} else {
src_w = state->src_w >> 16;
src_h = state->src_h >> 16;
}
if ((state->crtc_w == src_w) && (state->crtc_h == src_h)) {
/* Scaling not necessary for this plane. */
mc->scaled_planes_mask &= ~(mp->layer->id);
return 0;
}
if (mp->layer->id & (DE_SMART | DE_GRAPHICS2))
return -EINVAL;
mc->scaled_planes_mask |= mp->layer->id;
/* Defer scaling requirements calculation to the crtc check. */
return 0;
}
static u32 malidp_get_pgsize_bitmap(struct malidp_plane *mp)
{
struct iommu_domain *mmu_dom;
mmu_dom = iommu_get_domain_for_dev(mp->base.dev->dev);
if (mmu_dom)
return mmu_dom->pgsize_bitmap;
return 0;
}
/*
* Check if the framebuffer is entirely made up of pages at least pgsize in
* size. Only a heuristic: assumes that each scatterlist entry has been aligned
* to the largest page size smaller than its length and that the MMU maps to
* the largest page size possible.
*/
static bool malidp_check_pages_threshold(struct malidp_plane_state *ms,
u32 pgsize)
{
int i;
for (i = 0; i < ms->n_planes; i++) {
struct drm_gem_object *obj;
struct drm_gem_dma_object *dma_obj;
struct sg_table *sgt;
struct scatterlist *sgl;
obj = drm_gem_fb_get_obj(ms->base.fb, i);
dma_obj = to_drm_gem_dma_obj(obj);
if (dma_obj->sgt)
sgt = dma_obj->sgt;
else
sgt = obj->funcs->get_sg_table(obj);
if (IS_ERR(sgt))
return false;
sgl = sgt->sgl;
while (sgl) {
if (sgl->length < pgsize) {
if (!dma_obj->sgt)
kfree(sgt);
return false;
}
sgl = sg_next(sgl);
}
if (!dma_obj->sgt)
kfree(sgt);
}
return true;
}
/*
* Check if it is possible to enable partial-frame MMU prefetch given the
* current format, AFBC state and rotation.
*/
static bool malidp_partial_prefetch_supported(u32 format, u64 modifier,
unsigned int rotation)
{
bool afbc, sparse;
/* rotation and horizontal flip not supported for partial prefetch */
if (rotation & (DRM_MODE_ROTATE_90 | DRM_MODE_ROTATE_180 |
DRM_MODE_ROTATE_270 | DRM_MODE_REFLECT_X))
return false;
afbc = modifier & DRM_FORMAT_MOD_ARM_AFBC(0);
sparse = modifier & AFBC_FORMAT_MOD_SPARSE;
switch (format) {
case DRM_FORMAT_ARGB2101010:
case DRM_FORMAT_RGBA1010102:
case DRM_FORMAT_BGRA1010102:
case DRM_FORMAT_ARGB8888:
case DRM_FORMAT_RGBA8888:
case DRM_FORMAT_BGRA8888:
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_XBGR8888:
case DRM_FORMAT_RGBX8888:
case DRM_FORMAT_BGRX8888:
case DRM_FORMAT_RGB888:
case DRM_FORMAT_RGBA5551:
case DRM_FORMAT_RGB565:
/* always supported */
return true;
case DRM_FORMAT_ABGR2101010:
case DRM_FORMAT_ABGR8888:
case DRM_FORMAT_ABGR1555:
case DRM_FORMAT_BGR565:
/* supported, but if AFBC then must be sparse mode */
return (!afbc) || (afbc && sparse);
case DRM_FORMAT_BGR888:
/* supported, but not for AFBC */
return !afbc;
case DRM_FORMAT_YUYV:
case DRM_FORMAT_UYVY:
case DRM_FORMAT_NV12:
case DRM_FORMAT_YUV420:
/* not supported */
return false;
default:
return false;
}
}
/*
* Select the preferred MMU prefetch mode. Full-frame prefetch is preferred as
* long as the framebuffer is all large pages. Otherwise partial-frame prefetch
* is selected as long as it is supported for the current format. The selected
* page size for prefetch is returned in pgsize_bitmap.
*/
static enum mmu_prefetch_mode malidp_mmu_prefetch_select_mode
(struct malidp_plane_state *ms, u32 *pgsize_bitmap)
{
u32 pgsizes;
/* get the full-frame prefetch page size(s) supported by the MMU */
pgsizes = *pgsize_bitmap & MALIDP_MMU_PREFETCH_FULL_PGSIZES;
while (pgsizes) {
u32 largest_pgsize = 1 << __fls(pgsizes);
if (malidp_check_pages_threshold(ms, largest_pgsize)) {
*pgsize_bitmap = largest_pgsize;
return MALIDP_PREFETCH_MODE_FULL;
}
pgsizes -= largest_pgsize;
}
/* get the partial-frame prefetch page size(s) supported by the MMU */
pgsizes = *pgsize_bitmap & MALIDP_MMU_PREFETCH_PARTIAL_PGSIZES;
if (malidp_partial_prefetch_supported(ms->base.fb->format->format,
ms->base.fb->modifier,
ms->base.rotation)) {
/* partial prefetch using the smallest page size */
*pgsize_bitmap = 1 << __ffs(pgsizes);
return MALIDP_PREFETCH_MODE_PARTIAL;
}
*pgsize_bitmap = 0;
return MALIDP_PREFETCH_MODE_NONE;
}
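/*
* Example walk-through (illustrative): with an IOMMU reporting
* pgsize_bitmap = SZ_4K | SZ_2M, the only full-frame candidate is SZ_2M;
* if every scatterlist entry of every plane is at least 2 MiB the function
* returns MALIDP_PREFETCH_MODE_FULL with *pgsize_bitmap narrowed to SZ_2M.
* Otherwise the partial-frame candidate is SZ_4K and the function returns
* MALIDP_PREFETCH_MODE_PARTIAL, or MALIDP_PREFETCH_MODE_NONE if the
* format, modifier or rotation rules partial prefetch out.
*/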
static u32 malidp_calc_mmu_control_value(enum mmu_prefetch_mode mode,
u8 readahead, u8 n_planes, u32 pgsize)
{
u32 mmu_ctrl = 0;
if (mode != MALIDP_PREFETCH_MODE_NONE) {
mmu_ctrl |= MALIDP_MMU_CTRL_EN;
if (mode == MALIDP_PREFETCH_MODE_PARTIAL) {
mmu_ctrl |= MALIDP_MMU_CTRL_MODE;
mmu_ctrl |= MALIDP_MMU_CTRL_PP_NUM_REQ(readahead);
}
if (pgsize == SZ_64K || pgsize == SZ_2M) {
int i;
for (i = 0; i < n_planes; i++)
mmu_ctrl |= MALIDP_MMU_CTRL_PX_PS(i);
}
}
return mmu_ctrl;
}
static void malidp_de_prefetch_settings(struct malidp_plane *mp,
struct malidp_plane_state *ms)
{
if (!mp->layer->mmu_ctrl_offset)
return;
/* get the page sizes supported by the MMU */
ms->mmu_prefetch_pgsize = malidp_get_pgsize_bitmap(mp);
ms->mmu_prefetch_mode =
malidp_mmu_prefetch_select_mode(ms, &ms->mmu_prefetch_pgsize);
}
static int malidp_de_plane_check(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
plane);
struct malidp_plane *mp = to_malidp_plane(plane);
struct malidp_plane_state *ms = to_malidp_plane_state(new_plane_state);
bool rotated = new_plane_state->rotation & MALIDP_ROTATED_MASK;
struct drm_framebuffer *fb;
u16 pixel_alpha = new_plane_state->pixel_blend_mode;
int i, ret;
unsigned int block_w, block_h;
if (!new_plane_state->crtc || WARN_ON(!new_plane_state->fb))
return 0;
fb = new_plane_state->fb;
ms->format = malidp_hw_get_format_id(&mp->hwdev->hw->map,
mp->layer->id, fb->format->format,
!!fb->modifier);
if (ms->format == MALIDP_INVALID_FORMAT_ID)
return -EINVAL;
ms->n_planes = fb->format->num_planes;
for (i = 0; i < ms->n_planes; i++) {
u8 alignment = malidp_hw_get_pitch_align(mp->hwdev, rotated);
if (((fb->pitches[i] * drm_format_info_block_height(fb->format, i))
& (alignment - 1)) && !(fb->modifier)) {
DRM_DEBUG_KMS("Invalid pitch %u for plane %d\n",
fb->pitches[i], i);
return -EINVAL;
}
}
block_w = drm_format_info_block_width(fb->format, 0);
block_h = drm_format_info_block_height(fb->format, 0);
if (fb->width % block_w || fb->height % block_h) {
DRM_DEBUG_KMS("Buffer width/height needs to be a multiple of tile sizes");
return -EINVAL;
}
if ((new_plane_state->src_x >> 16) % block_w || (new_plane_state->src_y >> 16) % block_h) {
DRM_DEBUG_KMS("Plane src_x/src_y needs to be a multiple of tile sizes");
return -EINVAL;
}
if ((new_plane_state->crtc_w > mp->hwdev->max_line_size) ||
(new_plane_state->crtc_h > mp->hwdev->max_line_size) ||
(new_plane_state->crtc_w < mp->hwdev->min_line_size) ||
(new_plane_state->crtc_h < mp->hwdev->min_line_size))
return -EINVAL;
/*
* DP550/650 video layers can accept 3 plane formats only if
* fb->pitches[1] == fb->pitches[2] since they don't have a
* third plane stride register.
*/
if (ms->n_planes == 3 &&
!(mp->hwdev->hw->features & MALIDP_DEVICE_LV_HAS_3_STRIDES) &&
(new_plane_state->fb->pitches[1] != new_plane_state->fb->pitches[2]))
return -EINVAL;
ret = malidp_se_check_scaling(mp, new_plane_state);
if (ret)
return ret;
/* validate the rotation constraints for each layer */
if (new_plane_state->rotation != DRM_MODE_ROTATE_0) {
if (mp->layer->rot == ROTATE_NONE)
return -EINVAL;
if ((mp->layer->rot == ROTATE_COMPRESSED) && !(fb->modifier))
return -EINVAL;
/*
* packed RGB888 / BGR888 can't be rotated or flipped
* unless they are stored in a compressed way
*/
if ((fb->format->format == DRM_FORMAT_RGB888 ||
fb->format->format == DRM_FORMAT_BGR888) && !(fb->modifier))
return -EINVAL;
}
/* SMART layer does not support AFBC */
if (mp->layer->id == DE_SMART && fb->modifier) {
DRM_ERROR("AFBC framebuffer not supported in SMART layer");
return -EINVAL;
}
ms->rotmem_size = 0;
if (new_plane_state->rotation & MALIDP_ROTATED_MASK) {
int val;
val = mp->hwdev->hw->rotmem_required(mp->hwdev, new_plane_state->crtc_w,
new_plane_state->crtc_h,
fb->format->format,
!!(fb->modifier));
if (val < 0)
return val;
ms->rotmem_size = val;
}
/* HW can't support plane + pixel blending */
if ((new_plane_state->alpha != DRM_BLEND_ALPHA_OPAQUE) &&
(pixel_alpha != DRM_MODE_BLEND_PIXEL_NONE) &&
fb->format->has_alpha)
return -EINVAL;
malidp_de_prefetch_settings(mp, ms);
return 0;
}
static void malidp_de_set_plane_pitches(struct malidp_plane *mp,
int num_planes, unsigned int pitches[3])
{
int i;
int num_strides = num_planes;
if (!mp->layer->stride_offset)
return;
if (num_planes == 3)
num_strides = (mp->hwdev->hw->features &
MALIDP_DEVICE_LV_HAS_3_STRIDES) ? 3 : 2;
/*
* The drm convention for pitch is that it needs to cover width * cpp,
* but our hardware wants the pitch/stride to cover all rows included
* in a tile.
*/
for (i = 0; i < num_strides; ++i) {
unsigned int block_h = drm_format_info_block_height(mp->base.state->fb->format, i);
malidp_hw_write(mp->hwdev, pitches[i] * block_h,
mp->layer->base +
mp->layer->stride_offset + i * 4);
}
}
static const s16
malidp_yuv2rgb_coeffs[][DRM_COLOR_RANGE_MAX][MALIDP_COLORADJ_NUM_COEFFS] = {
[DRM_COLOR_YCBCR_BT601][DRM_COLOR_YCBCR_LIMITED_RANGE] = {
1192, 0, 1634,
1192, -401, -832,
1192, 2066, 0,
64, 512, 512
},
[DRM_COLOR_YCBCR_BT601][DRM_COLOR_YCBCR_FULL_RANGE] = {
1024, 0, 1436,
1024, -352, -731,
1024, 1815, 0,
0, 512, 512
},
[DRM_COLOR_YCBCR_BT709][DRM_COLOR_YCBCR_LIMITED_RANGE] = {
1192, 0, 1836,
1192, -218, -546,
1192, 2163, 0,
64, 512, 512
},
[DRM_COLOR_YCBCR_BT709][DRM_COLOR_YCBCR_FULL_RANGE] = {
1024, 0, 1613,
1024, -192, -479,
1024, 1900, 0,
0, 512, 512
},
[DRM_COLOR_YCBCR_BT2020][DRM_COLOR_YCBCR_LIMITED_RANGE] = {
1024, 0, 1476,
1024, -165, -572,
1024, 1884, 0,
0, 512, 512
},
[DRM_COLOR_YCBCR_BT2020][DRM_COLOR_YCBCR_FULL_RANGE] = {
1024, 0, 1510,
1024, -168, -585,
1024, 1927, 0,
0, 512, 512
}
};
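/*
* The coefficients above are signed values scaled by 1024 (1.0 == 1024),
* e.g. 1436 / 1024 is ~1.402, the BT.601 full-range Cr-to-R term, and
* 1192 / 1024 is ~1.164, the limited-range luma expansion 255/219. The
* last row of each entry appears to hold the Y/Cb/Cr input offsets in
* 10-bit units (64 = 16 << 2, 512 = 128 << 2).
*/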
static void malidp_de_set_color_encoding(struct malidp_plane *plane,
enum drm_color_encoding enc,
enum drm_color_range range)
{
unsigned int i;
for (i = 0; i < MALIDP_COLORADJ_NUM_COEFFS; i++) {
/* coefficients are signed, two's complement values */
malidp_hw_write(plane->hwdev, malidp_yuv2rgb_coeffs[enc][range][i],
plane->layer->base + plane->layer->yuv2rgb_offset +
i * 4);
}
}
static void malidp_de_set_mmu_control(struct malidp_plane *mp,
struct malidp_plane_state *ms)
{
u32 mmu_ctrl;
/* check hardware supports MMU prefetch */
if (!mp->layer->mmu_ctrl_offset)
return;
mmu_ctrl = malidp_calc_mmu_control_value(ms->mmu_prefetch_mode,
MALIDP_MMU_PREFETCH_READAHEAD,
ms->n_planes,
ms->mmu_prefetch_pgsize);
malidp_hw_write(mp->hwdev, mmu_ctrl,
mp->layer->base + mp->layer->mmu_ctrl_offset);
}
static void malidp_set_plane_base_addr(struct drm_framebuffer *fb,
struct malidp_plane *mp,
int plane_index)
{
dma_addr_t dma_addr;
u16 ptr;
struct drm_plane *plane = &mp->base;
bool afbc = !!fb->modifier;
ptr = mp->layer->ptr + (plane_index << 4);
/*
* drm_fb_dma_get_gem_addr() alters the physical base address of the
* framebuffer as per the plane's src_x, src_y co-ordinates (ie to
* take care of source cropping).
* For AFBC, this is not needed as the cropping is handled by _AD_CROP_H
* and _AD_CROP_V registers.
*/
if (!afbc) {
dma_addr = drm_fb_dma_get_gem_addr(fb, plane->state,
plane_index);
} else {
struct drm_gem_dma_object *obj;
obj = drm_fb_dma_get_gem_obj(fb, plane_index);
if (WARN_ON(!obj))
return;
dma_addr = obj->dma_addr;
}
malidp_hw_write(mp->hwdev, lower_32_bits(dma_addr), ptr);
malidp_hw_write(mp->hwdev, upper_32_bits(dma_addr), ptr + 4);
}
static void malidp_de_set_plane_afbc(struct drm_plane *plane)
{
struct malidp_plane *mp;
u32 src_w, src_h, val = 0, src_x, src_y;
struct drm_framebuffer *fb = plane->state->fb;
mp = to_malidp_plane(plane);
/* no afbc_decoder_offset means AFBC is not supported on this plane */
if (!mp->layer->afbc_decoder_offset)
return;
if (!fb->modifier) {
malidp_hw_write(mp->hwdev, 0, mp->layer->afbc_decoder_offset);
return;
}
/* convert src values from Q16 fixed point to integer */
src_w = plane->state->src_w >> 16;
src_h = plane->state->src_h >> 16;
src_x = plane->state->src_x >> 16;
src_y = plane->state->src_y >> 16;
val = ((fb->width - (src_x + src_w)) << MALIDP_AD_CROP_RIGHT_OFFSET) |
src_x;
malidp_hw_write(mp->hwdev, val,
mp->layer->afbc_decoder_offset + MALIDP_AD_CROP_H);
val = ((fb->height - (src_y + src_h)) << MALIDP_AD_CROP_BOTTOM_OFFSET) |
src_y;
malidp_hw_write(mp->hwdev, val,
mp->layer->afbc_decoder_offset + MALIDP_AD_CROP_V);
val = MALIDP_AD_EN;
if (fb->modifier & AFBC_FORMAT_MOD_SPLIT)
val |= MALIDP_AD_BS;
if (fb->modifier & AFBC_FORMAT_MOD_YTR)
val |= MALIDP_AD_YTR;
malidp_hw_write(mp->hwdev, val, mp->layer->afbc_decoder_offset);
}
static void malidp_de_plane_update(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
plane);
struct malidp_plane *mp;
struct malidp_plane_state *ms = to_malidp_plane_state(plane->state);
struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
plane);
u16 pixel_alpha = new_state->pixel_blend_mode;
u8 plane_alpha = new_state->alpha >> 8;
u32 src_w, src_h, dest_w, dest_h, val;
int i;
struct drm_framebuffer *fb = plane->state->fb;
mp = to_malidp_plane(plane);
/*
* For AFBC framebuffer, use the framebuffer width and height for
* configuring layer input size register.
*/
if (fb->modifier) {
src_w = fb->width;
src_h = fb->height;
} else {
/* convert src values from Q16 fixed point to integer */
src_w = new_state->src_w >> 16;
src_h = new_state->src_h >> 16;
}
dest_w = new_state->crtc_w;
dest_h = new_state->crtc_h;
val = malidp_hw_read(mp->hwdev, mp->layer->base);
val = (val & ~LAYER_FORMAT_MASK) | ms->format;
malidp_hw_write(mp->hwdev, val, mp->layer->base);
for (i = 0; i < ms->n_planes; i++)
malidp_set_plane_base_addr(fb, mp, i);
malidp_de_set_mmu_control(mp, ms);
malidp_de_set_plane_pitches(mp, ms->n_planes,
new_state->fb->pitches);
if ((plane->state->color_encoding != old_state->color_encoding) ||
(plane->state->color_range != old_state->color_range))
malidp_de_set_color_encoding(mp, plane->state->color_encoding,
plane->state->color_range);
malidp_hw_write(mp->hwdev, LAYER_H_VAL(src_w) | LAYER_V_VAL(src_h),
mp->layer->base + MALIDP_LAYER_SIZE);
malidp_hw_write(mp->hwdev, LAYER_H_VAL(dest_w) | LAYER_V_VAL(dest_h),
mp->layer->base + MALIDP_LAYER_COMP_SIZE);
malidp_hw_write(mp->hwdev, LAYER_H_VAL(new_state->crtc_x) |
LAYER_V_VAL(new_state->crtc_y),
mp->layer->base + MALIDP_LAYER_OFFSET);
if (mp->layer->id == DE_SMART) {
/*
* Enable the first rectangle in the SMART layer to be
* able to use it as a drm plane.
*/
malidp_hw_write(mp->hwdev, 1,
mp->layer->base + MALIDP550_LS_ENABLE);
malidp_hw_write(mp->hwdev,
LAYER_H_VAL(src_w) | LAYER_V_VAL(src_h),
mp->layer->base + MALIDP550_LS_R1_IN_SIZE);
}
malidp_de_set_plane_afbc(plane);
/* first clear the rotation bits */
val = malidp_hw_read(mp->hwdev, mp->layer->base + MALIDP_LAYER_CONTROL);
val &= ~LAYER_ROT_MASK;
/* setup the rotation and axis flip bits */
if (new_state->rotation & DRM_MODE_ROTATE_MASK)
val |= ilog2(plane->state->rotation & DRM_MODE_ROTATE_MASK) <<
LAYER_ROT_OFFSET;
if (new_state->rotation & DRM_MODE_REFLECT_X)
val |= LAYER_H_FLIP;
if (new_state->rotation & DRM_MODE_REFLECT_Y)
val |= LAYER_V_FLIP;
val &= ~(LAYER_COMP_MASK | LAYER_PMUL_ENABLE | LAYER_ALPHA(0xff));
if (new_state->alpha != DRM_BLEND_ALPHA_OPAQUE) {
val |= LAYER_COMP_PLANE;
} else if (new_state->fb->format->has_alpha) {
/* We only care about blend mode if the format has alpha */
switch (pixel_alpha) {
case DRM_MODE_BLEND_PREMULTI:
val |= LAYER_COMP_PIXEL | LAYER_PMUL_ENABLE;
break;
case DRM_MODE_BLEND_COVERAGE:
val |= LAYER_COMP_PIXEL;
break;
}
}
val |= LAYER_ALPHA(plane_alpha);
val &= ~LAYER_FLOWCFG(LAYER_FLOWCFG_MASK);
if (new_state->crtc) {
struct malidp_crtc_state *m =
to_malidp_crtc_state(new_state->crtc->state);
if (m->scaler_config.scale_enable &&
m->scaler_config.plane_src_id == mp->layer->id)
val |= LAYER_FLOWCFG(LAYER_FLOWCFG_SCALE_SE);
}
/* set the 'enable layer' bit */
val |= LAYER_ENABLE;
malidp_hw_write(mp->hwdev, val,
mp->layer->base + MALIDP_LAYER_CONTROL);
}
static void malidp_de_plane_disable(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct malidp_plane *mp = to_malidp_plane(plane);
malidp_hw_clearbits(mp->hwdev,
LAYER_ENABLE | LAYER_FLOWCFG(LAYER_FLOWCFG_MASK),
mp->layer->base + MALIDP_LAYER_CONTROL);
}
static const struct drm_plane_helper_funcs malidp_de_plane_helper_funcs = {
.atomic_check = malidp_de_plane_check,
.atomic_update = malidp_de_plane_update,
.atomic_disable = malidp_de_plane_disable,
};
static const uint64_t linear_only_modifiers[] = {
DRM_FORMAT_MOD_LINEAR,
DRM_FORMAT_MOD_INVALID
};
int malidp_de_planes_init(struct drm_device *drm)
{
struct malidp_drm *malidp = drm_to_malidp(drm);
const struct malidp_hw_regmap *map = &malidp->dev->hw->map;
struct malidp_plane *plane = NULL;
enum drm_plane_type plane_type;
unsigned long crtcs = BIT(drm->mode_config.num_crtc);
unsigned long flags = DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 | DRM_MODE_ROTATE_180 |
DRM_MODE_ROTATE_270 | DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y;
unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
BIT(DRM_MODE_BLEND_PREMULTI) |
BIT(DRM_MODE_BLEND_COVERAGE);
u32 *formats;
int ret, i = 0, j = 0, n;
u64 supported_modifiers[MODIFIERS_COUNT_MAX];
const u64 *modifiers;
modifiers = malidp_format_modifiers;
if (!(map->features & MALIDP_DEVICE_AFBC_SUPPORT_SPLIT)) {
/*
* Our hardware does not support SPLIT, so build the list
* of supported modifiers excluding the SPLIT ones.
*/
while (*modifiers != DRM_FORMAT_MOD_INVALID) {
if (!(*modifiers & AFBC_SPLIT))
supported_modifiers[j++] = *modifiers;
modifiers++;
}
supported_modifiers[j++] = DRM_FORMAT_MOD_INVALID;
modifiers = supported_modifiers;
}
formats = kcalloc(map->n_pixel_formats, sizeof(*formats), GFP_KERNEL);
if (!formats) {
ret = -ENOMEM;
goto cleanup;
}
for (i = 0; i < map->n_layers; i++) {
u8 id = map->layers[i].id;
/* build the list of DRM supported formats based on the map */
for (n = 0, j = 0; j < map->n_pixel_formats; j++) {
if ((map->pixel_formats[j].layer & id) == id)
formats[n++] = map->pixel_formats[j].format;
}
plane_type = (i == 0) ? DRM_PLANE_TYPE_PRIMARY :
DRM_PLANE_TYPE_OVERLAY;
/*
* All layers except the SMART layer support AFBC modifiers.
*/
plane = drmm_universal_plane_alloc(drm, struct malidp_plane, base,
crtcs, &malidp_de_plane_funcs, formats, n,
(id == DE_SMART) ? linear_only_modifiers :
modifiers, plane_type, NULL);
if (IS_ERR(plane)) {
ret = PTR_ERR(plane);
goto cleanup;
}
drm_plane_helper_add(&plane->base,
&malidp_de_plane_helper_funcs);
plane->hwdev = malidp->dev;
plane->layer = &map->layers[i];
drm_plane_create_alpha_property(&plane->base);
drm_plane_create_blend_mode_property(&plane->base, blend_caps);
if (id == DE_SMART) {
/* Skip the features which the SMART layer doesn't have. */
continue;
}
drm_plane_create_rotation_property(&plane->base, DRM_MODE_ROTATE_0, flags);
malidp_hw_write(malidp->dev, MALIDP_ALPHA_LUT,
plane->layer->base + MALIDP_LAYER_COMPOSE);
/* Attach the YUV->RGB property only to video layers */
if (id & (DE_VIDEO1 | DE_VIDEO2)) {
/* default encoding for YUV->RGB is BT601 NARROW */
enum drm_color_encoding enc = DRM_COLOR_YCBCR_BT601;
enum drm_color_range range = DRM_COLOR_YCBCR_LIMITED_RANGE;
ret = drm_plane_create_color_properties(&plane->base,
BIT(DRM_COLOR_YCBCR_BT601) | \
BIT(DRM_COLOR_YCBCR_BT709) | \
BIT(DRM_COLOR_YCBCR_BT2020),
BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) | \
BIT(DRM_COLOR_YCBCR_FULL_RANGE),
enc, range);
if (!ret)
/* program the HW registers */
malidp_de_set_color_encoding(plane, enc, range);
else
DRM_WARN("Failed to create video layer %d color properties\n", id);
}
}
kfree(formats);
return 0;
cleanup:
kfree(formats);
return ret;
}
| linux-master | drivers/gpu/drm/arm/malidp_planes.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* (C) COPYRIGHT 2016 ARM Limited. All rights reserved.
* Author: Liviu Dudau <[email protected]>
*
* ARM Mali DP500/DP550/DP650 KMS/DRM driver
*/
#include <linux/module.h>
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/of_device.h>
#include <linux/of_graph.h>
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/debugfs.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_dma.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_modeset_helper.h>
#include <drm/drm_module.h>
#include <drm/drm_of.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
#include "malidp_drv.h"
#include "malidp_mw.h"
#include "malidp_regs.h"
#include "malidp_hw.h"
#define MALIDP_CONF_VALID_TIMEOUT 250
#define AFBC_HEADER_SIZE 16
#define AFBC_SUPERBLK_ALIGNMENT 128
static void malidp_write_gamma_table(struct malidp_hw_device *hwdev,
u32 data[MALIDP_COEFFTAB_NUM_COEFFS])
{
int i;
/* Update all channels with a single gamma curve. */
const u32 gamma_write_mask = GENMASK(18, 16);
/*
* Always write an entire table, so the address field in
* DE_COEFFTAB_ADDR is 0 and we can use the gamma_write_mask bitmask
* directly.
*/
malidp_hw_write(hwdev, gamma_write_mask,
hwdev->hw->map.coeffs_base + MALIDP_COEF_TABLE_ADDR);
for (i = 0; i < MALIDP_COEFFTAB_NUM_COEFFS; ++i)
malidp_hw_write(hwdev, data[i],
hwdev->hw->map.coeffs_base +
MALIDP_COEF_TABLE_DATA);
}
static void malidp_atomic_commit_update_gamma(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
{
struct malidp_drm *malidp = crtc_to_malidp_device(crtc);
struct malidp_hw_device *hwdev = malidp->dev;
if (!crtc->state->color_mgmt_changed)
return;
if (!crtc->state->gamma_lut) {
malidp_hw_clearbits(hwdev,
MALIDP_DISP_FUNC_GAMMA,
MALIDP_DE_DISPLAY_FUNC);
} else {
struct malidp_crtc_state *mc =
to_malidp_crtc_state(crtc->state);
if (!old_state->gamma_lut || (crtc->state->gamma_lut->base.id !=
old_state->gamma_lut->base.id))
malidp_write_gamma_table(hwdev, mc->gamma_coeffs);
malidp_hw_setbits(hwdev, MALIDP_DISP_FUNC_GAMMA,
MALIDP_DE_DISPLAY_FUNC);
}
}
static
void malidp_atomic_commit_update_coloradj(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
{
struct malidp_drm *malidp = crtc_to_malidp_device(crtc);
struct malidp_hw_device *hwdev = malidp->dev;
int i;
if (!crtc->state->color_mgmt_changed)
return;
if (!crtc->state->ctm) {
malidp_hw_clearbits(hwdev, MALIDP_DISP_FUNC_CADJ,
MALIDP_DE_DISPLAY_FUNC);
} else {
struct malidp_crtc_state *mc =
to_malidp_crtc_state(crtc->state);
if (!old_state->ctm || (crtc->state->ctm->base.id !=
old_state->ctm->base.id))
for (i = 0; i < MALIDP_COLORADJ_NUM_COEFFS; ++i)
malidp_hw_write(hwdev,
mc->coloradj_coeffs[i],
hwdev->hw->map.coeffs_base +
MALIDP_COLOR_ADJ_COEF + 4 * i);
malidp_hw_setbits(hwdev, MALIDP_DISP_FUNC_CADJ,
MALIDP_DE_DISPLAY_FUNC);
}
}
static void malidp_atomic_commit_se_config(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
{
struct malidp_crtc_state *cs = to_malidp_crtc_state(crtc->state);
struct malidp_crtc_state *old_cs = to_malidp_crtc_state(old_state);
struct malidp_drm *malidp = crtc_to_malidp_device(crtc);
struct malidp_hw_device *hwdev = malidp->dev;
struct malidp_se_config *s = &cs->scaler_config;
struct malidp_se_config *old_s = &old_cs->scaler_config;
u32 se_control = hwdev->hw->map.se_base +
((hwdev->hw->map.features & MALIDP_REGMAP_HAS_CLEARIRQ) ?
0x10 : 0xC);
u32 layer_control = se_control + MALIDP_SE_LAYER_CONTROL;
u32 scr = se_control + MALIDP_SE_SCALING_CONTROL;
u32 val;
/* Set SE_CONTROL */
if (!s->scale_enable) {
val = malidp_hw_read(hwdev, se_control);
val &= ~MALIDP_SE_SCALING_EN;
malidp_hw_write(hwdev, val, se_control);
return;
}
hwdev->hw->se_set_scaling_coeffs(hwdev, s, old_s);
val = malidp_hw_read(hwdev, se_control);
val |= MALIDP_SE_SCALING_EN | MALIDP_SE_ALPHA_EN;
val &= ~MALIDP_SE_ENH(MALIDP_SE_ENH_MASK);
val |= s->enhancer_enable ? MALIDP_SE_ENH(3) : 0;
val |= MALIDP_SE_RGBO_IF_EN;
malidp_hw_write(hwdev, val, se_control);
/* Set IN_SIZE & OUT_SIZE. */
val = MALIDP_SE_SET_V_SIZE(s->input_h) |
MALIDP_SE_SET_H_SIZE(s->input_w);
malidp_hw_write(hwdev, val, layer_control + MALIDP_SE_L0_IN_SIZE);
val = MALIDP_SE_SET_V_SIZE(s->output_h) |
MALIDP_SE_SET_H_SIZE(s->output_w);
malidp_hw_write(hwdev, val, layer_control + MALIDP_SE_L0_OUT_SIZE);
/* Set phase regs. */
malidp_hw_write(hwdev, s->h_init_phase, scr + MALIDP_SE_H_INIT_PH);
malidp_hw_write(hwdev, s->h_delta_phase, scr + MALIDP_SE_H_DELTA_PH);
malidp_hw_write(hwdev, s->v_init_phase, scr + MALIDP_SE_V_INIT_PH);
malidp_hw_write(hwdev, s->v_delta_phase, scr + MALIDP_SE_V_DELTA_PH);
}
/*
* set the "config valid" bit and wait until the hardware acts on it
*/
static int malidp_set_and_wait_config_valid(struct drm_device *drm)
{
struct malidp_drm *malidp = drm_to_malidp(drm);
struct malidp_hw_device *hwdev = malidp->dev;
int ret;
hwdev->hw->set_config_valid(hwdev, 1);
/* don't wait for config_valid flag if we are in config mode */
if (hwdev->hw->in_config_mode(hwdev)) {
atomic_set(&malidp->config_valid, MALIDP_CONFIG_VALID_DONE);
return 0;
}
ret = wait_event_interruptible_timeout(malidp->wq,
atomic_read(&malidp->config_valid) == MALIDP_CONFIG_VALID_DONE,
msecs_to_jiffies(MALIDP_CONF_VALID_TIMEOUT));
return (ret > 0) ? 0 : -ETIMEDOUT;
}
static void malidp_atomic_commit_hw_done(struct drm_atomic_state *state)
{
struct drm_device *drm = state->dev;
struct malidp_drm *malidp = drm_to_malidp(drm);
int loop = 5;
malidp->event = malidp->crtc.state->event;
malidp->crtc.state->event = NULL;
if (malidp->crtc.state->active) {
/*
* if we have an event to deliver to userspace, make sure
* the vblank is enabled as we are sending it from the IRQ
* handler.
*/
if (malidp->event)
drm_crtc_vblank_get(&malidp->crtc);
/* only set config_valid if the CRTC is enabled */
if (malidp_set_and_wait_config_valid(drm) < 0) {
/*
* loop around setting the config-valid bit again and
* retry up to 5 times before giving up.
*/
while (loop--) {
if (!malidp_set_and_wait_config_valid(drm))
break;
}
DRM_DEBUG_DRIVER("timed out waiting for updated configuration\n");
}
} else if (malidp->event) {
/* CRTC inactive means vblank IRQ is disabled, send event directly */
spin_lock_irq(&drm->event_lock);
drm_crtc_send_vblank_event(&malidp->crtc, malidp->event);
malidp->event = NULL;
spin_unlock_irq(&drm->event_lock);
}
drm_atomic_helper_commit_hw_done(state);
}
static void malidp_atomic_commit_tail(struct drm_atomic_state *state)
{
struct drm_device *drm = state->dev;
struct malidp_drm *malidp = drm_to_malidp(drm);
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state;
int i;
bool fence_cookie = dma_fence_begin_signalling();
pm_runtime_get_sync(drm->dev);
/*
* set config_valid to a special value to let IRQ handlers
* know that we are updating registers
*/
atomic_set(&malidp->config_valid, MALIDP_CONFIG_START);
malidp->dev->hw->set_config_valid(malidp->dev, 0);
drm_atomic_helper_commit_modeset_disables(drm, state);
for_each_old_crtc_in_state(state, crtc, old_crtc_state, i) {
malidp_atomic_commit_update_gamma(crtc, old_crtc_state);
malidp_atomic_commit_update_coloradj(crtc, old_crtc_state);
malidp_atomic_commit_se_config(crtc, old_crtc_state);
}
drm_atomic_helper_commit_planes(drm, state, DRM_PLANE_COMMIT_ACTIVE_ONLY);
malidp_mw_atomic_commit(drm, state);
drm_atomic_helper_commit_modeset_enables(drm, state);
malidp_atomic_commit_hw_done(state);
dma_fence_end_signalling(fence_cookie);
pm_runtime_put(drm->dev);
drm_atomic_helper_cleanup_planes(drm, state);
}
static const struct drm_mode_config_helper_funcs malidp_mode_config_helpers = {
.atomic_commit_tail = malidp_atomic_commit_tail,
};
static bool
malidp_verify_afbc_framebuffer_caps(struct drm_device *dev,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
if (malidp_format_mod_supported(dev, mode_cmd->pixel_format,
mode_cmd->modifier[0]) == false)
return false;
if (mode_cmd->offsets[0] != 0) {
DRM_DEBUG_KMS("AFBC buffers' plane offset should be 0\n");
return false;
}
switch (mode_cmd->modifier[0] & AFBC_SIZE_MASK) {
case AFBC_SIZE_16X16:
if ((mode_cmd->width % 16) || (mode_cmd->height % 16)) {
DRM_DEBUG_KMS("AFBC buffers must be aligned to 16 pixels\n");
return false;
}
break;
default:
DRM_DEBUG_KMS("Unsupported AFBC block size\n");
return false;
}
return true;
}
static bool
malidp_verify_afbc_framebuffer_size(struct drm_device *dev,
struct drm_file *file,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
int n_superblocks = 0;
const struct drm_format_info *info;
struct drm_gem_object *objs = NULL;
u32 afbc_superblock_size = 0, afbc_superblock_height = 0;
u32 afbc_superblock_width = 0, afbc_size = 0;
int bpp = 0;
switch (mode_cmd->modifier[0] & AFBC_SIZE_MASK) {
case AFBC_SIZE_16X16:
afbc_superblock_height = 16;
afbc_superblock_width = 16;
break;
default:
DRM_DEBUG_KMS("AFBC superblock size is not supported\n");
return false;
}
info = drm_get_format_info(dev, mode_cmd);
n_superblocks = (mode_cmd->width / afbc_superblock_width) *
(mode_cmd->height / afbc_superblock_height);
bpp = malidp_format_get_bpp(info->format);
afbc_superblock_size = (bpp * afbc_superblock_width * afbc_superblock_height)
/ BITS_PER_BYTE;
afbc_size = ALIGN(n_superblocks * AFBC_HEADER_SIZE, AFBC_SUPERBLK_ALIGNMENT);
afbc_size += n_superblocks * ALIGN(afbc_superblock_size, AFBC_SUPERBLK_ALIGNMENT);
if ((mode_cmd->width * bpp) != (mode_cmd->pitches[0] * BITS_PER_BYTE)) {
DRM_DEBUG_KMS("Invalid value of (pitch * BITS_PER_BYTE) (=%u) "
"should be same as width (=%u) * bpp (=%u)\n",
(mode_cmd->pitches[0] * BITS_PER_BYTE),
mode_cmd->width, bpp);
return false;
}
objs = drm_gem_object_lookup(file, mode_cmd->handles[0]);
if (!objs) {
DRM_DEBUG_KMS("Failed to lookup GEM object\n");
return false;
}
if (objs->size < afbc_size) {
DRM_DEBUG_KMS("buffer size (%zu) too small for AFBC buffer size = %u\n",
objs->size, afbc_size);
drm_gem_object_put(objs);
return false;
}
drm_gem_object_put(objs);
return true;
}
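/*
* Worked example: for a 1920x1088 DRM_FORMAT_XRGB8888 buffer with an AFBC
* 16x16 modifier the check above computes n_superblocks = (1920 / 16) *
* (1088 / 16) = 8160, a header block of ALIGN(8160 * 16, 128) = 130560
* bytes and a payload of 8160 * ALIGN(32 * 16 * 16 / 8, 128) = 8355840
* bytes, so the GEM object must be at least 8486400 bytes and pitches[0]
* must be exactly 1920 * 32 / 8 = 7680.
*/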
static bool
malidp_verify_afbc_framebuffer(struct drm_device *dev, struct drm_file *file,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
if (malidp_verify_afbc_framebuffer_caps(dev, mode_cmd))
return malidp_verify_afbc_framebuffer_size(dev, file, mode_cmd);
return false;
}
static struct drm_framebuffer *
malidp_fb_create(struct drm_device *dev, struct drm_file *file,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
if (mode_cmd->modifier[0]) {
if (!malidp_verify_afbc_framebuffer(dev, file, mode_cmd))
return ERR_PTR(-EINVAL);
}
return drm_gem_fb_create(dev, file, mode_cmd);
}
static const struct drm_mode_config_funcs malidp_mode_config_funcs = {
.fb_create = malidp_fb_create,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
};
static int malidp_init(struct drm_device *drm)
{
int ret;
struct malidp_drm *malidp = drm_to_malidp(drm);
struct malidp_hw_device *hwdev = malidp->dev;
ret = drmm_mode_config_init(drm);
if (ret)
goto out;
drm->mode_config.min_width = hwdev->min_line_size;
drm->mode_config.min_height = hwdev->min_line_size;
drm->mode_config.max_width = hwdev->max_line_size;
drm->mode_config.max_height = hwdev->max_line_size;
drm->mode_config.funcs = &malidp_mode_config_funcs;
drm->mode_config.helper_private = &malidp_mode_config_helpers;
ret = malidp_crtc_init(drm);
if (ret)
goto out;
ret = malidp_mw_connector_init(drm);
if (ret)
goto out;
out:
return ret;
}
static int malidp_irq_init(struct platform_device *pdev)
{
int irq_de, irq_se, ret = 0;
struct drm_device *drm = dev_get_drvdata(&pdev->dev);
struct malidp_drm *malidp = drm_to_malidp(drm);
struct malidp_hw_device *hwdev = malidp->dev;
/* fetch the interrupts from DT */
irq_de = platform_get_irq_byname(pdev, "DE");
if (irq_de < 0) {
DRM_ERROR("no 'DE' IRQ specified!\n");
return irq_de;
}
irq_se = platform_get_irq_byname(pdev, "SE");
if (irq_se < 0) {
DRM_ERROR("no 'SE' IRQ specified!\n");
return irq_se;
}
ret = malidp_de_irq_init(drm, irq_de);
if (ret)
return ret;
ret = malidp_se_irq_init(drm, irq_se);
if (ret) {
malidp_de_irq_fini(hwdev);
return ret;
}
return 0;
}
DEFINE_DRM_GEM_DMA_FOPS(fops);
static int malidp_dumb_create(struct drm_file *file_priv,
struct drm_device *drm,
struct drm_mode_create_dumb *args)
{
struct malidp_drm *malidp = drm_to_malidp(drm);
/* allocate for the worst case scenario, i.e. rotated buffers */
u8 alignment = malidp_hw_get_pitch_align(malidp->dev, 1);
args->pitch = ALIGN(DIV_ROUND_UP(args->width * args->bpp, 8), alignment);
return drm_gem_dma_dumb_create_internal(file_priv, drm, args);
}
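/*
* Illustration (the alignment value is assumed, not read from real
* hardware): if malidp_hw_get_pitch_align() reported 64 bytes for rotated
* buffers, a 1366-wide 32 bpp dumb buffer would get args->pitch =
* ALIGN(DIV_ROUND_UP(1366 * 32, 8), 64) = ALIGN(5464, 64) = 5504 bytes
* before being handed to the generic DMA dumb-create helper.
*/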
#ifdef CONFIG_DEBUG_FS
static void malidp_error_stats_init(struct malidp_error_stats *error_stats)
{
error_stats->num_errors = 0;
error_stats->last_error_status = 0;
error_stats->last_error_vblank = -1;
}
void malidp_error(struct malidp_drm *malidp,
struct malidp_error_stats *error_stats, u32 status,
u64 vblank)
{
unsigned long irqflags;
spin_lock_irqsave(&malidp->errors_lock, irqflags);
error_stats->last_error_status = status;
error_stats->last_error_vblank = vblank;
error_stats->num_errors++;
spin_unlock_irqrestore(&malidp->errors_lock, irqflags);
}
static void malidp_error_stats_dump(const char *prefix,
struct malidp_error_stats error_stats,
struct seq_file *m)
{
seq_printf(m, "[%s] num_errors : %d\n", prefix,
error_stats.num_errors);
seq_printf(m, "[%s] last_error_status : 0x%08x\n", prefix,
error_stats.last_error_status);
seq_printf(m, "[%s] last_error_vblank : %lld\n", prefix,
error_stats.last_error_vblank);
}
static int malidp_show_stats(struct seq_file *m, void *arg)
{
struct drm_device *drm = m->private;
struct malidp_drm *malidp = drm_to_malidp(drm);
unsigned long irqflags;
struct malidp_error_stats de_errors, se_errors;
spin_lock_irqsave(&malidp->errors_lock, irqflags);
de_errors = malidp->de_errors;
se_errors = malidp->se_errors;
spin_unlock_irqrestore(&malidp->errors_lock, irqflags);
malidp_error_stats_dump("DE", de_errors, m);
malidp_error_stats_dump("SE", se_errors, m);
return 0;
}
static int malidp_debugfs_open(struct inode *inode, struct file *file)
{
return single_open(file, malidp_show_stats, inode->i_private);
}
static ssize_t malidp_debugfs_write(struct file *file, const char __user *ubuf,
size_t len, loff_t *offp)
{
struct seq_file *m = file->private_data;
struct drm_device *drm = m->private;
struct malidp_drm *malidp = drm_to_malidp(drm);
unsigned long irqflags;
spin_lock_irqsave(&malidp->errors_lock, irqflags);
malidp_error_stats_init(&malidp->de_errors);
malidp_error_stats_init(&malidp->se_errors);
spin_unlock_irqrestore(&malidp->errors_lock, irqflags);
return len;
}
static const struct file_operations malidp_debugfs_fops = {
.owner = THIS_MODULE,
.open = malidp_debugfs_open,
.read = seq_read,
.write = malidp_debugfs_write,
.llseek = seq_lseek,
.release = single_release,
};
static void malidp_debugfs_init(struct drm_minor *minor)
{
struct malidp_drm *malidp = drm_to_malidp(minor->dev);
malidp_error_stats_init(&malidp->de_errors);
malidp_error_stats_init(&malidp->se_errors);
spin_lock_init(&malidp->errors_lock);
debugfs_create_file("debug", S_IRUGO | S_IWUSR, minor->debugfs_root,
minor->dev, &malidp_debugfs_fops);
}
#endif //CONFIG_DEBUG_FS
static const struct drm_driver malidp_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE(malidp_dumb_create),
#ifdef CONFIG_DEBUG_FS
.debugfs_init = malidp_debugfs_init,
#endif
.fops = &fops,
.name = "mali-dp",
.desc = "ARM Mali Display Processor driver",
.date = "20160106",
.major = 1,
.minor = 0,
};
static const struct of_device_id malidp_drm_of_match[] = {
{
.compatible = "arm,mali-dp500",
.data = &malidp_device[MALIDP_500]
},
{
.compatible = "arm,mali-dp550",
.data = &malidp_device[MALIDP_550]
},
{
.compatible = "arm,mali-dp650",
.data = &malidp_device[MALIDP_650]
},
{},
};
MODULE_DEVICE_TABLE(of, malidp_drm_of_match);
static bool malidp_is_compatible_hw_id(struct malidp_hw_device *hwdev,
const struct of_device_id *dev_id)
{
u32 core_id;
const char *compatstr_dp500 = "arm,mali-dp500";
bool is_dp500;
bool dt_is_dp500;
/*
* The DP500 CORE_ID register is in a different location, so check it
* first. If the product id field matches, then this is DP500, otherwise
* check the DP550/650 CORE_ID register.
*/
core_id = malidp_hw_read(hwdev, MALIDP500_DC_BASE + MALIDP_DE_CORE_ID);
/* Offset 0x18 will never read 0x500 on products other than DP500. */
is_dp500 = (MALIDP_PRODUCT_ID(core_id) == 0x500);
dt_is_dp500 = strnstr(dev_id->compatible, compatstr_dp500,
sizeof(dev_id->compatible)) != NULL;
if (is_dp500 != dt_is_dp500) {
DRM_ERROR("Device-tree expects %s, but hardware %s DP500.\n",
dev_id->compatible, is_dp500 ? "is" : "is not");
return false;
} else if (!dt_is_dp500) {
u16 product_id;
char buf[32];
core_id = malidp_hw_read(hwdev,
MALIDP550_DC_BASE + MALIDP_DE_CORE_ID);
product_id = MALIDP_PRODUCT_ID(core_id);
snprintf(buf, sizeof(buf), "arm,mali-dp%X", product_id);
if (!strnstr(dev_id->compatible, buf,
sizeof(dev_id->compatible))) {
DRM_ERROR("Device-tree expects %s, but hardware is DP%03X.\n",
dev_id->compatible, product_id);
return false;
}
}
return true;
}
static bool malidp_has_sufficient_address_space(const struct resource *res,
const struct of_device_id *dev_id)
{
resource_size_t res_size = resource_size(res);
const char *compatstr_dp500 = "arm,mali-dp500";
if (!strnstr(dev_id->compatible, compatstr_dp500,
sizeof(dev_id->compatible)))
return res_size >= MALIDP550_ADDR_SPACE_SIZE;
else if (res_size < MALIDP500_ADDR_SPACE_SIZE)
return false;
return true;
}
static ssize_t core_id_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct drm_device *drm = dev_get_drvdata(dev);
struct malidp_drm *malidp = drm_to_malidp(drm);
return sysfs_emit(buf, "%08x\n", malidp->core_id);
}
static DEVICE_ATTR_RO(core_id);
static struct attribute *mali_dp_attrs[] = {
&dev_attr_core_id.attr,
NULL,
};
ATTRIBUTE_GROUPS(mali_dp);
#define MAX_OUTPUT_CHANNELS 3
static int malidp_runtime_pm_suspend(struct device *dev)
{
struct drm_device *drm = dev_get_drvdata(dev);
struct malidp_drm *malidp = drm_to_malidp(drm);
struct malidp_hw_device *hwdev = malidp->dev;
/* we can only suspend if the hardware is in config mode */
WARN_ON(!hwdev->hw->in_config_mode(hwdev));
malidp_se_irq_fini(hwdev);
malidp_de_irq_fini(hwdev);
hwdev->pm_suspended = true;
clk_disable_unprepare(hwdev->mclk);
clk_disable_unprepare(hwdev->aclk);
clk_disable_unprepare(hwdev->pclk);
return 0;
}
static int malidp_runtime_pm_resume(struct device *dev)
{
struct drm_device *drm = dev_get_drvdata(dev);
struct malidp_drm *malidp = drm_to_malidp(drm);
struct malidp_hw_device *hwdev = malidp->dev;
clk_prepare_enable(hwdev->pclk);
clk_prepare_enable(hwdev->aclk);
clk_prepare_enable(hwdev->mclk);
hwdev->pm_suspended = false;
malidp_de_irq_hw_init(hwdev);
malidp_se_irq_hw_init(hwdev);
return 0;
}
static int malidp_bind(struct device *dev)
{
struct resource *res;
struct drm_device *drm;
struct malidp_drm *malidp;
struct malidp_hw_device *hwdev;
struct platform_device *pdev = to_platform_device(dev);
struct of_device_id const *dev_id;
struct drm_encoder *encoder;
/* number of lines for the R, G and B output */
u8 output_width[MAX_OUTPUT_CHANNELS];
int ret = 0, i;
u32 version, out_depth = 0;
malidp = devm_drm_dev_alloc(dev, &malidp_driver, typeof(*malidp), base);
if (IS_ERR(malidp))
return PTR_ERR(malidp);
drm = &malidp->base;
hwdev = drmm_kzalloc(drm, sizeof(*hwdev), GFP_KERNEL);
if (!hwdev)
return -ENOMEM;
hwdev->hw = (struct malidp_hw *)of_device_get_match_data(dev);
malidp->dev = hwdev;
hwdev->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(hwdev->regs))
return PTR_ERR(hwdev->regs);
hwdev->pclk = devm_clk_get(dev, "pclk");
if (IS_ERR(hwdev->pclk))
return PTR_ERR(hwdev->pclk);
hwdev->aclk = devm_clk_get(dev, "aclk");
if (IS_ERR(hwdev->aclk))
return PTR_ERR(hwdev->aclk);
hwdev->mclk = devm_clk_get(dev, "mclk");
if (IS_ERR(hwdev->mclk))
return PTR_ERR(hwdev->mclk);
hwdev->pxlclk = devm_clk_get(dev, "pxlclk");
if (IS_ERR(hwdev->pxlclk))
return PTR_ERR(hwdev->pxlclk);
/* Get the optional framebuffer memory resource */
ret = of_reserved_mem_device_init(dev);
if (ret && ret != -ENODEV)
return ret;
dev_set_drvdata(dev, drm);
/* Enable power management */
pm_runtime_enable(dev);
/* Resume device to enable the clocks */
if (pm_runtime_enabled(dev))
pm_runtime_get_sync(dev);
else
malidp_runtime_pm_resume(dev);
dev_id = of_match_device(malidp_drm_of_match, dev);
if (!dev_id) {
ret = -EINVAL;
goto query_hw_fail;
}
if (!malidp_has_sufficient_address_space(res, dev_id)) {
DRM_ERROR("Insufficient address space in device-tree.\n");
ret = -EINVAL;
goto query_hw_fail;
}
if (!malidp_is_compatible_hw_id(hwdev, dev_id)) {
ret = -EINVAL;
goto query_hw_fail;
}
ret = hwdev->hw->query_hw(hwdev);
if (ret) {
DRM_ERROR("Invalid HW configuration\n");
goto query_hw_fail;
}
version = malidp_hw_read(hwdev, hwdev->hw->map.dc_base + MALIDP_DE_CORE_ID);
DRM_INFO("found ARM Mali-DP%3x version r%dp%d\n", version >> 16,
(version >> 12) & 0xf, (version >> 8) & 0xf);
malidp->core_id = version;
ret = of_property_read_u32(dev->of_node,
"arm,malidp-arqos-value",
&hwdev->arqos_value);
if (ret)
hwdev->arqos_value = 0x0;
/* set the number of lines used for output of RGB data */
ret = of_property_read_u8_array(dev->of_node,
"arm,malidp-output-port-lines",
output_width, MAX_OUTPUT_CHANNELS);
if (ret)
goto query_hw_fail;
for (i = 0; i < MAX_OUTPUT_CHANNELS; i++)
out_depth = (out_depth << 8) | (output_width[i] & 0xf);
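/* e.g. an "arm,malidp-output-port-lines" value of <8 8 8> packs to 0x080808 */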
malidp_hw_write(hwdev, out_depth, hwdev->hw->map.out_depth_base);
hwdev->output_color_depth = out_depth;
atomic_set(&malidp->config_valid, MALIDP_CONFIG_VALID_INIT);
init_waitqueue_head(&malidp->wq);
ret = malidp_init(drm);
if (ret < 0)
goto query_hw_fail;
/* Set the CRTC's port so that the encoder component can find it */
malidp->crtc.port = of_graph_get_port_by_id(dev->of_node, 0);
ret = component_bind_all(dev, drm);
if (ret) {
DRM_ERROR("Failed to bind all components\n");
goto bind_fail;
}
/* We expect to have a maximum of two encoders: one for the actual
* display and a virtual one for the writeback connector
*/
WARN_ON(drm->mode_config.num_encoder > 2);
list_for_each_entry(encoder, &drm->mode_config.encoder_list, head) {
encoder->possible_clones =
(1 << drm->mode_config.num_encoder) - 1;
}
ret = malidp_irq_init(pdev);
if (ret < 0)
goto irq_init_fail;
ret = drm_vblank_init(drm, drm->mode_config.num_crtc);
if (ret < 0) {
DRM_ERROR("failed to initialise vblank\n");
goto vblank_fail;
}
pm_runtime_put(dev);
drm_mode_config_reset(drm);
drm_kms_helper_poll_init(drm);
ret = drm_dev_register(drm, 0);
if (ret)
goto register_fail;
drm_fbdev_dma_setup(drm, 32);
return 0;
register_fail:
drm_kms_helper_poll_fini(drm);
pm_runtime_get_sync(dev);
vblank_fail:
malidp_se_irq_fini(hwdev);
malidp_de_irq_fini(hwdev);
irq_init_fail:
drm_atomic_helper_shutdown(drm);
component_unbind_all(dev, drm);
bind_fail:
of_node_put(malidp->crtc.port);
malidp->crtc.port = NULL;
query_hw_fail:
pm_runtime_put(dev);
if (pm_runtime_enabled(dev))
pm_runtime_disable(dev);
else
malidp_runtime_pm_suspend(dev);
dev_set_drvdata(dev, NULL);
of_reserved_mem_device_release(dev);
return ret;
}
static void malidp_unbind(struct device *dev)
{
struct drm_device *drm = dev_get_drvdata(dev);
struct malidp_drm *malidp = drm_to_malidp(drm);
struct malidp_hw_device *hwdev = malidp->dev;
drm_dev_unregister(drm);
drm_kms_helper_poll_fini(drm);
pm_runtime_get_sync(dev);
drm_atomic_helper_shutdown(drm);
malidp_se_irq_fini(hwdev);
malidp_de_irq_fini(hwdev);
component_unbind_all(dev, drm);
of_node_put(malidp->crtc.port);
malidp->crtc.port = NULL;
pm_runtime_put(dev);
if (pm_runtime_enabled(dev))
pm_runtime_disable(dev);
else
malidp_runtime_pm_suspend(dev);
dev_set_drvdata(dev, NULL);
of_reserved_mem_device_release(dev);
}
static const struct component_master_ops malidp_master_ops = {
.bind = malidp_bind,
.unbind = malidp_unbind,
};
static int malidp_compare_dev(struct device *dev, void *data)
{
struct device_node *np = data;
return dev->of_node == np;
}
static int malidp_platform_probe(struct platform_device *pdev)
{
struct device_node *port;
struct component_match *match = NULL;
if (!pdev->dev.of_node)
return -ENODEV;
/* there is only one output port inside each device, find it */
port = of_graph_get_remote_node(pdev->dev.of_node, 0, 0);
if (!port)
return -ENODEV;
drm_of_component_match_add(&pdev->dev, &match, malidp_compare_dev,
port);
of_node_put(port);
return component_master_add_with_match(&pdev->dev, &malidp_master_ops,
match);
}
static void malidp_platform_remove(struct platform_device *pdev)
{
component_master_del(&pdev->dev, &malidp_master_ops);
}
static int __maybe_unused malidp_pm_suspend(struct device *dev)
{
struct drm_device *drm = dev_get_drvdata(dev);
return drm_mode_config_helper_suspend(drm);
}
static int __maybe_unused malidp_pm_resume(struct device *dev)
{
struct drm_device *drm = dev_get_drvdata(dev);
drm_mode_config_helper_resume(drm);
return 0;
}
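/*
* If runtime PM has not already suspended the device, power it down now
* and record the suspended state for the early-resume path.
*/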
static int __maybe_unused malidp_pm_suspend_late(struct device *dev)
{
if (!pm_runtime_status_suspended(dev)) {
malidp_runtime_pm_suspend(dev);
pm_runtime_set_suspended(dev);
}
return 0;
}
static int __maybe_unused malidp_pm_resume_early(struct device *dev)
{
malidp_runtime_pm_resume(dev);
pm_runtime_set_active(dev);
return 0;
}
static const struct dev_pm_ops malidp_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(malidp_pm_suspend, malidp_pm_resume)
SET_LATE_SYSTEM_SLEEP_PM_OPS(malidp_pm_suspend_late, malidp_pm_resume_early)
SET_RUNTIME_PM_OPS(malidp_runtime_pm_suspend, malidp_runtime_pm_resume, NULL)
};
static struct platform_driver malidp_platform_driver = {
.probe = malidp_platform_probe,
.remove_new = malidp_platform_remove,
.driver = {
.name = "mali-dp",
.pm = &malidp_pm_ops,
.of_match_table = malidp_drm_of_match,
.dev_groups = mali_dp_groups,
},
};
drm_module_platform_driver(malidp_platform_driver);
MODULE_AUTHOR("Liviu Dudau <[email protected]>");
MODULE_DESCRIPTION("ARM Mali DP DRM driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/gpu/drm/arm/malidp_drv.c |
/*
* Copyright (C) 2013-2015 ARM Limited
* Author: Liviu Dudau <[email protected]>
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive
* for more details.
*
* ARM HDLCD Driver
*/
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/console.h>
#include <linux/dma-mapping.h>
#include <linux/list.h>
#include <linux/of_graph.h>
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <drm/drm_aperture.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_dma.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_modeset_helper.h>
#include <drm/drm_module.h>
#include <drm/drm_of.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
#include "hdlcd_drv.h"
#include "hdlcd_regs.h"
static irqreturn_t hdlcd_irq(int irq, void *arg)
{
struct hdlcd_drm_private *hdlcd = arg;
unsigned long irq_status;
irq_status = hdlcd_read(hdlcd, HDLCD_REG_INT_STATUS);
#ifdef CONFIG_DEBUG_FS
if (irq_status & HDLCD_INTERRUPT_UNDERRUN)
atomic_inc(&hdlcd->buffer_underrun_count);
if (irq_status & HDLCD_INTERRUPT_DMA_END)
atomic_inc(&hdlcd->dma_end_count);
if (irq_status & HDLCD_INTERRUPT_BUS_ERROR)
atomic_inc(&hdlcd->bus_error_count);
if (irq_status & HDLCD_INTERRUPT_VSYNC)
atomic_inc(&hdlcd->vsync_count);
#endif
if (irq_status & HDLCD_INTERRUPT_VSYNC)
drm_crtc_handle_vblank(&hdlcd->crtc);
/* acknowledge interrupt(s) */
hdlcd_write(hdlcd, HDLCD_REG_INT_CLEAR, irq_status);
return IRQ_HANDLED;
}
static int hdlcd_irq_install(struct hdlcd_drm_private *hdlcd)
{
int ret;
/* Ensure interrupts are disabled */
hdlcd_write(hdlcd, HDLCD_REG_INT_MASK, 0);
hdlcd_write(hdlcd, HDLCD_REG_INT_CLEAR, ~0);
ret = request_irq(hdlcd->irq, hdlcd_irq, 0, "hdlcd", hdlcd);
if (ret)
return ret;
#ifdef CONFIG_DEBUG_FS
/* enable debug interrupts */
hdlcd_write(hdlcd, HDLCD_REG_INT_MASK, HDLCD_DEBUG_INT_MASK);
#endif
return 0;
}
static void hdlcd_irq_uninstall(struct hdlcd_drm_private *hdlcd)
{
/* disable all the interrupts that we might have enabled */
hdlcd_write(hdlcd, HDLCD_REG_INT_MASK, 0);
free_irq(hdlcd->irq, hdlcd);
}
static int hdlcd_load(struct drm_device *drm, unsigned long flags)
{
struct hdlcd_drm_private *hdlcd = drm_to_hdlcd_priv(drm);
struct platform_device *pdev = to_platform_device(drm->dev);
u32 version;
int ret;
hdlcd->clk = devm_clk_get(drm->dev, "pxlclk");
if (IS_ERR(hdlcd->clk))
return PTR_ERR(hdlcd->clk);
#ifdef CONFIG_DEBUG_FS
atomic_set(&hdlcd->buffer_underrun_count, 0);
atomic_set(&hdlcd->bus_error_count, 0);
atomic_set(&hdlcd->vsync_count, 0);
atomic_set(&hdlcd->dma_end_count, 0);
#endif
hdlcd->mmio = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(hdlcd->mmio)) {
DRM_ERROR("failed to map control registers area\n");
ret = PTR_ERR(hdlcd->mmio);
hdlcd->mmio = NULL;
return ret;
}
version = hdlcd_read(hdlcd, HDLCD_REG_VERSION);
if ((version & HDLCD_PRODUCT_MASK) != HDLCD_PRODUCT_ID) {
DRM_ERROR("unknown product id: 0x%x\n", version);
return -EINVAL;
}
DRM_INFO("found ARM HDLCD version r%dp%d\n",
(version & HDLCD_VERSION_MAJOR_MASK) >> 8,
version & HDLCD_VERSION_MINOR_MASK);
/* Get the optional framebuffer memory resource */
ret = of_reserved_mem_device_init(drm->dev);
if (ret && ret != -ENODEV)
return ret;
ret = dma_set_mask_and_coherent(drm->dev, DMA_BIT_MASK(32));
if (ret)
goto setup_fail;
ret = hdlcd_setup_crtc(drm);
if (ret < 0) {
DRM_ERROR("failed to create crtc\n");
goto setup_fail;
}
ret = platform_get_irq(pdev, 0);
if (ret < 0)
goto irq_fail;
hdlcd->irq = ret;
ret = hdlcd_irq_install(hdlcd);
if (ret < 0) {
DRM_ERROR("failed to install IRQ handler\n");
goto irq_fail;
}
return 0;
irq_fail:
drm_crtc_cleanup(&hdlcd->crtc);
setup_fail:
of_reserved_mem_device_release(drm->dev);
return ret;
}
static const struct drm_mode_config_funcs hdlcd_mode_config_funcs = {
.fb_create = drm_gem_fb_create,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
};
static int hdlcd_setup_mode_config(struct drm_device *drm)
{
int ret;
ret = drmm_mode_config_init(drm);
if (ret)
return ret;
drm->mode_config.min_width = 0;
drm->mode_config.min_height = 0;
drm->mode_config.max_width = HDLCD_MAX_XRES;
drm->mode_config.max_height = HDLCD_MAX_YRES;
drm->mode_config.funcs = &hdlcd_mode_config_funcs;
return 0;
}
#ifdef CONFIG_DEBUG_FS
static int hdlcd_show_underrun_count(struct seq_file *m, void *arg)
{
struct drm_debugfs_entry *entry = m->private;
struct drm_device *drm = entry->dev;
struct hdlcd_drm_private *hdlcd = drm_to_hdlcd_priv(drm);
seq_printf(m, "underrun : %d\n", atomic_read(&hdlcd->buffer_underrun_count));
seq_printf(m, "dma_end : %d\n", atomic_read(&hdlcd->dma_end_count));
seq_printf(m, "bus_error: %d\n", atomic_read(&hdlcd->bus_error_count));
seq_printf(m, "vsync : %d\n", atomic_read(&hdlcd->vsync_count));
return 0;
}
static int hdlcd_show_pxlclock(struct seq_file *m, void *arg)
{
struct drm_debugfs_entry *entry = m->private;
struct drm_device *drm = entry->dev;
struct hdlcd_drm_private *hdlcd = drm_to_hdlcd_priv(drm);
unsigned long clkrate = clk_get_rate(hdlcd->clk);
unsigned long mode_clock = hdlcd->crtc.mode.crtc_clock * 1000;
seq_printf(m, "hw : %lu\n", clkrate);
seq_printf(m, "mode: %lu\n", mode_clock);
return 0;
}
static struct drm_debugfs_info hdlcd_debugfs_list[] = {
{ "interrupt_count", hdlcd_show_underrun_count, 0 },
{ "clocks", hdlcd_show_pxlclock, 0 },
};
#endif
DEFINE_DRM_GEM_DMA_FOPS(fops);
static const struct drm_driver hdlcd_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
DRM_GEM_DMA_DRIVER_OPS,
.fops = &fops,
.name = "hdlcd",
.desc = "ARM HDLCD Controller DRM",
.date = "20151021",
.major = 1,
.minor = 0,
};
static int hdlcd_drm_bind(struct device *dev)
{
struct drm_device *drm;
struct hdlcd_drm_private *hdlcd;
int ret;
hdlcd = devm_drm_dev_alloc(dev, &hdlcd_driver, typeof(*hdlcd), base);
if (IS_ERR(hdlcd))
return PTR_ERR(hdlcd);
drm = &hdlcd->base;
dev_set_drvdata(dev, drm);
ret = hdlcd_setup_mode_config(drm);
if (ret)
goto err_free;
ret = hdlcd_load(drm, 0);
if (ret)
goto err_free;
/* Set the CRTC's port so that the encoder component can find it */
hdlcd->crtc.port = of_graph_get_port_by_id(dev->of_node, 0);
ret = component_bind_all(dev, drm);
if (ret) {
DRM_ERROR("Failed to bind all components\n");
goto err_unload;
}
ret = pm_runtime_set_active(dev);
if (ret)
goto err_pm_active;
pm_runtime_enable(dev);
ret = drm_vblank_init(drm, drm->mode_config.num_crtc);
if (ret < 0) {
DRM_ERROR("failed to initialise vblank\n");
goto err_vblank;
}
/*
* If EFI left us running, take over from simple framebuffer
* drivers. Read HDLCD_REG_COMMAND to see if we are enabled.
*/
if (hdlcd_read(hdlcd, HDLCD_REG_COMMAND)) {
hdlcd_write(hdlcd, HDLCD_REG_COMMAND, 0);
drm_aperture_remove_framebuffers(&hdlcd_driver);
}
drm_mode_config_reset(drm);
drm_kms_helper_poll_init(drm);
#ifdef CONFIG_DEBUG_FS
drm_debugfs_add_files(drm, hdlcd_debugfs_list, ARRAY_SIZE(hdlcd_debugfs_list));
#endif
ret = drm_dev_register(drm, 0);
if (ret)
goto err_register;
drm_fbdev_dma_setup(drm, 32);
return 0;
err_register:
drm_kms_helper_poll_fini(drm);
err_vblank:
pm_runtime_disable(drm->dev);
err_pm_active:
drm_atomic_helper_shutdown(drm);
component_unbind_all(dev, drm);
err_unload:
of_node_put(hdlcd->crtc.port);
hdlcd->crtc.port = NULL;
hdlcd_irq_uninstall(hdlcd);
of_reserved_mem_device_release(drm->dev);
err_free:
dev_set_drvdata(dev, NULL);
return ret;
}
static void hdlcd_drm_unbind(struct device *dev)
{
struct drm_device *drm = dev_get_drvdata(dev);
struct hdlcd_drm_private *hdlcd = drm_to_hdlcd_priv(drm);
drm_dev_unregister(drm);
drm_kms_helper_poll_fini(drm);
component_unbind_all(dev, drm);
of_node_put(hdlcd->crtc.port);
hdlcd->crtc.port = NULL;
pm_runtime_get_sync(dev);
drm_atomic_helper_shutdown(drm);
hdlcd_irq_uninstall(hdlcd);
pm_runtime_put(dev);
if (pm_runtime_enabled(dev))
pm_runtime_disable(dev);
of_reserved_mem_device_release(dev);
dev_set_drvdata(dev, NULL);
}
static const struct component_master_ops hdlcd_master_ops = {
.bind = hdlcd_drm_bind,
.unbind = hdlcd_drm_unbind,
};
static int compare_dev(struct device *dev, void *data)
{
return dev->of_node == data;
}
static int hdlcd_probe(struct platform_device *pdev)
{
struct device_node *port;
struct component_match *match = NULL;
/* there is only one output port inside each device, find it */
port = of_graph_get_remote_node(pdev->dev.of_node, 0, 0);
if (!port)
return -ENODEV;
drm_of_component_match_add(&pdev->dev, &match, compare_dev, port);
of_node_put(port);
return component_master_add_with_match(&pdev->dev, &hdlcd_master_ops,
match);
}
static void hdlcd_remove(struct platform_device *pdev)
{
component_master_del(&pdev->dev, &hdlcd_master_ops);
}
static const struct of_device_id hdlcd_of_match[] = {
{ .compatible = "arm,hdlcd" },
{},
};
MODULE_DEVICE_TABLE(of, hdlcd_of_match);
static int __maybe_unused hdlcd_pm_suspend(struct device *dev)
{
struct drm_device *drm = dev_get_drvdata(dev);
return drm_mode_config_helper_suspend(drm);
}
static int __maybe_unused hdlcd_pm_resume(struct device *dev)
{
struct drm_device *drm = dev_get_drvdata(dev);
drm_mode_config_helper_resume(drm);
return 0;
}
static SIMPLE_DEV_PM_OPS(hdlcd_pm_ops, hdlcd_pm_suspend, hdlcd_pm_resume);
static struct platform_driver hdlcd_platform_driver = {
.probe = hdlcd_probe,
.remove_new = hdlcd_remove,
.driver = {
.name = "hdlcd",
.pm = &hdlcd_pm_ops,
.of_match_table = hdlcd_of_match,
},
};
drm_module_platform_driver(hdlcd_platform_driver);
MODULE_AUTHOR("Liviu Dudau");
MODULE_DESCRIPTION("ARM HDLCD DRM driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/gpu/drm/arm/hdlcd_drv.c |
// SPDX-License-Identifier: GPL-2.0
/*
* (C) COPYRIGHT 2018 ARM Limited. All rights reserved.
* Author: James.Qian.Wang <[email protected]>
*
*/
#include <linux/of.h>
#include <drm/drm_print.h>
#include "komeda_dev.h"
#include "komeda_pipeline.h"
/** komeda_pipeline_add - Add a pipeline to &komeda_dev */
struct komeda_pipeline *
komeda_pipeline_add(struct komeda_dev *mdev, size_t size,
const struct komeda_pipeline_funcs *funcs)
{
struct komeda_pipeline *pipe;
if (mdev->n_pipelines + 1 > KOMEDA_MAX_PIPELINES) {
DRM_ERROR("Exceed max support %d pipelines.\n",
KOMEDA_MAX_PIPELINES);
return ERR_PTR(-ENOSPC);
}
if (size < sizeof(*pipe)) {
DRM_ERROR("Request pipeline size too small.\n");
return ERR_PTR(-EINVAL);
}
pipe = devm_kzalloc(mdev->dev, size, GFP_KERNEL);
if (!pipe)
return ERR_PTR(-ENOMEM);
pipe->mdev = mdev;
pipe->id = mdev->n_pipelines;
pipe->funcs = funcs;
mdev->pipelines[mdev->n_pipelines] = pipe;
mdev->n_pipelines++;
return pipe;
}
void komeda_pipeline_destroy(struct komeda_dev *mdev,
struct komeda_pipeline *pipe)
{
struct komeda_component *c;
int i;
unsigned long avail_comps = pipe->avail_comps;
for_each_set_bit(i, &avail_comps, 32) {
c = komeda_pipeline_get_component(pipe, i);
komeda_component_destroy(mdev, c);
}
clk_put(pipe->pxlclk);
of_node_put(pipe->of_output_links[0]);
of_node_put(pipe->of_output_links[1]);
of_node_put(pipe->of_output_port);
of_node_put(pipe->of_node);
devm_kfree(mdev->dev, pipe);
}
static struct komeda_component **
komeda_pipeline_get_component_pos(struct komeda_pipeline *pipe, int id)
{
struct komeda_dev *mdev = pipe->mdev;
struct komeda_pipeline *temp = NULL;
struct komeda_component **pos = NULL;
switch (id) {
case KOMEDA_COMPONENT_LAYER0:
case KOMEDA_COMPONENT_LAYER1:
case KOMEDA_COMPONENT_LAYER2:
case KOMEDA_COMPONENT_LAYER3:
pos = to_cpos(pipe->layers[id - KOMEDA_COMPONENT_LAYER0]);
break;
case KOMEDA_COMPONENT_WB_LAYER:
pos = to_cpos(pipe->wb_layer);
break;
case KOMEDA_COMPONENT_COMPIZ0:
case KOMEDA_COMPONENT_COMPIZ1:
temp = mdev->pipelines[id - KOMEDA_COMPONENT_COMPIZ0];
if (!temp) {
DRM_ERROR("compiz-%d doesn't exist.\n", id);
return NULL;
}
pos = to_cpos(temp->compiz);
break;
case KOMEDA_COMPONENT_SCALER0:
case KOMEDA_COMPONENT_SCALER1:
pos = to_cpos(pipe->scalers[id - KOMEDA_COMPONENT_SCALER0]);
break;
case KOMEDA_COMPONENT_SPLITTER:
pos = to_cpos(pipe->splitter);
break;
case KOMEDA_COMPONENT_MERGER:
pos = to_cpos(pipe->merger);
break;
case KOMEDA_COMPONENT_IPS0:
case KOMEDA_COMPONENT_IPS1:
temp = mdev->pipelines[id - KOMEDA_COMPONENT_IPS0];
if (!temp) {
DRM_ERROR("ips-%d doesn't exist.\n", id);
return NULL;
}
pos = to_cpos(temp->improc);
break;
case KOMEDA_COMPONENT_TIMING_CTRLR:
pos = to_cpos(pipe->ctrlr);
break;
default:
pos = NULL;
DRM_ERROR("Unknown pipeline resource ID: %d.\n", id);
break;
}
return pos;
}
struct komeda_component *
komeda_pipeline_get_component(struct komeda_pipeline *pipe, int id)
{
struct komeda_component **pos = NULL;
struct komeda_component *c = NULL;
pos = komeda_pipeline_get_component_pos(pipe, id);
if (pos)
c = *pos;
return c;
}
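/*
* Return the component with the lowest set bit in comp_mask, or NULL if
* the mask selects no component of this pipeline.
*/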
struct komeda_component *
komeda_pipeline_get_first_component(struct komeda_pipeline *pipe,
u32 comp_mask)
{
struct komeda_component *c = NULL;
unsigned long comp_mask_local = (unsigned long)comp_mask;
int id;
id = find_first_bit(&comp_mask_local, 32);
if (id < 32)
c = komeda_pipeline_get_component(pipe, id);
return c;
}
static struct komeda_component *
komeda_component_pickup_input(struct komeda_component *c, u32 avail_comps)
{
u32 avail_inputs = c->supported_inputs & (avail_comps);
return komeda_pipeline_get_first_component(c->pipeline, avail_inputs);
}
/** komeda_component_add - Add a component to &komeda_pipeline */
struct komeda_component *
komeda_component_add(struct komeda_pipeline *pipe,
size_t comp_sz, u32 id, u32 hw_id,
const struct komeda_component_funcs *funcs,
u8 max_active_inputs, u32 supported_inputs,
u8 max_active_outputs, u32 __iomem *reg,
const char *name_fmt, ...)
{
struct komeda_component **pos;
struct komeda_component *c;
int idx, *num = NULL;
if (max_active_inputs > KOMEDA_COMPONENT_N_INPUTS) {
WARN(1, "please large KOMEDA_COMPONENT_N_INPUTS to %d.\n",
max_active_inputs);
return ERR_PTR(-ENOSPC);
}
pos = komeda_pipeline_get_component_pos(pipe, id);
if (!pos || (*pos))
return ERR_PTR(-EINVAL);
if (has_bit(id, KOMEDA_PIPELINE_LAYERS)) {
idx = id - KOMEDA_COMPONENT_LAYER0;
num = &pipe->n_layers;
if (idx != pipe->n_layers) {
DRM_ERROR("please add Layer by id sequence.\n");
return ERR_PTR(-EINVAL);
}
} else if (has_bit(id, KOMEDA_PIPELINE_SCALERS)) {
idx = id - KOMEDA_COMPONENT_SCALER0;
num = &pipe->n_scalers;
if (idx != pipe->n_scalers) {
DRM_ERROR("please add Scaler by id sequence.\n");
return ERR_PTR(-EINVAL);
}
}
c = devm_kzalloc(pipe->mdev->dev, comp_sz, GFP_KERNEL);
if (!c)
return ERR_PTR(-ENOMEM);
c->id = id;
c->hw_id = hw_id;
c->reg = reg;
c->pipeline = pipe;
c->max_active_inputs = max_active_inputs;
c->max_active_outputs = max_active_outputs;
c->supported_inputs = supported_inputs;
c->funcs = funcs;
if (name_fmt) {
va_list args;
va_start(args, name_fmt);
vsnprintf(c->name, sizeof(c->name), name_fmt, args);
va_end(args);
}
if (num)
*num = *num + 1;
pipe->avail_comps |= BIT(c->id);
*pos = c;
return c;
}
void komeda_component_destroy(struct komeda_dev *mdev,
struct komeda_component *c)
{
devm_kfree(mdev->dev, c);
}
static void komeda_component_dump(struct komeda_component *c)
{
if (!c)
return;
DRM_DEBUG(" %s: ID %d-0x%08lx.\n",
c->name, c->id, BIT(c->id));
DRM_DEBUG(" max_active_inputs:%d, supported_inputs: 0x%08x.\n",
c->max_active_inputs, c->supported_inputs);
DRM_DEBUG(" max_active_outputs:%d, supported_outputs: 0x%08x.\n",
c->max_active_outputs, c->supported_outputs);
}
static void komeda_pipeline_dump(struct komeda_pipeline *pipe)
{
struct komeda_component *c;
int id;
unsigned long avail_comps = pipe->avail_comps;
DRM_INFO("Pipeline-%d: n_layers: %d, n_scalers: %d, output: %s.\n",
pipe->id, pipe->n_layers, pipe->n_scalers,
pipe->dual_link ? "dual-link" : "single-link");
DRM_INFO(" output_link[0]: %s.\n",
pipe->of_output_links[0] ?
pipe->of_output_links[0]->full_name : "none");
DRM_INFO(" output_link[1]: %s.\n",
pipe->of_output_links[1] ?
pipe->of_output_links[1]->full_name : "none");
for_each_set_bit(id, &avail_comps, 32) {
c = komeda_pipeline_get_component(pipe, id);
komeda_component_dump(c);
}
}
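/*
* Drop any supported input that is not present on the pipeline and
* register this component as an output of every input that does exist.
*/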
static void komeda_component_verify_inputs(struct komeda_component *c)
{
struct komeda_pipeline *pipe = c->pipeline;
struct komeda_component *input;
int id;
unsigned long supported_inputs = c->supported_inputs;
for_each_set_bit(id, &supported_inputs, 32) {
input = komeda_pipeline_get_component(pipe, id);
if (!input) {
c->supported_inputs &= ~(BIT(id));
DRM_WARN("Can not find input(ID-%d) for component: %s.\n",
id, c->name);
continue;
}
input->supported_outputs |= BIT(c->id);
}
}
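/*
* For layer split, the right half is handled by the next layer of the
* same type; return it, or NULL if no such layer follows.
*/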
static struct komeda_layer *
komeda_get_layer_split_right_layer(struct komeda_pipeline *pipe,
struct komeda_layer *left)
{
int index = left->base.id - KOMEDA_COMPONENT_LAYER0;
int i;
for (i = index + 1; i < pipe->n_layers; i++)
if (left->layer_type == pipe->layers[i]->layer_type)
return pipe->layers[i];
return NULL;
}
static void komeda_pipeline_assemble(struct komeda_pipeline *pipe)
{
struct komeda_component *c;
struct komeda_layer *layer;
int i, id;
unsigned long avail_comps = pipe->avail_comps;
for_each_set_bit(id, &avail_comps, 32) {
c = komeda_pipeline_get_component(pipe, id);
komeda_component_verify_inputs(c);
}
/* calculate right layer for the layer split */
for (i = 0; i < pipe->n_layers; i++) {
layer = pipe->layers[i];
layer->right = komeda_get_layer_split_right_layer(pipe, layer);
}
if (pipe->dual_link && !pipe->ctrlr->supports_dual_link) {
pipe->dual_link = false;
DRM_WARN("PIPE-%d doesn't support dual-link, ignore DT dual-link configuration.\n",
pipe->id);
}
}
/* If pipeline_A accepts another pipeline_B's component as input, treat
* pipeline_B as a slave of pipeline_A.
*/
struct komeda_pipeline *
komeda_pipeline_get_slave(struct komeda_pipeline *master)
{
struct komeda_component *slave;
slave = komeda_component_pickup_input(&master->compiz->base,
KOMEDA_PIPELINE_COMPIZS);
return slave ? slave->pipeline : NULL;
}
int komeda_assemble_pipelines(struct komeda_dev *mdev)
{
struct komeda_pipeline *pipe;
int i;
for (i = 0; i < mdev->n_pipelines; i++) {
pipe = mdev->pipelines[i];
komeda_pipeline_assemble(pipe);
komeda_pipeline_dump(pipe);
}
return 0;
}
void komeda_pipeline_dump_register(struct komeda_pipeline *pipe,
struct seq_file *sf)
{
struct komeda_component *c;
u32 id;
unsigned long avail_comps;
seq_printf(sf, "\n======== Pipeline-%d ==========\n", pipe->id);
if (pipe->funcs && pipe->funcs->dump_register)
pipe->funcs->dump_register(pipe, sf);
avail_comps = pipe->avail_comps;
for_each_set_bit(id, &avail_comps, 32) {
c = komeda_pipeline_get_component(pipe, id);
seq_printf(sf, "\n------%s------\n", c->name);
if (c->funcs->dump_register)
c->funcs->dump_register(c, sf);
}
}
| linux-master | drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c |
// SPDX-License-Identifier: GPL-2.0
/*
* (C) COPYRIGHT 2018 ARM Limited. All rights reserved.
* Author: James.Qian.Wang <[email protected]>
*
*/
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
#include <drm/drm_print.h>
#include "komeda_dev.h"
#include "komeda_kms.h"
#include "komeda_framebuffer.h"
static int
komeda_plane_init_data_flow(struct drm_plane_state *st,
struct komeda_crtc_state *kcrtc_st,
struct komeda_data_flow_cfg *dflow)
{
struct komeda_plane *kplane = to_kplane(st->plane);
struct drm_framebuffer *fb = st->fb;
const struct komeda_format_caps *caps = to_kfb(fb)->format_caps;
struct komeda_pipeline *pipe = kplane->layer->base.pipeline;
memset(dflow, 0, sizeof(*dflow));
dflow->blending_zorder = st->normalized_zpos;
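/*
* Planes on the slave pipeline occupy the lowest zpos values, so rebase
* the zorder for the master pipeline; a negative result means the plane
* was placed below the slave's planes.
*/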
if (pipe == to_kcrtc(st->crtc)->master)
dflow->blending_zorder -= kcrtc_st->max_slave_zorder;
if (dflow->blending_zorder < 0) {
DRM_DEBUG_ATOMIC("%s zorder:%d < max_slave_zorder: %d.\n",
st->plane->name, st->normalized_zpos,
kcrtc_st->max_slave_zorder);
return -EINVAL;
}
dflow->pixel_blend_mode = st->pixel_blend_mode;
dflow->layer_alpha = st->alpha >> 8;
dflow->out_x = st->crtc_x;
dflow->out_y = st->crtc_y;
dflow->out_w = st->crtc_w;
dflow->out_h = st->crtc_h;
dflow->in_x = st->src_x >> 16;
dflow->in_y = st->src_y >> 16;
dflow->in_w = st->src_w >> 16;
dflow->in_h = st->src_h >> 16;
dflow->rot = drm_rotation_simplify(st->rotation, caps->supported_rots);
if (!has_bits(dflow->rot, caps->supported_rots)) {
DRM_DEBUG_ATOMIC("rotation(0x%x) isn't supported by %p4cc with modifier: 0x%llx.\n",
dflow->rot, &caps->fourcc, fb->modifier);
return -EINVAL;
}
komeda_complete_data_flow_cfg(kplane->layer, dflow, fb);
return 0;
}
/**
* komeda_plane_atomic_check - build input data flow
* @plane: DRM plane
* @state: the plane state object
*
* RETURNS:
* Zero for success or -errno
*/
static int
komeda_plane_atomic_check(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
plane);
struct komeda_plane *kplane = to_kplane(plane);
struct komeda_plane_state *kplane_st = to_kplane_st(new_plane_state);
struct komeda_layer *layer = kplane->layer;
struct drm_crtc_state *crtc_st;
struct komeda_crtc_state *kcrtc_st;
struct komeda_data_flow_cfg dflow;
int err;
if (!new_plane_state->crtc || !new_plane_state->fb)
return 0;
crtc_st = drm_atomic_get_crtc_state(state,
new_plane_state->crtc);
if (IS_ERR(crtc_st) || !crtc_st->enable) {
DRM_DEBUG_ATOMIC("Cannot update plane on a disabled CRTC.\n");
return -EINVAL;
}
/* crtc is inactive, skip the resource assignment */
if (!crtc_st->active)
return 0;
kcrtc_st = to_kcrtc_st(crtc_st);
err = komeda_plane_init_data_flow(new_plane_state, kcrtc_st, &dflow);
if (err)
return err;
if (dflow.en_split)
err = komeda_build_layer_split_data_flow(layer,
kplane_st, kcrtc_st, &dflow);
else
err = komeda_build_layer_data_flow(layer,
kplane_st, kcrtc_st, &dflow);
return err;
}
/* The plane doesn't represent real HW, so there is no HW update for the
* plane. komeda handles all HW updates in crtc->atomic_flush.
*/
static void
komeda_plane_atomic_update(struct drm_plane *plane,
struct drm_atomic_state *state)
{
}
static const struct drm_plane_helper_funcs komeda_plane_helper_funcs = {
.atomic_check = komeda_plane_atomic_check,
.atomic_update = komeda_plane_atomic_update,
};
static void komeda_plane_destroy(struct drm_plane *plane)
{
drm_plane_cleanup(plane);
kfree(to_kplane(plane));
}
static void komeda_plane_reset(struct drm_plane *plane)
{
struct komeda_plane_state *state;
if (plane->state)
__drm_atomic_helper_plane_destroy_state(plane->state);
kfree(plane->state);
plane->state = NULL;
state = kzalloc(sizeof(*state), GFP_KERNEL);
if (state)
__drm_atomic_helper_plane_reset(plane, &state->base);
}
static struct drm_plane_state *
komeda_plane_atomic_duplicate_state(struct drm_plane *plane)
{
struct komeda_plane_state *new;
if (WARN_ON(!plane->state))
return NULL;
new = kzalloc(sizeof(*new), GFP_KERNEL);
if (!new)
return NULL;
__drm_atomic_helper_plane_duplicate_state(plane, &new->base);
return &new->base;
}
static void
komeda_plane_atomic_destroy_state(struct drm_plane *plane,
struct drm_plane_state *state)
{
__drm_atomic_helper_plane_destroy_state(state);
kfree(to_kplane_st(state));
}
static bool
komeda_plane_format_mod_supported(struct drm_plane *plane,
u32 format, u64 modifier)
{
struct komeda_dev *mdev = plane->dev->dev_private;
struct komeda_plane *kplane = to_kplane(plane);
u32 layer_type = kplane->layer->layer_type;
return komeda_format_mod_supported(&mdev->fmt_tbl, layer_type,
format, modifier, 0);
}
static const struct drm_plane_funcs komeda_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = komeda_plane_destroy,
.reset = komeda_plane_reset,
.atomic_duplicate_state = komeda_plane_atomic_duplicate_state,
.atomic_destroy_state = komeda_plane_atomic_destroy_state,
.format_mod_supported = komeda_plane_format_mod_supported,
};
/* For komeda, a pipeline can be shared between CRTCs */
static u32 get_possible_crtcs(struct komeda_kms_dev *kms,
struct komeda_pipeline *pipe)
{
struct komeda_crtc *crtc;
u32 possible_crtcs = 0;
int i;
for (i = 0; i < kms->n_crtcs; i++) {
crtc = &kms->crtcs[i];
if ((pipe == crtc->master) || (pipe == crtc->slave))
possible_crtcs |= BIT(i);
}
return possible_crtcs;
}
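/*
* Record planes that sit on a CRTC's slave pipeline in that CRTC's
* slave_planes mask.
*/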
static void
komeda_set_crtc_plane_mask(struct komeda_kms_dev *kms,
struct komeda_pipeline *pipe,
struct drm_plane *plane)
{
struct komeda_crtc *kcrtc;
int i;
for (i = 0; i < kms->n_crtcs; i++) {
kcrtc = &kms->crtcs[i];
if (pipe == kcrtc->slave)
kcrtc->slave_planes |= BIT(drm_plane_index(plane));
}
}
/* use Layer0 as primary */
static u32 get_plane_type(struct komeda_kms_dev *kms,
struct komeda_component *c)
{
bool is_primary = (c->id == KOMEDA_COMPONENT_LAYER0);
return is_primary ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY;
}
static int komeda_plane_add(struct komeda_kms_dev *kms,
struct komeda_layer *layer)
{
struct komeda_dev *mdev = kms->base.dev_private;
struct komeda_component *c = &layer->base;
struct komeda_plane *kplane;
struct drm_plane *plane;
u32 *formats, n_formats = 0;
int err;
kplane = kzalloc(sizeof(*kplane), GFP_KERNEL);
if (!kplane)
return -ENOMEM;
plane = &kplane->base;
kplane->layer = layer;
formats = komeda_get_layer_fourcc_list(&mdev->fmt_tbl,
layer->layer_type, &n_formats);
if (!formats) {
kfree(kplane);
return -ENOMEM;
}
err = drm_universal_plane_init(&kms->base, plane,
get_possible_crtcs(kms, c->pipeline),
&komeda_plane_funcs,
formats, n_formats, komeda_supported_modifiers,
get_plane_type(kms, c),
"%s", c->name);
komeda_put_fourcc_list(formats);
if (err) {
kfree(kplane);
return err;
}
drm_plane_helper_add(plane, &komeda_plane_helper_funcs);
err = drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
layer->supported_rots);
if (err)
goto cleanup;
err = drm_plane_create_alpha_property(plane);
if (err)
goto cleanup;
err = drm_plane_create_blend_mode_property(plane,
BIT(DRM_MODE_BLEND_PIXEL_NONE) |
BIT(DRM_MODE_BLEND_PREMULTI) |
BIT(DRM_MODE_BLEND_COVERAGE));
if (err)
goto cleanup;
err = drm_plane_create_color_properties(plane,
BIT(DRM_COLOR_YCBCR_BT601) |
BIT(DRM_COLOR_YCBCR_BT709) |
BIT(DRM_COLOR_YCBCR_BT2020),
BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
BIT(DRM_COLOR_YCBCR_FULL_RANGE),
DRM_COLOR_YCBCR_BT601,
DRM_COLOR_YCBCR_LIMITED_RANGE);
if (err)
goto cleanup;
err = drm_plane_create_zpos_property(plane, layer->base.id, 0, 8);
if (err)
goto cleanup;
komeda_set_crtc_plane_mask(kms, c->pipeline, plane);
return 0;
cleanup:
komeda_plane_destroy(plane);
return err;
}
int komeda_kms_add_planes(struct komeda_kms_dev *kms, struct komeda_dev *mdev)
{
struct komeda_pipeline *pipe;
int i, j, err;
for (i = 0; i < mdev->n_pipelines; i++) {
pipe = mdev->pipelines[i];
for (j = 0; j < pipe->n_layers; j++) {
err = komeda_plane_add(kms, pipe->layers[j]);
if (err)
return err;
}
}
return 0;
}
| linux-master | drivers/gpu/drm/arm/display/komeda/komeda_plane.c |
// SPDX-License-Identifier: GPL-2.0
/*
* (C) COPYRIGHT 2018 ARM Limited. All rights reserved.
* Author: James.Qian.Wang <[email protected]>
*
*/
#include "komeda_dev.h"
#include "komeda_kms.h"
static void
komeda_component_state_reset(struct komeda_component_state *st)
{
st->binding_user = NULL;
st->affected_inputs = st->active_inputs;
st->active_inputs = 0;
st->changed_active_inputs = 0;
}
static struct drm_private_state *
komeda_layer_atomic_duplicate_state(struct drm_private_obj *obj)
{
struct komeda_layer_state *st;
st = kmemdup(obj->state, sizeof(*st), GFP_KERNEL);
if (!st)
return NULL;
komeda_component_state_reset(&st->base);
__drm_atomic_helper_private_obj_duplicate_state(obj, &st->base.obj);
return &st->base.obj;
}
static void
komeda_layer_atomic_destroy_state(struct drm_private_obj *obj,
struct drm_private_state *state)
{
struct komeda_layer_state *st = to_layer_st(priv_to_comp_st(state));
kfree(st);
}
static const struct drm_private_state_funcs komeda_layer_obj_funcs = {
.atomic_duplicate_state = komeda_layer_atomic_duplicate_state,
.atomic_destroy_state = komeda_layer_atomic_destroy_state,
};
static int komeda_layer_obj_add(struct komeda_kms_dev *kms,
struct komeda_layer *layer)
{
struct komeda_layer_state *st;
st = kzalloc(sizeof(*st), GFP_KERNEL);
if (!st)
return -ENOMEM;
st->base.component = &layer->base;
drm_atomic_private_obj_init(&kms->base, &layer->base.obj, &st->base.obj,
&komeda_layer_obj_funcs);
return 0;
}
static struct drm_private_state *
komeda_scaler_atomic_duplicate_state(struct drm_private_obj *obj)
{
struct komeda_scaler_state *st;
st = kmemdup(obj->state, sizeof(*st), GFP_KERNEL);
if (!st)
return NULL;
komeda_component_state_reset(&st->base);
__drm_atomic_helper_private_obj_duplicate_state(obj, &st->base.obj);
return &st->base.obj;
}
static void
komeda_scaler_atomic_destroy_state(struct drm_private_obj *obj,
struct drm_private_state *state)
{
kfree(to_scaler_st(priv_to_comp_st(state)));
}
static const struct drm_private_state_funcs komeda_scaler_obj_funcs = {
.atomic_duplicate_state = komeda_scaler_atomic_duplicate_state,
.atomic_destroy_state = komeda_scaler_atomic_destroy_state,
};
static int komeda_scaler_obj_add(struct komeda_kms_dev *kms,
struct komeda_scaler *scaler)
{
struct komeda_scaler_state *st;
st = kzalloc(sizeof(*st), GFP_KERNEL);
if (!st)
return -ENOMEM;
st->base.component = &scaler->base;
drm_atomic_private_obj_init(&kms->base,
&scaler->base.obj, &st->base.obj,
&komeda_scaler_obj_funcs);
return 0;
}
static struct drm_private_state *
komeda_compiz_atomic_duplicate_state(struct drm_private_obj *obj)
{
struct komeda_compiz_state *st;
st = kmemdup(obj->state, sizeof(*st), GFP_KERNEL);
if (!st)
return NULL;
komeda_component_state_reset(&st->base);
__drm_atomic_helper_private_obj_duplicate_state(obj, &st->base.obj);
return &st->base.obj;
}
static void
komeda_compiz_atomic_destroy_state(struct drm_private_obj *obj,
struct drm_private_state *state)
{
kfree(to_compiz_st(priv_to_comp_st(state)));
}
static const struct drm_private_state_funcs komeda_compiz_obj_funcs = {
.atomic_duplicate_state = komeda_compiz_atomic_duplicate_state,
.atomic_destroy_state = komeda_compiz_atomic_destroy_state,
};
static int komeda_compiz_obj_add(struct komeda_kms_dev *kms,
struct komeda_compiz *compiz)
{
struct komeda_compiz_state *st;
st = kzalloc(sizeof(*st), GFP_KERNEL);
if (!st)
return -ENOMEM;
st->base.component = &compiz->base;
drm_atomic_private_obj_init(&kms->base, &compiz->base.obj, &st->base.obj,
&komeda_compiz_obj_funcs);
return 0;
}
static struct drm_private_state *
komeda_splitter_atomic_duplicate_state(struct drm_private_obj *obj)
{
struct komeda_splitter_state *st;
st = kmemdup(obj->state, sizeof(*st), GFP_KERNEL);
if (!st)
return NULL;
komeda_component_state_reset(&st->base);
__drm_atomic_helper_private_obj_duplicate_state(obj, &st->base.obj);
return &st->base.obj;
}
static void
komeda_splitter_atomic_destroy_state(struct drm_private_obj *obj,
struct drm_private_state *state)
{
kfree(to_splitter_st(priv_to_comp_st(state)));
}
static const struct drm_private_state_funcs komeda_splitter_obj_funcs = {
.atomic_duplicate_state = komeda_splitter_atomic_duplicate_state,
.atomic_destroy_state = komeda_splitter_atomic_destroy_state,
};
static int komeda_splitter_obj_add(struct komeda_kms_dev *kms,
struct komeda_splitter *splitter)
{
struct komeda_splitter_state *st;
st = kzalloc(sizeof(*st), GFP_KERNEL);
if (!st)
return -ENOMEM;
st->base.component = &splitter->base;
drm_atomic_private_obj_init(&kms->base,
&splitter->base.obj, &st->base.obj,
&komeda_splitter_obj_funcs);
return 0;
}
static struct drm_private_state *
komeda_merger_atomic_duplicate_state(struct drm_private_obj *obj)
{
struct komeda_merger_state *st;
st = kmemdup(obj->state, sizeof(*st), GFP_KERNEL);
if (!st)
return NULL;
komeda_component_state_reset(&st->base);
__drm_atomic_helper_private_obj_duplicate_state(obj, &st->base.obj);
return &st->base.obj;
}
static void komeda_merger_atomic_destroy_state(struct drm_private_obj *obj,
struct drm_private_state *state)
{
kfree(to_merger_st(priv_to_comp_st(state)));
}
static const struct drm_private_state_funcs komeda_merger_obj_funcs = {
.atomic_duplicate_state = komeda_merger_atomic_duplicate_state,
.atomic_destroy_state = komeda_merger_atomic_destroy_state,
};
static int komeda_merger_obj_add(struct komeda_kms_dev *kms,
struct komeda_merger *merger)
{
struct komeda_merger_state *st;
st = kzalloc(sizeof(*st), GFP_KERNEL);
if (!st)
return -ENOMEM;
st->base.component = &merger->base;
drm_atomic_private_obj_init(&kms->base,
&merger->base.obj, &st->base.obj,
&komeda_merger_obj_funcs);
return 0;
}
static struct drm_private_state *
komeda_improc_atomic_duplicate_state(struct drm_private_obj *obj)
{
struct komeda_improc_state *st;
st = kmemdup(obj->state, sizeof(*st), GFP_KERNEL);
if (!st)
return NULL;
komeda_component_state_reset(&st->base);
__drm_atomic_helper_private_obj_duplicate_state(obj, &st->base.obj);
return &st->base.obj;
}
static void
komeda_improc_atomic_destroy_state(struct drm_private_obj *obj,
struct drm_private_state *state)
{
kfree(to_improc_st(priv_to_comp_st(state)));
}
static const struct drm_private_state_funcs komeda_improc_obj_funcs = {
.atomic_duplicate_state = komeda_improc_atomic_duplicate_state,
.atomic_destroy_state = komeda_improc_atomic_destroy_state,
};
static int komeda_improc_obj_add(struct komeda_kms_dev *kms,
struct komeda_improc *improc)
{
struct komeda_improc_state *st;
st = kzalloc(sizeof(*st), GFP_KERNEL);
if (!st)
return -ENOMEM;
st->base.component = &improc->base;
drm_atomic_private_obj_init(&kms->base, &improc->base.obj, &st->base.obj,
&komeda_improc_obj_funcs);
return 0;
}
static struct drm_private_state *
komeda_timing_ctrlr_atomic_duplicate_state(struct drm_private_obj *obj)
{
struct komeda_timing_ctrlr_state *st;
st = kmemdup(obj->state, sizeof(*st), GFP_KERNEL);
if (!st)
return NULL;
komeda_component_state_reset(&st->base);
__drm_atomic_helper_private_obj_duplicate_state(obj, &st->base.obj);
return &st->base.obj;
}
static void
komeda_timing_ctrlr_atomic_destroy_state(struct drm_private_obj *obj,
struct drm_private_state *state)
{
kfree(to_ctrlr_st(priv_to_comp_st(state)));
}
static const struct drm_private_state_funcs komeda_timing_ctrlr_obj_funcs = {
.atomic_duplicate_state = komeda_timing_ctrlr_atomic_duplicate_state,
.atomic_destroy_state = komeda_timing_ctrlr_atomic_destroy_state,
};
static int komeda_timing_ctrlr_obj_add(struct komeda_kms_dev *kms,
struct komeda_timing_ctrlr *ctrlr)
{
struct komeda_timing_ctrlr_state *st;
st = kzalloc(sizeof(*st), GFP_KERNEL);
if (!st)
return -ENOMEM;
st->base.component = &ctrlr->base;
drm_atomic_private_obj_init(&kms->base, &ctrlr->base.obj, &st->base.obj,
&komeda_timing_ctrlr_obj_funcs);
return 0;
}
static struct drm_private_state *
komeda_pipeline_atomic_duplicate_state(struct drm_private_obj *obj)
{
struct komeda_pipeline_state *st;
st = kmemdup(obj->state, sizeof(*st), GFP_KERNEL);
if (!st)
return NULL;
st->active_comps = 0;
__drm_atomic_helper_private_obj_duplicate_state(obj, &st->obj);
return &st->obj;
}
static void
komeda_pipeline_atomic_destroy_state(struct drm_private_obj *obj,
struct drm_private_state *state)
{
kfree(priv_to_pipe_st(state));
}
static const struct drm_private_state_funcs komeda_pipeline_obj_funcs = {
.atomic_duplicate_state = komeda_pipeline_atomic_duplicate_state,
.atomic_destroy_state = komeda_pipeline_atomic_destroy_state,
};
static int komeda_pipeline_obj_add(struct komeda_kms_dev *kms,
struct komeda_pipeline *pipe)
{
struct komeda_pipeline_state *st;
st = kzalloc(sizeof(*st), GFP_KERNEL);
if (!st)
return -ENOMEM;
st->pipe = pipe;
drm_atomic_private_obj_init(&kms->base, &pipe->obj, &st->obj,
&komeda_pipeline_obj_funcs);
return 0;
}
int komeda_kms_add_private_objs(struct komeda_kms_dev *kms,
struct komeda_dev *mdev)
{
struct komeda_pipeline *pipe;
int i, j, err;
for (i = 0; i < mdev->n_pipelines; i++) {
pipe = mdev->pipelines[i];
err = komeda_pipeline_obj_add(kms, pipe);
if (err)
return err;
for (j = 0; j < pipe->n_layers; j++) {
err = komeda_layer_obj_add(kms, pipe->layers[j]);
if (err)
return err;
}
if (pipe->wb_layer) {
err = komeda_layer_obj_add(kms, pipe->wb_layer);
if (err)
return err;
}
for (j = 0; j < pipe->n_scalers; j++) {
err = komeda_scaler_obj_add(kms, pipe->scalers[j]);
if (err)
return err;
}
err = komeda_compiz_obj_add(kms, pipe->compiz);
if (err)
return err;
if (pipe->splitter) {
err = komeda_splitter_obj_add(kms, pipe->splitter);
if (err)
return err;
}
if (pipe->merger) {
err = komeda_merger_obj_add(kms, pipe->merger);
if (err)
return err;
}
err = komeda_improc_obj_add(kms, pipe->improc);
if (err)
return err;
err = komeda_timing_ctrlr_obj_add(kms, pipe->ctrlr);
if (err)
return err;
}
return 0;
}
void komeda_kms_cleanup_private_objs(struct komeda_kms_dev *kms)
{
struct drm_mode_config *config = &kms->base.mode_config;
struct drm_private_obj *obj, *next;
list_for_each_entry_safe(obj, next, &config->privobj_list, head)
drm_atomic_private_obj_fini(obj);
}
| linux-master | drivers/gpu/drm/arm/display/komeda/komeda_private_obj.c |
// SPDX-License-Identifier: GPL-2.0
/*
* (C) COPYRIGHT 2018 ARM Limited. All rights reserved.
* Author: James.Qian.Wang <[email protected]>
*
*/
#include <drm/drm_print.h>
#include <linux/clk.h>
#include "komeda_dev.h"
#include "komeda_kms.h"
#include "komeda_pipeline.h"
#include "komeda_framebuffer.h"
static inline bool is_switching_user(void *old, void *new)
{
if (!old || !new)
return false;
return old != new;
}
static struct komeda_pipeline_state *
komeda_pipeline_get_state(struct komeda_pipeline *pipe,
struct drm_atomic_state *state)
{
struct drm_private_state *priv_st;
priv_st = drm_atomic_get_private_obj_state(state, &pipe->obj);
if (IS_ERR(priv_st))
return ERR_CAST(priv_st);
return priv_to_pipe_st(priv_st);
}
struct komeda_pipeline_state *
komeda_pipeline_get_old_state(struct komeda_pipeline *pipe,
struct drm_atomic_state *state)
{
struct drm_private_state *priv_st;
priv_st = drm_atomic_get_old_private_obj_state(state, &pipe->obj);
if (priv_st)
return priv_to_pipe_st(priv_st);
return NULL;
}
static struct komeda_pipeline_state *
komeda_pipeline_get_new_state(struct komeda_pipeline *pipe,
struct drm_atomic_state *state)
{
struct drm_private_state *priv_st;
priv_st = drm_atomic_get_new_private_obj_state(state, &pipe->obj);
if (priv_st)
return priv_to_pipe_st(priv_st);
return NULL;
}
/* Assign pipeline for crtc */
static struct komeda_pipeline_state *
komeda_pipeline_get_state_and_set_crtc(struct komeda_pipeline *pipe,
struct drm_atomic_state *state,
struct drm_crtc *crtc)
{
struct komeda_pipeline_state *st;
st = komeda_pipeline_get_state(pipe, state);
if (IS_ERR(st))
return st;
if (is_switching_user(crtc, st->crtc)) {
DRM_DEBUG_ATOMIC("CRTC%d required pipeline%d is busy.\n",
drm_crtc_index(crtc), pipe->id);
return ERR_PTR(-EBUSY);
}
/* A pipeline can only be disabled when it is free or unused */
if (!crtc && st->active_comps) {
DRM_DEBUG_ATOMIC("Disabling a busy pipeline:%d.\n", pipe->id);
return ERR_PTR(-EBUSY);
}
st->crtc = crtc;
if (crtc) {
struct komeda_crtc_state *kcrtc_st;
kcrtc_st = to_kcrtc_st(drm_atomic_get_new_crtc_state(state,
crtc));
kcrtc_st->active_pipes |= BIT(pipe->id);
kcrtc_st->affected_pipes |= BIT(pipe->id);
}
return st;
}
static struct komeda_component_state *
komeda_component_get_state(struct komeda_component *c,
struct drm_atomic_state *state)
{
struct drm_private_state *priv_st;
WARN_ON(!drm_modeset_is_locked(&c->pipeline->obj.lock));
priv_st = drm_atomic_get_private_obj_state(state, &c->obj);
if (IS_ERR(priv_st))
return ERR_CAST(priv_st);
return priv_to_comp_st(priv_st);
}
static struct komeda_component_state *
komeda_component_get_old_state(struct komeda_component *c,
struct drm_atomic_state *state)
{
struct drm_private_state *priv_st;
priv_st = drm_atomic_get_old_private_obj_state(state, &c->obj);
if (priv_st)
return priv_to_comp_st(priv_st);
return NULL;
}
/**
* komeda_component_get_state_and_set_user()
*
* @c: component to get state and set user
* @state: global atomic state
* @user: direct user, the binding user
* @crtc: the CRTC user, the big boss :)
*
* This function accepts two users:
* - The direct user: a plane/crtc/wb_connector, depending on the component
* - The big boss (CRTC)
* The CRTC is the big boss (the final user), because all component resources
* are eventually assigned to a CRTC; e.g. a layer is bound to a kms_plane,
* but the kms_plane is in turn bound to a CRTC.
*
* The big boss (CRTC) is used for pipeline assignment: a &komeda_component is
* not independent and cannot be assigned to a CRTC freely, it belongs to a
* specific pipeline; only a whole pipeline can be shared between CRTCs, and a
* pipeline as a whole (including all its internal components) is assigned to
* a specific CRTC.
*
* So when setting a user on a komeda_component, first check the status of
* component->pipeline to see if the pipeline is available on this specific
* CRTC. If the pipeline is busy (assigned to another CRTC), the component
* cannot be assigned to the direct user even if the component itself is free.
*/
static struct komeda_component_state *
komeda_component_get_state_and_set_user(struct komeda_component *c,
struct drm_atomic_state *state,
void *user,
struct drm_crtc *crtc)
{
struct komeda_pipeline_state *pipe_st;
struct komeda_component_state *st;
/* First check if the pipeline is available */
pipe_st = komeda_pipeline_get_state_and_set_crtc(c->pipeline,
state, crtc);
if (IS_ERR(pipe_st))
return ERR_CAST(pipe_st);
st = komeda_component_get_state(c, state);
if (IS_ERR(st))
return st;
/* check if the component has been occupied */
if (is_switching_user(user, st->binding_user)) {
DRM_DEBUG_ATOMIC("required %s is busy.\n", c->name);
return ERR_PTR(-EBUSY);
}
st->binding_user = user;
/* mark the component as active if user is valid */
if (st->binding_user)
pipe_st->active_comps |= BIT(c->id);
return st;
}
static void
komeda_component_add_input(struct komeda_component_state *state,
struct komeda_component_output *input,
int idx)
{
struct komeda_component *c = state->component;
WARN_ON((idx < 0 || idx >= c->max_active_inputs));
/* inputs[i] is only valid while it is active. So if input[i] is a newly
* enabled input which switches from disabled to enabled, the old
* inputs[i] is undefined (NOT zeroed); we cannot rely on memcmp and
* instead directly mark it as changed.
*/
if (!has_bit(idx, state->affected_inputs) ||
memcmp(&state->inputs[idx], input, sizeof(*input))) {
memcpy(&state->inputs[idx], input, sizeof(*input));
state->changed_active_inputs |= BIT(idx);
}
state->active_inputs |= BIT(idx);
state->affected_inputs |= BIT(idx);
}
static int
komeda_component_check_input(struct komeda_component_state *state,
struct komeda_component_output *input,
int idx)
{
struct komeda_component *c = state->component;
if ((idx < 0) || (idx >= c->max_active_inputs)) {
DRM_DEBUG_ATOMIC("%s required an invalid %s-input[%d].\n",
input->component->name, c->name, idx);
return -EINVAL;
}
if (has_bit(idx, state->active_inputs)) {
DRM_DEBUG_ATOMIC("%s required %s-input[%d] has been occupied already.\n",
input->component->name, c->name, idx);
return -EINVAL;
}
return 0;
}
static void
komeda_component_set_output(struct komeda_component_output *output,
struct komeda_component *comp,
u8 output_port)
{
output->component = comp;
output->output_port = output_port;
}
static int
komeda_component_validate_private(struct komeda_component *c,
struct komeda_component_state *st)
{
int err;
if (!c->funcs->validate)
return 0;
err = c->funcs->validate(c, st);
if (err)
DRM_DEBUG_ATOMIC("%s validate private failed.\n", c->name);
return err;
}
/* Get current available scaler from the component->supported_outputs */
static struct komeda_scaler *
komeda_component_get_avail_scaler(struct komeda_component *c,
struct drm_atomic_state *state)
{
struct komeda_pipeline_state *pipe_st;
u32 avail_scalers;
pipe_st = komeda_pipeline_get_state(c->pipeline, state);
if (!pipe_st)
return NULL;
avail_scalers = (pipe_st->active_comps & KOMEDA_PIPELINE_SCALERS) ^
KOMEDA_PIPELINE_SCALERS;
c = komeda_component_pickup_output(c, avail_scalers);
return to_scaler(c);
}
static void
komeda_rotate_data_flow(struct komeda_data_flow_cfg *dflow, u32 rot)
{
if (drm_rotation_90_or_270(rot)) {
swap(dflow->in_h, dflow->in_w);
swap(dflow->total_in_h, dflow->total_in_w);
}
}
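/*
* Validate the source window and line size of a layer or writeback layer
* against the framebuffer and the layer's hardware limits.
*/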
static int
komeda_layer_check_cfg(struct komeda_layer *layer,
struct komeda_fb *kfb,
struct komeda_data_flow_cfg *dflow)
{
u32 src_x, src_y, src_w, src_h;
u32 line_sz, max_line_sz;
if (!komeda_fb_is_layer_supported(kfb, layer->layer_type, dflow->rot))
return -EINVAL;
if (layer->base.id == KOMEDA_COMPONENT_WB_LAYER) {
src_x = dflow->out_x;
src_y = dflow->out_y;
src_w = dflow->out_w;
src_h = dflow->out_h;
} else {
src_x = dflow->in_x;
src_y = dflow->in_y;
src_w = dflow->in_w;
src_h = dflow->in_h;
}
if (komeda_fb_check_src_coords(kfb, src_x, src_y, src_w, src_h))
return -EINVAL;
if (!malidp_in_range(&layer->hsize_in, src_w)) {
DRM_DEBUG_ATOMIC("invalidate src_w %d.\n", src_w);
return -EINVAL;
}
if (!malidp_in_range(&layer->vsize_in, src_h)) {
DRM_DEBUG_ATOMIC("invalidate src_h %d.\n", src_h);
return -EINVAL;
}
if (drm_rotation_90_or_270(dflow->rot))
line_sz = dflow->in_h;
else
line_sz = dflow->in_w;
if (kfb->base.format->hsub > 1)
max_line_sz = layer->yuv_line_sz;
else
max_line_sz = layer->line_sz;
if (line_sz > max_line_sz) {
DRM_DEBUG_ATOMIC("Required line_sz: %d exceeds the max size %d\n",
line_sz, max_line_sz);
return -EINVAL;
}
return 0;
}
static int
komeda_layer_validate(struct komeda_layer *layer,
struct komeda_plane_state *kplane_st,
struct komeda_data_flow_cfg *dflow)
{
struct drm_plane_state *plane_st = &kplane_st->base;
struct drm_framebuffer *fb = plane_st->fb;
struct komeda_fb *kfb = to_kfb(fb);
struct komeda_component_state *c_st;
struct komeda_layer_state *st;
int i, err;
err = komeda_layer_check_cfg(layer, kfb, dflow);
if (err)
return err;
c_st = komeda_component_get_state_and_set_user(&layer->base,
plane_st->state, plane_st->plane, plane_st->crtc);
if (IS_ERR(c_st))
return PTR_ERR(c_st);
st = to_layer_st(c_st);
st->rot = dflow->rot;
if (fb->modifier) {
st->hsize = kfb->aligned_w;
st->vsize = kfb->aligned_h;
st->afbc_crop_l = dflow->in_x;
st->afbc_crop_r = kfb->aligned_w - dflow->in_x - dflow->in_w;
st->afbc_crop_t = dflow->in_y;
st->afbc_crop_b = kfb->aligned_h - dflow->in_y - dflow->in_h;
} else {
st->hsize = dflow->in_w;
st->vsize = dflow->in_h;
st->afbc_crop_l = 0;
st->afbc_crop_r = 0;
st->afbc_crop_t = 0;
st->afbc_crop_b = 0;
}
for (i = 0; i < fb->format->num_planes; i++)
st->addr[i] = komeda_fb_get_pixel_addr(kfb, dflow->in_x,
dflow->in_y, i);
err = komeda_component_validate_private(&layer->base, c_st);
if (err)
return err;
/* update the data flow for the next stage */
komeda_component_set_output(&dflow->input, &layer->base, 0);
/*
* The rotation has been handled by the layer, so adjust the data flow
* for the next stage.
*/
komeda_rotate_data_flow(dflow, st->rot);
return 0;
}
static int
komeda_wb_layer_validate(struct komeda_layer *wb_layer,
struct drm_connector_state *conn_st,
struct komeda_data_flow_cfg *dflow)
{
struct komeda_fb *kfb = to_kfb(conn_st->writeback_job->fb);
struct komeda_component_state *c_st;
struct komeda_layer_state *st;
int i, err;
err = komeda_layer_check_cfg(wb_layer, kfb, dflow);
if (err)
return err;
c_st = komeda_component_get_state_and_set_user(&wb_layer->base,
conn_st->state, conn_st->connector, conn_st->crtc);
if (IS_ERR(c_st))
return PTR_ERR(c_st);
st = to_layer_st(c_st);
st->hsize = dflow->out_w;
st->vsize = dflow->out_h;
for (i = 0; i < kfb->base.format->num_planes; i++)
st->addr[i] = komeda_fb_get_pixel_addr(kfb, dflow->out_x,
dflow->out_y, i);
komeda_component_add_input(&st->base, &dflow->input, 0);
komeda_component_set_output(&dflow->input, &wb_layer->base, 0);
return 0;
}
static bool scaling_ratio_valid(u32 size_in, u32 size_out,
u32 max_upscaling, u32 max_downscaling)
{
if (size_out > size_in * max_upscaling)
return false;
else if (size_in > size_out * max_downscaling)
return false;
return true;
}
static int
komeda_scaler_check_cfg(struct komeda_scaler *scaler,
struct komeda_crtc_state *kcrtc_st,
struct komeda_data_flow_cfg *dflow)
{
u32 hsize_in, vsize_in, hsize_out, vsize_out;
u32 max_upscaling;
hsize_in = dflow->in_w;
vsize_in = dflow->in_h;
hsize_out = dflow->out_w;
vsize_out = dflow->out_h;
if (!malidp_in_range(&scaler->hsize, hsize_in) ||
!malidp_in_range(&scaler->hsize, hsize_out)) {
DRM_DEBUG_ATOMIC("Invalid horizontal sizes");
return -EINVAL;
}
if (!malidp_in_range(&scaler->vsize, vsize_in) ||
!malidp_in_range(&scaler->vsize, vsize_out)) {
DRM_DEBUG_ATOMIC("Invalid vertical sizes");
return -EINVAL;
}
/* If the input comes from compiz, the scaling is for writeback, and the
* scaler cannot upscale for writeback.
*/
if (has_bit(dflow->input.component->id, KOMEDA_PIPELINE_COMPIZS))
max_upscaling = 1;
else
max_upscaling = scaler->max_upscaling;
if (!scaling_ratio_valid(hsize_in, hsize_out, max_upscaling,
scaler->max_downscaling)) {
DRM_DEBUG_ATOMIC("Invalid horizontal scaling ratio");
return -EINVAL;
}
if (!scaling_ratio_valid(vsize_in, vsize_out, max_upscaling,
scaler->max_downscaling)) {
DRM_DEBUG_ATOMIC("Invalid vertical scaling ratio");
return -EINVAL;
}
if (hsize_in > hsize_out || vsize_in > vsize_out) {
struct komeda_pipeline *pipe = scaler->base.pipeline;
int err;
err = pipe->funcs->downscaling_clk_check(pipe,
&kcrtc_st->base.adjusted_mode,
komeda_crtc_get_aclk(kcrtc_st), dflow);
if (err) {
DRM_DEBUG_ATOMIC("aclk can't satisfy the clock requirement of the downscaling\n");
return err;
}
}
return 0;
}
static int
komeda_scaler_validate(void *user,
struct komeda_crtc_state *kcrtc_st,
struct komeda_data_flow_cfg *dflow)
{
struct drm_atomic_state *drm_st = kcrtc_st->base.state;
struct komeda_component_state *c_st;
struct komeda_scaler_state *st;
struct komeda_scaler *scaler;
int err = 0;
if (!(dflow->en_scaling || dflow->en_img_enhancement))
return 0;
scaler = komeda_component_get_avail_scaler(dflow->input.component,
drm_st);
if (!scaler) {
DRM_DEBUG_ATOMIC("No scaler available");
return -EINVAL;
}
err = komeda_scaler_check_cfg(scaler, kcrtc_st, dflow);
if (err)
return err;
c_st = komeda_component_get_state_and_set_user(&scaler->base,
drm_st, user, kcrtc_st->base.crtc);
if (IS_ERR(c_st))
return PTR_ERR(c_st);
st = to_scaler_st(c_st);
st->hsize_in = dflow->in_w;
st->vsize_in = dflow->in_h;
st->hsize_out = dflow->out_w;
st->vsize_out = dflow->out_h;
st->right_crop = dflow->right_crop;
st->left_crop = dflow->left_crop;
st->total_vsize_in = dflow->total_in_h;
st->total_hsize_in = dflow->total_in_w;
st->total_hsize_out = dflow->total_out_w;
/* Enable alpha processing if the next stage needs the pixel alpha */
st->en_alpha = dflow->pixel_blend_mode != DRM_MODE_BLEND_PIXEL_NONE;
st->en_scaling = dflow->en_scaling;
st->en_img_enhancement = dflow->en_img_enhancement;
st->en_split = dflow->en_split;
st->right_part = dflow->right_part;
komeda_component_add_input(&st->base, &dflow->input, 0);
komeda_component_set_output(&dflow->input, &scaler->base, 0);
return err;
}
static void komeda_split_data_flow(struct komeda_scaler *scaler,
struct komeda_data_flow_cfg *dflow,
struct komeda_data_flow_cfg *l_dflow,
struct komeda_data_flow_cfg *r_dflow);
static int
komeda_splitter_validate(struct komeda_splitter *splitter,
struct drm_connector_state *conn_st,
struct komeda_data_flow_cfg *dflow,
struct komeda_data_flow_cfg *l_output,
struct komeda_data_flow_cfg *r_output)
{
struct komeda_component_state *c_st;
struct komeda_splitter_state *st;
if (!splitter) {
DRM_DEBUG_ATOMIC("Current HW doesn't support splitter.\n");
return -EINVAL;
}
if (!malidp_in_range(&splitter->hsize, dflow->in_w)) {
DRM_DEBUG_ATOMIC("split in_w:%d is out of the acceptable range.\n",
dflow->in_w);
return -EINVAL;
}
if (!malidp_in_range(&splitter->vsize, dflow->in_h)) {
DRM_DEBUG_ATOMIC("split in_h: %d exceeds the acceptable range.\n",
dflow->in_h);
return -EINVAL;
}
c_st = komeda_component_get_state_and_set_user(&splitter->base,
conn_st->state, conn_st->connector, conn_st->crtc);
if (IS_ERR(c_st))
return PTR_ERR(c_st);
komeda_split_data_flow(splitter->base.pipeline->scalers[0],
dflow, l_output, r_output);
st = to_splitter_st(c_st);
st->hsize = dflow->in_w;
st->vsize = dflow->in_h;
st->overlap = dflow->overlap;
komeda_component_add_input(&st->base, &dflow->input, 0);
komeda_component_set_output(&l_output->input, &splitter->base, 0);
komeda_component_set_output(&r_output->input, &splitter->base, 1);
return 0;
}
static int
komeda_merger_validate(struct komeda_merger *merger,
void *user,
struct komeda_crtc_state *kcrtc_st,
struct komeda_data_flow_cfg *left_input,
struct komeda_data_flow_cfg *right_input,
struct komeda_data_flow_cfg *output)
{
struct komeda_component_state *c_st;
struct komeda_merger_state *st;
int err = 0;
if (!merger) {
DRM_DEBUG_ATOMIC("No merger is available");
return -EINVAL;
}
if (!malidp_in_range(&merger->hsize_merged, output->out_w)) {
DRM_DEBUG_ATOMIC("merged_w: %d is out of the accepted range.\n",
output->out_w);
return -EINVAL;
}
if (!malidp_in_range(&merger->vsize_merged, output->out_h)) {
DRM_DEBUG_ATOMIC("merged_h: %d is out of the accepted range.\n",
output->out_h);
return -EINVAL;
}
c_st = komeda_component_get_state_and_set_user(&merger->base,
kcrtc_st->base.state, kcrtc_st->base.crtc, kcrtc_st->base.crtc);
if (IS_ERR(c_st))
return PTR_ERR(c_st);
st = to_merger_st(c_st);
st->hsize_merged = output->out_w;
st->vsize_merged = output->out_h;
komeda_component_add_input(c_st, &left_input->input, 0);
komeda_component_add_input(c_st, &right_input->input, 1);
komeda_component_set_output(&output->input, &merger->base, 0);
return err;
}
void pipeline_composition_size(struct komeda_crtc_state *kcrtc_st,
u16 *hsize, u16 *vsize)
{
struct drm_display_mode *m = &kcrtc_st->base.adjusted_mode;
if (hsize)
*hsize = m->hdisplay;
if (vsize)
*vsize = m->vdisplay;
}
static int
komeda_compiz_set_input(struct komeda_compiz *compiz,
struct komeda_crtc_state *kcrtc_st,
struct komeda_data_flow_cfg *dflow)
{
struct drm_atomic_state *drm_st = kcrtc_st->base.state;
struct komeda_component_state *c_st, *old_st;
struct komeda_compiz_input_cfg *cin;
u16 compiz_w, compiz_h;
int idx = dflow->blending_zorder;
pipeline_composition_size(kcrtc_st, &compiz_w, &compiz_h);
/* check display rect */
if ((dflow->out_x + dflow->out_w > compiz_w) ||
(dflow->out_y + dflow->out_h > compiz_h) ||
dflow->out_w == 0 || dflow->out_h == 0) {
DRM_DEBUG_ATOMIC("invalid disp rect [x=%d, y=%d, w=%d, h=%d]\n",
dflow->out_x, dflow->out_y,
dflow->out_w, dflow->out_h);
return -EINVAL;
}
c_st = komeda_component_get_state_and_set_user(&compiz->base, drm_st,
kcrtc_st->base.crtc, kcrtc_st->base.crtc);
if (IS_ERR(c_st))
return PTR_ERR(c_st);
if (komeda_component_check_input(c_st, &dflow->input, idx))
return -EINVAL;
cin = &(to_compiz_st(c_st)->cins[idx]);
cin->hsize = dflow->out_w;
cin->vsize = dflow->out_h;
cin->hoffset = dflow->out_x;
cin->voffset = dflow->out_y;
cin->pixel_blend_mode = dflow->pixel_blend_mode;
cin->layer_alpha = dflow->layer_alpha;
old_st = komeda_component_get_old_state(&compiz->base, drm_st);
/* compare with old to check if this input has been changed */
if (WARN_ON(!old_st) ||
memcmp(&(to_compiz_st(old_st)->cins[idx]), cin, sizeof(*cin)))
c_st->changed_active_inputs |= BIT(idx);
komeda_component_add_input(c_st, &dflow->input, idx);
komeda_component_set_output(&dflow->input, &compiz->base, 0);
return 0;
}
static int
komeda_compiz_validate(struct komeda_compiz *compiz,
struct komeda_crtc_state *state,
struct komeda_data_flow_cfg *dflow)
{
struct komeda_component_state *c_st;
struct komeda_compiz_state *st;
c_st = komeda_component_get_state_and_set_user(&compiz->base,
state->base.state, state->base.crtc, state->base.crtc);
if (IS_ERR(c_st))
return PTR_ERR(c_st);
st = to_compiz_st(c_st);
pipeline_composition_size(state, &st->hsize, &st->vsize);
komeda_component_set_output(&dflow->input, &compiz->base, 0);
/* the compiz output dflow will be fed to the next pipeline stage; prepare
* the data flow configuration for that next stage
*/
if (dflow) {
dflow->in_w = st->hsize;
dflow->in_h = st->vsize;
dflow->out_w = dflow->in_w;
dflow->out_h = dflow->in_h;
/* the output data of compiz doesn't have alpha, so it can only be
* used as the bottom layer when blending it with the master layers
*/
dflow->pixel_blend_mode = DRM_MODE_BLEND_PIXEL_NONE;
dflow->layer_alpha = 0xFF;
dflow->blending_zorder = 0;
}
return 0;
}
static int
komeda_improc_validate(struct komeda_improc *improc,
struct komeda_crtc_state *kcrtc_st,
struct komeda_data_flow_cfg *dflow)
{
struct drm_crtc *crtc = kcrtc_st->base.crtc;
struct drm_crtc_state *crtc_st = &kcrtc_st->base;
struct komeda_component_state *c_st;
struct komeda_improc_state *st;
c_st = komeda_component_get_state_and_set_user(&improc->base,
kcrtc_st->base.state, crtc, crtc);
if (IS_ERR(c_st))
return PTR_ERR(c_st);
st = to_improc_st(c_st);
st->hsize = dflow->in_w;
st->vsize = dflow->in_h;
if (drm_atomic_crtc_needs_modeset(crtc_st)) {
u32 output_depths, output_formats;
u32 avail_depths, avail_formats;
komeda_crtc_get_color_config(crtc_st, &output_depths,
&output_formats);
avail_depths = output_depths & improc->supported_color_depths;
if (avail_depths == 0) {
DRM_DEBUG_ATOMIC("No available color depths, conn depths: 0x%x & display: 0x%x\n",
output_depths,
improc->supported_color_depths);
return -EINVAL;
}
avail_formats = output_formats &
improc->supported_color_formats;
if (!avail_formats) {
DRM_DEBUG_ATOMIC("No available color_formats, conn formats 0x%x & display: 0x%x\n",
output_formats,
improc->supported_color_formats);
return -EINVAL;
}
st->color_depth = __fls(avail_depths);
st->color_format = BIT(__ffs(avail_formats));
}
if (kcrtc_st->base.color_mgmt_changed) {
drm_lut_to_fgamma_coeffs(kcrtc_st->base.gamma_lut,
st->fgamma_coeffs);
drm_ctm_to_coeffs(kcrtc_st->base.ctm, st->ctm_coeffs);
}
komeda_component_add_input(&st->base, &dflow->input, 0);
komeda_component_set_output(&dflow->input, &improc->base, 0);
return 0;
}
static int
komeda_timing_ctrlr_validate(struct komeda_timing_ctrlr *ctrlr,
struct komeda_crtc_state *kcrtc_st,
struct komeda_data_flow_cfg *dflow)
{
struct drm_crtc *crtc = kcrtc_st->base.crtc;
struct komeda_timing_ctrlr_state *st;
struct komeda_component_state *c_st;
c_st = komeda_component_get_state_and_set_user(&ctrlr->base,
kcrtc_st->base.state, crtc, crtc);
if (IS_ERR(c_st))
return PTR_ERR(c_st);
st = to_ctrlr_st(c_st);
komeda_component_add_input(&st->base, &dflow->input, 0);
komeda_component_set_output(&dflow->input, &ctrlr->base, 0);
return 0;
}
void komeda_complete_data_flow_cfg(struct komeda_layer *layer,
struct komeda_data_flow_cfg *dflow,
struct drm_framebuffer *fb)
{
struct komeda_scaler *scaler = layer->base.pipeline->scalers[0];
u32 w = dflow->in_w;
u32 h = dflow->in_h;
dflow->total_in_w = dflow->in_w;
dflow->total_in_h = dflow->in_h;
dflow->total_out_w = dflow->out_w;
/* if format doesn't have alpha, fix blend mode to PIXEL_NONE */
if (!fb->format->has_alpha)
dflow->pixel_blend_mode = DRM_MODE_BLEND_PIXEL_NONE;
if (drm_rotation_90_or_270(dflow->rot))
swap(w, h);
dflow->en_scaling = (w != dflow->out_w) || (h != dflow->out_h);
dflow->is_yuv = fb->format->is_yuv;
/* try to enable image enhancer if data flow is a 2x+ upscaling */
dflow->en_img_enhancement = dflow->out_w >= 2 * w ||
dflow->out_h >= 2 * h;
/* try to enable split if the scaling exceeds the scaler's acceptable
* input/output range.
*/
if (dflow->en_scaling && scaler)
dflow->en_split = !malidp_in_range(&scaler->hsize, dflow->in_w) ||
!malidp_in_range(&scaler->hsize, dflow->out_w);
}
static bool merger_is_available(struct komeda_pipeline *pipe,
struct komeda_data_flow_cfg *dflow)
{
u32 avail_inputs = pipe->merger ?
pipe->merger->base.supported_inputs : 0;
return has_bit(dflow->input.component->id, avail_inputs);
}
int komeda_build_layer_data_flow(struct komeda_layer *layer,
struct komeda_plane_state *kplane_st,
struct komeda_crtc_state *kcrtc_st,
struct komeda_data_flow_cfg *dflow)
{
struct drm_plane *plane = kplane_st->base.plane;
struct komeda_pipeline *pipe = layer->base.pipeline;
int err;
DRM_DEBUG_ATOMIC("%s handling [PLANE:%d:%s]: src[x/y:%d/%d, w/h:%d/%d] disp[x/y:%d/%d, w/h:%d/%d]",
layer->base.name, plane->base.id, plane->name,
dflow->in_x, dflow->in_y, dflow->in_w, dflow->in_h,
dflow->out_x, dflow->out_y, dflow->out_w, dflow->out_h);
err = komeda_layer_validate(layer, kplane_st, dflow);
if (err)
return err;
err = komeda_scaler_validate(plane, kcrtc_st, dflow);
if (err)
return err;
/* if split is enabled, check whether the data flow can go into the merger */
if (dflow->en_split && merger_is_available(pipe, dflow))
return 0;
err = komeda_compiz_set_input(pipe->compiz, kcrtc_st, dflow);
return err;
}
/*
* Split is introduced to work around the scaler's input/output size limitation.
* The idea is simple: if one scaler can not meet the requirement, use two.
* So split divides the big source image into two half parts (left/right) and
* does the scaling with two scalers separately and independently.
* But split also introduces an edge problem in the middle of the image when
* scaling. To avoid it, the split isn't a simple half-and-half, but adds extra
* pixels (overlap) to both sides; after the split the left/right parts are:
* - left: [0, src_length/2 + overlap]
* - right: [src_length/2 - overlap, src_length]
* The extra overlap does eliminate the edge problem, but it may also generate
* unnecessary pixels when scaling. We need to crop them before the scaler
* outputs the result to the next stage, and how to crop depends on where the
* unneeded pixels are, in other words where the overlap has been added:
* - left: crop the right
* - right: crop the left
*
* The diagram for how to do the split
*
* <---------------------left->out_w ---------------->
* |--------------------------------|---right_crop-----| <- left after split
* \ \ /
* \ \<--overlap--->/
* |-----------------|-------------|(Middle)------|-----------------| <- src
* /<---overlap--->\ \
* / \ \
* right after split->|-----left_crop---|--------------------------------|
* ^<------------------- right->out_w --------------->^
*
* NOTE: To be consistent with the HW, output_w always includes the crop size.
*/
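/* An illustrative worked example of the split math below (a sketch only; the
* overlap value is hypothetical, real values come from the scaler caps):
* for a non-rotated, non-flipped scaling flow with in_w = 3840, out_w = 1920
* and a scaling_split_overlap of 32:
* - l_dflow/r_dflow in_w = 3840 / 2 + 32 = 1952, r_dflow in_x = 1888
* - display halves: l_dflow out_w = 960 at x 0, r_dflow out_w = 960 at x 960
* - scaled width per half: 1920 * 1952 / 3840 = 976, so the left flow gets
*   right_crop = 16 and the right flow gets left_crop = 16
* - with the crop included, both halves end up with out_w = 976
*/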
static void komeda_split_data_flow(struct komeda_scaler *scaler,
struct komeda_data_flow_cfg *dflow,
struct komeda_data_flow_cfg *l_dflow,
struct komeda_data_flow_cfg *r_dflow)
{
bool r90 = drm_rotation_90_or_270(dflow->rot);
bool flip_h = has_flip_h(dflow->rot);
u32 l_out, r_out, overlap;
memcpy(l_dflow, dflow, sizeof(*dflow));
memcpy(r_dflow, dflow, sizeof(*dflow));
l_dflow->right_part = false;
r_dflow->right_part = true;
r_dflow->blending_zorder = dflow->blending_zorder + 1;
overlap = 0;
if (dflow->en_scaling && scaler)
overlap += scaler->scaling_split_overlap;
/* the original dflow may be fed into the splitter, which doesn't need
* the enhancement overlap
*/
dflow->overlap = overlap;
if (dflow->en_img_enhancement && scaler)
overlap += scaler->enh_split_overlap;
l_dflow->overlap = overlap;
r_dflow->overlap = overlap;
/* split the original content */
/* left/right here always means the left/right part of the display image,
* not the source image
*/
/* DRM rotation is anti-clockwise */
if (r90) {
if (dflow->en_scaling) {
l_dflow->in_h = ALIGN(dflow->in_h, 2) / 2 + l_dflow->overlap;
r_dflow->in_h = l_dflow->in_h;
} else if (dflow->en_img_enhancement) {
/* enhancer only */
l_dflow->in_h = ALIGN(dflow->in_h, 2) / 2 + l_dflow->overlap;
r_dflow->in_h = dflow->in_h / 2 + r_dflow->overlap;
} else {
/* split without scaler, no overlap */
l_dflow->in_h = ALIGN(((dflow->in_h + 1) >> 1), 2);
r_dflow->in_h = dflow->in_h - l_dflow->in_h;
}
/* Consider YUV formats: after the split, the split source w/h
* may not be aligned to 2. There are two choices for such a case:
* 1. scaler is enabled (overlap != 0): align both left/right and
* crop the extra data with the scaler.
* 2. scaler is not enabled: only align the split left src/disp,
* and assign the remaining part to the right.
*/
if ((overlap != 0) && dflow->is_yuv) {
l_dflow->in_h = ALIGN(l_dflow->in_h, 2);
r_dflow->in_h = ALIGN(r_dflow->in_h, 2);
}
if (flip_h)
l_dflow->in_y = dflow->in_y + dflow->in_h - l_dflow->in_h;
else
r_dflow->in_y = dflow->in_y + dflow->in_h - r_dflow->in_h;
} else {
if (dflow->en_scaling) {
l_dflow->in_w = ALIGN(dflow->in_w, 2) / 2 + l_dflow->overlap;
r_dflow->in_w = l_dflow->in_w;
} else if (dflow->en_img_enhancement) {
l_dflow->in_w = ALIGN(dflow->in_w, 2) / 2 + l_dflow->overlap;
r_dflow->in_w = dflow->in_w / 2 + r_dflow->overlap;
} else {
l_dflow->in_w = ALIGN(((dflow->in_w + 1) >> 1), 2);
r_dflow->in_w = dflow->in_w - l_dflow->in_w;
}
/* do YUV alignment when scaler enabled */
if ((overlap != 0) && dflow->is_yuv) {
l_dflow->in_w = ALIGN(l_dflow->in_w, 2);
r_dflow->in_w = ALIGN(r_dflow->in_w, 2);
}
/* with flip_h, the left display content comes from the right part of the source */
if (flip_h)
l_dflow->in_x = dflow->in_w + dflow->in_x - l_dflow->in_w;
else
r_dflow->in_x = dflow->in_w + dflow->in_x - r_dflow->in_w;
}
/* split the disp_rect */
if (dflow->en_scaling || dflow->en_img_enhancement)
l_dflow->out_w = ((dflow->out_w + 1) >> 1);
else
l_dflow->out_w = ALIGN(((dflow->out_w + 1) >> 1), 2);
r_dflow->out_w = dflow->out_w - l_dflow->out_w;
l_dflow->out_x = dflow->out_x;
r_dflow->out_x = l_dflow->out_w + l_dflow->out_x;
/* calculate the scaling crop */
/* the left scaler outputs more data and crops the excess */
if (r90) {
l_out = (dflow->out_w * l_dflow->in_h) / dflow->in_h;
r_out = (dflow->out_w * r_dflow->in_h) / dflow->in_h;
} else {
l_out = (dflow->out_w * l_dflow->in_w) / dflow->in_w;
r_out = (dflow->out_w * r_dflow->in_w) / dflow->in_w;
}
l_dflow->left_crop = 0;
l_dflow->right_crop = l_out - l_dflow->out_w;
r_dflow->left_crop = r_out - r_dflow->out_w;
r_dflow->right_crop = 0;
/* out_w includes the crop length */
l_dflow->out_w += l_dflow->right_crop + l_dflow->left_crop;
r_dflow->out_w += r_dflow->right_crop + r_dflow->left_crop;
}
/* For layer split, a plane state will be split into two data flows and handled
* by two separate komeda layer input pipelines. komeda supports two types of
* layer split:
* - non-scaling split:
* / layer-left -> \
* plane_state compiz-> ...
* \ layer-right-> /
*
* - scaling split:
* / layer-left -> scaler->\
* plane_state merger -> compiz-> ...
* \ layer-right-> scaler->/
*
* Since the merger only supports scalers as input, for a non-scaling split the
* two layer data flows are output to the compiz directly; for a scaling split,
* the two data flows are first merged by the merger, which then outputs one
* merged data flow to the compiz.
*/
int komeda_build_layer_split_data_flow(struct komeda_layer *left,
struct komeda_plane_state *kplane_st,
struct komeda_crtc_state *kcrtc_st,
struct komeda_data_flow_cfg *dflow)
{
struct drm_plane *plane = kplane_st->base.plane;
struct komeda_pipeline *pipe = left->base.pipeline;
struct komeda_layer *right = left->right;
struct komeda_data_flow_cfg l_dflow, r_dflow;
int err;
komeda_split_data_flow(pipe->scalers[0], dflow, &l_dflow, &r_dflow);
DRM_DEBUG_ATOMIC("Assign %s + %s to [PLANE:%d:%s]: "
"src[x/y:%d/%d, w/h:%d/%d] disp[x/y:%d/%d, w/h:%d/%d]",
left->base.name, right->base.name,
plane->base.id, plane->name,
dflow->in_x, dflow->in_y, dflow->in_w, dflow->in_h,
dflow->out_x, dflow->out_y, dflow->out_w, dflow->out_h);
err = komeda_build_layer_data_flow(left, kplane_st, kcrtc_st, &l_dflow);
if (err)
return err;
err = komeda_build_layer_data_flow(right, kplane_st, kcrtc_st, &r_dflow);
if (err)
return err;
/* The rotation has been handled by the layer, so adjust the data flow accordingly */
komeda_rotate_data_flow(dflow, dflow->rot);
/* the left and right dflows have already been merged into the compiz,
* so the merger isn't needed to merge them anymore.
*/
if (r_dflow.input.component == l_dflow.input.component)
return 0;
/* line merger path */
err = komeda_merger_validate(pipe->merger, plane, kcrtc_st,
&l_dflow, &r_dflow, dflow);
if (err)
return err;
err = komeda_compiz_set_input(pipe->compiz, kcrtc_st, dflow);
return err;
}
/* writeback data path: compiz -> scaler -> wb_layer -> memory */
int komeda_build_wb_data_flow(struct komeda_layer *wb_layer,
struct drm_connector_state *conn_st,
struct komeda_crtc_state *kcrtc_st,
struct komeda_data_flow_cfg *dflow)
{
struct drm_connector *conn = conn_st->connector;
int err;
err = komeda_scaler_validate(conn, kcrtc_st, dflow);
if (err)
return err;
return komeda_wb_layer_validate(wb_layer, conn_st, dflow);
}
/* writeback scaling split data path:
* /-> scaler ->\
* compiz -> splitter merger -> wb_layer -> memory
* \-> scaler ->/
*/
int komeda_build_wb_split_data_flow(struct komeda_layer *wb_layer,
struct drm_connector_state *conn_st,
struct komeda_crtc_state *kcrtc_st,
struct komeda_data_flow_cfg *dflow)
{
struct komeda_pipeline *pipe = wb_layer->base.pipeline;
struct drm_connector *conn = conn_st->connector;
struct komeda_data_flow_cfg l_dflow, r_dflow;
int err;
err = komeda_splitter_validate(pipe->splitter, conn_st,
dflow, &l_dflow, &r_dflow);
if (err)
return err;
err = komeda_scaler_validate(conn, kcrtc_st, &l_dflow);
if (err)
return err;
err = komeda_scaler_validate(conn, kcrtc_st, &r_dflow);
if (err)
return err;
err = komeda_merger_validate(pipe->merger, conn_st, kcrtc_st,
&l_dflow, &r_dflow, dflow);
if (err)
return err;
return komeda_wb_layer_validate(wb_layer, conn_st, dflow);
}
/* build display output data flow, the data path is:
* compiz -> improc -> timing_ctrlr
*/
int komeda_build_display_data_flow(struct komeda_crtc *kcrtc,
struct komeda_crtc_state *kcrtc_st)
{
struct komeda_pipeline *master = kcrtc->master;
struct komeda_pipeline *slave = kcrtc->slave;
struct komeda_data_flow_cfg m_dflow; /* master data flow */
struct komeda_data_flow_cfg s_dflow; /* slave data flow */
int err;
memset(&m_dflow, 0, sizeof(m_dflow));
memset(&s_dflow, 0, sizeof(s_dflow));
if (slave && has_bit(slave->id, kcrtc_st->active_pipes)) {
err = komeda_compiz_validate(slave->compiz, kcrtc_st, &s_dflow);
if (err)
return err;
/* merge the slave dflow into master pipeline */
err = komeda_compiz_set_input(master->compiz, kcrtc_st,
&s_dflow);
if (err)
return err;
}
err = komeda_compiz_validate(master->compiz, kcrtc_st, &m_dflow);
if (err)
return err;
err = komeda_improc_validate(master->improc, kcrtc_st, &m_dflow);
if (err)
return err;
err = komeda_timing_ctrlr_validate(master->ctrlr, kcrtc_st, &m_dflow);
if (err)
return err;
return 0;
}
static void
komeda_pipeline_unbound_components(struct komeda_pipeline *pipe,
struct komeda_pipeline_state *new)
{
struct drm_atomic_state *drm_st = new->obj.state;
struct komeda_pipeline_state *old = priv_to_pipe_st(pipe->obj.state);
struct komeda_component_state *c_st;
struct komeda_component *c;
u32 id;
unsigned long disabling_comps;
WARN_ON(!old);
disabling_comps = (~new->active_comps) & old->active_comps;
/* unbind all components that are being disabled */
for_each_set_bit(id, &disabling_comps, 32) {
c = komeda_pipeline_get_component(pipe, id);
c_st = komeda_component_get_state_and_set_user(c,
drm_st, NULL, new->crtc);
WARN_ON(IS_ERR(c_st));
}
}
/* release unclaimed pipeline resource */
int komeda_release_unclaimed_resources(struct komeda_pipeline *pipe,
struct komeda_crtc_state *kcrtc_st)
{
struct drm_atomic_state *drm_st = kcrtc_st->base.state;
struct komeda_pipeline_state *st;
/* ignore the pipeline which is not affected */
if (!pipe || !has_bit(pipe->id, kcrtc_st->affected_pipes))
return 0;
if (has_bit(pipe->id, kcrtc_st->active_pipes))
st = komeda_pipeline_get_new_state(pipe, drm_st);
else
st = komeda_pipeline_get_state_and_set_crtc(pipe, drm_st, NULL);
if (WARN_ON(IS_ERR_OR_NULL(st)))
return -EINVAL;
komeda_pipeline_unbound_components(pipe, st);
return 0;
}
/* Since standalone disabled components must be disabled separately and last,
* a complete disable operation may need to call pipeline_disable twice
* (two-phase disabling).
* Phase 1: disable the common components, flush it.
* Phase 2: disable the standalone disabled components, flush it.
*
* RETURNS:
* true: disable is not complete, needs a phase 2 disable.
* false: disable is complete.
*/
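/* A minimal sketch of the expected caller pattern (it mirrors what
* komeda_crtc_atomic_disable() does later in this driver; the flush arguments
* are simplified here): phase 1 disable and flush first, then, if needed,
* phase 2 disable and flush:
*
*	needs_phase2 = komeda_pipeline_disable(pipe, old_state);
*	mdev->funcs->flush(mdev, pipe->id, 0);
*	if (needs_phase2) {
*		komeda_pipeline_disable(pipe, old_state);
*		mdev->funcs->flush(mdev, pipe->id, 0);
*	}
*/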
bool komeda_pipeline_disable(struct komeda_pipeline *pipe,
struct drm_atomic_state *old_state)
{
struct komeda_pipeline_state *old;
struct komeda_component *c;
struct komeda_component_state *c_st;
u32 id;
unsigned long disabling_comps;
old = komeda_pipeline_get_old_state(pipe, old_state);
disabling_comps = old->active_comps &
(~pipe->standalone_disabled_comps);
if (!disabling_comps)
disabling_comps = old->active_comps &
pipe->standalone_disabled_comps;
DRM_DEBUG_ATOMIC("PIPE%d: active_comps: 0x%x, disabling_comps: 0x%lx.\n",
pipe->id, old->active_comps, disabling_comps);
for_each_set_bit(id, &disabling_comps, 32) {
c = komeda_pipeline_get_component(pipe, id);
c_st = priv_to_comp_st(c->obj.state);
/*
* If we disabled a component then all active_inputs should be
* put in the list of changed_active_inputs, so they get
* re-enabled.
* This usually happens during a modeset when the pipeline is
* first disabled and then the actual state gets committed
* again.
*/
c_st->changed_active_inputs |= c_st->active_inputs;
c->funcs->disable(c);
}
/* Update the pipeline state; if there are components that are still
* active, return true so that the phase 2 disable gets called.
*/
old->active_comps &= ~disabling_comps;
return old->active_comps ? true : false;
}
void komeda_pipeline_update(struct komeda_pipeline *pipe,
struct drm_atomic_state *old_state)
{
struct komeda_pipeline_state *new = priv_to_pipe_st(pipe->obj.state);
struct komeda_pipeline_state *old;
struct komeda_component *c;
u32 id;
unsigned long changed_comps;
old = komeda_pipeline_get_old_state(pipe, old_state);
changed_comps = new->active_comps | old->active_comps;
DRM_DEBUG_ATOMIC("PIPE%d: active_comps: 0x%x, changed: 0x%lx.\n",
pipe->id, new->active_comps, changed_comps);
for_each_set_bit(id, &changed_comps, 32) {
c = komeda_pipeline_get_component(pipe, id);
if (new->active_comps & BIT(c->id))
c->funcs->update(c, priv_to_comp_st(c->obj.state));
else
c->funcs->disable(c);
}
}
| linux-master | drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c |
// SPDX-License-Identifier: GPL-2.0
/*
* (C) COPYRIGHT 2019 ARM Limited. All rights reserved.
* Author: James.Qian.Wang <[email protected]>
*
*/
#include "komeda_color_mgmt.h"
/* 10bit precision YUV2RGB matrix */
static const s32 yuv2rgb_bt601_narrow[KOMEDA_N_YUV2RGB_COEFFS] = {
1192, 0, 1634,
1192, -401, -832,
1192, 2066, 0,
64, 512, 512
};
static const s32 yuv2rgb_bt601_wide[KOMEDA_N_YUV2RGB_COEFFS] = {
1024, 0, 1436,
1024, -352, -731,
1024, 1815, 0,
0, 512, 512
};
static const s32 yuv2rgb_bt709_narrow[KOMEDA_N_YUV2RGB_COEFFS] = {
1192, 0, 1836,
1192, -218, -546,
1192, 2163, 0,
64, 512, 512
};
static const s32 yuv2rgb_bt709_wide[KOMEDA_N_YUV2RGB_COEFFS] = {
1024, 0, 1613,
1024, -192, -479,
1024, 1900, 0,
0, 512, 512
};
static const s32 yuv2rgb_bt2020[KOMEDA_N_YUV2RGB_COEFFS] = {
1024, 0, 1476,
1024, -165, -572,
1024, 1884, 0,
0, 512, 512
};
const s32 *komeda_select_yuv2rgb_coeffs(u32 color_encoding, u32 color_range)
{
bool narrow = color_range == DRM_COLOR_YCBCR_LIMITED_RANGE;
const s32 *coeffs;
switch (color_encoding) {
case DRM_COLOR_YCBCR_BT709:
coeffs = narrow ? yuv2rgb_bt709_narrow : yuv2rgb_bt709_wide;
break;
case DRM_COLOR_YCBCR_BT601:
coeffs = narrow ? yuv2rgb_bt601_narrow : yuv2rgb_bt601_wide;
break;
case DRM_COLOR_YCBCR_BT2020:
coeffs = yuv2rgb_bt2020;
break;
default:
coeffs = NULL;
break;
}
return coeffs;
}
struct gamma_curve_sector {
u32 boundary_start;
u32 num_of_segments;
u32 segment_width;
};
struct gamma_curve_segment {
u32 start;
u32 end;
};
static struct gamma_curve_sector sector_tbl[] = {
{ 0, 4, 4 },
{ 16, 4, 4 },
{ 32, 4, 8 },
{ 64, 4, 16 },
{ 128, 4, 32 },
{ 256, 4, 64 },
{ 512, 16, 32 },
{ 1024, 24, 128 },
};
static void
drm_lut_to_coeffs(struct drm_property_blob *lut_blob, u32 *coeffs,
struct gamma_curve_sector *sector_tbl, u32 num_sectors)
{
struct drm_color_lut *lut;
u32 i, j, in, num = 0;
if (!lut_blob)
return;
lut = lut_blob->data;
for (i = 0; i < num_sectors; i++) {
for (j = 0; j < sector_tbl[i].num_of_segments; j++) {
in = sector_tbl[i].boundary_start +
j * sector_tbl[i].segment_width;
coeffs[num++] = drm_color_lut_extract(lut[in].red,
KOMEDA_COLOR_PRECISION);
}
}
coeffs[num] = BIT(KOMEDA_COLOR_PRECISION);
}
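/* A short worked example of the sector-table sampling above (a sketch; the
* gamma LUT length itself comes from KOMEDA_COLOR_LUT_SIZE elsewhere in the
* driver): sector { 0, 4, 4 } samples LUT entries 0, 4, 8, 12, sector
* { 512, 16, 32 } samples 512, 544, ..., 992, and the last sector
* { 1024, 24, 128 } samples 1024, 1152, ..., 3968. In total 64 coefficients
* are taken from the LUT, and the final coefficient written above is
* BIT(KOMEDA_COLOR_PRECISION), which looks like the implicit full-scale
* end point.
*/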
void drm_lut_to_fgamma_coeffs(struct drm_property_blob *lut_blob, u32 *coeffs)
{
drm_lut_to_coeffs(lut_blob, coeffs, sector_tbl, ARRAY_SIZE(sector_tbl));
}
void drm_ctm_to_coeffs(struct drm_property_blob *ctm_blob, u32 *coeffs)
{
struct drm_color_ctm *ctm;
u32 i;
if (!ctm_blob)
return;
ctm = ctm_blob->data;
for (i = 0; i < KOMEDA_N_CTM_COEFFS; i++)
coeffs[i] = drm_color_ctm_s31_32_to_qm_n(ctm->matrix[i], 3, 12);
}
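/* Example of the S31.32 -> Q3.12 conversion above (assuming the standard DRM
* CTM fixed-point encoding): an identity CTM has
* matrix[0] = matrix[4] = matrix[8] = 1.0, i.e. 0x100000000 in S31.32, which
* drm_color_ctm_s31_32_to_qm_n(..., 3, 12) turns into 0x1000 (1.0 in Q3.12);
* all the other coefficients become 0.
*/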
| linux-master | drivers/gpu/drm/arm/display/komeda/komeda_color_mgmt.c |
// SPDX-License-Identifier: GPL-2.0
/*
* (C) COPYRIGHT 2018 ARM Limited. All rights reserved.
* Author: James.Qian.Wang <[email protected]>
*
*/
#include <linux/interrupt.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
#include "komeda_dev.h"
#include "komeda_framebuffer.h"
#include "komeda_kms.h"
DEFINE_DRM_GEM_DMA_FOPS(komeda_cma_fops);
static int komeda_gem_dma_dumb_create(struct drm_file *file,
struct drm_device *dev,
struct drm_mode_create_dumb *args)
{
struct komeda_dev *mdev = dev->dev_private;
u32 pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
args->pitch = ALIGN(pitch, mdev->chip.bus_width);
return drm_gem_dma_dumb_create_internal(file, dev, args);
}
static irqreturn_t komeda_kms_irq_handler(int irq, void *data)
{
struct drm_device *drm = data;
struct komeda_dev *mdev = drm->dev_private;
struct komeda_kms_dev *kms = to_kdev(drm);
struct komeda_events evts;
irqreturn_t status;
u32 i;
/* Call into the CHIP to recognize events */
memset(&evts, 0, sizeof(evts));
status = mdev->funcs->irq_handler(mdev, &evts);
komeda_print_events(&evts, drm);
/* Notify the crtc to handle the events */
for (i = 0; i < kms->n_crtcs; i++)
komeda_crtc_handle_event(&kms->crtcs[i], &evts);
return status;
}
static const struct drm_driver komeda_kms_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE(komeda_gem_dma_dumb_create),
.fops = &komeda_cma_fops,
.name = "komeda",
.desc = "Arm Komeda Display Processor driver",
.date = "20181101",
.major = 0,
.minor = 1,
};
static void komeda_kms_atomic_commit_hw_done(struct drm_atomic_state *state)
{
struct drm_device *dev = state->dev;
struct komeda_kms_dev *kms = to_kdev(dev);
int i;
for (i = 0; i < kms->n_crtcs; i++) {
struct komeda_crtc *kcrtc = &kms->crtcs[i];
if (kcrtc->base.state->active) {
struct completion *flip_done = NULL;
if (kcrtc->base.state->event)
flip_done = kcrtc->base.state->event->base.completion;
komeda_crtc_flush_and_wait_for_flip_done(kcrtc, flip_done);
}
}
drm_atomic_helper_commit_hw_done(state);
}
static void komeda_kms_commit_tail(struct drm_atomic_state *old_state)
{
struct drm_device *dev = old_state->dev;
bool fence_cookie = dma_fence_begin_signalling();
drm_atomic_helper_commit_modeset_disables(dev, old_state);
drm_atomic_helper_commit_planes(dev, old_state,
DRM_PLANE_COMMIT_ACTIVE_ONLY);
drm_atomic_helper_commit_modeset_enables(dev, old_state);
komeda_kms_atomic_commit_hw_done(old_state);
drm_atomic_helper_wait_for_flip_done(dev, old_state);
dma_fence_end_signalling(fence_cookie);
drm_atomic_helper_cleanup_planes(dev, old_state);
}
static const struct drm_mode_config_helper_funcs komeda_mode_config_helpers = {
.atomic_commit_tail = komeda_kms_commit_tail,
};
static int komeda_plane_state_list_add(struct drm_plane_state *plane_st,
struct list_head *zorder_list)
{
struct komeda_plane_state *new = to_kplane_st(plane_st);
struct komeda_plane_state *node, *last;
last = list_empty(zorder_list) ?
NULL : list_last_entry(zorder_list, typeof(*last), zlist_node);
/* The list is kept in increasing zpos order, so if the list is empty
* or the zpos of the new node is bigger than the last node's, there is
* no need to loop, just insert the new one at the tail of the list.
*/
if (!last || (new->base.zpos > last->base.zpos)) {
list_add_tail(&new->zlist_node, zorder_list);
return 0;
}
/* Build the list by zpos increasing */
list_for_each_entry(node, zorder_list, zlist_node) {
if (new->base.zpos < node->base.zpos) {
list_add_tail(&new->zlist_node, &node->zlist_node);
break;
} else if (node->base.zpos == new->base.zpos) {
struct drm_plane *a = node->base.plane;
struct drm_plane *b = new->base.plane;
/* Komeda doesn't support setting the same zpos for
* different planes.
*/
DRM_DEBUG_ATOMIC("PLANE: %s and PLANE: %s are configured same zpos: %d.\n",
a->name, b->name, node->base.zpos);
return -EINVAL;
}
}
return 0;
}
static int komeda_crtc_normalize_zpos(struct drm_crtc *crtc,
struct drm_crtc_state *crtc_st)
{
struct drm_atomic_state *state = crtc_st->state;
struct komeda_crtc *kcrtc = to_kcrtc(crtc);
struct komeda_crtc_state *kcrtc_st = to_kcrtc_st(crtc_st);
struct komeda_plane_state *kplane_st;
struct drm_plane_state *plane_st;
struct drm_plane *plane;
struct list_head zorder_list;
int order = 0, err;
DRM_DEBUG_ATOMIC("[CRTC:%d:%s] calculating normalized zpos values\n",
crtc->base.id, crtc->name);
INIT_LIST_HEAD(&zorder_list);
/* This loop also adds all affected planes to the new state */
drm_for_each_plane_mask(plane, crtc->dev, crtc_st->plane_mask) {
plane_st = drm_atomic_get_plane_state(state, plane);
if (IS_ERR(plane_st))
return PTR_ERR(plane_st);
/* Build a list by zpos increasing */
err = komeda_plane_state_list_add(plane_st, &zorder_list);
if (err)
return err;
}
kcrtc_st->max_slave_zorder = 0;
list_for_each_entry(kplane_st, &zorder_list, zlist_node) {
plane_st = &kplane_st->base;
plane = plane_st->plane;
plane_st->normalized_zpos = order++;
/* When layer_split has been enabled, one plane will be handled
* by two separate komeda layers (left/right), which may need
* two zorders.
* - zorder: for the left layer handling the left display part.
* - zorder + 1: reserved for the right layer.
*/
if (to_kplane_st(plane_st)->layer_split)
order++;
DRM_DEBUG_ATOMIC("[PLANE:%d:%s] zpos:%d, normalized zpos: %d\n",
plane->base.id, plane->name,
plane_st->zpos, plane_st->normalized_zpos);
/* calculate max slave zorder */
if (has_bit(drm_plane_index(plane), kcrtc->slave_planes))
kcrtc_st->max_slave_zorder =
max(plane_st->normalized_zpos,
kcrtc_st->max_slave_zorder);
}
crtc_st->zpos_changed = true;
return 0;
}
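/* A small illustration of the normalization above (hypothetical planes): with
* plane A at zpos 0 using layer_split and plane B at zpos 1, A gets
* normalized_zpos 0 and reserves 1 for its right layer, so B ends up with
* normalized_zpos 2.
*/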
static int komeda_kms_check(struct drm_device *dev,
struct drm_atomic_state *state)
{
struct drm_crtc *crtc;
struct drm_crtc_state *new_crtc_st;
int i, err;
err = drm_atomic_helper_check_modeset(dev, state);
if (err)
return err;
/* Komeda needs to re-calculate the resource assignment in every commit,
* so all affected_planes (even unchanged ones) need to be added to the
* drm_atomic_state.
*/
for_each_new_crtc_in_state(state, crtc, new_crtc_st, i) {
err = drm_atomic_add_affected_planes(state, crtc);
if (err)
return err;
err = komeda_crtc_normalize_zpos(crtc, new_crtc_st);
if (err)
return err;
}
err = drm_atomic_helper_check_planes(dev, state);
if (err)
return err;
return 0;
}
static const struct drm_mode_config_funcs komeda_mode_config_funcs = {
.fb_create = komeda_fb_create,
.atomic_check = komeda_kms_check,
.atomic_commit = drm_atomic_helper_commit,
};
static void komeda_kms_mode_config_init(struct komeda_kms_dev *kms,
struct komeda_dev *mdev)
{
struct drm_mode_config *config = &kms->base.mode_config;
drm_mode_config_init(&kms->base);
komeda_kms_setup_crtcs(kms, mdev);
/* Get value from dev */
config->min_width = 0;
config->min_height = 0;
config->max_width = 4096;
config->max_height = 4096;
config->funcs = &komeda_mode_config_funcs;
config->helper_private = &komeda_mode_config_helpers;
}
struct komeda_kms_dev *komeda_kms_attach(struct komeda_dev *mdev)
{
struct komeda_kms_dev *kms;
struct drm_device *drm;
int err;
kms = devm_drm_dev_alloc(mdev->dev, &komeda_kms_driver,
struct komeda_kms_dev, base);
if (IS_ERR(kms))
return kms;
drm = &kms->base;
drm->dev_private = mdev;
komeda_kms_mode_config_init(kms, mdev);
err = komeda_kms_add_private_objs(kms, mdev);
if (err)
goto cleanup_mode_config;
err = komeda_kms_add_planes(kms, mdev);
if (err)
goto cleanup_mode_config;
err = drm_vblank_init(drm, kms->n_crtcs);
if (err)
goto cleanup_mode_config;
err = komeda_kms_add_crtcs(kms, mdev);
if (err)
goto cleanup_mode_config;
err = komeda_kms_add_wb_connectors(kms, mdev);
if (err)
goto cleanup_mode_config;
drm_mode_config_reset(drm);
err = devm_request_irq(drm->dev, mdev->irq,
komeda_kms_irq_handler, IRQF_SHARED,
drm->driver->name, drm);
if (err)
goto cleanup_mode_config;
drm_kms_helper_poll_init(drm);
err = drm_dev_register(drm, 0);
if (err)
goto free_interrupts;
return kms;
free_interrupts:
drm_kms_helper_poll_fini(drm);
cleanup_mode_config:
drm_mode_config_cleanup(drm);
komeda_kms_cleanup_private_objs(kms);
drm->dev_private = NULL;
return ERR_PTR(err);
}
void komeda_kms_detach(struct komeda_kms_dev *kms)
{
struct drm_device *drm = &kms->base;
drm_dev_unregister(drm);
drm_kms_helper_poll_fini(drm);
drm_atomic_helper_shutdown(drm);
drm_mode_config_cleanup(drm);
komeda_kms_cleanup_private_objs(kms);
drm->dev_private = NULL;
}
| linux-master | drivers/gpu/drm/arm/display/komeda/komeda_kms.c |
// SPDX-License-Identifier: GPL-2.0
/*
* (C) COPYRIGHT 2018 ARM Limited. All rights reserved.
* Author: James.Qian.Wang <[email protected]>
*
*/
#include <linux/clk.h>
#include <linux/pm_runtime.h>
#include <linux/spinlock.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
#include <drm/drm_simple_kms_helper.h>
#include <drm/drm_bridge.h>
#include "komeda_dev.h"
#include "komeda_kms.h"
void komeda_crtc_get_color_config(struct drm_crtc_state *crtc_st,
u32 *color_depths, u32 *color_formats)
{
struct drm_connector *conn;
struct drm_connector_state *conn_st;
u32 conn_color_formats = ~0u;
int i, min_bpc = 31, conn_bpc = 0;
for_each_new_connector_in_state(crtc_st->state, conn, conn_st, i) {
if (conn_st->crtc != crtc_st->crtc)
continue;
conn_bpc = conn->display_info.bpc ? conn->display_info.bpc : 8;
conn_color_formats &= conn->display_info.color_formats;
if (conn_bpc < min_bpc)
min_bpc = conn_bpc;
}
/* if no connector configures any color_format, use RGB444 as the default */
if (!conn_color_formats)
conn_color_formats = DRM_COLOR_FORMAT_RGB444;
*color_depths = GENMASK(min_bpc, 0);
*color_formats = conn_color_formats;
}
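/* Illustrative result of the helper above (hypothetical connectors): with one
* connector reporting 8 bpc and another reporting 10 bpc, min_bpc is 8, so
* *color_depths becomes GENMASK(8, 0) = 0x1ff and only depths of 8 bits or
* less survive the later AND against the improc's supported_color_depths.
*/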
static void komeda_crtc_update_clock_ratio(struct komeda_crtc_state *kcrtc_st)
{
u64 pxlclk, aclk;
if (!kcrtc_st->base.active) {
kcrtc_st->clock_ratio = 0;
return;
}
pxlclk = kcrtc_st->base.adjusted_mode.crtc_clock * 1000ULL;
aclk = komeda_crtc_get_aclk(kcrtc_st);
kcrtc_st->clock_ratio = div64_u64(aclk << 32, pxlclk);
}
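/* clock_ratio above is stored as an unsigned 32.32 fixed-point value. As a
* rough illustration (the clock numbers are made up): aclk = 400 MHz and
* pxlclk = 148.5 MHz give a ratio of about 2.69, so the upper 32 bits hold
* the integer part 2 and the lower 32 bits hold the fractional part.
*/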
/**
* komeda_crtc_atomic_check - build display output data flow
* @crtc: DRM crtc
* @state: the crtc state object
*
* crtc_atomic_check is the final check stage, so besides building a display
* data pipeline according to the crtc_state, it also needs to release or
* disable the unclaimed pipeline resources.
*
* RETURNS:
* Zero for success or -errno
*/
static int
komeda_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
crtc);
struct komeda_crtc *kcrtc = to_kcrtc(crtc);
struct komeda_crtc_state *kcrtc_st = to_kcrtc_st(crtc_state);
int err;
if (drm_atomic_crtc_needs_modeset(crtc_state))
komeda_crtc_update_clock_ratio(kcrtc_st);
if (crtc_state->active) {
err = komeda_build_display_data_flow(kcrtc, kcrtc_st);
if (err)
return err;
}
/* release unclaimed pipeline resources */
err = komeda_release_unclaimed_resources(kcrtc->slave, kcrtc_st);
if (err)
return err;
err = komeda_release_unclaimed_resources(kcrtc->master, kcrtc_st);
if (err)
return err;
return 0;
}
/* Activating a crtc mainly needs two parts of preparation:
* 1. adjust the display operation mode.
* 2. enable the needed clocks.
*/
static int
komeda_crtc_prepare(struct komeda_crtc *kcrtc)
{
struct komeda_dev *mdev = kcrtc->base.dev->dev_private;
struct komeda_pipeline *master = kcrtc->master;
struct komeda_crtc_state *kcrtc_st = to_kcrtc_st(kcrtc->base.state);
struct drm_display_mode *mode = &kcrtc_st->base.adjusted_mode;
u32 new_mode;
int err;
mutex_lock(&mdev->lock);
new_mode = mdev->dpmode | BIT(master->id);
if (WARN_ON(new_mode == mdev->dpmode)) {
err = 0;
goto unlock;
}
err = mdev->funcs->change_opmode(mdev, new_mode);
if (err) {
DRM_ERROR("failed to change opmode: 0x%x -> 0x%x.\n,",
mdev->dpmode, new_mode);
goto unlock;
}
mdev->dpmode = new_mode;
/* The aclk only needs to be enabled in single display mode; there is no
* need to enable it in dual display mode, since dual mode is always
* entered from single display mode and the aclk is already enabled, so
* there is no need to enable it again.
*/
if (new_mode != KOMEDA_MODE_DUAL_DISP) {
err = clk_set_rate(mdev->aclk, komeda_crtc_get_aclk(kcrtc_st));
if (err)
DRM_ERROR("failed to set aclk.\n");
err = clk_prepare_enable(mdev->aclk);
if (err)
DRM_ERROR("failed to enable aclk.\n");
}
err = clk_set_rate(master->pxlclk, mode->crtc_clock * 1000);
if (err)
DRM_ERROR("failed to set pxlclk for pipe%d\n", master->id);
err = clk_prepare_enable(master->pxlclk);
if (err)
DRM_ERROR("failed to enable pxl clk for pipe%d.\n", master->id);
unlock:
mutex_unlock(&mdev->lock);
return err;
}
static int
komeda_crtc_unprepare(struct komeda_crtc *kcrtc)
{
struct komeda_dev *mdev = kcrtc->base.dev->dev_private;
struct komeda_pipeline *master = kcrtc->master;
u32 new_mode;
int err;
mutex_lock(&mdev->lock);
new_mode = mdev->dpmode & (~BIT(master->id));
if (WARN_ON(new_mode == mdev->dpmode)) {
err = 0;
goto unlock;
}
err = mdev->funcs->change_opmode(mdev, new_mode);
if (err) {
DRM_ERROR("failed to change opmode: 0x%x -> 0x%x.\n,",
mdev->dpmode, new_mode);
goto unlock;
}
mdev->dpmode = new_mode;
clk_disable_unprepare(master->pxlclk);
if (new_mode == KOMEDA_MODE_INACTIVE)
clk_disable_unprepare(mdev->aclk);
unlock:
mutex_unlock(&mdev->lock);
return err;
}
void komeda_crtc_handle_event(struct komeda_crtc *kcrtc,
struct komeda_events *evts)
{
struct drm_crtc *crtc = &kcrtc->base;
u32 events = evts->pipes[kcrtc->master->id];
if (events & KOMEDA_EVENT_VSYNC)
drm_crtc_handle_vblank(crtc);
if (events & KOMEDA_EVENT_EOW) {
struct komeda_wb_connector *wb_conn = kcrtc->wb_conn;
if (wb_conn)
drm_writeback_signal_completion(&wb_conn->base, 0);
else
DRM_WARN("CRTC[%d]: EOW happen but no wb_connector.\n",
drm_crtc_index(&kcrtc->base));
}
/* will handle it together with the write back support */
if (events & KOMEDA_EVENT_EOW)
DRM_DEBUG("EOW.\n");
if (events & KOMEDA_EVENT_FLIP) {
unsigned long flags;
struct drm_pending_vblank_event *event;
spin_lock_irqsave(&crtc->dev->event_lock, flags);
if (kcrtc->disable_done) {
complete_all(kcrtc->disable_done);
kcrtc->disable_done = NULL;
} else if (crtc->state->event) {
event = crtc->state->event;
/*
* Consume event before notifying drm core that flip
* happened.
*/
crtc->state->event = NULL;
drm_crtc_send_vblank_event(crtc, event);
} else {
DRM_WARN("CRTC[%d]: FLIP happened but no pending commit.\n",
drm_crtc_index(&kcrtc->base));
}
spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
}
}
static void
komeda_crtc_do_flush(struct drm_crtc *crtc,
struct drm_crtc_state *old)
{
struct komeda_crtc *kcrtc = to_kcrtc(crtc);
struct komeda_crtc_state *kcrtc_st = to_kcrtc_st(crtc->state);
struct komeda_dev *mdev = kcrtc->base.dev->dev_private;
struct komeda_pipeline *master = kcrtc->master;
struct komeda_pipeline *slave = kcrtc->slave;
struct komeda_wb_connector *wb_conn = kcrtc->wb_conn;
struct drm_connector_state *conn_st;
DRM_DEBUG_ATOMIC("CRTC%d_FLUSH: active_pipes: 0x%x, affected: 0x%x.\n",
drm_crtc_index(crtc),
kcrtc_st->active_pipes, kcrtc_st->affected_pipes);
/* step 1: update the pipeline/component state to HW */
if (has_bit(master->id, kcrtc_st->affected_pipes))
komeda_pipeline_update(master, old->state);
if (slave && has_bit(slave->id, kcrtc_st->affected_pipes))
komeda_pipeline_update(slave, old->state);
conn_st = wb_conn ? wb_conn->base.base.state : NULL;
if (conn_st && conn_st->writeback_job)
drm_writeback_queue_job(&wb_conn->base, conn_st);
/* step 2: notify the HW to kickoff the update */
mdev->funcs->flush(mdev, master->id, kcrtc_st->active_pipes);
}
static void
komeda_crtc_atomic_enable(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct drm_crtc_state *old = drm_atomic_get_old_crtc_state(state,
crtc);
pm_runtime_get_sync(crtc->dev->dev);
komeda_crtc_prepare(to_kcrtc(crtc));
drm_crtc_vblank_on(crtc);
WARN_ON(drm_crtc_vblank_get(crtc));
komeda_crtc_do_flush(crtc, old);
}
void
komeda_crtc_flush_and_wait_for_flip_done(struct komeda_crtc *kcrtc,
struct completion *input_flip_done)
{
struct drm_device *drm = kcrtc->base.dev;
struct komeda_dev *mdev = kcrtc->master->mdev;
struct completion *flip_done;
struct completion temp;
int timeout;
/* if caller doesn't send a flip_done, use a private flip_done */
if (input_flip_done) {
flip_done = input_flip_done;
} else {
init_completion(&temp);
kcrtc->disable_done = &temp;
flip_done = &temp;
}
mdev->funcs->flush(mdev, kcrtc->master->id, 0);
/* wait for the flip to take effect. */
timeout = wait_for_completion_timeout(flip_done, HZ);
if (timeout == 0) {
DRM_ERROR("wait pipe%d flip done timeout\n", kcrtc->master->id);
if (!input_flip_done) {
unsigned long flags;
spin_lock_irqsave(&drm->event_lock, flags);
kcrtc->disable_done = NULL;
spin_unlock_irqrestore(&drm->event_lock, flags);
}
}
}
static void
komeda_crtc_atomic_disable(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct drm_crtc_state *old = drm_atomic_get_old_crtc_state(state,
crtc);
struct komeda_crtc *kcrtc = to_kcrtc(crtc);
struct komeda_crtc_state *old_st = to_kcrtc_st(old);
struct komeda_pipeline *master = kcrtc->master;
struct komeda_pipeline *slave = kcrtc->slave;
struct completion *disable_done;
bool needs_phase2 = false;
DRM_DEBUG_ATOMIC("CRTC%d_DISABLE: active_pipes: 0x%x, affected: 0x%x\n",
drm_crtc_index(crtc),
old_st->active_pipes, old_st->affected_pipes);
if (slave && has_bit(slave->id, old_st->active_pipes))
komeda_pipeline_disable(slave, old->state);
if (has_bit(master->id, old_st->active_pipes))
needs_phase2 = komeda_pipeline_disable(master, old->state);
/* crtc_disable has two scenarios according to the state->active switch.
* 1. active -> inactive
*    this commit is a disable commit, and the commit will be finished
*    or done after the disable operation. In this case we can directly
*    use crtc->state->event to track the HW disable operation.
* 2. active -> active
*    the crtc->commit is not for disable, but a modeset operation while
*    the crtc is active; such a commit has actually been completed by 3
*    DRM operations:
*    crtc_disable, update_planes(crtc_flush), crtc_enable
*    so in this case the crtc->commit is for the whole process.
*    We can not use it for tracking the disable; we need a temporary
*    flip_done for tracking the disable, and crtc->state->event for
*    the crtc_enable operation.
* That's also the reason why modeset commits are skipped in
* komeda_crtc_atomic_flush().
*/
disable_done = (needs_phase2 || crtc->state->active) ?
NULL : &crtc->state->commit->flip_done;
/* wait phase 1 disable done */
komeda_crtc_flush_and_wait_for_flip_done(kcrtc, disable_done);
/* phase 2 */
if (needs_phase2) {
komeda_pipeline_disable(kcrtc->master, old->state);
disable_done = crtc->state->active ?
NULL : &crtc->state->commit->flip_done;
komeda_crtc_flush_and_wait_for_flip_done(kcrtc, disable_done);
}
drm_crtc_vblank_put(crtc);
drm_crtc_vblank_off(crtc);
komeda_crtc_unprepare(kcrtc);
pm_runtime_put(crtc->dev->dev);
}
static void
komeda_crtc_atomic_flush(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
crtc);
struct drm_crtc_state *old = drm_atomic_get_old_crtc_state(state,
crtc);
/* commit with modeset will be handled in enable/disable */
if (drm_atomic_crtc_needs_modeset(crtc_state))
return;
komeda_crtc_do_flush(crtc, old);
}
/* Returns the minimum frequency of the aclk rate (main engine clock) in Hz */
static unsigned long
komeda_calc_min_aclk_rate(struct komeda_crtc *kcrtc,
unsigned long pxlclk)
{
/* With dual-link, one display pipeline drives two display outputs,
* so the aclk needs to run at double the pxlclk rate.
*/
if (kcrtc->master->dual_link)
return pxlclk * 2;
else
return pxlclk;
}
/* Get the current aclk rate as specified by the state */
unsigned long komeda_crtc_get_aclk(struct komeda_crtc_state *kcrtc_st)
{
struct drm_crtc *crtc = kcrtc_st->base.crtc;
struct komeda_dev *mdev = crtc->dev->dev_private;
unsigned long pxlclk = kcrtc_st->base.adjusted_mode.crtc_clock * 1000;
unsigned long min_aclk;
min_aclk = komeda_calc_min_aclk_rate(to_kcrtc(crtc), pxlclk);
return clk_round_rate(mdev->aclk, min_aclk);
}
static enum drm_mode_status
komeda_crtc_mode_valid(struct drm_crtc *crtc, const struct drm_display_mode *m)
{
struct komeda_dev *mdev = crtc->dev->dev_private;
struct komeda_crtc *kcrtc = to_kcrtc(crtc);
struct komeda_pipeline *master = kcrtc->master;
unsigned long min_pxlclk, min_aclk;
if (m->flags & DRM_MODE_FLAG_INTERLACE)
return MODE_NO_INTERLACE;
min_pxlclk = m->clock * 1000;
if (master->dual_link)
min_pxlclk /= 2;
if (min_pxlclk != clk_round_rate(master->pxlclk, min_pxlclk)) {
DRM_DEBUG_ATOMIC("pxlclk doesn't support %lu Hz\n", min_pxlclk);
return MODE_NOCLOCK;
}
min_aclk = komeda_calc_min_aclk_rate(to_kcrtc(crtc), min_pxlclk);
if (clk_round_rate(mdev->aclk, min_aclk) < min_aclk) {
DRM_DEBUG_ATOMIC("engine clk can't satisfy the requirement of %s-clk: %lu.\n",
m->name, min_pxlclk);
return MODE_CLOCK_HIGH;
}
return MODE_OK;
}
static bool komeda_crtc_mode_fixup(struct drm_crtc *crtc,
const struct drm_display_mode *m,
struct drm_display_mode *adjusted_mode)
{
struct komeda_crtc *kcrtc = to_kcrtc(crtc);
unsigned long clk_rate;
drm_mode_set_crtcinfo(adjusted_mode, 0);
/* In dual link half the horizontal settings */
if (kcrtc->master->dual_link) {
adjusted_mode->crtc_clock /= 2;
adjusted_mode->crtc_hdisplay /= 2;
adjusted_mode->crtc_hsync_start /= 2;
adjusted_mode->crtc_hsync_end /= 2;
adjusted_mode->crtc_htotal /= 2;
}
clk_rate = adjusted_mode->crtc_clock * 1000;
/* crtc_clock will be used as the komeda output pixel clock */
adjusted_mode->crtc_clock = clk_round_rate(kcrtc->master->pxlclk,
clk_rate) / 1000;
return true;
}
static const struct drm_crtc_helper_funcs komeda_crtc_helper_funcs = {
.atomic_check = komeda_crtc_atomic_check,
.atomic_flush = komeda_crtc_atomic_flush,
.atomic_enable = komeda_crtc_atomic_enable,
.atomic_disable = komeda_crtc_atomic_disable,
.mode_valid = komeda_crtc_mode_valid,
.mode_fixup = komeda_crtc_mode_fixup,
};
static void komeda_crtc_reset(struct drm_crtc *crtc)
{
struct komeda_crtc_state *state;
if (crtc->state)
__drm_atomic_helper_crtc_destroy_state(crtc->state);
kfree(to_kcrtc_st(crtc->state));
crtc->state = NULL;
state = kzalloc(sizeof(*state), GFP_KERNEL);
if (state)
__drm_atomic_helper_crtc_reset(crtc, &state->base);
}
static struct drm_crtc_state *
komeda_crtc_atomic_duplicate_state(struct drm_crtc *crtc)
{
struct komeda_crtc_state *old = to_kcrtc_st(crtc->state);
struct komeda_crtc_state *new;
new = kzalloc(sizeof(*new), GFP_KERNEL);
if (!new)
return NULL;
__drm_atomic_helper_crtc_duplicate_state(crtc, &new->base);
new->affected_pipes = old->active_pipes;
new->clock_ratio = old->clock_ratio;
new->max_slave_zorder = old->max_slave_zorder;
return &new->base;
}
static void komeda_crtc_atomic_destroy_state(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
__drm_atomic_helper_crtc_destroy_state(state);
kfree(to_kcrtc_st(state));
}
static int komeda_crtc_vblank_enable(struct drm_crtc *crtc)
{
struct komeda_dev *mdev = crtc->dev->dev_private;
struct komeda_crtc *kcrtc = to_kcrtc(crtc);
mdev->funcs->on_off_vblank(mdev, kcrtc->master->id, true);
return 0;
}
static void komeda_crtc_vblank_disable(struct drm_crtc *crtc)
{
struct komeda_dev *mdev = crtc->dev->dev_private;
struct komeda_crtc *kcrtc = to_kcrtc(crtc);
mdev->funcs->on_off_vblank(mdev, kcrtc->master->id, false);
}
static const struct drm_crtc_funcs komeda_crtc_funcs = {
.destroy = drm_crtc_cleanup,
.set_config = drm_atomic_helper_set_config,
.page_flip = drm_atomic_helper_page_flip,
.reset = komeda_crtc_reset,
.atomic_duplicate_state = komeda_crtc_atomic_duplicate_state,
.atomic_destroy_state = komeda_crtc_atomic_destroy_state,
.enable_vblank = komeda_crtc_vblank_enable,
.disable_vblank = komeda_crtc_vblank_disable,
};
int komeda_kms_setup_crtcs(struct komeda_kms_dev *kms,
struct komeda_dev *mdev)
{
struct komeda_crtc *crtc;
struct komeda_pipeline *master;
char str[16];
int i;
kms->n_crtcs = 0;
for (i = 0; i < mdev->n_pipelines; i++) {
crtc = &kms->crtcs[kms->n_crtcs];
master = mdev->pipelines[i];
crtc->master = master;
crtc->slave = komeda_pipeline_get_slave(master);
if (crtc->slave)
sprintf(str, "pipe-%d", crtc->slave->id);
else
sprintf(str, "None");
DRM_INFO("CRTC-%d: master(pipe-%d) slave(%s).\n",
kms->n_crtcs, master->id, str);
kms->n_crtcs++;
}
return 0;
}
static struct drm_plane *
get_crtc_primary(struct komeda_kms_dev *kms, struct komeda_crtc *crtc)
{
struct komeda_plane *kplane;
struct drm_plane *plane;
drm_for_each_plane(plane, &kms->base) {
if (plane->type != DRM_PLANE_TYPE_PRIMARY)
continue;
kplane = to_kplane(plane);
/* only master can be primary */
if (kplane->layer->base.pipeline == crtc->master)
return plane;
}
return NULL;
}
static int komeda_crtc_add(struct komeda_kms_dev *kms,
struct komeda_crtc *kcrtc)
{
struct drm_crtc *crtc = &kcrtc->base;
struct drm_device *base = &kms->base;
struct drm_bridge *bridge;
int err;
err = drm_crtc_init_with_planes(base, crtc,
get_crtc_primary(kms, kcrtc), NULL,
&komeda_crtc_funcs, NULL);
if (err)
return err;
drm_crtc_helper_add(crtc, &komeda_crtc_helper_funcs);
crtc->port = kcrtc->master->of_output_port;
/* Construct an encoder for each pipeline and attach it to the remote
* bridge
*/
kcrtc->encoder.possible_crtcs = drm_crtc_mask(crtc);
err = drm_simple_encoder_init(base, &kcrtc->encoder,
DRM_MODE_ENCODER_TMDS);
if (err)
return err;
bridge = devm_drm_of_get_bridge(base->dev, kcrtc->master->of_node,
KOMEDA_OF_PORT_OUTPUT, 0);
if (IS_ERR(bridge))
return PTR_ERR(bridge);
err = drm_bridge_attach(&kcrtc->encoder, bridge, NULL, 0);
drm_crtc_enable_color_mgmt(crtc, 0, true, KOMEDA_COLOR_LUT_SIZE);
return err;
}
int komeda_kms_add_crtcs(struct komeda_kms_dev *kms, struct komeda_dev *mdev)
{
int i, err;
for (i = 0; i < kms->n_crtcs; i++) {
err = komeda_crtc_add(kms, &kms->crtcs[i]);
if (err)
return err;
}
return 0;
}
| linux-master | drivers/gpu/drm/arm/display/komeda/komeda_crtc.c |
// SPDX-License-Identifier: GPL-2.0
/*
* (C) COPYRIGHT 2019 ARM Limited. All rights reserved.
* Author: James.Qian.Wang <[email protected]>
*
*/
#include <drm/drm_atomic.h>
#include <drm/drm_print.h>
#include "komeda_dev.h"
struct komeda_str {
char *str;
u32 sz;
u32 len;
};
/* return 0 on success, < 0 on no space.
*/
__printf(2, 3)
static int komeda_sprintf(struct komeda_str *str, const char *fmt, ...)
{
va_list args;
int num, free_sz;
int err;
free_sz = str->sz - str->len - 1;
if (free_sz <= 0)
return -ENOSPC;
va_start(args, fmt);
num = vsnprintf(str->str + str->len, free_sz, fmt, args);
va_end(args);
if (num < free_sz) {
str->len += num;
err = 0;
} else {
str->len = str->sz - 1;
err = -ENOSPC;
}
return err;
}
static void evt_sprintf(struct komeda_str *str, u64 evt, const char *msg)
{
if (evt)
komeda_sprintf(str, msg);
}
static void evt_str(struct komeda_str *str, u64 events)
{
if (events == 0ULL) {
komeda_sprintf(str, "None");
return;
}
evt_sprintf(str, events & KOMEDA_EVENT_VSYNC, "VSYNC|");
evt_sprintf(str, events & KOMEDA_EVENT_FLIP, "FLIP|");
evt_sprintf(str, events & KOMEDA_EVENT_EOW, "EOW|");
evt_sprintf(str, events & KOMEDA_EVENT_MODE, "OP-MODE|");
evt_sprintf(str, events & KOMEDA_EVENT_URUN, "UNDERRUN|");
evt_sprintf(str, events & KOMEDA_EVENT_OVR, "OVERRUN|");
/* GLB error */
evt_sprintf(str, events & KOMEDA_ERR_MERR, "MERR|");
evt_sprintf(str, events & KOMEDA_ERR_FRAMETO, "FRAMETO|");
/* DOU error */
evt_sprintf(str, events & KOMEDA_ERR_DRIFTTO, "DRIFTTO|");
evt_sprintf(str, events & KOMEDA_ERR_FRAMETO, "FRAMETO|");
evt_sprintf(str, events & KOMEDA_ERR_TETO, "TETO|");
evt_sprintf(str, events & KOMEDA_ERR_CSCE, "CSCE|");
/* LPU errors or events */
evt_sprintf(str, events & KOMEDA_EVENT_IBSY, "IBSY|");
evt_sprintf(str, events & KOMEDA_EVENT_EMPTY, "EMPTY|");
evt_sprintf(str, events & KOMEDA_EVENT_FULL, "FULL|");
evt_sprintf(str, events & KOMEDA_ERR_AXIE, "AXIE|");
evt_sprintf(str, events & KOMEDA_ERR_ACE0, "ACE0|");
evt_sprintf(str, events & KOMEDA_ERR_ACE1, "ACE1|");
evt_sprintf(str, events & KOMEDA_ERR_ACE2, "ACE2|");
evt_sprintf(str, events & KOMEDA_ERR_ACE3, "ACE3|");
/* LPU TBU errors */
evt_sprintf(str, events & KOMEDA_ERR_TCF, "TCF|");
evt_sprintf(str, events & KOMEDA_ERR_TTNG, "TTNG|");
evt_sprintf(str, events & KOMEDA_ERR_TITR, "TITR|");
evt_sprintf(str, events & KOMEDA_ERR_TEMR, "TEMR|");
evt_sprintf(str, events & KOMEDA_ERR_TTF, "TTF|");
/* CU errors */
evt_sprintf(str, events & KOMEDA_ERR_CPE, "COPROC|");
evt_sprintf(str, events & KOMEDA_ERR_ZME, "ZME|");
evt_sprintf(str, events & KOMEDA_ERR_CFGE, "CFGE|");
evt_sprintf(str, events & KOMEDA_ERR_TEMR, "TEMR|");
if (str->len > 0 && (str->str[str->len - 1] == '|')) {
str->str[str->len - 1] = 0;
str->len--;
}
}
static bool is_new_frame(struct komeda_events *a)
{
return (a->pipes[0] | a->pipes[1]) &
(KOMEDA_EVENT_FLIP | KOMEDA_EVENT_EOW);
}
void komeda_print_events(struct komeda_events *evts, struct drm_device *dev)
{
u64 print_evts = 0;
static bool en_print = true;
struct komeda_dev *mdev = dev->dev_private;
u16 const err_verbosity = mdev->err_verbosity;
u64 evts_mask = evts->global | evts->pipes[0] | evts->pipes[1];
/* avoid repeating the same message: only print the first event of one frame */
if (evts->global || is_new_frame(evts))
en_print = true;
if (!(err_verbosity & KOMEDA_DEV_PRINT_DISABLE_RATELIMIT) && !en_print)
return;
if (err_verbosity & KOMEDA_DEV_PRINT_ERR_EVENTS)
print_evts |= KOMEDA_ERR_EVENTS;
if (err_verbosity & KOMEDA_DEV_PRINT_WARN_EVENTS)
print_evts |= KOMEDA_WARN_EVENTS;
if (err_verbosity & KOMEDA_DEV_PRINT_INFO_EVENTS)
print_evts |= KOMEDA_INFO_EVENTS;
if (evts_mask & print_evts) {
char msg[256];
struct komeda_str str;
struct drm_printer p = drm_info_printer(dev->dev);
str.str = msg;
str.sz = sizeof(msg);
str.len = 0;
komeda_sprintf(&str, "gcu: ");
evt_str(&str, evts->global);
komeda_sprintf(&str, ", pipes[0]: ");
evt_str(&str, evts->pipes[0]);
komeda_sprintf(&str, ", pipes[1]: ");
evt_str(&str, evts->pipes[1]);
DRM_ERROR("err detect: %s\n", msg);
if ((err_verbosity & KOMEDA_DEV_PRINT_DUMP_STATE_ON_EVENT) &&
(evts_mask & (KOMEDA_ERR_EVENTS | KOMEDA_WARN_EVENTS)))
drm_state_dump(dev, &p);
en_print = false;
}
}
| linux-master | drivers/gpu/drm/arm/display/komeda/komeda_event.c |
// SPDX-License-Identifier: GPL-2.0
/*
* (C) COPYRIGHT 2018 ARM Limited. All rights reserved.
* Author: James.Qian.Wang <[email protected]>
*
*/
#include <drm/drm_framebuffer.h>
#include "komeda_dev.h"
#include "komeda_kms.h"
static int
komeda_wb_init_data_flow(struct komeda_layer *wb_layer,
struct drm_connector_state *conn_st,
struct komeda_crtc_state *kcrtc_st,
struct komeda_data_flow_cfg *dflow)
{
struct drm_framebuffer *fb = conn_st->writeback_job->fb;
memset(dflow, 0, sizeof(*dflow));
dflow->out_w = fb->width;
dflow->out_h = fb->height;
/* the write back data comes from the compiz */
pipeline_composition_size(kcrtc_st, &dflow->in_w, &dflow->in_h);
dflow->input.component = &wb_layer->base.pipeline->compiz->base;
/* compiz doesn't output alpha */
dflow->pixel_blend_mode = DRM_MODE_BLEND_PIXEL_NONE;
dflow->rot = DRM_MODE_ROTATE_0;
komeda_complete_data_flow_cfg(wb_layer, dflow, fb);
return 0;
}
static int
komeda_wb_encoder_atomic_check(struct drm_encoder *encoder,
struct drm_crtc_state *crtc_st,
struct drm_connector_state *conn_st)
{
struct komeda_crtc_state *kcrtc_st = to_kcrtc_st(crtc_st);
struct drm_writeback_job *writeback_job = conn_st->writeback_job;
struct komeda_layer *wb_layer;
struct komeda_data_flow_cfg dflow;
int err;
if (!writeback_job)
return 0;
if (!crtc_st->active) {
DRM_DEBUG_ATOMIC("Cannot write the composition result out on a inactive CRTC.\n");
return -EINVAL;
}
wb_layer = to_kconn(to_wb_conn(conn_st->connector))->wb_layer;
/*
* No need for a full modeset when the only changed connector is the
* writeback connector.
*/
if (crtc_st->connectors_changed &&
is_only_changed_connector(crtc_st, conn_st->connector))
crtc_st->connectors_changed = false;
err = komeda_wb_init_data_flow(wb_layer, conn_st, kcrtc_st, &dflow);
if (err)
return err;
if (dflow.en_split)
err = komeda_build_wb_split_data_flow(wb_layer,
conn_st, kcrtc_st, &dflow);
else
err = komeda_build_wb_data_flow(wb_layer,
conn_st, kcrtc_st, &dflow);
return err;
}
static const struct drm_encoder_helper_funcs komeda_wb_encoder_helper_funcs = {
.atomic_check = komeda_wb_encoder_atomic_check,
};
static int
komeda_wb_connector_get_modes(struct drm_connector *connector)
{
return 0;
}
static enum drm_mode_status
komeda_wb_connector_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct drm_device *dev = connector->dev;
struct drm_mode_config *mode_config = &dev->mode_config;
int w = mode->hdisplay, h = mode->vdisplay;
if ((w < mode_config->min_width) || (w > mode_config->max_width))
return MODE_BAD_HVALUE;
if ((h < mode_config->min_height) || (h > mode_config->max_height))
return MODE_BAD_VVALUE;
return MODE_OK;
}
static const struct drm_connector_helper_funcs komeda_wb_conn_helper_funcs = {
.get_modes = komeda_wb_connector_get_modes,
.mode_valid = komeda_wb_connector_mode_valid,
};
static enum drm_connector_status
komeda_wb_connector_detect(struct drm_connector *connector, bool force)
{
return connector_status_connected;
}
static int
komeda_wb_connector_fill_modes(struct drm_connector *connector,
uint32_t maxX, uint32_t maxY)
{
return 0;
}
static void komeda_wb_connector_destroy(struct drm_connector *connector)
{
drm_connector_cleanup(connector);
kfree(to_kconn(to_wb_conn(connector)));
}
static const struct drm_connector_funcs komeda_wb_connector_funcs = {
.reset = drm_atomic_helper_connector_reset,
.detect = komeda_wb_connector_detect,
.fill_modes = komeda_wb_connector_fill_modes,
.destroy = komeda_wb_connector_destroy,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
static int komeda_wb_connector_add(struct komeda_kms_dev *kms,
struct komeda_crtc *kcrtc)
{
struct komeda_dev *mdev = kms->base.dev_private;
struct komeda_wb_connector *kwb_conn;
struct drm_writeback_connector *wb_conn;
struct drm_display_info *info;
u32 *formats, n_formats = 0;
int err;
if (!kcrtc->master->wb_layer)
return 0;
kwb_conn = kzalloc(sizeof(*kwb_conn), GFP_KERNEL);
if (!kwb_conn)
return -ENOMEM;
kwb_conn->wb_layer = kcrtc->master->wb_layer;
wb_conn = &kwb_conn->base;
formats = komeda_get_layer_fourcc_list(&mdev->fmt_tbl,
kwb_conn->wb_layer->layer_type,
&n_formats);
err = drm_writeback_connector_init(&kms->base, wb_conn,
&komeda_wb_connector_funcs,
&komeda_wb_encoder_helper_funcs,
formats, n_formats,
BIT(drm_crtc_index(&kcrtc->base)));
komeda_put_fourcc_list(formats);
if (err) {
kfree(kwb_conn);
return err;
}
drm_connector_helper_add(&wb_conn->base, &komeda_wb_conn_helper_funcs);
info = &kwb_conn->base.base.display_info;
info->bpc = __fls(kcrtc->master->improc->supported_color_depths);
info->color_formats = kcrtc->master->improc->supported_color_formats;
kcrtc->wb_conn = kwb_conn;
return 0;
}
int komeda_kms_add_wb_connectors(struct komeda_kms_dev *kms,
struct komeda_dev *mdev)
{
int i, err;
for (i = 0; i < kms->n_crtcs; i++) {
err = komeda_wb_connector_add(kms, &kms->crtcs[i]);
if (err)
return err;
}
return 0;
}
| linux-master | drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c |
// SPDX-License-Identifier: GPL-2.0
/*
* (C) COPYRIGHT 2018 ARM Limited. All rights reserved.
* Author: James.Qian.Wang <[email protected]>
*
*/
#include <drm/drm_device.h>
#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include "komeda_framebuffer.h"
#include "komeda_dev.h"
static void komeda_fb_destroy(struct drm_framebuffer *fb)
{
struct komeda_fb *kfb = to_kfb(fb);
u32 i;
for (i = 0; i < fb->format->num_planes; i++)
drm_gem_object_put(fb->obj[i]);
drm_framebuffer_cleanup(fb);
kfree(kfb);
}
static int komeda_fb_create_handle(struct drm_framebuffer *fb,
struct drm_file *file, u32 *handle)
{
return drm_gem_handle_create(file, fb->obj[0], handle);
}
static const struct drm_framebuffer_funcs komeda_fb_funcs = {
.destroy = komeda_fb_destroy,
.create_handle = komeda_fb_create_handle,
};
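/*
* AFBC buffer layout check. The logic below aligns the fb dimensions to the
* AFBC superblock layout and then requires the GEM object to hold the header
* table plus the payload: afbc_size = ALIGN(n_blocks * header_sz, body_align)
* + n_blocks * ALIGN(bpp * superblk_pixels / 8, superblk_align).
* Rough worked example (assuming 16x16 superblocks with 16-byte headers,
* 1024-byte body alignment and 128-byte superblock alignment; those macro
* values live in komeda_framebuffer.h, not in this file): a 1920x1080 32bpp
* fb aligns to 1920x1088, giving 8160 blocks, a payload offset of 131072
* bytes and an afbc_size of 8486912 bytes, plus fb->offsets[0].
*/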
static int
komeda_fb_afbc_size_check(struct komeda_fb *kfb, struct drm_file *file,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
struct drm_framebuffer *fb = &kfb->base;
const struct drm_format_info *info = fb->format;
struct drm_gem_object *obj;
u32 alignment_w = 0, alignment_h = 0, alignment_header, n_blocks, bpp;
u64 min_size;
obj = drm_gem_object_lookup(file, mode_cmd->handles[0]);
if (!obj) {
DRM_DEBUG_KMS("Failed to lookup GEM object\n");
return -ENOENT;
}
switch (fb->modifier & AFBC_FORMAT_MOD_BLOCK_SIZE_MASK) {
case AFBC_FORMAT_MOD_BLOCK_SIZE_32x8:
alignment_w = 32;
alignment_h = 8;
break;
case AFBC_FORMAT_MOD_BLOCK_SIZE_16x16:
alignment_w = 16;
alignment_h = 16;
break;
default:
WARN(1, "Invalid AFBC_FORMAT_MOD_BLOCK_SIZE: %lld.\n",
fb->modifier & AFBC_FORMAT_MOD_BLOCK_SIZE_MASK);
break;
}
/* tiled header afbc */
if (fb->modifier & AFBC_FORMAT_MOD_TILED) {
alignment_w *= AFBC_TH_LAYOUT_ALIGNMENT;
alignment_h *= AFBC_TH_LAYOUT_ALIGNMENT;
alignment_header = AFBC_TH_BODY_START_ALIGNMENT;
} else {
alignment_header = AFBC_BODY_START_ALIGNMENT;
}
kfb->aligned_w = ALIGN(fb->width, alignment_w);
kfb->aligned_h = ALIGN(fb->height, alignment_h);
if (fb->offsets[0] % alignment_header) {
DRM_DEBUG_KMS("afbc offset alignment check failed.\n");
goto check_failed;
}
n_blocks = (kfb->aligned_w * kfb->aligned_h) / AFBC_SUPERBLK_PIXELS;
kfb->offset_payload = ALIGN(n_blocks * AFBC_HEADER_SIZE,
alignment_header);
bpp = komeda_get_afbc_format_bpp(info, fb->modifier);
kfb->afbc_size = kfb->offset_payload + n_blocks *
ALIGN(bpp * AFBC_SUPERBLK_PIXELS / 8,
AFBC_SUPERBLK_ALIGNMENT);
min_size = kfb->afbc_size + fb->offsets[0];
if (min_size > obj->size) {
DRM_DEBUG_KMS("afbc size check failed, obj_size: 0x%zx. min_size 0x%llx.\n",
obj->size, min_size);
goto check_failed;
}
fb->obj[0] = obj;
return 0;
check_failed:
drm_gem_object_put(obj);
return -EINVAL;
}
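/*
* Size check for non-AFBC (linear) framebuffers: each plane's pitch (times
* the format block height) must be a multiple of the bus width, and each
* GEM object must cover the last line of its plane as computed by
* komeda_fb_get_pixel_addr().
*/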
static int
komeda_fb_none_afbc_size_check(struct komeda_dev *mdev, struct komeda_fb *kfb,
struct drm_file *file,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
struct drm_framebuffer *fb = &kfb->base;
const struct drm_format_info *info = fb->format;
struct drm_gem_object *obj;
u32 i, block_h;
u64 min_size;
if (komeda_fb_check_src_coords(kfb, 0, 0, fb->width, fb->height))
return -EINVAL;
for (i = 0; i < info->num_planes; i++) {
obj = drm_gem_object_lookup(file, mode_cmd->handles[i]);
if (!obj) {
DRM_DEBUG_KMS("Failed to lookup GEM object\n");
return -ENOENT;
}
fb->obj[i] = obj;
block_h = drm_format_info_block_height(info, i);
if ((fb->pitches[i] * block_h) % mdev->chip.bus_width) {
DRM_DEBUG_KMS("Pitch[%d]: 0x%x doesn't align to 0x%x\n",
i, fb->pitches[i], mdev->chip.bus_width);
return -EINVAL;
}
min_size = komeda_fb_get_pixel_addr(kfb, 0, fb->height, i)
- to_drm_gem_dma_obj(obj)->dma_addr;
if (obj->size < min_size) {
DRM_DEBUG_KMS("The fb->obj[%d] size: 0x%zx lower than the minimum requirement: 0x%llx.\n",
i, obj->size, min_size);
return -EINVAL;
}
}
if (fb->format->num_planes == 3) {
if (fb->pitches[1] != fb->pitches[2]) {
DRM_DEBUG_KMS("The pitch[1] and [2] are not same\n");
return -EINVAL;
}
}
return 0;
}
struct drm_framebuffer *
komeda_fb_create(struct drm_device *dev, struct drm_file *file,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
struct komeda_dev *mdev = dev->dev_private;
struct komeda_fb *kfb;
int ret = 0, i;
kfb = kzalloc(sizeof(*kfb), GFP_KERNEL);
if (!kfb)
return ERR_PTR(-ENOMEM);
kfb->format_caps = komeda_get_format_caps(&mdev->fmt_tbl,
mode_cmd->pixel_format,
mode_cmd->modifier[0]);
if (!kfb->format_caps) {
DRM_DEBUG_KMS("FMT %x is not supported.\n",
mode_cmd->pixel_format);
kfree(kfb);
return ERR_PTR(-EINVAL);
}
drm_helper_mode_fill_fb_struct(dev, &kfb->base, mode_cmd);
if (kfb->base.modifier)
ret = komeda_fb_afbc_size_check(kfb, file, mode_cmd);
else
ret = komeda_fb_none_afbc_size_check(mdev, kfb, file, mode_cmd);
if (ret < 0)
goto err_cleanup;
ret = drm_framebuffer_init(dev, &kfb->base, &komeda_fb_funcs);
if (ret < 0) {
DRM_DEBUG_KMS("failed to initialize fb\n");
goto err_cleanup;
}
kfb->is_va = mdev->iommu ? true : false;
return &kfb->base;
err_cleanup:
for (i = 0; i < kfb->base.format->num_planes; i++)
drm_gem_object_put(kfb->base.obj[i]);
kfree(kfb);
return ERR_PTR(ret);
}
int komeda_fb_check_src_coords(const struct komeda_fb *kfb,
u32 src_x, u32 src_y, u32 src_w, u32 src_h)
{
const struct drm_framebuffer *fb = &kfb->base;
const struct drm_format_info *info = fb->format;
u32 block_w = drm_format_info_block_width(fb->format, 0);
u32 block_h = drm_format_info_block_height(fb->format, 0);
if ((src_x + src_w > fb->width) || (src_y + src_h > fb->height)) {
DRM_DEBUG_ATOMIC("Invalid source coordinate.\n");
return -EINVAL;
}
if ((src_x % info->hsub) || (src_w % info->hsub) ||
(src_y % info->vsub) || (src_h % info->vsub)) {
DRM_DEBUG_ATOMIC("Wrong subsampling dimension x:%d, y:%d, w:%d, h:%d for format: %x.\n",
src_x, src_y, src_w, src_h, info->format);
return -EINVAL;
}
if ((src_x % block_w) || (src_w % block_w) ||
(src_y % block_h) || (src_h % block_h)) {
DRM_DEBUG_ATOMIC("x:%d, y:%d, w:%d, h:%d should be multiple of block_w/h for format: %x.\n",
src_x, src_y, src_w, src_h, info->format);
return -EINVAL;
}
return 0;
}
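/*
* For linear (no modifier) layouts the address below works out to:
* dma_addr + fb->offsets[plane] + (plane_x / block_w) * char_per_block
* + plane_y * pitch, where plane_x/plane_y are x/y divided by the chroma
* subsampling factors for the non-zero planes. AFBC framebuffers only add
* fb->offsets[plane]; their payload pointer is handled separately by the
* layer update code.
*/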
dma_addr_t
komeda_fb_get_pixel_addr(struct komeda_fb *kfb, int x, int y, int plane)
{
struct drm_framebuffer *fb = &kfb->base;
const struct drm_gem_dma_object *obj;
u32 offset, plane_x, plane_y, block_w, block_sz;
if (plane >= fb->format->num_planes) {
DRM_DEBUG_KMS("Out of max plane num.\n");
return -EINVAL;
}
obj = drm_fb_dma_get_gem_obj(fb, plane);
offset = fb->offsets[plane];
if (!fb->modifier) {
block_w = drm_format_info_block_width(fb->format, plane);
block_sz = fb->format->char_per_block[plane];
plane_x = x / (plane ? fb->format->hsub : 1);
plane_y = y / (plane ? fb->format->vsub : 1);
offset += (plane_x / block_w) * block_sz
+ plane_y * fb->pitches[plane];
}
return obj->dma_addr + offset;
}
/* check if the fb can be supported by a specific layer */
bool komeda_fb_is_layer_supported(struct komeda_fb *kfb, u32 layer_type,
u32 rot)
{
struct drm_framebuffer *fb = &kfb->base;
struct komeda_dev *mdev = fb->dev->dev_private;
u32 fourcc = fb->format->format;
u64 modifier = fb->modifier;
bool supported;
supported = komeda_format_mod_supported(&mdev->fmt_tbl, layer_type,
fourcc, modifier, rot);
if (!supported)
DRM_DEBUG_ATOMIC("Layer TYPE: %d doesn't support fb FMT: %p4cc with modifier: 0x%llx.\n",
layer_type, &fourcc, modifier);
return supported;
}
| linux-master | drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c |
// SPDX-License-Identifier: GPL-2.0
/*
* (C) COPYRIGHT 2018 ARM Limited. All rights reserved.
* Author: James.Qian.Wang <[email protected]>
*
*/
#include <linux/slab.h>
#include "komeda_format_caps.h"
#include "malidp_utils.h"
const struct komeda_format_caps *
komeda_get_format_caps(struct komeda_format_caps_table *table,
u32 fourcc, u64 modifier)
{
const struct komeda_format_caps *caps;
u64 afbc_features = modifier & ~(AFBC_FORMAT_MOD_BLOCK_SIZE_MASK);
u32 afbc_layout = modifier & AFBC_FORMAT_MOD_BLOCK_SIZE_MASK;
int id;
for (id = 0; id < table->n_formats; id++) {
caps = &table->format_caps[id];
if (fourcc != caps->fourcc)
continue;
if ((modifier == 0ULL) && (caps->supported_afbc_layouts == 0))
return caps;
if (has_bits(afbc_features, caps->supported_afbc_features) &&
has_bit(afbc_layout, caps->supported_afbc_layouts))
return caps;
}
return NULL;
}
u32 komeda_get_afbc_format_bpp(const struct drm_format_info *info, u64 modifier)
{
u32 bpp;
switch (info->format) {
case DRM_FORMAT_YUV420_8BIT:
bpp = 12;
break;
case DRM_FORMAT_YUV420_10BIT:
bpp = 15;
break;
default:
bpp = info->cpp[0] * 8;
break;
}
return bpp;
}
/* Two assumptions
* 1. RGB always has YTR
* 2. Tiled RGB always has SC
*/
u64 komeda_supported_modifiers[] = {
/* AFBC_16x16 + features: YUV+RGB both */
AFBC_16x16(0),
/* SPARSE */
AFBC_16x16(_SPARSE),
/* YTR + (SPARSE) */
AFBC_16x16(_YTR | _SPARSE),
AFBC_16x16(_YTR),
/* SPLIT + SPARSE + YTR RGB only */
/* split mode is only allowed for sparse mode */
AFBC_16x16(_SPLIT | _SPARSE | _YTR),
/* TILED + (SPARSE) */
/* TILED YUV format only */
AFBC_16x16(_TILED | _SPARSE),
AFBC_16x16(_TILED),
/* TILED + SC + (SPLIT+SPARSE | SPARSE) + (YTR) */
AFBC_16x16(_TILED | _SC | _SPLIT | _SPARSE | _YTR),
AFBC_16x16(_TILED | _SC | _SPARSE | _YTR),
AFBC_16x16(_TILED | _SC | _YTR),
/* AFBC_32x8 + features: which are RGB formats only */
/* YTR + (SPARSE) */
AFBC_32x8(_YTR | _SPARSE),
AFBC_32x8(_YTR),
/* SPLIT + SPARSE + (YTR) */
/* split mode is only allowed for sparse mode */
AFBC_32x8(_SPLIT | _SPARSE | _YTR),
/* TILED + SC + (SPLIT+SPARSE | SPARSE) + YTR */
AFBC_32x8(_TILED | _SC | _SPLIT | _SPARSE | _YTR),
AFBC_32x8(_TILED | _SC | _SPARSE | _YTR),
AFBC_32x8(_TILED | _SC | _YTR),
DRM_FORMAT_MOD_LINEAR,
DRM_FORMAT_MOD_INVALID
};
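/*
* Check whether a layer of the given type can handle fourcc + modifier at
* the requested rotation: the caps table filters the format/modifier pair
* and the layer type, and the chip-specific hook (if any) applies extra
* modifier/rotation restrictions.
*/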
bool komeda_format_mod_supported(struct komeda_format_caps_table *table,
u32 layer_type, u32 fourcc, u64 modifier,
u32 rot)
{
const struct komeda_format_caps *caps;
caps = komeda_get_format_caps(table, fourcc, modifier);
if (!caps)
return false;
if (!(caps->supported_layer_types & layer_type))
return false;
if (table->format_mod_supported)
return table->format_mod_supported(caps, layer_type, modifier,
rot);
return true;
}
u32 *komeda_get_layer_fourcc_list(struct komeda_format_caps_table *table,
u32 layer_type, u32 *n_fmts)
{
const struct komeda_format_caps *cap;
u32 *fmts;
int i, j, n = 0;
fmts = kcalloc(table->n_formats, sizeof(u32), GFP_KERNEL);
if (!fmts)
return NULL;
for (i = 0; i < table->n_formats; i++) {
cap = &table->format_caps[i];
if (!(layer_type & cap->supported_layer_types) ||
(cap->fourcc == 0))
continue;
/* one fourcc may have two caps entries in the table (afbc/non-afbc),
* so check the existing list to avoid adding a duplicate.
*/
for (j = n - 1; j >= 0; j--)
if (fmts[j] == cap->fourcc)
break;
if (j < 0)
fmts[n++] = cap->fourcc;
}
if (n_fmts)
*n_fmts = n;
return fmts;
}
void komeda_put_fourcc_list(u32 *fourcc_list)
{
kfree(fourcc_list);
}
| linux-master | drivers/gpu/drm/arm/display/komeda/komeda_format_caps.c |
// SPDX-License-Identifier: GPL-2.0
/*
* (C) COPYRIGHT 2018 ARM Limited. All rights reserved.
* Author: James.Qian.Wang <[email protected]>
*
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <drm/drm_fbdev_generic.h>
#include <drm/drm_module.h>
#include <drm/drm_of.h>
#include "komeda_dev.h"
#include "komeda_kms.h"
struct komeda_drv {
struct komeda_dev *mdev;
struct komeda_kms_dev *kms;
};
struct komeda_dev *dev_to_mdev(struct device *dev)
{
struct komeda_drv *mdrv = dev_get_drvdata(dev);
return mdrv ? mdrv->mdev : NULL;
}
static void komeda_platform_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct komeda_drv *mdrv = dev_get_drvdata(dev);
komeda_kms_detach(mdrv->kms);
if (pm_runtime_enabled(dev))
pm_runtime_disable(dev);
else
komeda_dev_suspend(mdrv->mdev);
komeda_dev_destroy(mdrv->mdev);
dev_set_drvdata(dev, NULL);
devm_kfree(dev, mdrv);
}
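/*
* Probe order, as implemented below: create the komeda device (map registers
* and enumerate the pipelines), enable runtime PM (or resume the device
* manually when runtime PM is disabled), attach the KMS layer and finally
* set up the generic fbdev emulation.
*/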
static int komeda_platform_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct komeda_drv *mdrv;
int err;
mdrv = devm_kzalloc(dev, sizeof(*mdrv), GFP_KERNEL);
if (!mdrv)
return -ENOMEM;
mdrv->mdev = komeda_dev_create(dev);
if (IS_ERR(mdrv->mdev)) {
err = PTR_ERR(mdrv->mdev);
goto free_mdrv;
}
pm_runtime_enable(dev);
if (!pm_runtime_enabled(dev))
komeda_dev_resume(mdrv->mdev);
mdrv->kms = komeda_kms_attach(mdrv->mdev);
if (IS_ERR(mdrv->kms)) {
err = PTR_ERR(mdrv->kms);
goto destroy_mdev;
}
dev_set_drvdata(dev, mdrv);
drm_fbdev_generic_setup(&mdrv->kms->base, 32);
return 0;
destroy_mdev:
if (pm_runtime_enabled(dev))
pm_runtime_disable(dev);
else
komeda_dev_suspend(mdrv->mdev);
komeda_dev_destroy(mdrv->mdev);
free_mdrv:
devm_kfree(dev, mdrv);
return err;
}
static const struct of_device_id komeda_of_match[] = {
{ .compatible = "arm,mali-d71", .data = d71_identify, },
{ .compatible = "arm,mali-d32", .data = d71_identify, },
{},
};
MODULE_DEVICE_TABLE(of, komeda_of_match);
static int __maybe_unused komeda_rt_pm_suspend(struct device *dev)
{
struct komeda_drv *mdrv = dev_get_drvdata(dev);
return komeda_dev_suspend(mdrv->mdev);
}
static int __maybe_unused komeda_rt_pm_resume(struct device *dev)
{
struct komeda_drv *mdrv = dev_get_drvdata(dev);
return komeda_dev_resume(mdrv->mdev);
}
static int __maybe_unused komeda_pm_suspend(struct device *dev)
{
struct komeda_drv *mdrv = dev_get_drvdata(dev);
int res;
res = drm_mode_config_helper_suspend(&mdrv->kms->base);
if (!pm_runtime_status_suspended(dev))
komeda_dev_suspend(mdrv->mdev);
return res;
}
static int __maybe_unused komeda_pm_resume(struct device *dev)
{
struct komeda_drv *mdrv = dev_get_drvdata(dev);
if (!pm_runtime_status_suspended(dev))
komeda_dev_resume(mdrv->mdev);
return drm_mode_config_helper_resume(&mdrv->kms->base);
}
static const struct dev_pm_ops komeda_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(komeda_pm_suspend, komeda_pm_resume)
SET_RUNTIME_PM_OPS(komeda_rt_pm_suspend, komeda_rt_pm_resume, NULL)
};
static struct platform_driver komeda_platform_driver = {
.probe = komeda_platform_probe,
.remove_new = komeda_platform_remove,
.driver = {
.name = "komeda",
.of_match_table = komeda_of_match,
.pm = &komeda_pm_ops,
},
};
drm_module_platform_driver(komeda_platform_driver);
MODULE_AUTHOR("James.Qian.Wang <[email protected]>");
MODULE_DESCRIPTION("Komeda KMS driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/gpu/drm/arm/display/komeda/komeda_drv.c |
// SPDX-License-Identifier: GPL-2.0
/*
* (C) COPYRIGHT 2018 ARM Limited. All rights reserved.
* Author: James.Qian.Wang <[email protected]>
*
*/
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/of.h>
#include <linux/of_graph.h>
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/dma-mapping.h>
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#endif
#include <drm/drm_print.h>
#include "komeda_dev.h"
static int komeda_register_show(struct seq_file *sf, void *x)
{
struct komeda_dev *mdev = sf->private;
int i;
seq_puts(sf, "\n====== Komeda register dump =========\n");
pm_runtime_get_sync(mdev->dev);
if (mdev->funcs->dump_register)
mdev->funcs->dump_register(mdev, sf);
for (i = 0; i < mdev->n_pipelines; i++)
komeda_pipeline_dump_register(mdev->pipelines[i], sf);
pm_runtime_put(mdev->dev);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(komeda_register);
#ifdef CONFIG_DEBUG_FS
static void komeda_debugfs_init(struct komeda_dev *mdev)
{
if (!debugfs_initialized())
return;
mdev->debugfs_root = debugfs_create_dir("komeda", NULL);
debugfs_create_file("register", 0444, mdev->debugfs_root,
mdev, &komeda_register_fops);
debugfs_create_x16("err_verbosity", 0664, mdev->debugfs_root,
&mdev->err_verbosity);
}
#endif
static ssize_t
core_id_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct komeda_dev *mdev = dev_to_mdev(dev);
return sysfs_emit(buf, "0x%08x\n", mdev->chip.core_id);
}
static DEVICE_ATTR_RO(core_id);
static ssize_t
config_id_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct komeda_dev *mdev = dev_to_mdev(dev);
struct komeda_pipeline *pipe = mdev->pipelines[0];
union komeda_config_id config_id;
int i;
memset(&config_id, 0, sizeof(config_id));
config_id.max_line_sz = pipe->layers[0]->hsize_in.end;
config_id.n_pipelines = mdev->n_pipelines;
config_id.n_scalers = pipe->n_scalers;
config_id.n_layers = pipe->n_layers;
config_id.n_richs = 0;
for (i = 0; i < pipe->n_layers; i++) {
if (pipe->layers[i]->layer_type == KOMEDA_FMT_RICH_LAYER)
config_id.n_richs++;
}
return sysfs_emit(buf, "0x%08x\n", config_id.value);
}
static DEVICE_ATTR_RO(config_id);
static ssize_t
aclk_hz_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct komeda_dev *mdev = dev_to_mdev(dev);
return sysfs_emit(buf, "%lu\n", clk_get_rate(mdev->aclk));
}
static DEVICE_ATTR_RO(aclk_hz);
static struct attribute *komeda_sysfs_entries[] = {
&dev_attr_core_id.attr,
&dev_attr_config_id.attr,
&dev_attr_aclk_hz.attr,
NULL,
};
static struct attribute_group komeda_sysfs_attr_group = {
.attrs = komeda_sysfs_entries,
};
static int komeda_parse_pipe_dt(struct komeda_pipeline *pipe)
{
struct device_node *np = pipe->of_node;
struct clk *clk;
clk = of_clk_get_by_name(np, "pxclk");
if (IS_ERR(clk)) {
DRM_ERROR("get pxclk for pipeline %d failed!\n", pipe->id);
return PTR_ERR(clk);
}
pipe->pxlclk = clk;
/* enum ports */
pipe->of_output_links[0] =
of_graph_get_remote_node(np, KOMEDA_OF_PORT_OUTPUT, 0);
pipe->of_output_links[1] =
of_graph_get_remote_node(np, KOMEDA_OF_PORT_OUTPUT, 1);
pipe->of_output_port =
of_graph_get_port_by_id(np, KOMEDA_OF_PORT_OUTPUT);
pipe->dual_link = pipe->of_output_links[0] && pipe->of_output_links[1];
return 0;
}
static int komeda_parse_dt(struct device *dev, struct komeda_dev *mdev)
{
struct platform_device *pdev = to_platform_device(dev);
struct device_node *child, *np = dev->of_node;
struct komeda_pipeline *pipe;
u32 pipe_id = U32_MAX;
int ret = -1;
mdev->irq = platform_get_irq(pdev, 0);
if (mdev->irq < 0) {
DRM_ERROR("could not get IRQ number.\n");
return mdev->irq;
}
/* Get the optional framebuffer memory resource */
ret = of_reserved_mem_device_init(dev);
if (ret && ret != -ENODEV)
return ret;
for_each_available_child_of_node(np, child) {
if (of_node_name_eq(child, "pipeline")) {
of_property_read_u32(child, "reg", &pipe_id);
if (pipe_id >= mdev->n_pipelines) {
DRM_WARN("Skip the redundant DT node: pipeline-%u.\n",
pipe_id);
continue;
}
mdev->pipelines[pipe_id]->of_node = of_node_get(child);
}
}
for (pipe_id = 0; pipe_id < mdev->n_pipelines; pipe_id++) {
pipe = mdev->pipelines[pipe_id];
if (!pipe->of_node) {
DRM_ERROR("Pipeline-%d doesn't have a DT node.\n",
pipe->id);
return -EINVAL;
}
ret = komeda_parse_pipe_dt(pipe);
if (ret)
return ret;
}
return 0;
}
struct komeda_dev *komeda_dev_create(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
komeda_identify_func komeda_identify;
struct komeda_dev *mdev;
int err = 0;
komeda_identify = of_device_get_match_data(dev);
if (!komeda_identify)
return ERR_PTR(-ENODEV);
mdev = devm_kzalloc(dev, sizeof(*mdev), GFP_KERNEL);
if (!mdev)
return ERR_PTR(-ENOMEM);
mutex_init(&mdev->lock);
mdev->dev = dev;
mdev->reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mdev->reg_base)) {
DRM_ERROR("Map register space failed.\n");
err = PTR_ERR(mdev->reg_base);
mdev->reg_base = NULL;
goto err_cleanup;
}
mdev->aclk = devm_clk_get(dev, "aclk");
if (IS_ERR(mdev->aclk)) {
DRM_ERROR("Get engine clk failed.\n");
err = PTR_ERR(mdev->aclk);
mdev->aclk = NULL;
goto err_cleanup;
}
clk_prepare_enable(mdev->aclk);
mdev->funcs = komeda_identify(mdev->reg_base, &mdev->chip);
if (!mdev->funcs) {
DRM_ERROR("Failed to identify the HW.\n");
err = -ENODEV;
goto disable_clk;
}
DRM_INFO("Found ARM Mali-D%x version r%dp%d\n",
MALIDP_CORE_ID_PRODUCT_ID(mdev->chip.core_id),
MALIDP_CORE_ID_MAJOR(mdev->chip.core_id),
MALIDP_CORE_ID_MINOR(mdev->chip.core_id));
mdev->funcs->init_format_table(mdev);
err = mdev->funcs->enum_resources(mdev);
if (err) {
DRM_ERROR("enumerate display resource failed.\n");
goto disable_clk;
}
err = komeda_parse_dt(dev, mdev);
if (err) {
DRM_ERROR("parse device tree failed.\n");
goto disable_clk;
}
err = komeda_assemble_pipelines(mdev);
if (err) {
DRM_ERROR("assemble display pipelines failed.\n");
goto disable_clk;
}
dma_set_max_seg_size(dev, U32_MAX);
mdev->iommu = iommu_get_domain_for_dev(mdev->dev);
if (!mdev->iommu)
DRM_INFO("continue without IOMMU support!\n");
clk_disable_unprepare(mdev->aclk);
err = sysfs_create_group(&dev->kobj, &komeda_sysfs_attr_group);
if (err) {
DRM_ERROR("create sysfs group failed.\n");
goto err_cleanup;
}
mdev->err_verbosity = KOMEDA_DEV_PRINT_ERR_EVENTS;
#ifdef CONFIG_DEBUG_FS
komeda_debugfs_init(mdev);
#endif
return mdev;
disable_clk:
clk_disable_unprepare(mdev->aclk);
err_cleanup:
komeda_dev_destroy(mdev);
return ERR_PTR(err);
}
void komeda_dev_destroy(struct komeda_dev *mdev)
{
struct device *dev = mdev->dev;
const struct komeda_dev_funcs *funcs = mdev->funcs;
int i;
sysfs_remove_group(&dev->kobj, &komeda_sysfs_attr_group);
#ifdef CONFIG_DEBUG_FS
debugfs_remove_recursive(mdev->debugfs_root);
#endif
if (mdev->aclk)
clk_prepare_enable(mdev->aclk);
for (i = 0; i < mdev->n_pipelines; i++) {
komeda_pipeline_destroy(mdev, mdev->pipelines[i]);
mdev->pipelines[i] = NULL;
}
mdev->n_pipelines = 0;
of_reserved_mem_device_release(dev);
if (funcs && funcs->cleanup)
funcs->cleanup(mdev);
if (mdev->reg_base) {
devm_iounmap(dev, mdev->reg_base);
mdev->reg_base = NULL;
}
if (mdev->aclk) {
clk_disable_unprepare(mdev->aclk);
devm_clk_put(dev, mdev->aclk);
mdev->aclk = NULL;
}
devm_kfree(dev, mdev);
}
int komeda_dev_resume(struct komeda_dev *mdev)
{
clk_prepare_enable(mdev->aclk);
mdev->funcs->enable_irq(mdev);
if (mdev->iommu && mdev->funcs->connect_iommu)
if (mdev->funcs->connect_iommu(mdev))
DRM_ERROR("connect iommu failed.\n");
return 0;
}
int komeda_dev_suspend(struct komeda_dev *mdev)
{
if (mdev->iommu && mdev->funcs->disconnect_iommu)
if (mdev->funcs->disconnect_iommu(mdev))
DRM_ERROR("disconnect iommu failed.\n");
mdev->funcs->disable_irq(mdev);
clk_disable_unprepare(mdev->aclk);
return 0;
}
| linux-master | drivers/gpu/drm/arm/display/komeda/komeda_dev.c |
// SPDX-License-Identifier: GPL-2.0
/*
* (C) COPYRIGHT 2018 ARM Limited. All rights reserved.
* Author: James.Qian.Wang <[email protected]>
*
*/
#include <drm/drm_blend.h>
#include <drm/drm_print.h>
#include "d71_dev.h"
#include "malidp_io.h"
static u64 get_lpu_event(struct d71_pipeline *d71_pipeline)
{
u32 __iomem *reg = d71_pipeline->lpu_addr;
u32 status, raw_status;
u64 evts = 0ULL;
raw_status = malidp_read32(reg, BLK_IRQ_RAW_STATUS);
if (raw_status & LPU_IRQ_IBSY)
evts |= KOMEDA_EVENT_IBSY;
if (raw_status & LPU_IRQ_EOW)
evts |= KOMEDA_EVENT_EOW;
if (raw_status & LPU_IRQ_OVR)
evts |= KOMEDA_EVENT_OVR;
if (raw_status & (LPU_IRQ_ERR | LPU_IRQ_IBSY | LPU_IRQ_OVR)) {
u32 restore = 0, tbu_status;
/* Check error of LPU status */
status = malidp_read32(reg, BLK_STATUS);
if (status & LPU_STATUS_AXIE) {
restore |= LPU_STATUS_AXIE;
evts |= KOMEDA_ERR_AXIE;
}
if (status & LPU_STATUS_ACE0) {
restore |= LPU_STATUS_ACE0;
evts |= KOMEDA_ERR_ACE0;
}
if (status & LPU_STATUS_ACE1) {
restore |= LPU_STATUS_ACE1;
evts |= KOMEDA_ERR_ACE1;
}
if (status & LPU_STATUS_ACE2) {
restore |= LPU_STATUS_ACE2;
evts |= KOMEDA_ERR_ACE2;
}
if (status & LPU_STATUS_ACE3) {
restore |= LPU_STATUS_ACE3;
evts |= KOMEDA_ERR_ACE3;
}
if (status & LPU_STATUS_FEMPTY) {
restore |= LPU_STATUS_FEMPTY;
evts |= KOMEDA_EVENT_EMPTY;
}
if (status & LPU_STATUS_FFULL) {
restore |= LPU_STATUS_FFULL;
evts |= KOMEDA_EVENT_FULL;
}
if (restore != 0)
malidp_write32_mask(reg, BLK_STATUS, restore, 0);
restore = 0;
/* Check errors of TBU status */
tbu_status = malidp_read32(reg, LPU_TBU_STATUS);
if (tbu_status & LPU_TBU_STATUS_TCF) {
restore |= LPU_TBU_STATUS_TCF;
evts |= KOMEDA_ERR_TCF;
}
if (tbu_status & LPU_TBU_STATUS_TTNG) {
restore |= LPU_TBU_STATUS_TTNG;
evts |= KOMEDA_ERR_TTNG;
}
if (tbu_status & LPU_TBU_STATUS_TITR) {
restore |= LPU_TBU_STATUS_TITR;
evts |= KOMEDA_ERR_TITR;
}
if (tbu_status & LPU_TBU_STATUS_TEMR) {
restore |= LPU_TBU_STATUS_TEMR;
evts |= KOMEDA_ERR_TEMR;
}
if (tbu_status & LPU_TBU_STATUS_TTF) {
restore |= LPU_TBU_STATUS_TTF;
evts |= KOMEDA_ERR_TTF;
}
if (restore != 0)
malidp_write32_mask(reg, LPU_TBU_STATUS, restore, 0);
}
malidp_write32(reg, BLK_IRQ_CLEAR, raw_status);
return evts;
}
static u64 get_cu_event(struct d71_pipeline *d71_pipeline)
{
u32 __iomem *reg = d71_pipeline->cu_addr;
u32 status, raw_status;
u64 evts = 0ULL;
raw_status = malidp_read32(reg, BLK_IRQ_RAW_STATUS);
if (raw_status & CU_IRQ_OVR)
evts |= KOMEDA_EVENT_OVR;
if (raw_status & (CU_IRQ_ERR | CU_IRQ_OVR)) {
status = malidp_read32(reg, BLK_STATUS) & 0x7FFFFFFF;
if (status & CU_STATUS_CPE)
evts |= KOMEDA_ERR_CPE;
if (status & CU_STATUS_ZME)
evts |= KOMEDA_ERR_ZME;
if (status & CU_STATUS_CFGE)
evts |= KOMEDA_ERR_CFGE;
if (status)
malidp_write32_mask(reg, BLK_STATUS, status, 0);
}
malidp_write32(reg, BLK_IRQ_CLEAR, raw_status);
return evts;
}
static u64 get_dou_event(struct d71_pipeline *d71_pipeline)
{
u32 __iomem *reg = d71_pipeline->dou_addr;
u32 status, raw_status;
u64 evts = 0ULL;
raw_status = malidp_read32(reg, BLK_IRQ_RAW_STATUS);
if (raw_status & DOU_IRQ_PL0)
evts |= KOMEDA_EVENT_VSYNC;
if (raw_status & DOU_IRQ_UND)
evts |= KOMEDA_EVENT_URUN;
if (raw_status & (DOU_IRQ_ERR | DOU_IRQ_UND)) {
u32 restore = 0;
status = malidp_read32(reg, BLK_STATUS);
if (status & DOU_STATUS_DRIFTTO) {
restore |= DOU_STATUS_DRIFTTO;
evts |= KOMEDA_ERR_DRIFTTO;
}
if (status & DOU_STATUS_FRAMETO) {
restore |= DOU_STATUS_FRAMETO;
evts |= KOMEDA_ERR_FRAMETO;
}
if (status & DOU_STATUS_TETO) {
restore |= DOU_STATUS_TETO;
evts |= KOMEDA_ERR_TETO;
}
if (status & DOU_STATUS_CSCE) {
restore |= DOU_STATUS_CSCE;
evts |= KOMEDA_ERR_CSCE;
}
if (restore != 0)
malidp_write32_mask(reg, BLK_STATUS, restore, 0);
}
malidp_write32(reg, BLK_IRQ_CLEAR, raw_status);
return evts;
}
static u64 get_pipeline_event(struct d71_pipeline *d71_pipeline, u32 gcu_status)
{
u64 evts = 0ULL;
if (gcu_status & (GLB_IRQ_STATUS_LPU0 | GLB_IRQ_STATUS_LPU1))
evts |= get_lpu_event(d71_pipeline);
if (gcu_status & (GLB_IRQ_STATUS_CU0 | GLB_IRQ_STATUS_CU1))
evts |= get_cu_event(d71_pipeline);
if (gcu_status & (GLB_IRQ_STATUS_DOU0 | GLB_IRQ_STATUS_DOU1))
evts |= get_dou_event(d71_pipeline);
return evts;
}
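/*
* Top level IRQ dispatch: GLB_IRQ_STATUS tells which blocks raised an
* interrupt (the GCU itself and/or one of the pipelines); GCU CVAL bits are
* reported as per-pipe FLIP events, and the per-pipeline helper collects
* the LPU/CU/DOU events.
*/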
static irqreturn_t
d71_irq_handler(struct komeda_dev *mdev, struct komeda_events *evts)
{
struct d71_dev *d71 = mdev->chip_data;
u32 status, gcu_status, raw_status;
gcu_status = malidp_read32(d71->gcu_addr, GLB_IRQ_STATUS);
if (gcu_status & GLB_IRQ_STATUS_GCU) {
raw_status = malidp_read32(d71->gcu_addr, BLK_IRQ_RAW_STATUS);
if (raw_status & GCU_IRQ_CVAL0)
evts->pipes[0] |= KOMEDA_EVENT_FLIP;
if (raw_status & GCU_IRQ_CVAL1)
evts->pipes[1] |= KOMEDA_EVENT_FLIP;
if (raw_status & GCU_IRQ_ERR) {
status = malidp_read32(d71->gcu_addr, BLK_STATUS);
if (status & GCU_STATUS_MERR) {
evts->global |= KOMEDA_ERR_MERR;
malidp_write32_mask(d71->gcu_addr, BLK_STATUS,
GCU_STATUS_MERR, 0);
}
}
malidp_write32(d71->gcu_addr, BLK_IRQ_CLEAR, raw_status);
}
if (gcu_status & GLB_IRQ_STATUS_PIPE0)
evts->pipes[0] |= get_pipeline_event(d71->pipes[0], gcu_status);
if (gcu_status & GLB_IRQ_STATUS_PIPE1)
evts->pipes[1] |= get_pipeline_event(d71->pipes[1], gcu_status);
return IRQ_RETVAL(gcu_status);
}
#define ENABLED_GCU_IRQS (GCU_IRQ_CVAL0 | GCU_IRQ_CVAL1 | \
GCU_IRQ_MODE | GCU_IRQ_ERR)
#define ENABLED_LPU_IRQS (LPU_IRQ_IBSY | LPU_IRQ_ERR | LPU_IRQ_EOW)
#define ENABLED_CU_IRQS (CU_IRQ_OVR | CU_IRQ_ERR)
#define ENABLED_DOU_IRQS (DOU_IRQ_UND | DOU_IRQ_ERR)
static int d71_enable_irq(struct komeda_dev *mdev)
{
struct d71_dev *d71 = mdev->chip_data;
struct d71_pipeline *pipe;
u32 i;
malidp_write32_mask(d71->gcu_addr, BLK_IRQ_MASK,
ENABLED_GCU_IRQS, ENABLED_GCU_IRQS);
for (i = 0; i < d71->num_pipelines; i++) {
pipe = d71->pipes[i];
malidp_write32_mask(pipe->cu_addr, BLK_IRQ_MASK,
ENABLED_CU_IRQS, ENABLED_CU_IRQS);
malidp_write32_mask(pipe->lpu_addr, BLK_IRQ_MASK,
ENABLED_LPU_IRQS, ENABLED_LPU_IRQS);
malidp_write32_mask(pipe->dou_addr, BLK_IRQ_MASK,
ENABLED_DOU_IRQS, ENABLED_DOU_IRQS);
}
return 0;
}
static int d71_disable_irq(struct komeda_dev *mdev)
{
struct d71_dev *d71 = mdev->chip_data;
struct d71_pipeline *pipe;
u32 i;
malidp_write32_mask(d71->gcu_addr, BLK_IRQ_MASK, ENABLED_GCU_IRQS, 0);
for (i = 0; i < d71->num_pipelines; i++) {
pipe = d71->pipes[i];
malidp_write32_mask(pipe->cu_addr, BLK_IRQ_MASK,
ENABLED_CU_IRQS, 0);
malidp_write32_mask(pipe->lpu_addr, BLK_IRQ_MASK,
ENABLED_LPU_IRQS, 0);
malidp_write32_mask(pipe->dou_addr, BLK_IRQ_MASK,
ENABLED_DOU_IRQS, 0);
}
return 0;
}
static void d71_on_off_vblank(struct komeda_dev *mdev, int master_pipe, bool on)
{
struct d71_dev *d71 = mdev->chip_data;
struct d71_pipeline *pipe = d71->pipes[master_pipe];
malidp_write32_mask(pipe->dou_addr, BLK_IRQ_MASK,
DOU_IRQ_PL0, on ? DOU_IRQ_PL0 : 0);
}
static int to_d71_opmode(int core_mode)
{
switch (core_mode) {
case KOMEDA_MODE_DISP0:
return DO0_ACTIVE_MODE;
case KOMEDA_MODE_DISP1:
return DO1_ACTIVE_MODE;
case KOMEDA_MODE_DUAL_DISP:
return DO01_ACTIVE_MODE;
case KOMEDA_MODE_INACTIVE:
return INACTIVE_MODE;
default:
WARN(1, "Unknown operation mode");
return INACTIVE_MODE;
}
}
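/*
* Request a new operation mode by writing it to the GCU control register,
* then poll (with a timeout) until the hardware reflects the requested mode.
*/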
static int d71_change_opmode(struct komeda_dev *mdev, int new_mode)
{
struct d71_dev *d71 = mdev->chip_data;
u32 opmode = to_d71_opmode(new_mode);
int ret;
malidp_write32_mask(d71->gcu_addr, BLK_CONTROL, 0x7, opmode);
ret = dp_wait_cond(((malidp_read32(d71->gcu_addr, BLK_CONTROL) & 0x7) == opmode),
100, 1000, 10000);
return ret;
}
static void d71_flush(struct komeda_dev *mdev,
int master_pipe, u32 active_pipes)
{
struct d71_dev *d71 = mdev->chip_data;
u32 reg_offset = (master_pipe == 0) ?
GCU_CONFIG_VALID0 : GCU_CONFIG_VALID1;
malidp_write32(d71->gcu_addr, reg_offset, GCU_CONFIG_CVAL);
}
static int d71_reset(struct d71_dev *d71)
{
u32 __iomem *gcu = d71->gcu_addr;
int ret;
malidp_write32(gcu, BLK_CONTROL, GCU_CONTROL_SRST);
ret = dp_wait_cond(!(malidp_read32(gcu, BLK_CONTROL) & GCU_CONTROL_SRST),
100, 1000, 10000);
return ret;
}
void d71_read_block_header(u32 __iomem *reg, struct block_header *blk)
{
int i;
blk->block_info = malidp_read32(reg, BLK_BLOCK_INFO);
if (BLOCK_INFO_BLK_TYPE(blk->block_info) == D71_BLK_TYPE_RESERVED)
return;
blk->pipeline_info = malidp_read32(reg, BLK_PIPELINE_INFO);
/* get valid input and output ids */
for (i = 0; i < PIPELINE_INFO_N_VALID_INPUTS(blk->pipeline_info); i++)
blk->input_ids[i] = malidp_read32(reg + i, BLK_VALID_INPUT_ID0);
for (i = 0; i < PIPELINE_INFO_N_OUTPUTS(blk->pipeline_info); i++)
blk->output_ids[i] = malidp_read32(reg + i, BLK_OUTPUT_ID0);
}
static void d71_cleanup(struct komeda_dev *mdev)
{
struct d71_dev *d71 = mdev->chip_data;
if (!d71)
return;
devm_kfree(mdev->dev, d71);
mdev->chip_data = NULL;
}
static int d71_enum_resources(struct komeda_dev *mdev)
{
struct d71_dev *d71;
struct komeda_pipeline *pipe;
struct block_header blk;
u32 __iomem *blk_base;
u32 i, value, offset;
int err;
d71 = devm_kzalloc(mdev->dev, sizeof(*d71), GFP_KERNEL);
if (!d71)
return -ENOMEM;
mdev->chip_data = d71;
d71->mdev = mdev;
d71->gcu_addr = mdev->reg_base;
d71->periph_addr = mdev->reg_base + (D71_BLOCK_OFFSET_PERIPH >> 2);
err = d71_reset(d71);
if (err) {
DRM_ERROR("Fail to reset d71 device.\n");
goto err_cleanup;
}
/* probe GCU */
value = malidp_read32(d71->gcu_addr, GLB_CORE_INFO);
d71->num_blocks = value & 0xFF;
d71->num_pipelines = (value >> 8) & 0x7;
if (d71->num_pipelines > D71_MAX_PIPELINE) {
DRM_ERROR("d71 supports %d pipelines, but got: %d.\n",
D71_MAX_PIPELINE, d71->num_pipelines);
err = -EINVAL;
goto err_cleanup;
}
/* Only the legacy HW has the periph block; newer HW merges the periph
* into the GCU
*/
value = malidp_read32(d71->periph_addr, BLK_BLOCK_INFO);
if (BLOCK_INFO_BLK_TYPE(value) != D71_BLK_TYPE_PERIPH)
d71->periph_addr = NULL;
if (d71->periph_addr) {
/* probe PERIPHERAL in legacy HW */
value = malidp_read32(d71->periph_addr, PERIPH_CONFIGURATION_ID);
d71->max_line_size = value & PERIPH_MAX_LINE_SIZE ? 4096 : 2048;
d71->max_vsize = 4096;
d71->num_rich_layers = value & PERIPH_NUM_RICH_LAYERS ? 2 : 1;
d71->supports_dual_link = !!(value & PERIPH_SPLIT_EN);
d71->integrates_tbu = !!(value & PERIPH_TBU_EN);
} else {
value = malidp_read32(d71->gcu_addr, GCU_CONFIGURATION_ID0);
d71->max_line_size = GCU_MAX_LINE_SIZE(value);
d71->max_vsize = GCU_MAX_NUM_LINES(value);
value = malidp_read32(d71->gcu_addr, GCU_CONFIGURATION_ID1);
d71->num_rich_layers = GCU_NUM_RICH_LAYERS(value);
d71->supports_dual_link = GCU_DISPLAY_SPLIT_EN(value);
d71->integrates_tbu = GCU_DISPLAY_TBU_EN(value);
}
for (i = 0; i < d71->num_pipelines; i++) {
pipe = komeda_pipeline_add(mdev, sizeof(struct d71_pipeline),
&d71_pipeline_funcs);
if (IS_ERR(pipe)) {
err = PTR_ERR(pipe);
goto err_cleanup;
}
/* D71 HW doesn't update shadow registers when the display output
* is being turned off, so if we disable all pipeline components
* together with the display output in one flush or one operation,
* the registers updated by the disable operation will not be
* flushed to or valid in HW, which may lead to problems.
* To work around this, introduce a two phase disable.
* Phase 1: Disable the components while the display is still on, to
* make sure the disable can be flushed to HW.
* Phase 2: Only then turn off the display output.
*/
value = KOMEDA_PIPELINE_IMPROCS |
BIT(KOMEDA_COMPONENT_TIMING_CTRLR);
pipe->standalone_disabled_comps = value;
d71->pipes[i] = to_d71_pipeline(pipe);
}
/* Loop over the register blocks and probe them.
* NOTE: d71->num_blocks includes reserved blocks.
* d71->num_blocks = GCU + valid blocks + reserved blocks
*/
i = 1; /* exclude GCU */
offset = D71_BLOCK_SIZE; /* skip GCU */
while (i < d71->num_blocks) {
blk_base = mdev->reg_base + (offset >> 2);
d71_read_block_header(blk_base, &blk);
if (BLOCK_INFO_BLK_TYPE(blk.block_info) != D71_BLK_TYPE_RESERVED) {
err = d71_probe_block(d71, &blk, blk_base);
if (err)
goto err_cleanup;
}
i++;
offset += D71_BLOCK_SIZE;
}
DRM_DEBUG("total %d (out of %d) blocks are found.\n",
i, d71->num_blocks);
return 0;
err_cleanup:
d71_cleanup(mdev);
return err;
}
#define __HW_ID(__group, __format) \
((((__group) & 0x7) << 3) | ((__format) & 0x7))
#define RICH KOMEDA_FMT_RICH_LAYER
#define SIMPLE KOMEDA_FMT_SIMPLE_LAYER
#define RICH_SIMPLE (KOMEDA_FMT_RICH_LAYER | KOMEDA_FMT_SIMPLE_LAYER)
#define RICH_WB (KOMEDA_FMT_RICH_LAYER | KOMEDA_FMT_WB_LAYER)
#define RICH_SIMPLE_WB (RICH_SIMPLE | KOMEDA_FMT_WB_LAYER)
#define Rot_0 DRM_MODE_ROTATE_0
#define Flip_H_V (DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y | Rot_0)
#define Rot_ALL_H_V (DRM_MODE_ROTATE_MASK | Flip_H_V)
#define LYT_NM BIT(AFBC_FORMAT_MOD_BLOCK_SIZE_16x16)
#define LYT_WB BIT(AFBC_FORMAT_MOD_BLOCK_SIZE_32x8)
#define LYT_NM_WB (LYT_NM | LYT_WB)
#define AFB_TH AFBC(_TILED | _SPARSE)
#define AFB_TH_SC_YTR AFBC(_TILED | _SC | _SPARSE | _YTR)
#define AFB_TH_SC_YTR_BS AFBC(_TILED | _SC | _SPARSE | _YTR | _SPLIT)
static struct komeda_format_caps d71_format_caps_table[] = {
/* HW_ID | fourcc | layer_types | rots | afbc_layouts | afbc_features */
/* ABGR_2101010*/
{__HW_ID(0, 0), DRM_FORMAT_ARGB2101010, RICH_SIMPLE_WB, Flip_H_V, 0, 0},
{__HW_ID(0, 1), DRM_FORMAT_ABGR2101010, RICH_SIMPLE_WB, Flip_H_V, 0, 0},
{__HW_ID(0, 1), DRM_FORMAT_ABGR2101010, RICH_SIMPLE, Rot_ALL_H_V, LYT_NM_WB, AFB_TH_SC_YTR_BS}, /* afbc */
{__HW_ID(0, 2), DRM_FORMAT_RGBA1010102, RICH_SIMPLE_WB, Flip_H_V, 0, 0},
{__HW_ID(0, 3), DRM_FORMAT_BGRA1010102, RICH_SIMPLE_WB, Flip_H_V, 0, 0},
/* ABGR_8888*/
{__HW_ID(1, 0), DRM_FORMAT_ARGB8888, RICH_SIMPLE_WB, Flip_H_V, 0, 0},
{__HW_ID(1, 1), DRM_FORMAT_ABGR8888, RICH_SIMPLE_WB, Flip_H_V, 0, 0},
{__HW_ID(1, 1), DRM_FORMAT_ABGR8888, RICH_SIMPLE, Rot_ALL_H_V, LYT_NM_WB, AFB_TH_SC_YTR_BS}, /* afbc */
{__HW_ID(1, 2), DRM_FORMAT_RGBA8888, RICH_SIMPLE_WB, Flip_H_V, 0, 0},
{__HW_ID(1, 3), DRM_FORMAT_BGRA8888, RICH_SIMPLE_WB, Flip_H_V, 0, 0},
/* XBGB_8888 */
{__HW_ID(2, 0), DRM_FORMAT_XRGB8888, RICH_SIMPLE_WB, Flip_H_V, 0, 0},
{__HW_ID(2, 1), DRM_FORMAT_XBGR8888, RICH_SIMPLE_WB, Flip_H_V, 0, 0},
{__HW_ID(2, 2), DRM_FORMAT_RGBX8888, RICH_SIMPLE_WB, Flip_H_V, 0, 0},
{__HW_ID(2, 3), DRM_FORMAT_BGRX8888, RICH_SIMPLE_WB, Flip_H_V, 0, 0},
/* BGR_888 */ /* non-afbc RGB888 doesn't support rotation or flipping */
{__HW_ID(3, 0), DRM_FORMAT_RGB888, RICH_SIMPLE_WB, Rot_0, 0, 0},
{__HW_ID(3, 1), DRM_FORMAT_BGR888, RICH_SIMPLE_WB, Rot_0, 0, 0},
{__HW_ID(3, 1), DRM_FORMAT_BGR888, RICH_SIMPLE, Rot_ALL_H_V, LYT_NM_WB, AFB_TH_SC_YTR_BS}, /* afbc */
/* BGR 16bpp */
{__HW_ID(4, 0), DRM_FORMAT_RGBA5551, RICH_SIMPLE, Flip_H_V, 0, 0},
{__HW_ID(4, 1), DRM_FORMAT_ABGR1555, RICH_SIMPLE, Flip_H_V, 0, 0},
{__HW_ID(4, 1), DRM_FORMAT_ABGR1555, RICH_SIMPLE, Rot_ALL_H_V, LYT_NM_WB, AFB_TH_SC_YTR}, /* afbc */
{__HW_ID(4, 2), DRM_FORMAT_RGB565, RICH_SIMPLE, Flip_H_V, 0, 0},
{__HW_ID(4, 3), DRM_FORMAT_BGR565, RICH_SIMPLE, Flip_H_V, 0, 0},
{__HW_ID(4, 3), DRM_FORMAT_BGR565, RICH_SIMPLE, Rot_ALL_H_V, LYT_NM_WB, AFB_TH_SC_YTR}, /* afbc */
{__HW_ID(4, 4), DRM_FORMAT_R8, SIMPLE, Rot_0, 0, 0},
/* YUV 444/422/420 8bit */
{__HW_ID(5, 1), DRM_FORMAT_YUYV, RICH, Rot_ALL_H_V, LYT_NM, AFB_TH}, /* afbc */
{__HW_ID(5, 2), DRM_FORMAT_YUYV, RICH, Flip_H_V, 0, 0},
{__HW_ID(5, 3), DRM_FORMAT_UYVY, RICH, Flip_H_V, 0, 0},
{__HW_ID(5, 6), DRM_FORMAT_NV12, RICH, Flip_H_V, 0, 0},
{__HW_ID(5, 6), DRM_FORMAT_YUV420_8BIT, RICH, Rot_ALL_H_V, LYT_NM, AFB_TH}, /* afbc */
{__HW_ID(5, 7), DRM_FORMAT_YUV420, RICH, Flip_H_V, 0, 0},
/* YUV 10bit*/
{__HW_ID(6, 6), DRM_FORMAT_X0L2, RICH, Flip_H_V, 0, 0},
{__HW_ID(6, 7), DRM_FORMAT_P010, RICH, Flip_H_V, 0, 0},
{__HW_ID(6, 7), DRM_FORMAT_YUV420_10BIT, RICH, Rot_ALL_H_V, LYT_NM, AFB_TH},
};
static bool d71_format_mod_supported(const struct komeda_format_caps *caps,
u32 layer_type, u64 modifier, u32 rot)
{
uint64_t layout = modifier & AFBC_FORMAT_MOD_BLOCK_SIZE_MASK;
if ((layout == AFBC_FORMAT_MOD_BLOCK_SIZE_32x8) &&
drm_rotation_90_or_270(rot)) {
DRM_DEBUG_ATOMIC("D71 doesn't support ROT90 for WB-AFBC.\n");
return false;
}
return true;
}
static void d71_init_fmt_tbl(struct komeda_dev *mdev)
{
struct komeda_format_caps_table *table = &mdev->fmt_tbl;
table->format_caps = d71_format_caps_table;
table->format_mod_supported = d71_format_mod_supported;
table->n_formats = ARRAY_SIZE(d71_format_caps_table);
}
static int d71_connect_iommu(struct komeda_dev *mdev)
{
struct d71_dev *d71 = mdev->chip_data;
u32 __iomem *reg = d71->gcu_addr;
u32 check_bits = (d71->num_pipelines == 2) ?
GCU_STATUS_TCS0 | GCU_STATUS_TCS1 : GCU_STATUS_TCS0;
int i, ret;
if (!d71->integrates_tbu)
return -1;
malidp_write32_mask(reg, BLK_CONTROL, 0x7, TBU_CONNECT_MODE);
ret = dp_wait_cond(has_bits(check_bits, malidp_read32(reg, BLK_STATUS)),
100, 1000, 1000);
if (ret < 0) {
DRM_ERROR("timed out connecting to TCU!\n");
malidp_write32_mask(reg, BLK_CONTROL, 0x7, INACTIVE_MODE);
return ret;
}
for (i = 0; i < d71->num_pipelines; i++)
malidp_write32_mask(d71->pipes[i]->lpu_addr, LPU_TBU_CONTROL,
LPU_TBU_CTRL_TLBPEN, LPU_TBU_CTRL_TLBPEN);
return 0;
}
static int d71_disconnect_iommu(struct komeda_dev *mdev)
{
struct d71_dev *d71 = mdev->chip_data;
u32 __iomem *reg = d71->gcu_addr;
u32 check_bits = (d71->num_pipelines == 2) ?
GCU_STATUS_TCS0 | GCU_STATUS_TCS1 : GCU_STATUS_TCS0;
int ret;
malidp_write32_mask(reg, BLK_CONTROL, 0x7, TBU_DISCONNECT_MODE);
ret = dp_wait_cond(((malidp_read32(reg, BLK_STATUS) & check_bits) == 0),
100, 1000, 1000);
if (ret < 0) {
DRM_ERROR("timed out disconnecting from TCU!\n");
malidp_write32_mask(reg, BLK_CONTROL, 0x7, INACTIVE_MODE);
}
return ret;
}
static const struct komeda_dev_funcs d71_chip_funcs = {
.init_format_table = d71_init_fmt_tbl,
.enum_resources = d71_enum_resources,
.cleanup = d71_cleanup,
.irq_handler = d71_irq_handler,
.enable_irq = d71_enable_irq,
.disable_irq = d71_disable_irq,
.on_off_vblank = d71_on_off_vblank,
.change_opmode = d71_change_opmode,
.flush = d71_flush,
.connect_iommu = d71_connect_iommu,
.disconnect_iommu = d71_disconnect_iommu,
.dump_register = d71_dump,
};
const struct komeda_dev_funcs *
d71_identify(u32 __iomem *reg_base, struct komeda_chip_info *chip)
{
const struct komeda_dev_funcs *funcs;
u32 product_id;
chip->core_id = malidp_read32(reg_base, GLB_CORE_ID);
product_id = MALIDP_CORE_ID_PRODUCT_ID(chip->core_id);
switch (product_id) {
case MALIDP_D71_PRODUCT_ID:
case MALIDP_D32_PRODUCT_ID:
funcs = &d71_chip_funcs;
break;
default:
DRM_ERROR("Unsupported product: 0x%x\n", product_id);
return NULL;
}
chip->arch_id = malidp_read32(reg_base, GLB_ARCH_ID);
chip->core_info = malidp_read32(reg_base, GLB_CORE_INFO);
chip->bus_width = D71_BUS_WIDTH_16_BYTES;
return funcs;
}
| linux-master | drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c |
// SPDX-License-Identifier: GPL-2.0
/*
* (C) COPYRIGHT 2018 ARM Limited. All rights reserved.
* Author: James.Qian.Wang <[email protected]>
*
*/
#include "d71_dev.h"
#include "komeda_kms.h"
#include "malidp_io.h"
#include "komeda_framebuffer.h"
#include "komeda_color_mgmt.h"
static void get_resources_id(u32 hw_id, u32 *pipe_id, u32 *comp_id)
{
u32 id = BLOCK_INFO_BLK_ID(hw_id);
u32 pipe = id;
switch (BLOCK_INFO_BLK_TYPE(hw_id)) {
case D71_BLK_TYPE_LPU_WB_LAYER:
id = KOMEDA_COMPONENT_WB_LAYER;
break;
case D71_BLK_TYPE_CU_SPLITTER:
id = KOMEDA_COMPONENT_SPLITTER;
break;
case D71_BLK_TYPE_CU_SCALER:
pipe = id / D71_PIPELINE_MAX_SCALERS;
id %= D71_PIPELINE_MAX_SCALERS;
id += KOMEDA_COMPONENT_SCALER0;
break;
case D71_BLK_TYPE_CU:
id += KOMEDA_COMPONENT_COMPIZ0;
break;
case D71_BLK_TYPE_LPU_LAYER:
pipe = id / D71_PIPELINE_MAX_LAYERS;
id %= D71_PIPELINE_MAX_LAYERS;
id += KOMEDA_COMPONENT_LAYER0;
break;
case D71_BLK_TYPE_DOU_IPS:
id += KOMEDA_COMPONENT_IPS0;
break;
case D71_BLK_TYPE_CU_MERGER:
id = KOMEDA_COMPONENT_MERGER;
break;
case D71_BLK_TYPE_DOU:
id = KOMEDA_COMPONENT_TIMING_CTRLR;
break;
default:
id = 0xFFFFFFFF;
}
if (comp_id)
*comp_id = id;
if (pipe_id)
*pipe_id = pipe;
}
static u32 get_valid_inputs(struct block_header *blk)
{
u32 valid_inputs = 0, comp_id;
int i;
for (i = 0; i < PIPELINE_INFO_N_VALID_INPUTS(blk->pipeline_info); i++) {
get_resources_id(blk->input_ids[i], NULL, &comp_id);
if (comp_id == 0xFFFFFFFF)
continue;
valid_inputs |= BIT(comp_id);
}
return valid_inputs;
}
static void get_values_from_reg(void __iomem *reg, u32 offset,
u32 count, u32 *val)
{
u32 i, addr;
for (i = 0; i < count; i++) {
addr = offset + (i << 2);
/* 0xA4 is WO register */
if (addr != 0xA4)
val[i] = malidp_read32(reg, addr);
else
val[i] = 0xDEADDEAD;
}
}
static void dump_block_header(struct seq_file *sf, void __iomem *reg)
{
struct block_header hdr;
u32 i, n_input, n_output;
d71_read_block_header(reg, &hdr);
seq_printf(sf, "BLOCK_INFO:\t\t0x%X\n", hdr.block_info);
seq_printf(sf, "PIPELINE_INFO:\t\t0x%X\n", hdr.pipeline_info);
n_output = PIPELINE_INFO_N_OUTPUTS(hdr.pipeline_info);
n_input = PIPELINE_INFO_N_VALID_INPUTS(hdr.pipeline_info);
for (i = 0; i < n_input; i++)
seq_printf(sf, "VALID_INPUT_ID%u:\t0x%X\n",
i, hdr.input_ids[i]);
for (i = 0; i < n_output; i++)
seq_printf(sf, "OUTPUT_ID%u:\t\t0x%X\n",
i, hdr.output_ids[i]);
}
/* On D71, we use the global line size. From D32 onwards, every component has
* a line size register to indicate its FIFO size.
*/
static u32 __get_blk_line_size(struct d71_dev *d71, u32 __iomem *reg,
u32 max_default)
{
if (!d71->periph_addr)
max_default = malidp_read32(reg, BLK_MAX_LINE_SIZE);
return max_default;
}
static u32 get_blk_line_size(struct d71_dev *d71, u32 __iomem *reg)
{
return __get_blk_line_size(d71, reg, d71->max_line_size);
}
static u32 to_rot_ctrl(u32 rot)
{
u32 lr_ctrl = 0;
switch (rot & DRM_MODE_ROTATE_MASK) {
case DRM_MODE_ROTATE_0:
lr_ctrl |= L_ROT(L_ROT_R0);
break;
case DRM_MODE_ROTATE_90:
lr_ctrl |= L_ROT(L_ROT_R90);
break;
case DRM_MODE_ROTATE_180:
lr_ctrl |= L_ROT(L_ROT_R180);
break;
case DRM_MODE_ROTATE_270:
lr_ctrl |= L_ROT(L_ROT_R270);
break;
}
if (rot & DRM_MODE_REFLECT_X)
lr_ctrl |= L_HFLIP;
if (rot & DRM_MODE_REFLECT_Y)
lr_ctrl |= L_VFLIP;
return lr_ctrl;
}
static u32 to_ad_ctrl(u64 modifier)
{
u32 afbc_ctrl = AD_AEN;
if (!modifier)
return 0;
if ((modifier & AFBC_FORMAT_MOD_BLOCK_SIZE_MASK) ==
AFBC_FORMAT_MOD_BLOCK_SIZE_32x8)
afbc_ctrl |= AD_WB;
if (modifier & AFBC_FORMAT_MOD_YTR)
afbc_ctrl |= AD_YT;
if (modifier & AFBC_FORMAT_MOD_SPLIT)
afbc_ctrl |= AD_BS;
if (modifier & AFBC_FORMAT_MOD_TILED)
afbc_ctrl |= AD_TH;
return afbc_ctrl;
}
static inline u32 to_d71_input_id(struct komeda_component_state *st, int idx)
{
struct komeda_component_output *input = &st->inputs[idx];
/* if input is not active, set hw input_id(0) to disable it */
if (has_bit(idx, st->active_inputs))
return input->component->hw_id + input->output_port;
else
return 0;
}
static void d71_layer_update_fb(struct komeda_component *c,
struct komeda_fb *kfb,
dma_addr_t *addr)
{
struct drm_framebuffer *fb = &kfb->base;
const struct drm_format_info *info = fb->format;
u32 __iomem *reg = c->reg;
int block_h;
if (info->num_planes > 2)
malidp_write64(reg, BLK_P2_PTR_LOW, addr[2]);
if (info->num_planes > 1) {
block_h = drm_format_info_block_height(info, 1);
malidp_write32(reg, BLK_P1_STRIDE, fb->pitches[1] * block_h);
malidp_write64(reg, BLK_P1_PTR_LOW, addr[1]);
}
block_h = drm_format_info_block_height(info, 0);
malidp_write32(reg, BLK_P0_STRIDE, fb->pitches[0] * block_h);
malidp_write64(reg, BLK_P0_PTR_LOW, addr[0]);
malidp_write32(reg, LAYER_FMT, kfb->format_caps->hw_id);
}
static void d71_layer_disable(struct komeda_component *c)
{
malidp_write32_mask(c->reg, BLK_CONTROL, L_EN, 0);
}
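/*
* Program one layer for the new state: framebuffer pointers and strides,
* AFBC decoder control and crop (plus the payload/end pointer AFBC wants),
* YUV upsampling mode and YUV->RGB coefficients when needed, the input
* size, and finally the layer control bits (rotation/flip and TBU enable).
*/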
static void d71_layer_update(struct komeda_component *c,
struct komeda_component_state *state)
{
struct komeda_layer_state *st = to_layer_st(state);
struct drm_plane_state *plane_st = state->plane->state;
struct drm_framebuffer *fb = plane_st->fb;
struct komeda_fb *kfb = to_kfb(fb);
u32 __iomem *reg = c->reg;
u32 ctrl_mask = L_EN | L_ROT(L_ROT_R270) | L_HFLIP | L_VFLIP | L_TBU_EN;
u32 ctrl = L_EN | to_rot_ctrl(st->rot);
d71_layer_update_fb(c, kfb, st->addr);
malidp_write32(reg, AD_CONTROL, to_ad_ctrl(fb->modifier));
if (fb->modifier) {
u64 addr;
malidp_write32(reg, LAYER_AD_H_CROP, HV_CROP(st->afbc_crop_l,
st->afbc_crop_r));
malidp_write32(reg, LAYER_AD_V_CROP, HV_CROP(st->afbc_crop_t,
st->afbc_crop_b));
/* afbc 1.2 wants payload, afbc 1.0/1.1 wants end_addr */
if (fb->modifier & AFBC_FORMAT_MOD_TILED)
addr = st->addr[0] + kfb->offset_payload;
else
addr = st->addr[0] + kfb->afbc_size - 1;
malidp_write32(reg, BLK_P1_PTR_LOW, lower_32_bits(addr));
malidp_write32(reg, BLK_P1_PTR_HIGH, upper_32_bits(addr));
}
if (fb->format->is_yuv) {
u32 upsampling = 0;
switch (kfb->format_caps->fourcc) {
case DRM_FORMAT_YUYV:
upsampling = fb->modifier ? LR_CHI422_BILINEAR :
LR_CHI422_REPLICATION;
break;
case DRM_FORMAT_UYVY:
upsampling = LR_CHI422_REPLICATION;
break;
case DRM_FORMAT_NV12:
case DRM_FORMAT_YUV420_8BIT:
case DRM_FORMAT_YUV420_10BIT:
case DRM_FORMAT_YUV420:
case DRM_FORMAT_P010:
/* these formats support both MPEG and JPEG; prefer JPEG here */
upsampling = LR_CHI420_JPEG;
break;
case DRM_FORMAT_X0L2:
upsampling = LR_CHI420_JPEG;
break;
default:
break;
}
malidp_write32(reg, LAYER_R_CONTROL, upsampling);
malidp_write_group(reg, LAYER_YUV_RGB_COEFF0,
KOMEDA_N_YUV2RGB_COEFFS,
komeda_select_yuv2rgb_coeffs(
plane_st->color_encoding,
plane_st->color_range));
}
malidp_write32(reg, BLK_IN_SIZE, HV_SIZE(st->hsize, st->vsize));
if (kfb->is_va)
ctrl |= L_TBU_EN;
malidp_write32_mask(reg, BLK_CONTROL, ctrl_mask, ctrl);
}
static void d71_layer_dump(struct komeda_component *c, struct seq_file *sf)
{
u32 v[15], i;
bool rich, rgb2rgb;
char *prefix;
get_values_from_reg(c->reg, LAYER_INFO, 1, &v[14]);
if (v[14] & 0x1) {
rich = true;
prefix = "LR_";
} else {
rich = false;
prefix = "LS_";
}
rgb2rgb = !!(v[14] & L_INFO_CM);
dump_block_header(sf, c->reg);
seq_printf(sf, "%sLAYER_INFO:\t\t0x%X\n", prefix, v[14]);
get_values_from_reg(c->reg, 0xD0, 1, v);
seq_printf(sf, "%sCONTROL:\t\t0x%X\n", prefix, v[0]);
if (rich) {
get_values_from_reg(c->reg, 0xD4, 1, v);
seq_printf(sf, "LR_RICH_CONTROL:\t0x%X\n", v[0]);
}
get_values_from_reg(c->reg, 0xD8, 4, v);
seq_printf(sf, "%sFORMAT:\t\t0x%X\n", prefix, v[0]);
seq_printf(sf, "%sIT_COEFFTAB:\t\t0x%X\n", prefix, v[1]);
seq_printf(sf, "%sIN_SIZE:\t\t0x%X\n", prefix, v[2]);
seq_printf(sf, "%sPALPHA:\t\t0x%X\n", prefix, v[3]);
get_values_from_reg(c->reg, 0x100, 3, v);
seq_printf(sf, "%sP0_PTR_LOW:\t\t0x%X\n", prefix, v[0]);
seq_printf(sf, "%sP0_PTR_HIGH:\t\t0x%X\n", prefix, v[1]);
seq_printf(sf, "%sP0_STRIDE:\t\t0x%X\n", prefix, v[2]);
get_values_from_reg(c->reg, 0x110, 2, v);
seq_printf(sf, "%sP1_PTR_LOW:\t\t0x%X\n", prefix, v[0]);
seq_printf(sf, "%sP1_PTR_HIGH:\t\t0x%X\n", prefix, v[1]);
if (rich) {
get_values_from_reg(c->reg, 0x118, 1, v);
seq_printf(sf, "LR_P1_STRIDE:\t\t0x%X\n", v[0]);
get_values_from_reg(c->reg, 0x120, 2, v);
seq_printf(sf, "LR_P2_PTR_LOW:\t\t0x%X\n", v[0]);
seq_printf(sf, "LR_P2_PTR_HIGH:\t\t0x%X\n", v[1]);
get_values_from_reg(c->reg, 0x130, 12, v);
for (i = 0; i < 12; i++)
seq_printf(sf, "LR_YUV_RGB_COEFF%u:\t0x%X\n", i, v[i]);
}
if (rgb2rgb) {
get_values_from_reg(c->reg, LAYER_RGB_RGB_COEFF0, 12, v);
for (i = 0; i < 12; i++)
seq_printf(sf, "LS_RGB_RGB_COEFF%u:\t0x%X\n", i, v[i]);
}
get_values_from_reg(c->reg, 0x160, 3, v);
seq_printf(sf, "%sAD_CONTROL:\t\t0x%X\n", prefix, v[0]);
seq_printf(sf, "%sAD_H_CROP:\t\t0x%X\n", prefix, v[1]);
seq_printf(sf, "%sAD_V_CROP:\t\t0x%X\n", prefix, v[2]);
}
static int d71_layer_validate(struct komeda_component *c,
struct komeda_component_state *state)
{
struct komeda_layer_state *st = to_layer_st(state);
struct komeda_layer *layer = to_layer(c);
struct drm_plane_state *plane_st;
struct drm_framebuffer *fb;
u32 fourcc, line_sz, max_line_sz;
plane_st = drm_atomic_get_new_plane_state(state->obj.state,
state->plane);
fb = plane_st->fb;
fourcc = fb->format->format;
if (drm_rotation_90_or_270(st->rot))
line_sz = st->vsize - st->afbc_crop_t - st->afbc_crop_b;
else
line_sz = st->hsize - st->afbc_crop_l - st->afbc_crop_r;
if (fb->modifier) {
if ((fb->modifier & AFBC_FORMAT_MOD_BLOCK_SIZE_MASK) ==
AFBC_FORMAT_MOD_BLOCK_SIZE_32x8)
max_line_sz = layer->line_sz;
else
max_line_sz = layer->line_sz / 2;
if (line_sz > max_line_sz) {
DRM_DEBUG_ATOMIC("afbc request line_sz: %d exceed the max afbc line_sz: %d.\n",
line_sz, max_line_sz);
return -EINVAL;
}
}
if (fourcc == DRM_FORMAT_YUV420_10BIT && line_sz > 2046 && (st->afbc_crop_l % 4)) {
DRM_DEBUG_ATOMIC("YUV420_10BIT input_hsize: %d exceed the max size 2046.\n",
line_sz);
return -EINVAL;
}
if (fourcc == DRM_FORMAT_X0L2 && line_sz > 2046 && (st->addr[0] % 16)) {
DRM_DEBUG_ATOMIC("X0L2 input_hsize: %d exceed the max size 2046.\n",
line_sz);
return -EINVAL;
}
return 0;
}
static const struct komeda_component_funcs d71_layer_funcs = {
.validate = d71_layer_validate,
.update = d71_layer_update,
.disable = d71_layer_disable,
.dump_register = d71_layer_dump,
};
static int d71_layer_init(struct d71_dev *d71,
struct block_header *blk, u32 __iomem *reg)
{
struct komeda_component *c;
struct komeda_layer *layer;
u32 pipe_id, layer_id, layer_info;
get_resources_id(blk->block_info, &pipe_id, &layer_id);
c = komeda_component_add(&d71->pipes[pipe_id]->base, sizeof(*layer),
layer_id,
BLOCK_INFO_INPUT_ID(blk->block_info),
&d71_layer_funcs, 0,
get_valid_inputs(blk),
1, reg, "LPU%d_LAYER%d", pipe_id, layer_id);
if (IS_ERR(c)) {
DRM_ERROR("Failed to add layer component\n");
return PTR_ERR(c);
}
layer = to_layer(c);
layer_info = malidp_read32(reg, LAYER_INFO);
if (layer_info & L_INFO_RF)
layer->layer_type = KOMEDA_FMT_RICH_LAYER;
else
layer->layer_type = KOMEDA_FMT_SIMPLE_LAYER;
if (!d71->periph_addr) {
/* D32 or newer product */
layer->line_sz = malidp_read32(reg, BLK_MAX_LINE_SIZE);
layer->yuv_line_sz = L_INFO_YUV_MAX_LINESZ(layer_info);
} else if (d71->max_line_size > 2048) {
/* D71 4K */
layer->line_sz = d71->max_line_size;
layer->yuv_line_sz = layer->line_sz / 2;
} else {
/* D71 2K */
if (layer->layer_type == KOMEDA_FMT_RICH_LAYER) {
/* rich layer is 4K configuration */
layer->line_sz = d71->max_line_size * 2;
layer->yuv_line_sz = layer->line_sz / 2;
} else {
layer->line_sz = d71->max_line_size;
layer->yuv_line_sz = 0;
}
}
set_range(&layer->hsize_in, 4, layer->line_sz);
set_range(&layer->vsize_in, 4, d71->max_vsize);
malidp_write32(reg, LAYER_PALPHA, D71_PALPHA_DEF_MAP);
layer->supported_rots = DRM_MODE_ROTATE_MASK | DRM_MODE_REFLECT_MASK;
return 0;
}
static void d71_wb_layer_update(struct komeda_component *c,
struct komeda_component_state *state)
{
struct komeda_layer_state *st = to_layer_st(state);
struct drm_connector_state *conn_st = state->wb_conn->state;
struct komeda_fb *kfb = to_kfb(conn_st->writeback_job->fb);
u32 ctrl = L_EN | LW_OFM, mask = L_EN | LW_OFM | LW_TBU_EN;
u32 __iomem *reg = c->reg;
d71_layer_update_fb(c, kfb, st->addr);
if (kfb->is_va)
ctrl |= LW_TBU_EN;
malidp_write32(reg, BLK_IN_SIZE, HV_SIZE(st->hsize, st->vsize));
malidp_write32(reg, BLK_INPUT_ID0, to_d71_input_id(state, 0));
malidp_write32_mask(reg, BLK_CONTROL, mask, ctrl);
}
static void d71_wb_layer_dump(struct komeda_component *c, struct seq_file *sf)
{
u32 v[12], i;
dump_block_header(sf, c->reg);
get_values_from_reg(c->reg, 0x80, 1, v);
seq_printf(sf, "LW_INPUT_ID0:\t\t0x%X\n", v[0]);
get_values_from_reg(c->reg, 0xD0, 3, v);
seq_printf(sf, "LW_CONTROL:\t\t0x%X\n", v[0]);
seq_printf(sf, "LW_PROG_LINE:\t\t0x%X\n", v[1]);
seq_printf(sf, "LW_FORMAT:\t\t0x%X\n", v[2]);
get_values_from_reg(c->reg, 0xE0, 1, v);
seq_printf(sf, "LW_IN_SIZE:\t\t0x%X\n", v[0]);
for (i = 0; i < 2; i++) {
get_values_from_reg(c->reg, 0x100 + i * 0x10, 3, v);
seq_printf(sf, "LW_P%u_PTR_LOW:\t\t0x%X\n", i, v[0]);
seq_printf(sf, "LW_P%u_PTR_HIGH:\t\t0x%X\n", i, v[1]);
seq_printf(sf, "LW_P%u_STRIDE:\t\t0x%X\n", i, v[2]);
}
get_values_from_reg(c->reg, 0x130, 12, v);
for (i = 0; i < 12; i++)
seq_printf(sf, "LW_RGB_YUV_COEFF%u:\t0x%X\n", i, v[i]);
}
static void d71_wb_layer_disable(struct komeda_component *c)
{
malidp_write32(c->reg, BLK_INPUT_ID0, 0);
malidp_write32_mask(c->reg, BLK_CONTROL, L_EN, 0);
}
static const struct komeda_component_funcs d71_wb_layer_funcs = {
.update = d71_wb_layer_update,
.disable = d71_wb_layer_disable,
.dump_register = d71_wb_layer_dump,
};
static int d71_wb_layer_init(struct d71_dev *d71,
struct block_header *blk, u32 __iomem *reg)
{
struct komeda_component *c;
struct komeda_layer *wb_layer;
u32 pipe_id, layer_id;
get_resources_id(blk->block_info, &pipe_id, &layer_id);
c = komeda_component_add(&d71->pipes[pipe_id]->base, sizeof(*wb_layer),
layer_id, BLOCK_INFO_INPUT_ID(blk->block_info),
&d71_wb_layer_funcs,
1, get_valid_inputs(blk), 0, reg,
"LPU%d_LAYER_WR", pipe_id);
if (IS_ERR(c)) {
DRM_ERROR("Failed to add wb_layer component\n");
return PTR_ERR(c);
}
wb_layer = to_layer(c);
wb_layer->layer_type = KOMEDA_FMT_WB_LAYER;
wb_layer->line_sz = get_blk_line_size(d71, reg);
wb_layer->yuv_line_sz = wb_layer->line_sz;
set_range(&wb_layer->hsize_in, 64, wb_layer->line_sz);
set_range(&wb_layer->vsize_in, 64, d71->max_vsize);
return 0;
}
static void d71_component_disable(struct komeda_component *c)
{
u32 __iomem *reg = c->reg;
u32 i;
malidp_write32(reg, BLK_CONTROL, 0);
for (i = 0; i < c->max_active_inputs; i++) {
malidp_write32(reg, BLK_INPUT_ID0 + (i << 2), 0);
/* Besides clearing the input ID to zero, the D71 compiz also has an
* input enable bit in CU_INPUTx_CONTROL which needs to be
* cleared.
*/
if (has_bit(c->id, KOMEDA_PIPELINE_COMPIZS))
malidp_write32(reg, CU_INPUT0_CONTROL +
i * CU_PER_INPUT_REGS * 4,
CU_INPUT_CTRL_ALPHA(0xFF));
}
}
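/*
* Program one compositor input: its input ID, size and offset inside the
* compiz output, and a control word carrying the enable bit, the layer
* alpha and the blend mode (pixel-alpha ignored vs premultiplied).
*/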
static void compiz_enable_input(u32 __iomem *id_reg,
u32 __iomem *cfg_reg,
u32 input_hw_id,
struct komeda_compiz_input_cfg *cin)
{
u32 ctrl = CU_INPUT_CTRL_EN;
u8 blend = cin->pixel_blend_mode;
if (blend == DRM_MODE_BLEND_PIXEL_NONE)
ctrl |= CU_INPUT_CTRL_PAD;
else if (blend == DRM_MODE_BLEND_PREMULTI)
ctrl |= CU_INPUT_CTRL_PMUL;
ctrl |= CU_INPUT_CTRL_ALPHA(cin->layer_alpha);
malidp_write32(id_reg, BLK_INPUT_ID0, input_hw_id);
malidp_write32(cfg_reg, CU_INPUT0_SIZE,
HV_SIZE(cin->hsize, cin->vsize));
malidp_write32(cfg_reg, CU_INPUT0_OFFSET,
HV_OFFSET(cin->hoffset, cin->voffset));
malidp_write32(cfg_reg, CU_INPUT0_CONTROL, ctrl);
}
static void d71_compiz_update(struct komeda_component *c,
struct komeda_component_state *state)
{
struct komeda_compiz_state *st = to_compiz_st(state);
u32 __iomem *reg = c->reg;
u32 __iomem *id_reg, *cfg_reg;
u32 index;
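/* Each input has one word in the BLK_INPUT_IDx array and a block of
 * CU_PER_INPUT_REGS words for its size/offset/control registers, hence
 * the two different strides used below.
 */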
for_each_changed_input(state, index) {
id_reg = reg + index;
cfg_reg = reg + index * CU_PER_INPUT_REGS;
if (state->active_inputs & BIT(index)) {
compiz_enable_input(id_reg, cfg_reg,
to_d71_input_id(state, index),
&st->cins[index]);
} else {
malidp_write32(id_reg, BLK_INPUT_ID0, 0);
malidp_write32(cfg_reg, CU_INPUT0_CONTROL, 0);
}
}
malidp_write32(reg, BLK_SIZE, HV_SIZE(st->hsize, st->vsize));
}
static void d71_compiz_dump(struct komeda_component *c, struct seq_file *sf)
{
u32 v[8], i;
dump_block_header(sf, c->reg);
get_values_from_reg(c->reg, 0x80, 5, v);
for (i = 0; i < 5; i++)
seq_printf(sf, "CU_INPUT_ID%u:\t\t0x%X\n", i, v[i]);
get_values_from_reg(c->reg, 0xA0, 5, v);
seq_printf(sf, "CU_IRQ_RAW_STATUS:\t0x%X\n", v[0]);
seq_printf(sf, "CU_IRQ_CLEAR:\t\t0x%X\n", v[1]);
seq_printf(sf, "CU_IRQ_MASK:\t\t0x%X\n", v[2]);
seq_printf(sf, "CU_IRQ_STATUS:\t\t0x%X\n", v[3]);
seq_printf(sf, "CU_STATUS:\t\t0x%X\n", v[4]);
get_values_from_reg(c->reg, 0xD0, 2, v);
seq_printf(sf, "CU_CONTROL:\t\t0x%X\n", v[0]);
seq_printf(sf, "CU_SIZE:\t\t0x%X\n", v[1]);
get_values_from_reg(c->reg, 0xDC, 1, v);
seq_printf(sf, "CU_BG_COLOR:\t\t0x%X\n", v[0]);
for (i = 0, v[4] = 0xE0; i < 5; i++, v[4] += 0x10) {
get_values_from_reg(c->reg, v[4], 3, v);
seq_printf(sf, "CU_INPUT%u_SIZE:\t\t0x%X\n", i, v[0]);
seq_printf(sf, "CU_INPUT%u_OFFSET:\t0x%X\n", i, v[1]);
seq_printf(sf, "CU_INPUT%u_CONTROL:\t0x%X\n", i, v[2]);
}
get_values_from_reg(c->reg, 0x130, 2, v);
seq_printf(sf, "CU_USER_LOW:\t\t0x%X\n", v[0]);
seq_printf(sf, "CU_USER_HIGH:\t\t0x%X\n", v[1]);
}
static const struct komeda_component_funcs d71_compiz_funcs = {
.update = d71_compiz_update,
.disable = d71_component_disable,
.dump_register = d71_compiz_dump,
};
static int d71_compiz_init(struct d71_dev *d71,
struct block_header *blk, u32 __iomem *reg)
{
struct komeda_component *c;
struct komeda_compiz *compiz;
u32 pipe_id, comp_id;
get_resources_id(blk->block_info, &pipe_id, &comp_id);
c = komeda_component_add(&d71->pipes[pipe_id]->base, sizeof(*compiz),
comp_id,
BLOCK_INFO_INPUT_ID(blk->block_info),
&d71_compiz_funcs,
CU_NUM_INPUT_IDS, get_valid_inputs(blk),
CU_NUM_OUTPUT_IDS, reg,
"CU%d", pipe_id);
if (IS_ERR(c))
return PTR_ERR(c);
compiz = to_compiz(c);
set_range(&compiz->hsize, 64, get_blk_line_size(d71, reg));
set_range(&compiz->vsize, 64, d71->max_vsize);
return 0;
}
static void d71_scaler_update_filter_lut(u32 __iomem *reg, u32 hsize_in,
u32 vsize_in, u32 hsize_out,
u32 vsize_out)
{
u32 val = 0;
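/* Pick a coefficient table based on the downscaling ratio: the
 * thresholds below distinguish no downscaling, up to 1.5x, up to 2x,
 * up to 2.75x, and beyond, separately for the horizontal and vertical
 * (SC_VTSEL) directions.
 */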
if (hsize_in <= hsize_out)
val |= 0x62;
else if (hsize_in <= (hsize_out + hsize_out / 2))
val |= 0x63;
else if (hsize_in <= hsize_out * 2)
val |= 0x64;
else if (hsize_in <= hsize_out * 2 + (hsize_out * 3) / 4)
val |= 0x65;
else
val |= 0x66;
if (vsize_in <= vsize_out)
val |= SC_VTSEL(0x6A);
else if (vsize_in <= (vsize_out + vsize_out / 2))
val |= SC_VTSEL(0x6B);
else if (vsize_in <= vsize_out * 2)
val |= SC_VTSEL(0x6C);
else if (vsize_in <= vsize_out * 2 + vsize_out * 3 / 4)
val |= SC_VTSEL(0x6D);
else
val |= SC_VTSEL(0x6E);
malidp_write32(reg, SC_COEFFTAB, val);
}
static void d71_scaler_update(struct komeda_component *c,
struct komeda_component_state *state)
{
struct komeda_scaler_state *st = to_scaler_st(state);
u32 __iomem *reg = c->reg;
u32 init_ph, delta_ph, ctrl;
d71_scaler_update_filter_lut(reg, st->hsize_in, st->vsize_in,
st->hsize_out, st->vsize_out);
malidp_write32(reg, BLK_IN_SIZE, HV_SIZE(st->hsize_in, st->vsize_in));
malidp_write32(reg, SC_OUT_SIZE, HV_SIZE(st->hsize_out, st->vsize_out));
malidp_write32(reg, SC_H_CROP, HV_CROP(st->left_crop, st->right_crop));
/* For the right part, the HW only samples valid pixels, which means the
 * pixels in left_crop are skipped, and the first sampled pixel is:
 *
 * dst_a = st->total_hsize_out - st->hsize_out + st->left_crop + 0.5;
 *
 * The corresponding texel in the source is then:
 *
 * h_delta_phase = st->total_hsize_in / st->total_hsize_out;
 * src_a = dst_a * h_delta_phase;
 *
 * and h_init_phase is src_a minus the real source start src_S:
 *
 * src_S = st->total_hsize_in - st->hsize_in;
 * h_init_phase = src_a - src_S;
 *
 * The HW precision for the initial/delta phase is 16.16 fixed point;
 * the code below uses the simplified formula.
 */
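/* Worked expansion of the above (derivation only, using the names from
 * the comment): with src_S = total_hsize_in - hsize_in,
 *
 * h_init_phase = (dst_a + 0.5) * total_hsize_in / total_hsize_out - src_S
 *
 * and multiplying by 65536 for the 16.16 fixed-point representation gives
 *
 * ((total_hsize_in * (2 * dst_a + 1) -
 *   2 * total_hsize_out * src_S) << 15) / total_hsize_out
 *
 * which is what the right_part branch below computes.
 */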
if (st->right_part) {
u32 dst_a = st->total_hsize_out - st->hsize_out + st->left_crop;
if (st->en_img_enhancement)
dst_a -= 1;
init_ph = ((st->total_hsize_in * (2 * dst_a + 1) -
2 * st->total_hsize_out * (st->total_hsize_in -
st->hsize_in)) << 15) / st->total_hsize_out;
} else {
init_ph = (st->total_hsize_in << 15) / st->total_hsize_out;
}
malidp_write32(reg, SC_H_INIT_PH, init_ph);
delta_ph = (st->total_hsize_in << 16) / st->total_hsize_out;
malidp_write32(reg, SC_H_DELTA_PH, delta_ph);
init_ph = (st->total_vsize_in << 15) / st->vsize_out;
malidp_write32(reg, SC_V_INIT_PH, init_ph);
delta_ph = (st->total_vsize_in << 16) / st->vsize_out;
malidp_write32(reg, SC_V_DELTA_PH, delta_ph);
ctrl = 0;
ctrl |= st->en_scaling ? SC_CTRL_SCL : 0;
ctrl |= st->en_alpha ? SC_CTRL_AP : 0;
ctrl |= st->en_img_enhancement ? SC_CTRL_IENH : 0;
/* If we use the hardware splitter we shouldn't set SC_CTRL_LS */
if (st->en_split &&
state->inputs[0].component->id != KOMEDA_COMPONENT_SPLITTER)
ctrl |= SC_CTRL_LS;
malidp_write32(reg, BLK_CONTROL, ctrl);
malidp_write32(reg, BLK_INPUT_ID0, to_d71_input_id(state, 0));
}
static void d71_scaler_dump(struct komeda_component *c, struct seq_file *sf)
{
u32 v[10];
dump_block_header(sf, c->reg);
get_values_from_reg(c->reg, 0x80, 1, v);
seq_printf(sf, "SC_INPUT_ID0:\t\t0x%X\n", v[0]);
get_values_from_reg(c->reg, 0xD0, 1, v);
seq_printf(sf, "SC_CONTROL:\t\t0x%X\n", v[0]);
get_values_from_reg(c->reg, 0xDC, 9, v);
seq_printf(sf, "SC_COEFFTAB:\t\t0x%X\n", v[0]);
seq_printf(sf, "SC_IN_SIZE:\t\t0x%X\n", v[1]);
seq_printf(sf, "SC_OUT_SIZE:\t\t0x%X\n", v[2]);
seq_printf(sf, "SC_H_CROP:\t\t0x%X\n", v[3]);
seq_printf(sf, "SC_V_CROP:\t\t0x%X\n", v[4]);
seq_printf(sf, "SC_H_INIT_PH:\t\t0x%X\n", v[5]);
seq_printf(sf, "SC_H_DELTA_PH:\t\t0x%X\n", v[6]);
seq_printf(sf, "SC_V_INIT_PH:\t\t0x%X\n", v[7]);
seq_printf(sf, "SC_V_DELTA_PH:\t\t0x%X\n", v[8]);
get_values_from_reg(c->reg, 0x130, 10, v);
seq_printf(sf, "SC_ENH_LIMITS:\t\t0x%X\n", v[0]);
seq_printf(sf, "SC_ENH_COEFF0:\t\t0x%X\n", v[1]);
seq_printf(sf, "SC_ENH_COEFF1:\t\t0x%X\n", v[2]);
seq_printf(sf, "SC_ENH_COEFF2:\t\t0x%X\n", v[3]);
seq_printf(sf, "SC_ENH_COEFF3:\t\t0x%X\n", v[4]);
seq_printf(sf, "SC_ENH_COEFF4:\t\t0x%X\n", v[5]);
seq_printf(sf, "SC_ENH_COEFF5:\t\t0x%X\n", v[6]);
seq_printf(sf, "SC_ENH_COEFF6:\t\t0x%X\n", v[7]);
seq_printf(sf, "SC_ENH_COEFF7:\t\t0x%X\n", v[8]);
seq_printf(sf, "SC_ENH_COEFF8:\t\t0x%X\n", v[9]);
}
static const struct komeda_component_funcs d71_scaler_funcs = {
.update = d71_scaler_update,
.disable = d71_component_disable,
.dump_register = d71_scaler_dump,
};
static int d71_scaler_init(struct d71_dev *d71,
struct block_header *blk, u32 __iomem *reg)
{
struct komeda_component *c;
struct komeda_scaler *scaler;
u32 pipe_id, comp_id;
get_resources_id(blk->block_info, &pipe_id, &comp_id);
c = komeda_component_add(&d71->pipes[pipe_id]->base, sizeof(*scaler),
comp_id, BLOCK_INFO_INPUT_ID(blk->block_info),
&d71_scaler_funcs,
1, get_valid_inputs(blk), 1, reg,
"CU%d_SCALER%d",
pipe_id, BLOCK_INFO_BLK_ID(blk->block_info));
if (IS_ERR(c)) {
DRM_ERROR("Failed to initialize scaler\n");
return PTR_ERR(c);
}
scaler = to_scaler(c);
set_range(&scaler->hsize, 4, __get_blk_line_size(d71, reg, 2048));
set_range(&scaler->vsize, 4, 4096);
scaler->max_downscaling = 6;
scaler->max_upscaling = 64;
scaler->scaling_split_overlap = 8;
scaler->enh_split_overlap = 1;
malidp_write32(c->reg, BLK_CONTROL, 0);
return 0;
}
static int d71_downscaling_clk_check(struct komeda_pipeline *pipe,
struct drm_display_mode *mode,
unsigned long aclk_rate,
struct komeda_data_flow_cfg *dflow)
{
u32 h_in = dflow->in_w;
u32 v_in = dflow->in_h;
u32 v_out = dflow->out_h;
u64 fraction, denominator;
/* D71 downscaling must satisfy the following equation
*
* ACLK h_in * v_in
* ------- >= ---------------------------------------------
* PXLCLK (h_total - (1 + 2 * v_in / v_out)) * v_out
*
* In a horizontal-only downscaling situation, the right side should be
* multiplied by (h_total - 3) / (h_active - 3), and the equation becomes
*
* ACLK h_in
* ------- >= ----------------
* PXLCLK (h_active - 3)
*
* To avoid precision loss, equation 1 is converted to:
*
* ACLK h_in * v_in
* ------- >= -----------------------------------
* PXLCLK (h_total - 1) * v_out - 2 * v_in
*/
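/* The conversion above is a plain expansion of the denominator:
 *
 * (h_total - (1 + 2 * v_in / v_out)) * v_out
 *   = (h_total - 1) * v_out - 2 * v_in
 *
 * which keeps the comparison below in integer arithmetic.
 */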
if (v_in == v_out) {
fraction = h_in;
denominator = mode->hdisplay - 3;
} else {
fraction = h_in * v_in;
denominator = (mode->htotal - 1) * v_out - 2 * v_in;
}
return aclk_rate * denominator >= mode->crtc_clock * 1000 * fraction ?
0 : -EINVAL;
}
static void d71_splitter_update(struct komeda_component *c,
struct komeda_component_state *state)
{
struct komeda_splitter_state *st = to_splitter_st(state);
u32 __iomem *reg = c->reg;
malidp_write32(reg, BLK_INPUT_ID0, to_d71_input_id(state, 0));
malidp_write32(reg, BLK_SIZE, HV_SIZE(st->hsize, st->vsize));
malidp_write32(reg, SP_OVERLAP_SIZE, st->overlap & 0x1FFF);
malidp_write32(reg, BLK_CONTROL, BLK_CTRL_EN);
}
static void d71_splitter_dump(struct komeda_component *c, struct seq_file *sf)
{
u32 v[3];
dump_block_header(sf, c->reg);
get_values_from_reg(c->reg, BLK_INPUT_ID0, 1, v);
seq_printf(sf, "SP_INPUT_ID0:\t\t0x%X\n", v[0]);
get_values_from_reg(c->reg, BLK_CONTROL, 3, v);
seq_printf(sf, "SP_CONTROL:\t\t0x%X\n", v[0]);
seq_printf(sf, "SP_SIZE:\t\t0x%X\n", v[1]);
seq_printf(sf, "SP_OVERLAP_SIZE:\t0x%X\n", v[2]);
}
static const struct komeda_component_funcs d71_splitter_funcs = {
.update = d71_splitter_update,
.disable = d71_component_disable,
.dump_register = d71_splitter_dump,
};
static int d71_splitter_init(struct d71_dev *d71,
struct block_header *blk, u32 __iomem *reg)
{
struct komeda_component *c;
struct komeda_splitter *splitter;
u32 pipe_id, comp_id;
get_resources_id(blk->block_info, &pipe_id, &comp_id);
c = komeda_component_add(&d71->pipes[pipe_id]->base, sizeof(*splitter),
comp_id,
BLOCK_INFO_INPUT_ID(blk->block_info),
&d71_splitter_funcs,
1, get_valid_inputs(blk), 2, reg,
"CU%d_SPLITTER", pipe_id);
if (IS_ERR(c)) {
DRM_ERROR("Failed to initialize splitter\n");
return PTR_ERR(c);
}
splitter = to_splitter(c);
set_range(&splitter->hsize, 4, get_blk_line_size(d71, reg));
set_range(&splitter->vsize, 4, d71->max_vsize);
return 0;
}
static void d71_merger_update(struct komeda_component *c,
struct komeda_component_state *state)
{
struct komeda_merger_state *st = to_merger_st(state);
u32 __iomem *reg = c->reg;
u32 index;
for_each_changed_input(state, index)
malidp_write32(reg, MG_INPUT_ID0 + index * 4,
to_d71_input_id(state, index));
malidp_write32(reg, MG_SIZE, HV_SIZE(st->hsize_merged,
st->vsize_merged));
malidp_write32(reg, BLK_CONTROL, BLK_CTRL_EN);
}
static void d71_merger_dump(struct komeda_component *c, struct seq_file *sf)
{
u32 v;
dump_block_header(sf, c->reg);
get_values_from_reg(c->reg, MG_INPUT_ID0, 1, &v);
seq_printf(sf, "MG_INPUT_ID0:\t\t0x%X\n", v);
get_values_from_reg(c->reg, MG_INPUT_ID1, 1, &v);
seq_printf(sf, "MG_INPUT_ID1:\t\t0x%X\n", v);
get_values_from_reg(c->reg, BLK_CONTROL, 1, &v);
seq_printf(sf, "MG_CONTROL:\t\t0x%X\n", v);
get_values_from_reg(c->reg, MG_SIZE, 1, &v);
seq_printf(sf, "MG_SIZE:\t\t0x%X\n", v);
}
static const struct komeda_component_funcs d71_merger_funcs = {
.update = d71_merger_update,
.disable = d71_component_disable,
.dump_register = d71_merger_dump,
};
static int d71_merger_init(struct d71_dev *d71,
struct block_header *blk, u32 __iomem *reg)
{
struct komeda_component *c;
struct komeda_merger *merger;
u32 pipe_id, comp_id;
get_resources_id(blk->block_info, &pipe_id, &comp_id);
c = komeda_component_add(&d71->pipes[pipe_id]->base, sizeof(*merger),
comp_id,
BLOCK_INFO_INPUT_ID(blk->block_info),
&d71_merger_funcs,
MG_NUM_INPUTS_IDS, get_valid_inputs(blk),
MG_NUM_OUTPUTS_IDS, reg,
"CU%d_MERGER", pipe_id);
if (IS_ERR(c)) {
DRM_ERROR("Failed to initialize merger.\n");
return PTR_ERR(c);
}
merger = to_merger(c);
set_range(&merger->hsize_merged, 4,
__get_blk_line_size(d71, reg, 4032));
set_range(&merger->vsize_merged, 4, 4096);
return 0;
}
static void d71_improc_update(struct komeda_component *c,
struct komeda_component_state *state)
{
struct drm_crtc_state *crtc_st = state->crtc->state;
struct komeda_improc_state *st = to_improc_st(state);
struct d71_pipeline *pipe = to_d71_pipeline(c->pipeline);
u32 __iomem *reg = c->reg;
u32 index, mask = 0, ctrl = 0;
for_each_changed_input(state, index)
malidp_write32(reg, BLK_INPUT_ID0 + index * 4,
to_d71_input_id(state, index));
malidp_write32(reg, BLK_SIZE, HV_SIZE(st->hsize, st->vsize));
malidp_write32(reg, IPS_DEPTH, st->color_depth);
if (crtc_st->color_mgmt_changed) {
mask |= IPS_CTRL_FT | IPS_CTRL_RGB;
if (crtc_st->gamma_lut) {
malidp_write_group(pipe->dou_ft_coeff_addr, FT_COEFF0,
KOMEDA_N_GAMMA_COEFFS,
st->fgamma_coeffs);
ctrl |= IPS_CTRL_FT; /* enable gamma */
}
if (crtc_st->ctm) {
malidp_write_group(reg, IPS_RGB_RGB_COEFF0,
KOMEDA_N_CTM_COEFFS,
st->ctm_coeffs);
ctrl |= IPS_CTRL_RGB; /* enable gamut */
}
}
mask |= IPS_CTRL_YUV | IPS_CTRL_CHD422 | IPS_CTRL_CHD420;
/* config color format */
if (st->color_format == DRM_COLOR_FORMAT_YCBCR420)
ctrl |= IPS_CTRL_YUV | IPS_CTRL_CHD422 | IPS_CTRL_CHD420;
else if (st->color_format == DRM_COLOR_FORMAT_YCBCR422)
ctrl |= IPS_CTRL_YUV | IPS_CTRL_CHD422;
else if (st->color_format == DRM_COLOR_FORMAT_YCBCR444)
ctrl |= IPS_CTRL_YUV;
malidp_write32_mask(reg, BLK_CONTROL, mask, ctrl);
}
static void d71_improc_dump(struct komeda_component *c, struct seq_file *sf)
{
u32 v[12], i;
dump_block_header(sf, c->reg);
get_values_from_reg(c->reg, 0x80, 2, v);
seq_printf(sf, "IPS_INPUT_ID0:\t\t0x%X\n", v[0]);
seq_printf(sf, "IPS_INPUT_ID1:\t\t0x%X\n", v[1]);
get_values_from_reg(c->reg, 0xC0, 1, v);
seq_printf(sf, "IPS_INFO:\t\t0x%X\n", v[0]);
get_values_from_reg(c->reg, 0xD0, 3, v);
seq_printf(sf, "IPS_CONTROL:\t\t0x%X\n", v[0]);
seq_printf(sf, "IPS_SIZE:\t\t0x%X\n", v[1]);
seq_printf(sf, "IPS_DEPTH:\t\t0x%X\n", v[2]);
get_values_from_reg(c->reg, 0x130, 12, v);
for (i = 0; i < 12; i++)
seq_printf(sf, "IPS_RGB_RGB_COEFF%u:\t0x%X\n", i, v[i]);
get_values_from_reg(c->reg, 0x170, 12, v);
for (i = 0; i < 12; i++)
seq_printf(sf, "IPS_RGB_YUV_COEFF%u:\t0x%X\n", i, v[i]);
}
static const struct komeda_component_funcs d71_improc_funcs = {
.update = d71_improc_update,
.disable = d71_component_disable,
.dump_register = d71_improc_dump,
};
static int d71_improc_init(struct d71_dev *d71,
struct block_header *blk, u32 __iomem *reg)
{
struct komeda_component *c;
struct komeda_improc *improc;
u32 pipe_id, comp_id, value;
get_resources_id(blk->block_info, &pipe_id, &comp_id);
c = komeda_component_add(&d71->pipes[pipe_id]->base, sizeof(*improc),
comp_id,
BLOCK_INFO_INPUT_ID(blk->block_info),
&d71_improc_funcs, IPS_NUM_INPUT_IDS,
get_valid_inputs(blk),
IPS_NUM_OUTPUT_IDS, reg, "DOU%d_IPS", pipe_id);
if (IS_ERR(c)) {
DRM_ERROR("Failed to add improc component\n");
return PTR_ERR(c);
}
improc = to_improc(c);
improc->supported_color_depths = BIT(8) | BIT(10);
improc->supported_color_formats = DRM_COLOR_FORMAT_RGB444 |
DRM_COLOR_FORMAT_YCBCR444 |
DRM_COLOR_FORMAT_YCBCR422;
value = malidp_read32(reg, BLK_INFO);
if (value & IPS_INFO_CHD420)
improc->supported_color_formats |= DRM_COLOR_FORMAT_YCBCR420;
improc->supports_csc = true;
improc->supports_gamma = true;
return 0;
}
static void d71_timing_ctrlr_disable(struct komeda_component *c)
{
malidp_write32_mask(c->reg, BLK_CONTROL, BS_CTRL_EN, 0);
}
static void d71_timing_ctrlr_update(struct komeda_component *c,
struct komeda_component_state *state)
{
struct drm_crtc_state *crtc_st = state->crtc->state;
struct drm_display_mode *mode = &crtc_st->adjusted_mode;
u32 __iomem *reg = c->reg;
u32 hactive, hfront_porch, hback_porch, hsync_len;
u32 vactive, vfront_porch, vback_porch, vsync_len;
u32 value;
hactive = mode->crtc_hdisplay;
hfront_porch = mode->crtc_hsync_start - mode->crtc_hdisplay;
hsync_len = mode->crtc_hsync_end - mode->crtc_hsync_start;
hback_porch = mode->crtc_htotal - mode->crtc_hsync_end;
vactive = mode->crtc_vdisplay;
vfront_porch = mode->crtc_vsync_start - mode->crtc_vdisplay;
vsync_len = mode->crtc_vsync_end - mode->crtc_vsync_start;
vback_porch = mode->crtc_vtotal - mode->crtc_vsync_end;
malidp_write32(reg, BS_ACTIVESIZE, HV_SIZE(hactive, vactive));
malidp_write32(reg, BS_HINTERVALS, BS_H_INTVALS(hfront_porch,
hback_porch));
malidp_write32(reg, BS_VINTERVALS, BS_V_INTVALS(vfront_porch,
vback_porch));
value = BS_SYNC_VSW(vsync_len) | BS_SYNC_HSW(hsync_len);
value |= mode->flags & DRM_MODE_FLAG_PVSYNC ? BS_SYNC_VSP : 0;
value |= mode->flags & DRM_MODE_FLAG_PHSYNC ? BS_SYNC_HSP : 0;
malidp_write32(reg, BS_SYNC, value);
malidp_write32(reg, BS_PROG_LINE, D71_DEFAULT_PREPRETCH_LINE - 1);
malidp_write32(reg, BS_PREFETCH_LINE, D71_DEFAULT_PREPRETCH_LINE);
/* configure bs control register */
value = BS_CTRL_EN | BS_CTRL_VM;
if (c->pipeline->dual_link) {
malidp_write32(reg, BS_DRIFT_TO, hfront_porch + 16);
value |= BS_CTRL_DL;
}
malidp_write32(reg, BLK_CONTROL, value);
}
static void d71_timing_ctrlr_dump(struct komeda_component *c,
struct seq_file *sf)
{
u32 v[8], i;
dump_block_header(sf, c->reg);
get_values_from_reg(c->reg, 0xC0, 1, v);
seq_printf(sf, "BS_INFO:\t\t0x%X\n", v[0]);
get_values_from_reg(c->reg, 0xD0, 8, v);
seq_printf(sf, "BS_CONTROL:\t\t0x%X\n", v[0]);
seq_printf(sf, "BS_PROG_LINE:\t\t0x%X\n", v[1]);
seq_printf(sf, "BS_PREFETCH_LINE:\t0x%X\n", v[2]);
seq_printf(sf, "BS_BG_COLOR:\t\t0x%X\n", v[3]);
seq_printf(sf, "BS_ACTIVESIZE:\t\t0x%X\n", v[4]);
seq_printf(sf, "BS_HINTERVALS:\t\t0x%X\n", v[5]);
seq_printf(sf, "BS_VINTERVALS:\t\t0x%X\n", v[6]);
seq_printf(sf, "BS_SYNC:\t\t0x%X\n", v[7]);
get_values_from_reg(c->reg, 0x100, 3, v);
seq_printf(sf, "BS_DRIFT_TO:\t\t0x%X\n", v[0]);
seq_printf(sf, "BS_FRAME_TO:\t\t0x%X\n", v[1]);
seq_printf(sf, "BS_TE_TO:\t\t0x%X\n", v[2]);
get_values_from_reg(c->reg, 0x110, 3, v);
for (i = 0; i < 3; i++)
seq_printf(sf, "BS_T%u_INTERVAL:\t\t0x%X\n", i, v[i]);
get_values_from_reg(c->reg, 0x120, 5, v);
for (i = 0; i < 2; i++) {
seq_printf(sf, "BS_CRC%u_LOW:\t\t0x%X\n", i, v[i << 1]);
seq_printf(sf, "BS_CRC%u_HIGH:\t\t0x%X\n", i, v[(i << 1) + 1]);
}
seq_printf(sf, "BS_USER:\t\t0x%X\n", v[4]);
}
static const struct komeda_component_funcs d71_timing_ctrlr_funcs = {
.update = d71_timing_ctrlr_update,
.disable = d71_timing_ctrlr_disable,
.dump_register = d71_timing_ctrlr_dump,
};
static int d71_timing_ctrlr_init(struct d71_dev *d71,
struct block_header *blk, u32 __iomem *reg)
{
struct komeda_component *c;
struct komeda_timing_ctrlr *ctrlr;
u32 pipe_id, comp_id;
get_resources_id(blk->block_info, &pipe_id, &comp_id);
c = komeda_component_add(&d71->pipes[pipe_id]->base, sizeof(*ctrlr),
KOMEDA_COMPONENT_TIMING_CTRLR,
BLOCK_INFO_INPUT_ID(blk->block_info),
&d71_timing_ctrlr_funcs,
1, BIT(KOMEDA_COMPONENT_IPS0 + pipe_id),
BS_NUM_OUTPUT_IDS, reg, "DOU%d_BS", pipe_id);
if (IS_ERR(c)) {
DRM_ERROR("Failed to add display_ctrl component\n");
return PTR_ERR(c);
}
ctrlr = to_ctrlr(c);
ctrlr->supports_dual_link = d71->supports_dual_link;
return 0;
}
int d71_probe_block(struct d71_dev *d71,
struct block_header *blk, u32 __iomem *reg)
{
struct d71_pipeline *pipe;
int blk_id = BLOCK_INFO_BLK_ID(blk->block_info);
int err = 0;
switch (BLOCK_INFO_BLK_TYPE(blk->block_info)) {
case D71_BLK_TYPE_GCU:
break;
case D71_BLK_TYPE_LPU:
pipe = d71->pipes[blk_id];
pipe->lpu_addr = reg;
break;
case D71_BLK_TYPE_LPU_LAYER:
err = d71_layer_init(d71, blk, reg);
break;
case D71_BLK_TYPE_LPU_WB_LAYER:
err = d71_wb_layer_init(d71, blk, reg);
break;
case D71_BLK_TYPE_CU:
pipe = d71->pipes[blk_id];
pipe->cu_addr = reg;
err = d71_compiz_init(d71, blk, reg);
break;
case D71_BLK_TYPE_CU_SCALER:
err = d71_scaler_init(d71, blk, reg);
break;
case D71_BLK_TYPE_CU_SPLITTER:
err = d71_splitter_init(d71, blk, reg);
break;
case D71_BLK_TYPE_CU_MERGER:
err = d71_merger_init(d71, blk, reg);
break;
case D71_BLK_TYPE_DOU:
pipe = d71->pipes[blk_id];
pipe->dou_addr = reg;
break;
case D71_BLK_TYPE_DOU_IPS:
err = d71_improc_init(d71, blk, reg);
break;
case D71_BLK_TYPE_DOU_FT_COEFF:
pipe = d71->pipes[blk_id];
pipe->dou_ft_coeff_addr = reg;
break;
case D71_BLK_TYPE_DOU_BS:
err = d71_timing_ctrlr_init(d71, blk, reg);
break;
case D71_BLK_TYPE_GLB_LT_COEFF:
break;
case D71_BLK_TYPE_GLB_SCL_COEFF:
d71->glb_scl_coeff_addr[blk_id] = reg;
break;
default:
DRM_ERROR("Unknown block (block_info: 0x%x) is found\n",
blk->block_info);
err = -EINVAL;
break;
}
return err;
}
static void d71_gcu_dump(struct d71_dev *d71, struct seq_file *sf)
{
u32 v[5];
seq_puts(sf, "\n------ GCU ------\n");
get_values_from_reg(d71->gcu_addr, 0, 3, v);
seq_printf(sf, "GLB_ARCH_ID:\t\t0x%X\n", v[0]);
seq_printf(sf, "GLB_CORE_ID:\t\t0x%X\n", v[1]);
seq_printf(sf, "GLB_CORE_INFO:\t\t0x%X\n", v[2]);
get_values_from_reg(d71->gcu_addr, 0x10, 1, v);
seq_printf(sf, "GLB_IRQ_STATUS:\t\t0x%X\n", v[0]);
get_values_from_reg(d71->gcu_addr, 0xA0, 5, v);
seq_printf(sf, "GCU_IRQ_RAW_STATUS:\t0x%X\n", v[0]);
seq_printf(sf, "GCU_IRQ_CLEAR:\t\t0x%X\n", v[1]);
seq_printf(sf, "GCU_IRQ_MASK:\t\t0x%X\n", v[2]);
seq_printf(sf, "GCU_IRQ_STATUS:\t\t0x%X\n", v[3]);
seq_printf(sf, "GCU_STATUS:\t\t0x%X\n", v[4]);
get_values_from_reg(d71->gcu_addr, 0xD0, 3, v);
seq_printf(sf, "GCU_CONTROL:\t\t0x%X\n", v[0]);
seq_printf(sf, "GCU_CONFIG_VALID0:\t0x%X\n", v[1]);
seq_printf(sf, "GCU_CONFIG_VALID1:\t0x%X\n", v[2]);
}
static void d71_lpu_dump(struct d71_pipeline *pipe, struct seq_file *sf)
{
u32 v[6];
seq_printf(sf, "\n------ LPU%d ------\n", pipe->base.id);
dump_block_header(sf, pipe->lpu_addr);
get_values_from_reg(pipe->lpu_addr, 0xA0, 6, v);
seq_printf(sf, "LPU_IRQ_RAW_STATUS:\t0x%X\n", v[0]);
seq_printf(sf, "LPU_IRQ_CLEAR:\t\t0x%X\n", v[1]);
seq_printf(sf, "LPU_IRQ_MASK:\t\t0x%X\n", v[2]);
seq_printf(sf, "LPU_IRQ_STATUS:\t\t0x%X\n", v[3]);
seq_printf(sf, "LPU_STATUS:\t\t0x%X\n", v[4]);
seq_printf(sf, "LPU_TBU_STATUS:\t\t0x%X\n", v[5]);
get_values_from_reg(pipe->lpu_addr, 0xC0, 1, v);
seq_printf(sf, "LPU_INFO:\t\t0x%X\n", v[0]);
get_values_from_reg(pipe->lpu_addr, 0xD0, 3, v);
seq_printf(sf, "LPU_RAXI_CONTROL:\t0x%X\n", v[0]);
seq_printf(sf, "LPU_WAXI_CONTROL:\t0x%X\n", v[1]);
seq_printf(sf, "LPU_TBU_CONTROL:\t0x%X\n", v[2]);
}
static void d71_dou_dump(struct d71_pipeline *pipe, struct seq_file *sf)
{
u32 v[5];
seq_printf(sf, "\n------ DOU%d ------\n", pipe->base.id);
dump_block_header(sf, pipe->dou_addr);
get_values_from_reg(pipe->dou_addr, 0xA0, 5, v);
seq_printf(sf, "DOU_IRQ_RAW_STATUS:\t0x%X\n", v[0]);
seq_printf(sf, "DOU_IRQ_CLEAR:\t\t0x%X\n", v[1]);
seq_printf(sf, "DOU_IRQ_MASK:\t\t0x%X\n", v[2]);
seq_printf(sf, "DOU_IRQ_STATUS:\t\t0x%X\n", v[3]);
seq_printf(sf, "DOU_STATUS:\t\t0x%X\n", v[4]);
}
static void d71_pipeline_dump(struct komeda_pipeline *pipe, struct seq_file *sf)
{
struct d71_pipeline *d71_pipe = to_d71_pipeline(pipe);
d71_lpu_dump(d71_pipe, sf);
d71_dou_dump(d71_pipe, sf);
}
const struct komeda_pipeline_funcs d71_pipeline_funcs = {
.downscaling_clk_check = d71_downscaling_clk_check,
.dump_register = d71_pipeline_dump,
};
void d71_dump(struct komeda_dev *mdev, struct seq_file *sf)
{
struct d71_dev *d71 = mdev->chip_data;
d71_gcu_dump(d71, sf);
}
| linux-master | drivers/gpu/drm/arm/display/komeda/d71/d71_component.c |
// SPDX-License-Identifier: GPL-2.0+
/* Copyright (C) 2017-2018 Broadcom */
/**
* DOC: Broadcom V3D MMU
*
* The V3D 3.x hardware (compared to VC4) now includes an MMU. It has
* a single level of page tables for the V3D's 4GB address space to
* map to AXI bus addresses, thus it could need up to 4MB of
* physically contiguous memory to store the PTEs.
*
* Because the 4MB of contiguous memory for page tables is precious,
* and switching between them is expensive, we load all BOs into the
* same 4GB address space.
*
* To protect clients from each other, we should use the GMP to
* quickly mask out (at 128kb granularity) what pages are available to
* each client. This is not yet implemented.
*/
#include "v3d_drv.h"
#include "v3d_regs.h"
#define V3D_MMU_PAGE_SHIFT 12
/* Note: All PTEs for the 1MB superpage must be filled with the
* superpage bit set.
*/
#define V3D_PTE_SUPERPAGE BIT(31)
#define V3D_PTE_WRITEABLE BIT(29)
#define V3D_PTE_VALID BIT(28)
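/* Illustrative encoding (not used by the driver): a valid, writable 4KB
 * page at bus address 0x40080000 would be stored as
 * V3D_PTE_VALID | V3D_PTE_WRITEABLE | (0x40080000 >> V3D_MMU_PAGE_SHIFT),
 * i.e. the page frame number in the low bits plus the flag bits above.
 */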
static int v3d_mmu_flush_all(struct v3d_dev *v3d)
{
int ret;
/* Make sure that another flush isn't already running when we
* start this one.
*/
ret = wait_for(!(V3D_READ(V3D_MMU_CTL) &
V3D_MMU_CTL_TLB_CLEARING), 100);
if (ret)
dev_err(v3d->drm.dev, "TLB clear wait idle pre-wait failed\n");
V3D_WRITE(V3D_MMU_CTL, V3D_READ(V3D_MMU_CTL) |
V3D_MMU_CTL_TLB_CLEAR);
V3D_WRITE(V3D_MMUC_CONTROL,
V3D_MMUC_CONTROL_FLUSH |
V3D_MMUC_CONTROL_ENABLE);
ret = wait_for(!(V3D_READ(V3D_MMU_CTL) &
V3D_MMU_CTL_TLB_CLEARING), 100);
if (ret) {
dev_err(v3d->drm.dev, "TLB clear wait idle failed\n");
return ret;
}
ret = wait_for(!(V3D_READ(V3D_MMUC_CONTROL) &
V3D_MMUC_CONTROL_FLUSHING), 100);
if (ret)
dev_err(v3d->drm.dev, "MMUC flush wait idle failed\n");
return ret;
}
int v3d_mmu_set_page_table(struct v3d_dev *v3d)
{
V3D_WRITE(V3D_MMU_PT_PA_BASE, v3d->pt_paddr >> V3D_MMU_PAGE_SHIFT);
V3D_WRITE(V3D_MMU_CTL,
V3D_MMU_CTL_ENABLE |
V3D_MMU_CTL_PT_INVALID_ENABLE |
V3D_MMU_CTL_PT_INVALID_ABORT |
V3D_MMU_CTL_PT_INVALID_INT |
V3D_MMU_CTL_WRITE_VIOLATION_ABORT |
V3D_MMU_CTL_WRITE_VIOLATION_INT |
V3D_MMU_CTL_CAP_EXCEEDED_ABORT |
V3D_MMU_CTL_CAP_EXCEEDED_INT);
V3D_WRITE(V3D_MMU_ILLEGAL_ADDR,
(v3d->mmu_scratch_paddr >> V3D_MMU_PAGE_SHIFT) |
V3D_MMU_ILLEGAL_ADDR_ENABLE);
V3D_WRITE(V3D_MMUC_CONTROL, V3D_MMUC_CONTROL_ENABLE);
return v3d_mmu_flush_all(v3d);
}
void v3d_mmu_insert_ptes(struct v3d_bo *bo)
{
struct drm_gem_shmem_object *shmem_obj = &bo->base;
struct v3d_dev *v3d = to_v3d_dev(shmem_obj->base.dev);
u32 page = bo->node.start;
u32 page_prot = V3D_PTE_WRITEABLE | V3D_PTE_VALID;
struct sg_dma_page_iter dma_iter;
for_each_sgtable_dma_page(shmem_obj->sgt, &dma_iter, 0) {
dma_addr_t dma_addr = sg_page_iter_dma_address(&dma_iter);
u32 page_address = dma_addr >> V3D_MMU_PAGE_SHIFT;
u32 pte = page_prot | page_address;
u32 i;
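/* PAGE_SIZE >> V3D_MMU_PAGE_SHIFT is the number of 4KB MMU pages
 * covered by one CPU page (more than one on kernels built with
 * 16KB or 64KB pages).
 */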
BUG_ON(page_address + (PAGE_SIZE >> V3D_MMU_PAGE_SHIFT) >=
BIT(24));
for (i = 0; i < PAGE_SIZE >> V3D_MMU_PAGE_SHIFT; i++)
v3d->pt[page++] = pte + i;
}
WARN_ON_ONCE(page - bo->node.start !=
shmem_obj->base.size >> V3D_MMU_PAGE_SHIFT);
if (v3d_mmu_flush_all(v3d))
dev_err(v3d->drm.dev, "MMU flush timeout\n");
}
void v3d_mmu_remove_ptes(struct v3d_bo *bo)
{
struct v3d_dev *v3d = to_v3d_dev(bo->base.base.dev);
u32 npages = bo->base.base.size >> V3D_MMU_PAGE_SHIFT;
u32 page;
for (page = bo->node.start; page < bo->node.start + npages; page++)
v3d->pt[page] = 0;
if (v3d_mmu_flush_all(v3d))
dev_err(v3d->drm.dev, "MMU flush timeout\n");
}
| linux-master | drivers/gpu/drm/v3d/v3d_mmu.c |
// SPDX-License-Identifier: GPL-2.0+
/* Copyright (C) 2018 Broadcom */
/**
* DOC: Broadcom V3D scheduling
*
* The shared DRM GPU scheduler is used to coordinate submitting jobs
* to the hardware. Each DRM fd (roughly a client process) gets its
* own scheduler entity, which will process jobs in order. The GPU
* scheduler will round-robin between clients to submit the next job.
*
* For simplicity, and in order to keep latency low for interactive
* jobs when bulk background jobs are queued up, we submit a new job
* to the HW only when it has completed the last one, instead of
* filling up the CT[01]Q FIFOs with jobs. Similarly, we use
* drm_sched_job_add_dependency() to manage the dependency between bin and
* render, instead of having the clients submit jobs using the HW's
* semaphores to interlock between them.
*/
#include <linux/kthread.h>
#include "v3d_drv.h"
#include "v3d_regs.h"
#include "v3d_trace.h"
static struct v3d_job *
to_v3d_job(struct drm_sched_job *sched_job)
{
return container_of(sched_job, struct v3d_job, base);
}
static struct v3d_bin_job *
to_bin_job(struct drm_sched_job *sched_job)
{
return container_of(sched_job, struct v3d_bin_job, base.base);
}
static struct v3d_render_job *
to_render_job(struct drm_sched_job *sched_job)
{
return container_of(sched_job, struct v3d_render_job, base.base);
}
static struct v3d_tfu_job *
to_tfu_job(struct drm_sched_job *sched_job)
{
return container_of(sched_job, struct v3d_tfu_job, base.base);
}
static struct v3d_csd_job *
to_csd_job(struct drm_sched_job *sched_job)
{
return container_of(sched_job, struct v3d_csd_job, base.base);
}
static void
v3d_sched_job_free(struct drm_sched_job *sched_job)
{
struct v3d_job *job = to_v3d_job(sched_job);
v3d_job_cleanup(job);
}
static void
v3d_switch_perfmon(struct v3d_dev *v3d, struct v3d_job *job)
{
if (job->perfmon != v3d->active_perfmon)
v3d_perfmon_stop(v3d, v3d->active_perfmon, true);
if (job->perfmon && v3d->active_perfmon != job->perfmon)
v3d_perfmon_start(v3d, job->perfmon);
}
static struct dma_fence *v3d_bin_job_run(struct drm_sched_job *sched_job)
{
struct v3d_bin_job *job = to_bin_job(sched_job);
struct v3d_dev *v3d = job->base.v3d;
struct drm_device *dev = &v3d->drm;
struct dma_fence *fence;
unsigned long irqflags;
if (unlikely(job->base.base.s_fence->finished.error))
return NULL;
/* Lock required around bin_job update vs
* v3d_overflow_mem_work().
*/
spin_lock_irqsave(&v3d->job_lock, irqflags);
v3d->bin_job = job;
/* Clear out the overflow allocation, so we don't
* reuse the overflow attached to a previous job.
*/
V3D_CORE_WRITE(0, V3D_PTB_BPOS, 0);
spin_unlock_irqrestore(&v3d->job_lock, irqflags);
v3d_invalidate_caches(v3d);
fence = v3d_fence_create(v3d, V3D_BIN);
if (IS_ERR(fence))
return NULL;
if (job->base.irq_fence)
dma_fence_put(job->base.irq_fence);
job->base.irq_fence = dma_fence_get(fence);
trace_v3d_submit_cl(dev, false, to_v3d_fence(fence)->seqno,
job->start, job->end);
v3d_switch_perfmon(v3d, &job->base);
/* Set the current and end address of the control list.
* Writing the end register is what starts the job.
*/
if (job->qma) {
V3D_CORE_WRITE(0, V3D_CLE_CT0QMA, job->qma);
V3D_CORE_WRITE(0, V3D_CLE_CT0QMS, job->qms);
}
if (job->qts) {
V3D_CORE_WRITE(0, V3D_CLE_CT0QTS,
V3D_CLE_CT0QTS_ENABLE |
job->qts);
}
V3D_CORE_WRITE(0, V3D_CLE_CT0QBA, job->start);
V3D_CORE_WRITE(0, V3D_CLE_CT0QEA, job->end);
return fence;
}
static struct dma_fence *v3d_render_job_run(struct drm_sched_job *sched_job)
{
struct v3d_render_job *job = to_render_job(sched_job);
struct v3d_dev *v3d = job->base.v3d;
struct drm_device *dev = &v3d->drm;
struct dma_fence *fence;
if (unlikely(job->base.base.s_fence->finished.error))
return NULL;
v3d->render_job = job;
/* Can we avoid this flush? We need to be careful of
* scheduling, though -- imagine job0 rendering to texture and
* job1 reading, and them being executed as bin0, bin1,
* render0, render1, so that render1's flush at bin time
* wasn't enough.
*/
v3d_invalidate_caches(v3d);
fence = v3d_fence_create(v3d, V3D_RENDER);
if (IS_ERR(fence))
return NULL;
if (job->base.irq_fence)
dma_fence_put(job->base.irq_fence);
job->base.irq_fence = dma_fence_get(fence);
trace_v3d_submit_cl(dev, true, to_v3d_fence(fence)->seqno,
job->start, job->end);
v3d_switch_perfmon(v3d, &job->base);
/* XXX: Set the QCFG */
/* Set the current and end address of the control list.
* Writing the end register is what starts the job.
*/
V3D_CORE_WRITE(0, V3D_CLE_CT1QBA, job->start);
V3D_CORE_WRITE(0, V3D_CLE_CT1QEA, job->end);
return fence;
}
static struct dma_fence *
v3d_tfu_job_run(struct drm_sched_job *sched_job)
{
struct v3d_tfu_job *job = to_tfu_job(sched_job);
struct v3d_dev *v3d = job->base.v3d;
struct drm_device *dev = &v3d->drm;
struct dma_fence *fence;
fence = v3d_fence_create(v3d, V3D_TFU);
if (IS_ERR(fence))
return NULL;
v3d->tfu_job = job;
if (job->base.irq_fence)
dma_fence_put(job->base.irq_fence);
job->base.irq_fence = dma_fence_get(fence);
trace_v3d_submit_tfu(dev, to_v3d_fence(fence)->seqno);
V3D_WRITE(V3D_TFU_IIA, job->args.iia);
V3D_WRITE(V3D_TFU_IIS, job->args.iis);
V3D_WRITE(V3D_TFU_ICA, job->args.ica);
V3D_WRITE(V3D_TFU_IUA, job->args.iua);
V3D_WRITE(V3D_TFU_IOA, job->args.ioa);
V3D_WRITE(V3D_TFU_IOS, job->args.ios);
V3D_WRITE(V3D_TFU_COEF0, job->args.coef[0]);
if (job->args.coef[0] & V3D_TFU_COEF0_USECOEF) {
V3D_WRITE(V3D_TFU_COEF1, job->args.coef[1]);
V3D_WRITE(V3D_TFU_COEF2, job->args.coef[2]);
V3D_WRITE(V3D_TFU_COEF3, job->args.coef[3]);
}
/* ICFG kicks off the job. */
V3D_WRITE(V3D_TFU_ICFG, job->args.icfg | V3D_TFU_ICFG_IOC);
return fence;
}
static struct dma_fence *
v3d_csd_job_run(struct drm_sched_job *sched_job)
{
struct v3d_csd_job *job = to_csd_job(sched_job);
struct v3d_dev *v3d = job->base.v3d;
struct drm_device *dev = &v3d->drm;
struct dma_fence *fence;
int i;
v3d->csd_job = job;
v3d_invalidate_caches(v3d);
fence = v3d_fence_create(v3d, V3D_CSD);
if (IS_ERR(fence))
return NULL;
if (job->base.irq_fence)
dma_fence_put(job->base.irq_fence);
job->base.irq_fence = dma_fence_get(fence);
trace_v3d_submit_csd(dev, to_v3d_fence(fence)->seqno);
v3d_switch_perfmon(v3d, &job->base);
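/* CFG1..CFG6 are written first; CFG0 is deliberately written last
 * below, since writing it is what kicks off the job.
 */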
for (i = 1; i <= 6; i++)
V3D_CORE_WRITE(0, V3D_CSD_QUEUED_CFG0 + 4 * i, job->args.cfg[i]);
/* CFG0 write kicks off the job. */
V3D_CORE_WRITE(0, V3D_CSD_QUEUED_CFG0, job->args.cfg[0]);
return fence;
}
static struct dma_fence *
v3d_cache_clean_job_run(struct drm_sched_job *sched_job)
{
struct v3d_job *job = to_v3d_job(sched_job);
struct v3d_dev *v3d = job->v3d;
v3d_clean_caches(v3d);
return NULL;
}
static enum drm_gpu_sched_stat
v3d_gpu_reset_for_timeout(struct v3d_dev *v3d, struct drm_sched_job *sched_job)
{
enum v3d_queue q;
mutex_lock(&v3d->reset_lock);
/* block scheduler */
for (q = 0; q < V3D_MAX_QUEUES; q++)
drm_sched_stop(&v3d->queue[q].sched, sched_job);
if (sched_job)
drm_sched_increase_karma(sched_job);
/* get the GPU back into the init state */
v3d_reset(v3d);
for (q = 0; q < V3D_MAX_QUEUES; q++)
drm_sched_resubmit_jobs(&v3d->queue[q].sched);
/* Unblock schedulers and restart their jobs. */
for (q = 0; q < V3D_MAX_QUEUES; q++) {
drm_sched_start(&v3d->queue[q].sched, true);
}
mutex_unlock(&v3d->reset_lock);
return DRM_GPU_SCHED_STAT_NOMINAL;
}
/* If the current address or return address has changed, then the GPU
* has probably made progress and we should delay the reset. This
* could fail if the GPU got in an infinite loop in the CL, but that
* is pretty unlikely outside of an i-g-t testcase.
*/
static enum drm_gpu_sched_stat
v3d_cl_job_timedout(struct drm_sched_job *sched_job, enum v3d_queue q,
u32 *timedout_ctca, u32 *timedout_ctra)
{
struct v3d_job *job = to_v3d_job(sched_job);
struct v3d_dev *v3d = job->v3d;
u32 ctca = V3D_CORE_READ(0, V3D_CLE_CTNCA(q));
u32 ctra = V3D_CORE_READ(0, V3D_CLE_CTNRA(q));
if (*timedout_ctca != ctca || *timedout_ctra != ctra) {
*timedout_ctca = ctca;
*timedout_ctra = ctra;
return DRM_GPU_SCHED_STAT_NOMINAL;
}
return v3d_gpu_reset_for_timeout(v3d, sched_job);
}
static enum drm_gpu_sched_stat
v3d_bin_job_timedout(struct drm_sched_job *sched_job)
{
struct v3d_bin_job *job = to_bin_job(sched_job);
return v3d_cl_job_timedout(sched_job, V3D_BIN,
&job->timedout_ctca, &job->timedout_ctra);
}
static enum drm_gpu_sched_stat
v3d_render_job_timedout(struct drm_sched_job *sched_job)
{
struct v3d_render_job *job = to_render_job(sched_job);
return v3d_cl_job_timedout(sched_job, V3D_RENDER,
&job->timedout_ctca, &job->timedout_ctra);
}
static enum drm_gpu_sched_stat
v3d_generic_job_timedout(struct drm_sched_job *sched_job)
{
struct v3d_job *job = to_v3d_job(sched_job);
return v3d_gpu_reset_for_timeout(job->v3d, sched_job);
}
static enum drm_gpu_sched_stat
v3d_csd_job_timedout(struct drm_sched_job *sched_job)
{
struct v3d_csd_job *job = to_csd_job(sched_job);
struct v3d_dev *v3d = job->base.v3d;
u32 batches = V3D_CORE_READ(0, V3D_CSD_CURRENT_CFG4);
/* If we've made progress, skip reset and let the timer get
* rearmed.
*/
if (job->timedout_batches != batches) {
job->timedout_batches = batches;
return DRM_GPU_SCHED_STAT_NOMINAL;
}
return v3d_gpu_reset_for_timeout(v3d, sched_job);
}
static const struct drm_sched_backend_ops v3d_bin_sched_ops = {
.run_job = v3d_bin_job_run,
.timedout_job = v3d_bin_job_timedout,
.free_job = v3d_sched_job_free,
};
static const struct drm_sched_backend_ops v3d_render_sched_ops = {
.run_job = v3d_render_job_run,
.timedout_job = v3d_render_job_timedout,
.free_job = v3d_sched_job_free,
};
static const struct drm_sched_backend_ops v3d_tfu_sched_ops = {
.run_job = v3d_tfu_job_run,
.timedout_job = v3d_generic_job_timedout,
.free_job = v3d_sched_job_free,
};
static const struct drm_sched_backend_ops v3d_csd_sched_ops = {
.run_job = v3d_csd_job_run,
.timedout_job = v3d_csd_job_timedout,
.free_job = v3d_sched_job_free
};
static const struct drm_sched_backend_ops v3d_cache_clean_sched_ops = {
.run_job = v3d_cache_clean_job_run,
.timedout_job = v3d_generic_job_timedout,
.free_job = v3d_sched_job_free
};
int
v3d_sched_init(struct v3d_dev *v3d)
{
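/* Per the DOC comment at the top of this file, only one job is queued
 * to the HW at a time, which keeps latency low for interactive jobs.
 */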
int hw_jobs_limit = 1;
int job_hang_limit = 0;
int hang_limit_ms = 500;
int ret;
ret = drm_sched_init(&v3d->queue[V3D_BIN].sched,
&v3d_bin_sched_ops,
hw_jobs_limit, job_hang_limit,
msecs_to_jiffies(hang_limit_ms), NULL,
NULL, "v3d_bin", v3d->drm.dev);
if (ret)
return ret;
ret = drm_sched_init(&v3d->queue[V3D_RENDER].sched,
&v3d_render_sched_ops,
hw_jobs_limit, job_hang_limit,
msecs_to_jiffies(hang_limit_ms), NULL,
NULL, "v3d_render", v3d->drm.dev);
if (ret)
goto fail;
ret = drm_sched_init(&v3d->queue[V3D_TFU].sched,
&v3d_tfu_sched_ops,
hw_jobs_limit, job_hang_limit,
msecs_to_jiffies(hang_limit_ms), NULL,
NULL, "v3d_tfu", v3d->drm.dev);
if (ret)
goto fail;
if (v3d_has_csd(v3d)) {
ret = drm_sched_init(&v3d->queue[V3D_CSD].sched,
&v3d_csd_sched_ops,
hw_jobs_limit, job_hang_limit,
msecs_to_jiffies(hang_limit_ms), NULL,
NULL, "v3d_csd", v3d->drm.dev);
if (ret)
goto fail;
ret = drm_sched_init(&v3d->queue[V3D_CACHE_CLEAN].sched,
&v3d_cache_clean_sched_ops,
hw_jobs_limit, job_hang_limit,
msecs_to_jiffies(hang_limit_ms), NULL,
NULL, "v3d_cache_clean", v3d->drm.dev);
if (ret)
goto fail;
}
return 0;
fail:
v3d_sched_fini(v3d);
return ret;
}
void
v3d_sched_fini(struct v3d_dev *v3d)
{
enum v3d_queue q;
for (q = 0; q < V3D_MAX_QUEUES; q++) {
if (v3d->queue[q].sched.ready)
drm_sched_fini(&v3d->queue[q].sched);
}
}
| linux-master | drivers/gpu/drm/v3d/v3d_sched.c |
// SPDX-License-Identifier: GPL-2.0+
/* Copyright (C) 2014-2018 Broadcom */
/**
* DOC: Broadcom V3D Graphics Driver
*
* This driver supports the Broadcom V3D 3.3 and 4.1 OpenGL ES GPUs.
* For V3D 2.x support, see the VC4 driver.
*
* The V3D GPU includes a tiled render (composed of a bin and render
* pipelines), the TFU (texture formatting unit), and the CSD (compute
* shader dispatch).
*/
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <drm/drm_drv.h>
#include <drm/drm_managed.h>
#include <uapi/drm/v3d_drm.h>
#include "v3d_drv.h"
#include "v3d_regs.h"
#define DRIVER_NAME "v3d"
#define DRIVER_DESC "Broadcom V3D graphics"
#define DRIVER_DATE "20180419"
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
#define DRIVER_PATCHLEVEL 0
static int v3d_get_param_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct v3d_dev *v3d = to_v3d_dev(dev);
struct drm_v3d_get_param *args = data;
static const u32 reg_map[] = {
[DRM_V3D_PARAM_V3D_UIFCFG] = V3D_HUB_UIFCFG,
[DRM_V3D_PARAM_V3D_HUB_IDENT1] = V3D_HUB_IDENT1,
[DRM_V3D_PARAM_V3D_HUB_IDENT2] = V3D_HUB_IDENT2,
[DRM_V3D_PARAM_V3D_HUB_IDENT3] = V3D_HUB_IDENT3,
[DRM_V3D_PARAM_V3D_CORE0_IDENT0] = V3D_CTL_IDENT0,
[DRM_V3D_PARAM_V3D_CORE0_IDENT1] = V3D_CTL_IDENT1,
[DRM_V3D_PARAM_V3D_CORE0_IDENT2] = V3D_CTL_IDENT2,
};
if (args->pad != 0)
return -EINVAL;
/* Note that DRM_V3D_PARAM_V3D_CORE0_IDENT0 is 0, so we need
* to explicitly allow it in the "is the register in our
* parameter map" check.
*/
if (args->param < ARRAY_SIZE(reg_map) &&
(reg_map[args->param] ||
args->param == DRM_V3D_PARAM_V3D_CORE0_IDENT0)) {
u32 offset = reg_map[args->param];
if (args->value != 0)
return -EINVAL;
if (args->param >= DRM_V3D_PARAM_V3D_CORE0_IDENT0 &&
args->param <= DRM_V3D_PARAM_V3D_CORE0_IDENT2) {
args->value = V3D_CORE_READ(0, offset);
} else {
args->value = V3D_READ(offset);
}
return 0;
}
switch (args->param) {
case DRM_V3D_PARAM_SUPPORTS_TFU:
args->value = 1;
return 0;
case DRM_V3D_PARAM_SUPPORTS_CSD:
args->value = v3d_has_csd(v3d);
return 0;
case DRM_V3D_PARAM_SUPPORTS_CACHE_FLUSH:
args->value = 1;
return 0;
case DRM_V3D_PARAM_SUPPORTS_PERFMON:
args->value = (v3d->ver >= 40);
return 0;
case DRM_V3D_PARAM_SUPPORTS_MULTISYNC_EXT:
args->value = 1;
return 0;
default:
DRM_DEBUG("Unknown parameter %d\n", args->param);
return -EINVAL;
}
}
static int
v3d_open(struct drm_device *dev, struct drm_file *file)
{
struct v3d_dev *v3d = to_v3d_dev(dev);
struct v3d_file_priv *v3d_priv;
struct drm_gpu_scheduler *sched;
int i;
v3d_priv = kzalloc(sizeof(*v3d_priv), GFP_KERNEL);
if (!v3d_priv)
return -ENOMEM;
v3d_priv->v3d = v3d;
for (i = 0; i < V3D_MAX_QUEUES; i++) {
sched = &v3d->queue[i].sched;
drm_sched_entity_init(&v3d_priv->sched_entity[i],
DRM_SCHED_PRIORITY_NORMAL, &sched,
1, NULL);
}
v3d_perfmon_open_file(v3d_priv);
file->driver_priv = v3d_priv;
return 0;
}
static void
v3d_postclose(struct drm_device *dev, struct drm_file *file)
{
struct v3d_file_priv *v3d_priv = file->driver_priv;
enum v3d_queue q;
for (q = 0; q < V3D_MAX_QUEUES; q++)
drm_sched_entity_destroy(&v3d_priv->sched_entity[q]);
v3d_perfmon_close_file(v3d_priv);
kfree(v3d_priv);
}
DEFINE_DRM_GEM_FOPS(v3d_drm_fops);
/* DRM_AUTH is required on SUBMIT_CL for now, while we don't have GMP
* protection between clients. Note that render nodes would be
* able to submit CLs that could access BOs from clients authenticated
* with the master node. The TFU doesn't use the GMP, so it would
* need to stay DRM_AUTH until we do buffer size/offset validation.
*/
static const struct drm_ioctl_desc v3d_drm_ioctls[] = {
DRM_IOCTL_DEF_DRV(V3D_SUBMIT_CL, v3d_submit_cl_ioctl, DRM_RENDER_ALLOW | DRM_AUTH),
DRM_IOCTL_DEF_DRV(V3D_WAIT_BO, v3d_wait_bo_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(V3D_CREATE_BO, v3d_create_bo_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(V3D_MMAP_BO, v3d_mmap_bo_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(V3D_GET_PARAM, v3d_get_param_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(V3D_GET_BO_OFFSET, v3d_get_bo_offset_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(V3D_SUBMIT_TFU, v3d_submit_tfu_ioctl, DRM_RENDER_ALLOW | DRM_AUTH),
DRM_IOCTL_DEF_DRV(V3D_SUBMIT_CSD, v3d_submit_csd_ioctl, DRM_RENDER_ALLOW | DRM_AUTH),
DRM_IOCTL_DEF_DRV(V3D_PERFMON_CREATE, v3d_perfmon_create_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(V3D_PERFMON_DESTROY, v3d_perfmon_destroy_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(V3D_PERFMON_GET_VALUES, v3d_perfmon_get_values_ioctl, DRM_RENDER_ALLOW),
};
static const struct drm_driver v3d_drm_driver = {
.driver_features = (DRIVER_GEM |
DRIVER_RENDER |
DRIVER_SYNCOBJ),
.open = v3d_open,
.postclose = v3d_postclose,
#if defined(CONFIG_DEBUG_FS)
.debugfs_init = v3d_debugfs_init,
#endif
.gem_create_object = v3d_create_object,
.gem_prime_import_sg_table = v3d_prime_import_sg_table,
.ioctls = v3d_drm_ioctls,
.num_ioctls = ARRAY_SIZE(v3d_drm_ioctls),
.fops = &v3d_drm_fops,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = DRIVER_DATE,
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
.patchlevel = DRIVER_PATCHLEVEL,
};
static const struct of_device_id v3d_of_match[] = {
{ .compatible = "brcm,2711-v3d" },
{ .compatible = "brcm,7268-v3d" },
{ .compatible = "brcm,7278-v3d" },
{},
};
MODULE_DEVICE_TABLE(of, v3d_of_match);
static int
map_regs(struct v3d_dev *v3d, void __iomem **regs, const char *name)
{
*regs = devm_platform_ioremap_resource_byname(v3d_to_pdev(v3d), name);
return PTR_ERR_OR_ZERO(*regs);
}
static int v3d_platform_drm_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct drm_device *drm;
struct v3d_dev *v3d;
int ret;
u32 mmu_debug;
u32 ident1;
u64 mask;
v3d = devm_drm_dev_alloc(dev, &v3d_drm_driver, struct v3d_dev, drm);
if (IS_ERR(v3d))
return PTR_ERR(v3d);
drm = &v3d->drm;
platform_set_drvdata(pdev, drm);
ret = map_regs(v3d, &v3d->hub_regs, "hub");
if (ret)
return ret;
ret = map_regs(v3d, &v3d->core_regs[0], "core0");
if (ret)
return ret;
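/* The DMA mask and the GPU VA width below are derived from the MMU
 * debug register, with the reported width fields treated as offsets
 * from a 30-bit baseline.
 */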
mmu_debug = V3D_READ(V3D_MMU_DEBUG_INFO);
mask = DMA_BIT_MASK(30 + V3D_GET_FIELD(mmu_debug, V3D_MMU_PA_WIDTH));
ret = dma_set_mask_and_coherent(dev, mask);
if (ret)
return ret;
v3d->va_width = 30 + V3D_GET_FIELD(mmu_debug, V3D_MMU_VA_WIDTH);
ident1 = V3D_READ(V3D_HUB_IDENT1);
v3d->ver = (V3D_GET_FIELD(ident1, V3D_HUB_IDENT1_TVER) * 10 +
V3D_GET_FIELD(ident1, V3D_HUB_IDENT1_REV));
v3d->cores = V3D_GET_FIELD(ident1, V3D_HUB_IDENT1_NCORES);
WARN_ON(v3d->cores > 1); /* multicore not yet implemented */
v3d->reset = devm_reset_control_get_exclusive(dev, NULL);
if (IS_ERR(v3d->reset)) {
ret = PTR_ERR(v3d->reset);
if (ret == -EPROBE_DEFER)
return ret;
v3d->reset = NULL;
ret = map_regs(v3d, &v3d->bridge_regs, "bridge");
if (ret) {
dev_err(dev,
"Failed to get reset control or bridge regs\n");
return ret;
}
}
if (v3d->ver < 41) {
ret = map_regs(v3d, &v3d->gca_regs, "gca");
if (ret)
return ret;
}
v3d->mmu_scratch = dma_alloc_wc(dev, 4096, &v3d->mmu_scratch_paddr,
GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
if (!v3d->mmu_scratch) {
dev_err(dev, "Failed to allocate MMU scratch page\n");
return -ENOMEM;
}
ret = v3d_gem_init(drm);
if (ret)
goto dma_free;
ret = v3d_irq_init(v3d);
if (ret)
goto gem_destroy;
ret = drm_dev_register(drm, 0);
if (ret)
goto irq_disable;
return 0;
irq_disable:
v3d_irq_disable(v3d);
gem_destroy:
v3d_gem_destroy(drm);
dma_free:
dma_free_wc(dev, 4096, v3d->mmu_scratch, v3d->mmu_scratch_paddr);
return ret;
}
static void v3d_platform_drm_remove(struct platform_device *pdev)
{
struct drm_device *drm = platform_get_drvdata(pdev);
struct v3d_dev *v3d = to_v3d_dev(drm);
drm_dev_unregister(drm);
v3d_gem_destroy(drm);
dma_free_wc(v3d->drm.dev, 4096, v3d->mmu_scratch,
v3d->mmu_scratch_paddr);
}
static struct platform_driver v3d_platform_driver = {
.probe = v3d_platform_drm_probe,
.remove_new = v3d_platform_drm_remove,
.driver = {
.name = "v3d",
.of_match_table = v3d_of_match,
},
};
module_platform_driver(v3d_platform_driver);
MODULE_ALIAS("platform:v3d-drm");
MODULE_DESCRIPTION("Broadcom V3D DRM Driver");
MODULE_AUTHOR("Eric Anholt <[email protected]>");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/gpu/drm/v3d/v3d_drv.c |
// SPDX-License-Identifier: GPL-2.0+
/* Copyright (C) 2014-2018 Broadcom */
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/sched/signal.h>
#include <linux/uaccess.h>
#include <drm/drm_managed.h>
#include <drm/drm_syncobj.h>
#include <uapi/drm/v3d_drm.h>
#include "v3d_drv.h"
#include "v3d_regs.h"
#include "v3d_trace.h"
static void
v3d_init_core(struct v3d_dev *v3d, int core)
{
/* Set OVRTMUOUT, which means that the texture sampler uniform
* configuration's tmu output type field is used, instead of
* using the hardware default behavior based on the texture
* type. If you want the default behavior, you can still put
* "2" in the indirect texture state's output_type field.
*/
if (v3d->ver < 40)
V3D_CORE_WRITE(core, V3D_CTL_MISCCFG, V3D_MISCCFG_OVRTMUOUT);
/* Whenever we flush the L2T cache, we always want to flush
* the whole thing.
*/
V3D_CORE_WRITE(core, V3D_CTL_L2TFLSTA, 0);
V3D_CORE_WRITE(core, V3D_CTL_L2TFLEND, ~0);
}
/* Sets invariant state for the HW. */
static void
v3d_init_hw_state(struct v3d_dev *v3d)
{
v3d_init_core(v3d, 0);
}
static void
v3d_idle_axi(struct v3d_dev *v3d, int core)
{
V3D_CORE_WRITE(core, V3D_GMP_CFG, V3D_GMP_CFG_STOP_REQ);
if (wait_for((V3D_CORE_READ(core, V3D_GMP_STATUS) &
(V3D_GMP_STATUS_RD_COUNT_MASK |
V3D_GMP_STATUS_WR_COUNT_MASK |
V3D_GMP_STATUS_CFG_BUSY)) == 0, 100)) {
DRM_ERROR("Failed to wait for safe GMP shutdown\n");
}
}
static void
v3d_idle_gca(struct v3d_dev *v3d)
{
if (v3d->ver >= 41)
return;
V3D_GCA_WRITE(V3D_GCA_SAFE_SHUTDOWN, V3D_GCA_SAFE_SHUTDOWN_EN);
if (wait_for((V3D_GCA_READ(V3D_GCA_SAFE_SHUTDOWN_ACK) &
V3D_GCA_SAFE_SHUTDOWN_ACK_ACKED) ==
V3D_GCA_SAFE_SHUTDOWN_ACK_ACKED, 100)) {
DRM_ERROR("Failed to wait for safe GCA shutdown\n");
}
}
static void
v3d_reset_by_bridge(struct v3d_dev *v3d)
{
int version = V3D_BRIDGE_READ(V3D_TOP_GR_BRIDGE_REVISION);
if (V3D_GET_FIELD(version, V3D_TOP_GR_BRIDGE_MAJOR) == 2) {
V3D_BRIDGE_WRITE(V3D_TOP_GR_BRIDGE_SW_INIT_0,
V3D_TOP_GR_BRIDGE_SW_INIT_0_V3D_CLK_108_SW_INIT);
V3D_BRIDGE_WRITE(V3D_TOP_GR_BRIDGE_SW_INIT_0, 0);
/* GFXH-1383: The SW_INIT may cause a stray write to address 0
* of the unit, so reset it to its power-on value here.
*/
V3D_WRITE(V3D_HUB_AXICFG, V3D_HUB_AXICFG_MAX_LEN_MASK);
} else {
WARN_ON_ONCE(V3D_GET_FIELD(version,
V3D_TOP_GR_BRIDGE_MAJOR) != 7);
V3D_BRIDGE_WRITE(V3D_TOP_GR_BRIDGE_SW_INIT_1,
V3D_TOP_GR_BRIDGE_SW_INIT_1_V3D_CLK_108_SW_INIT);
V3D_BRIDGE_WRITE(V3D_TOP_GR_BRIDGE_SW_INIT_1, 0);
}
}
static void
v3d_reset_v3d(struct v3d_dev *v3d)
{
if (v3d->reset)
reset_control_reset(v3d->reset);
else
v3d_reset_by_bridge(v3d);
v3d_init_hw_state(v3d);
}
void
v3d_reset(struct v3d_dev *v3d)
{
struct drm_device *dev = &v3d->drm;
DRM_DEV_ERROR(dev->dev, "Resetting GPU for hang.\n");
DRM_DEV_ERROR(dev->dev, "V3D_ERR_STAT: 0x%08x\n",
V3D_CORE_READ(0, V3D_ERR_STAT));
trace_v3d_reset_begin(dev);
/* XXX: only needed for safe powerdown, not reset. */
if (false)
v3d_idle_axi(v3d, 0);
v3d_idle_gca(v3d);
v3d_reset_v3d(v3d);
v3d_mmu_set_page_table(v3d);
v3d_irq_reset(v3d);
v3d_perfmon_stop(v3d, v3d->active_perfmon, false);
trace_v3d_reset_end(dev);
}
static void
v3d_flush_l3(struct v3d_dev *v3d)
{
if (v3d->ver < 41) {
u32 gca_ctrl = V3D_GCA_READ(V3D_GCA_CACHE_CTRL);
V3D_GCA_WRITE(V3D_GCA_CACHE_CTRL,
gca_ctrl | V3D_GCA_CACHE_CTRL_FLUSH);
if (v3d->ver < 33) {
V3D_GCA_WRITE(V3D_GCA_CACHE_CTRL,
gca_ctrl & ~V3D_GCA_CACHE_CTRL_FLUSH);
}
}
}
/* Invalidates the (read-only) L2C cache. This was the L2 cache for
* uniforms and instructions on V3D 3.2.
*/
static void
v3d_invalidate_l2c(struct v3d_dev *v3d, int core)
{
if (v3d->ver > 32)
return;
V3D_CORE_WRITE(core, V3D_CTL_L2CACTL,
V3D_L2CACTL_L2CCLR |
V3D_L2CACTL_L2CENA);
}
/* Invalidates texture L2 cachelines */
static void
v3d_flush_l2t(struct v3d_dev *v3d, int core)
{
/* While there is a busy bit (V3D_L2TCACTL_L2TFLS), we don't
* need to wait for completion before dispatching the job --
* L2T accesses will be stalled until the flush has completed.
* However, we do need to make sure we don't try to trigger a
* new flush while the L2_CLEAN queue is trying to
* synchronously clean after a job.
*/
mutex_lock(&v3d->cache_clean_lock);
V3D_CORE_WRITE(core, V3D_CTL_L2TCACTL,
V3D_L2TCACTL_L2TFLS |
V3D_SET_FIELD(V3D_L2TCACTL_FLM_FLUSH, V3D_L2TCACTL_FLM));
mutex_unlock(&v3d->cache_clean_lock);
}
/* Cleans texture L1 and L2 cachelines (writing back dirty data).
*
* For cleaning, which happens from the CACHE_CLEAN queue after CSD has
* executed, we need to make sure that the clean is done before
* signaling job completion. So, we synchronously wait before
* returning, and we make sure that L2 invalidates don't happen in the
* meantime to confuse our are-we-done checks.
*/
void
v3d_clean_caches(struct v3d_dev *v3d)
{
struct drm_device *dev = &v3d->drm;
int core = 0;
trace_v3d_cache_clean_begin(dev);
V3D_CORE_WRITE(core, V3D_CTL_L2TCACTL, V3D_L2TCACTL_TMUWCF);
if (wait_for(!(V3D_CORE_READ(core, V3D_CTL_L2TCACTL) &
V3D_L2TCACTL_TMUWCF), 100)) {
DRM_ERROR("Timeout waiting for TMU write combiner flush\n");
}
mutex_lock(&v3d->cache_clean_lock);
V3D_CORE_WRITE(core, V3D_CTL_L2TCACTL,
V3D_L2TCACTL_L2TFLS |
V3D_SET_FIELD(V3D_L2TCACTL_FLM_CLEAN, V3D_L2TCACTL_FLM));
if (wait_for(!(V3D_CORE_READ(core, V3D_CTL_L2TCACTL) &
V3D_L2TCACTL_L2TFLS), 100)) {
DRM_ERROR("Timeout waiting for L2T clean\n");
}
mutex_unlock(&v3d->cache_clean_lock);
trace_v3d_cache_clean_end(dev);
}
/* Invalidates the slice caches. These are read-only caches. */
static void
v3d_invalidate_slices(struct v3d_dev *v3d, int core)
{
V3D_CORE_WRITE(core, V3D_CTL_SLCACTL,
V3D_SET_FIELD(0xf, V3D_SLCACTL_TVCCS) |
V3D_SET_FIELD(0xf, V3D_SLCACTL_TDCCS) |
V3D_SET_FIELD(0xf, V3D_SLCACTL_UCC) |
V3D_SET_FIELD(0xf, V3D_SLCACTL_ICC));
}
void
v3d_invalidate_caches(struct v3d_dev *v3d)
{
/* Invalidate the caches from the outside in. That way if
* another CL's concurrent use of nearby memory were to pull
* an invalidated cacheline back in, we wouldn't leave stale
* data in the inner cache.
*/
v3d_flush_l3(v3d);
v3d_invalidate_l2c(v3d, 0);
v3d_flush_l2t(v3d, 0);
v3d_invalidate_slices(v3d, 0);
}
/* Takes the reservation lock on all the BOs being referenced, so that
* at queue submit time we can update the reservations.
*
* We don't lock the RCL, the tile alloc/state BOs, or overflow memory
* (all of which are on exec->unref_list). They're entirely private
* to v3d, so we don't attach dma-buf fences to them.
*/
static int
v3d_lock_bo_reservations(struct v3d_job *job,
struct ww_acquire_ctx *acquire_ctx)
{
int i, ret;
ret = drm_gem_lock_reservations(job->bo, job->bo_count, acquire_ctx);
if (ret)
return ret;
for (i = 0; i < job->bo_count; i++) {
ret = dma_resv_reserve_fences(job->bo[i]->resv, 1);
if (ret)
goto fail;
ret = drm_sched_job_add_implicit_dependencies(&job->base,
job->bo[i], true);
if (ret)
goto fail;
}
return 0;
fail:
drm_gem_unlock_reservations(job->bo, job->bo_count, acquire_ctx);
return ret;
}
/**
* v3d_lookup_bos() - Sets up job->bo[] with the GEM objects
* referenced by the job.
* @dev: DRM device
* @file_priv: DRM file for this fd
* @job: V3D job being set up
* @bo_handles: GEM handles
* @bo_count: Number of GEM handles passed in
*
* The command validator needs to reference BOs by their index within
* the submitted job's BO list. This does the validation of the job's
* BO list and reference counting for the lifetime of the job.
*
* Note that this function doesn't need to unreference the BOs on
* failure, because that will happen at v3d_exec_cleanup() time.
*/
static int
v3d_lookup_bos(struct drm_device *dev,
struct drm_file *file_priv,
struct v3d_job *job,
u64 bo_handles,
u32 bo_count)
{
job->bo_count = bo_count;
if (!job->bo_count) {
/* See comment on bo_index for why we have to check
* this.
*/
DRM_DEBUG("Rendering requires BOs\n");
return -EINVAL;
}
return drm_gem_objects_lookup(file_priv,
(void __user *)(uintptr_t)bo_handles,
job->bo_count, &job->bo);
}
static void
v3d_job_free(struct kref *ref)
{
struct v3d_job *job = container_of(ref, struct v3d_job, refcount);
int i;
if (job->bo) {
for (i = 0; i < job->bo_count; i++)
drm_gem_object_put(job->bo[i]);
kvfree(job->bo);
}
dma_fence_put(job->irq_fence);
dma_fence_put(job->done_fence);
if (job->perfmon)
v3d_perfmon_put(job->perfmon);
kfree(job);
}
static void
v3d_render_job_free(struct kref *ref)
{
struct v3d_render_job *job = container_of(ref, struct v3d_render_job,
base.refcount);
struct v3d_bo *bo, *save;
list_for_each_entry_safe(bo, save, &job->unref_list, unref_head) {
drm_gem_object_put(&bo->base.base);
}
v3d_job_free(ref);
}
void v3d_job_cleanup(struct v3d_job *job)
{
if (!job)
return;
drm_sched_job_cleanup(&job->base);
v3d_job_put(job);
}
void v3d_job_put(struct v3d_job *job)
{
kref_put(&job->refcount, job->free);
}
int
v3d_wait_bo_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
int ret;
struct drm_v3d_wait_bo *args = data;
ktime_t start = ktime_get();
u64 delta_ns;
unsigned long timeout_jiffies =
nsecs_to_jiffies_timeout(args->timeout_ns);
if (args->pad != 0)
return -EINVAL;
ret = drm_gem_dma_resv_wait(file_priv, args->handle,
true, timeout_jiffies);
/* Decrement the user's timeout, in case we got interrupted
* such that the ioctl will be restarted.
*/
delta_ns = ktime_to_ns(ktime_sub(ktime_get(), start));
if (delta_ns < args->timeout_ns)
args->timeout_ns -= delta_ns;
else
args->timeout_ns = 0;
/* Asked to wait beyond the jiffie/scheduler precision? */
if (ret == -ETIME && args->timeout_ns)
ret = -EAGAIN;
return ret;
}
static int
v3d_job_init(struct v3d_dev *v3d, struct drm_file *file_priv,
void **container, size_t size, void (*free)(struct kref *ref),
u32 in_sync, struct v3d_submit_ext *se, enum v3d_queue queue)
{
struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
struct v3d_job *job;
bool has_multisync = se && (se->flags & DRM_V3D_EXT_ID_MULTI_SYNC);
int ret, i;
*container = kcalloc(1, size, GFP_KERNEL);
if (!*container) {
DRM_ERROR("Cannot allocate memory for v3d job.");
return -ENOMEM;
}
job = *container;
job->v3d = v3d;
job->free = free;
ret = drm_sched_job_init(&job->base, &v3d_priv->sched_entity[queue],
v3d_priv);
if (ret)
goto fail;
if (has_multisync) {
if (se->in_sync_count && se->wait_stage == queue) {
struct drm_v3d_sem __user *handle = u64_to_user_ptr(se->in_syncs);
for (i = 0; i < se->in_sync_count; i++) {
struct drm_v3d_sem in;
if (copy_from_user(&in, handle++, sizeof(in))) {
ret = -EFAULT;
DRM_DEBUG("Failed to copy wait dep handle.\n");
goto fail_deps;
}
ret = drm_sched_job_add_syncobj_dependency(&job->base, file_priv, in.handle, 0);
// TODO: Investigate why this was filtered out for the IOCTL.
if (ret && ret != -ENOENT)
goto fail_deps;
}
}
} else {
ret = drm_sched_job_add_syncobj_dependency(&job->base, file_priv, in_sync, 0);
// TODO: Investigate why this was filtered out for the IOCTL.
if (ret && ret != -ENOENT)
goto fail_deps;
}
kref_init(&job->refcount);
return 0;
fail_deps:
drm_sched_job_cleanup(&job->base);
fail:
kfree(*container);
*container = NULL;
return ret;
}
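/* Job reference counting, as set up above and in v3d_push_job() below:
 * v3d_job_init() takes the initial reference owned by the submit ioctl
 * (dropped via v3d_job_put() on success or v3d_job_cleanup() on failure),
 * and v3d_push_job() grabs a second one on behalf of the scheduler, which
 * is put when the scheduled job completes.
 */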
static void
v3d_push_job(struct v3d_job *job)
{
drm_sched_job_arm(&job->base);
job->done_fence = dma_fence_get(&job->base.s_fence->finished);
/* put by scheduler job completion */
kref_get(&job->refcount);
drm_sched_entity_push_job(&job->base);
}
static void
v3d_attach_fences_and_unlock_reservation(struct drm_file *file_priv,
struct v3d_job *job,
struct ww_acquire_ctx *acquire_ctx,
u32 out_sync,
struct v3d_submit_ext *se,
struct dma_fence *done_fence)
{
struct drm_syncobj *sync_out;
bool has_multisync = se && (se->flags & DRM_V3D_EXT_ID_MULTI_SYNC);
int i;
for (i = 0; i < job->bo_count; i++) {
/* XXX: Use shared fences for read-only objects. */
dma_resv_add_fence(job->bo[i]->resv, job->done_fence,
DMA_RESV_USAGE_WRITE);
}
drm_gem_unlock_reservations(job->bo, job->bo_count, acquire_ctx);
/* Update the return sync object for the job */
/* If it only supports a single signal semaphore */
if (!has_multisync) {
sync_out = drm_syncobj_find(file_priv, out_sync);
if (sync_out) {
drm_syncobj_replace_fence(sync_out, done_fence);
drm_syncobj_put(sync_out);
}
return;
}
/* If multiple semaphores extension is supported */
if (se->out_sync_count) {
for (i = 0; i < se->out_sync_count; i++) {
drm_syncobj_replace_fence(se->out_syncs[i].syncobj,
done_fence);
drm_syncobj_put(se->out_syncs[i].syncobj);
}
kvfree(se->out_syncs);
}
}
static void
v3d_put_multisync_post_deps(struct v3d_submit_ext *se)
{
unsigned int i;
if (!(se && se->out_sync_count))
return;
for (i = 0; i < se->out_sync_count; i++)
drm_syncobj_put(se->out_syncs[i].syncobj);
kvfree(se->out_syncs);
}
static int
v3d_get_multisync_post_deps(struct drm_file *file_priv,
struct v3d_submit_ext *se,
u32 count, u64 handles)
{
struct drm_v3d_sem __user *post_deps;
int i, ret;
if (!count)
return 0;
se->out_syncs = (struct v3d_submit_outsync *)
kvmalloc_array(count,
sizeof(struct v3d_submit_outsync),
GFP_KERNEL);
if (!se->out_syncs)
return -ENOMEM;
post_deps = u64_to_user_ptr(handles);
for (i = 0; i < count; i++) {
struct drm_v3d_sem out;
if (copy_from_user(&out, post_deps++, sizeof(out))) {
ret = -EFAULT;
DRM_DEBUG("Failed to copy post dep handles\n");
goto fail;
}
se->out_syncs[i].syncobj = drm_syncobj_find(file_priv,
out.handle);
if (!se->out_syncs[i].syncobj) {
ret = -EINVAL;
goto fail;
}
}
se->out_sync_count = count;
return 0;
fail:
for (i--; i >= 0; i--)
drm_syncobj_put(se->out_syncs[i].syncobj);
kvfree(se->out_syncs);
return ret;
}
/* Get data for multiple binary semaphores synchronization. Parse syncobj
* to be signaled when job completes (out_sync).
*/
static int
v3d_get_multisync_submit_deps(struct drm_file *file_priv,
struct drm_v3d_extension __user *ext,
void *data)
{
struct drm_v3d_multi_sync multisync;
struct v3d_submit_ext *se = data;
int ret;
if (copy_from_user(&multisync, ext, sizeof(multisync)))
return -EFAULT;
if (multisync.pad)
return -EINVAL;
ret = v3d_get_multisync_post_deps(file_priv, data, multisync.out_sync_count,
multisync.out_syncs);
if (ret)
return ret;
se->in_sync_count = multisync.in_sync_count;
se->in_syncs = multisync.in_syncs;
se->flags |= DRM_V3D_EXT_ID_MULTI_SYNC;
se->wait_stage = multisync.wait_stage;
return 0;
}
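/*
 * Hedged usage sketch (not driver code): how userspace might chain this
 * extension into a submit struct such as drm_v3d_submit_cl. Field names
 * are assumed from the v3d uapi header; all values are placeholders.
 *
 *	struct drm_v3d_multi_sync ms = {
 *		.base.id = DRM_V3D_EXT_ID_MULTI_SYNC,
 *		.in_syncs = (uintptr_t)in_sems,		// array of drm_v3d_sem
 *		.out_syncs = (uintptr_t)out_sems,	// array of drm_v3d_sem
 *		.in_sync_count = num_in,
 *		.out_sync_count = num_out,
 *		.wait_stage = wait_queue,	// queue that waits on in_syncs
 *	};
 *	submit.flags |= DRM_V3D_SUBMIT_EXTENSION;
 *	submit.extensions = (uintptr_t)&ms;
 */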
/* Whenever userspace sets ioctl extensions, v3d_get_extensions parses data
* according to the extension id (name).
*/
static int
v3d_get_extensions(struct drm_file *file_priv,
u64 ext_handles,
void *data)
{
struct drm_v3d_extension __user *user_ext;
int ret;
user_ext = u64_to_user_ptr(ext_handles);
while (user_ext) {
struct drm_v3d_extension ext;
if (copy_from_user(&ext, user_ext, sizeof(ext))) {
DRM_DEBUG("Failed to copy submit extension\n");
return -EFAULT;
}
switch (ext.id) {
case DRM_V3D_EXT_ID_MULTI_SYNC:
ret = v3d_get_multisync_submit_deps(file_priv, user_ext, data);
if (ret)
return ret;
break;
default:
DRM_DEBUG_DRIVER("Unknown extension id: %d\n", ext.id);
return -EINVAL;
}
user_ext = u64_to_user_ptr(ext.next);
}
return 0;
}
/**
* v3d_submit_cl_ioctl() - Submits a job (frame) to the V3D.
* @dev: DRM device
* @data: ioctl argument
* @file_priv: DRM file for this fd
*
* This is the main entrypoint for userspace to submit a 3D frame to
* the GPU. Userspace provides the binner command list (if
* applicable), and the kernel sets up the render command list to draw
* to the framebuffer described in the ioctl, using the command lists
* that the 3D engine's binner will produce.
*/
int
v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct v3d_dev *v3d = to_v3d_dev(dev);
struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
struct drm_v3d_submit_cl *args = data;
struct v3d_submit_ext se = {0};
struct v3d_bin_job *bin = NULL;
struct v3d_render_job *render = NULL;
struct v3d_job *clean_job = NULL;
struct v3d_job *last_job;
struct ww_acquire_ctx acquire_ctx;
int ret = 0;
trace_v3d_submit_cl_ioctl(&v3d->drm, args->rcl_start, args->rcl_end);
if (args->pad)
return -EINVAL;
if (args->flags &&
args->flags & ~(DRM_V3D_SUBMIT_CL_FLUSH_CACHE |
DRM_V3D_SUBMIT_EXTENSION)) {
DRM_INFO("invalid flags: %d\n", args->flags);
return -EINVAL;
}
if (args->flags & DRM_V3D_SUBMIT_EXTENSION) {
ret = v3d_get_extensions(file_priv, args->extensions, &se);
if (ret) {
DRM_DEBUG("Failed to get extensions.\n");
return ret;
}
}
ret = v3d_job_init(v3d, file_priv, (void *)&render, sizeof(*render),
v3d_render_job_free, args->in_sync_rcl, &se, V3D_RENDER);
if (ret)
goto fail;
render->start = args->rcl_start;
render->end = args->rcl_end;
INIT_LIST_HEAD(&render->unref_list);
if (args->bcl_start != args->bcl_end) {
ret = v3d_job_init(v3d, file_priv, (void *)&bin, sizeof(*bin),
v3d_job_free, args->in_sync_bcl, &se, V3D_BIN);
if (ret)
goto fail;
bin->start = args->bcl_start;
bin->end = args->bcl_end;
bin->qma = args->qma;
bin->qms = args->qms;
bin->qts = args->qts;
bin->render = render;
}
if (args->flags & DRM_V3D_SUBMIT_CL_FLUSH_CACHE) {
ret = v3d_job_init(v3d, file_priv, (void *)&clean_job, sizeof(*clean_job),
v3d_job_free, 0, NULL, V3D_CACHE_CLEAN);
if (ret)
goto fail;
last_job = clean_job;
} else {
last_job = &render->base;
}
ret = v3d_lookup_bos(dev, file_priv, last_job,
args->bo_handles, args->bo_handle_count);
if (ret)
goto fail;
ret = v3d_lock_bo_reservations(last_job, &acquire_ctx);
if (ret)
goto fail;
if (args->perfmon_id) {
render->base.perfmon = v3d_perfmon_find(v3d_priv,
args->perfmon_id);
if (!render->base.perfmon) {
ret = -ENOENT;
goto fail_perfmon;
}
}
mutex_lock(&v3d->sched_lock);
if (bin) {
bin->base.perfmon = render->base.perfmon;
v3d_perfmon_get(bin->base.perfmon);
v3d_push_job(&bin->base);
ret = drm_sched_job_add_dependency(&render->base.base,
dma_fence_get(bin->base.done_fence));
if (ret)
goto fail_unreserve;
}
v3d_push_job(&render->base);
if (clean_job) {
struct dma_fence *render_fence =
dma_fence_get(render->base.done_fence);
ret = drm_sched_job_add_dependency(&clean_job->base,
render_fence);
if (ret)
goto fail_unreserve;
clean_job->perfmon = render->base.perfmon;
v3d_perfmon_get(clean_job->perfmon);
v3d_push_job(clean_job);
}
mutex_unlock(&v3d->sched_lock);
v3d_attach_fences_and_unlock_reservation(file_priv,
last_job,
&acquire_ctx,
args->out_sync,
&se,
last_job->done_fence);
if (bin)
v3d_job_put(&bin->base);
v3d_job_put(&render->base);
if (clean_job)
v3d_job_put(clean_job);
return 0;
fail_unreserve:
mutex_unlock(&v3d->sched_lock);
fail_perfmon:
drm_gem_unlock_reservations(last_job->bo,
last_job->bo_count, &acquire_ctx);
fail:
v3d_job_cleanup((void *)bin);
v3d_job_cleanup((void *)render);
v3d_job_cleanup(clean_job);
v3d_put_multisync_post_deps(&se);
return ret;
}
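/*
 * Hedged usage sketch (not driver code): roughly how a client fills the
 * argument struct handled above. Field names match the args-> uses in
 * this function; all values are placeholders.
 *
 *	struct drm_v3d_submit_cl submit = {
 *		.bcl_start = bcl_start, .bcl_end = bcl_end,
 *		.rcl_start = rcl_start, .rcl_end = rcl_end,
 *		.bo_handles = (uintptr_t)handles,
 *		.bo_handle_count = num_handles,
 *		.in_sync_bcl = in_syncobj_bcl,
 *		.in_sync_rcl = in_syncobj_rcl,
 *		.out_sync = out_syncobj,
 *	};
 *	int ret = drmIoctl(fd, DRM_IOCTL_V3D_SUBMIT_CL, &submit);
 */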
/**
* v3d_submit_tfu_ioctl() - Submits a TFU (texture formatting) job to the V3D.
* @dev: DRM device
* @data: ioctl argument
* @file_priv: DRM file for this fd
*
* Userspace provides the register setup for the TFU, which we don't
* need to validate since the TFU is behind the MMU.
*/
int
v3d_submit_tfu_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct v3d_dev *v3d = to_v3d_dev(dev);
struct drm_v3d_submit_tfu *args = data;
struct v3d_submit_ext se = {0};
struct v3d_tfu_job *job = NULL;
struct ww_acquire_ctx acquire_ctx;
int ret = 0;
trace_v3d_submit_tfu_ioctl(&v3d->drm, args->iia);
if (args->flags && !(args->flags & DRM_V3D_SUBMIT_EXTENSION)) {
DRM_DEBUG("invalid flags: %d\n", args->flags);
return -EINVAL;
}
if (args->flags & DRM_V3D_SUBMIT_EXTENSION) {
ret = v3d_get_extensions(file_priv, args->extensions, &se);
if (ret) {
DRM_DEBUG("Failed to get extensions.\n");
return ret;
}
}
ret = v3d_job_init(v3d, file_priv, (void *)&job, sizeof(*job),
v3d_job_free, args->in_sync, &se, V3D_TFU);
if (ret)
goto fail;
job->base.bo = kcalloc(ARRAY_SIZE(args->bo_handles),
sizeof(*job->base.bo), GFP_KERNEL);
if (!job->base.bo) {
ret = -ENOMEM;
goto fail;
}
job->args = *args;
for (job->base.bo_count = 0;
job->base.bo_count < ARRAY_SIZE(args->bo_handles);
job->base.bo_count++) {
struct drm_gem_object *bo;
if (!args->bo_handles[job->base.bo_count])
break;
bo = drm_gem_object_lookup(file_priv, args->bo_handles[job->base.bo_count]);
if (!bo) {
DRM_DEBUG("Failed to look up GEM BO %d: %d\n",
job->base.bo_count,
args->bo_handles[job->base.bo_count]);
ret = -ENOENT;
goto fail;
}
job->base.bo[job->base.bo_count] = bo;
}
ret = v3d_lock_bo_reservations(&job->base, &acquire_ctx);
if (ret)
goto fail;
mutex_lock(&v3d->sched_lock);
v3d_push_job(&job->base);
mutex_unlock(&v3d->sched_lock);
v3d_attach_fences_and_unlock_reservation(file_priv,
&job->base, &acquire_ctx,
args->out_sync,
&se,
job->base.done_fence);
v3d_job_put(&job->base);
return 0;
fail:
v3d_job_cleanup((void *)job);
v3d_put_multisync_post_deps(&se);
return ret;
}
/**
* v3d_submit_csd_ioctl() - Submits a CSD (compute shader dispatch) job to the V3D.
* @dev: DRM device
* @data: ioctl argument
* @file_priv: DRM file for this fd
*
* Userspace provides the register setup for the CSD, which we don't
* need to validate since the CSD is behind the MMU.
*/
int
v3d_submit_csd_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct v3d_dev *v3d = to_v3d_dev(dev);
struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
struct drm_v3d_submit_csd *args = data;
struct v3d_submit_ext se = {0};
struct v3d_csd_job *job = NULL;
struct v3d_job *clean_job = NULL;
struct ww_acquire_ctx acquire_ctx;
int ret;
trace_v3d_submit_csd_ioctl(&v3d->drm, args->cfg[5], args->cfg[6]);
if (args->pad)
return -EINVAL;
if (!v3d_has_csd(v3d)) {
DRM_DEBUG("Attempting CSD submit on non-CSD hardware\n");
return -EINVAL;
}
if (args->flags && !(args->flags & DRM_V3D_SUBMIT_EXTENSION)) {
DRM_INFO("invalid flags: %d\n", args->flags);
return -EINVAL;
}
if (args->flags & DRM_V3D_SUBMIT_EXTENSION) {
ret = v3d_get_extensions(file_priv, args->extensions, &se);
if (ret) {
DRM_DEBUG("Failed to get extensions.\n");
return ret;
}
}
ret = v3d_job_init(v3d, file_priv, (void *)&job, sizeof(*job),
v3d_job_free, args->in_sync, &se, V3D_CSD);
if (ret)
goto fail;
ret = v3d_job_init(v3d, file_priv, (void *)&clean_job, sizeof(*clean_job),
v3d_job_free, 0, NULL, V3D_CACHE_CLEAN);
if (ret)
goto fail;
job->args = *args;
ret = v3d_lookup_bos(dev, file_priv, clean_job,
args->bo_handles, args->bo_handle_count);
if (ret)
goto fail;
ret = v3d_lock_bo_reservations(clean_job, &acquire_ctx);
if (ret)
goto fail;
if (args->perfmon_id) {
job->base.perfmon = v3d_perfmon_find(v3d_priv,
args->perfmon_id);
if (!job->base.perfmon) {
ret = -ENOENT;
goto fail_perfmon;
}
}
mutex_lock(&v3d->sched_lock);
v3d_push_job(&job->base);
ret = drm_sched_job_add_dependency(&clean_job->base,
dma_fence_get(job->base.done_fence));
if (ret)
goto fail_unreserve;
v3d_push_job(clean_job);
mutex_unlock(&v3d->sched_lock);
v3d_attach_fences_and_unlock_reservation(file_priv,
clean_job,
&acquire_ctx,
args->out_sync,
&se,
clean_job->done_fence);
v3d_job_put(&job->base);
v3d_job_put(clean_job);
return 0;
fail_unreserve:
mutex_unlock(&v3d->sched_lock);
fail_perfmon:
drm_gem_unlock_reservations(clean_job->bo, clean_job->bo_count,
&acquire_ctx);
fail:
v3d_job_cleanup((void *)job);
v3d_job_cleanup(clean_job);
v3d_put_multisync_post_deps(&se);
return ret;
}
int
v3d_gem_init(struct drm_device *dev)
{
struct v3d_dev *v3d = to_v3d_dev(dev);
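/* 4 MB of page tables: with 4-byte PTEs mapping 4 KB pages, this covers
 * the 4 GB GPU virtual address space handed to drm_mm below.
 */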
u32 pt_size = 4096 * 1024;
int ret, i;
for (i = 0; i < V3D_MAX_QUEUES; i++)
v3d->queue[i].fence_context = dma_fence_context_alloc(1);
spin_lock_init(&v3d->mm_lock);
spin_lock_init(&v3d->job_lock);
ret = drmm_mutex_init(dev, &v3d->bo_lock);
if (ret)
return ret;
ret = drmm_mutex_init(dev, &v3d->reset_lock);
if (ret)
return ret;
ret = drmm_mutex_init(dev, &v3d->sched_lock);
if (ret)
return ret;
ret = drmm_mutex_init(dev, &v3d->cache_clean_lock);
if (ret)
return ret;
/* Note: We don't allocate address 0. Various bits of HW
* treat 0 as special, such as the occlusion query counters
* where 0 means "disabled".
*/
drm_mm_init(&v3d->mm, 1, pt_size / sizeof(u32) - 1);
v3d->pt = dma_alloc_wc(v3d->drm.dev, pt_size,
&v3d->pt_paddr,
GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
if (!v3d->pt) {
drm_mm_takedown(&v3d->mm);
dev_err(v3d->drm.dev,
"Failed to allocate page tables. Please ensure you have DMA enabled.\n");
return -ENOMEM;
}
v3d_init_hw_state(v3d);
v3d_mmu_set_page_table(v3d);
ret = v3d_sched_init(v3d);
if (ret) {
drm_mm_takedown(&v3d->mm);
dma_free_coherent(v3d->drm.dev, 4096 * 1024, (void *)v3d->pt,
v3d->pt_paddr);
return ret;
}
return 0;
}
void
v3d_gem_destroy(struct drm_device *dev)
{
struct v3d_dev *v3d = to_v3d_dev(dev);
v3d_sched_fini(v3d);
/* Waiting for jobs to finish would need to be done before
* unregistering V3D.
*/
WARN_ON(v3d->bin_job);
WARN_ON(v3d->render_job);
drm_mm_takedown(&v3d->mm);
dma_free_coherent(v3d->drm.dev, 4096 * 1024, (void *)v3d->pt,
v3d->pt_paddr);
}
| linux-master | drivers/gpu/drm/v3d/v3d_gem.c |
// SPDX-License-Identifier: GPL-2.0+
/* Copyright (C) 2015-2018 Broadcom */
/**
* DOC: V3D GEM BO management support
*
* Compared to VC4 (V3D 2.x), V3D 3.3 introduces an MMU between the
* GPU and the bus, allowing us to use shmem objects for our storage
* instead of CMA.
*
* Physically contiguous objects may still be imported to V3D, but the
* driver doesn't allocate physically contiguous objects on its own.
* Display engines requiring physically contiguous allocations should
* look into Mesa's "renderonly" support (as used by the Mesa pl111
* driver) for an example of how to integrate with V3D.
*
* Long term, we should support evicting pages from the MMU when under
* memory pressure (thus the v3d_bo_get_pages() refcounting), but
* that's not a high priority since our systems tend to not have swap.
*/
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>
#include "v3d_drv.h"
#include "uapi/drm/v3d_drm.h"
/* Called by the DRM core on the last userspace/kernel unreference of the
* BO.
*/
void v3d_free_object(struct drm_gem_object *obj)
{
struct v3d_dev *v3d = to_v3d_dev(obj->dev);
struct v3d_bo *bo = to_v3d_bo(obj);
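/* Tear down the GPU-side mapping before the backing pages are released,
 * then give the BO's slot in the GPU address space back to the allocator.
 */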
v3d_mmu_remove_ptes(bo);
mutex_lock(&v3d->bo_lock);
v3d->bo_stats.num_allocated--;
v3d->bo_stats.pages_allocated -= obj->size >> PAGE_SHIFT;
mutex_unlock(&v3d->bo_lock);
spin_lock(&v3d->mm_lock);
drm_mm_remove_node(&bo->node);
spin_unlock(&v3d->mm_lock);
/* GPU execution may have dirtied any pages in the BO. */
bo->base.pages_mark_dirty_on_put = true;
drm_gem_shmem_free(&bo->base);
}
static const struct drm_gem_object_funcs v3d_gem_funcs = {
.free = v3d_free_object,
.print_info = drm_gem_shmem_object_print_info,
.pin = drm_gem_shmem_object_pin,
.unpin = drm_gem_shmem_object_unpin,
.get_sg_table = drm_gem_shmem_object_get_sg_table,
.vmap = drm_gem_shmem_object_vmap,
.vunmap = drm_gem_shmem_object_vunmap,
.mmap = drm_gem_shmem_object_mmap,
.vm_ops = &drm_gem_shmem_vm_ops,
};
/* gem_create_object function for allocating a BO struct and doing
* early setup.
*/
struct drm_gem_object *v3d_create_object(struct drm_device *dev, size_t size)
{
struct v3d_bo *bo;
struct drm_gem_object *obj;
if (size == 0)
return ERR_PTR(-EINVAL);
bo = kzalloc(sizeof(*bo), GFP_KERNEL);
if (!bo)
return ERR_PTR(-ENOMEM);
obj = &bo->base.base;
obj->funcs = &v3d_gem_funcs;
bo->base.map_wc = true;
INIT_LIST_HEAD(&bo->unref_head);
return &bo->base.base;
}
static int
v3d_bo_create_finish(struct drm_gem_object *obj)
{
struct v3d_dev *v3d = to_v3d_dev(obj->dev);
struct v3d_bo *bo = to_v3d_bo(obj);
struct sg_table *sgt;
int ret;
/* So far we pin the BO in the MMU for its lifetime, so use
* shmem's helper for getting a lifetime sgt.
*/
sgt = drm_gem_shmem_get_pages_sgt(&bo->base);
if (IS_ERR(sgt))
return PTR_ERR(sgt);
spin_lock(&v3d->mm_lock);
/* Allocate the object's space in the GPU's page tables.
* Inserting PTEs will happen later, but the offset is for the
* lifetime of the BO.
*/
ret = drm_mm_insert_node_generic(&v3d->mm, &bo->node,
obj->size >> PAGE_SHIFT,
GMP_GRANULARITY >> PAGE_SHIFT, 0, 0);
spin_unlock(&v3d->mm_lock);
if (ret)
return ret;
/* Track stats for /debug/dri/n/bo_stats. */
mutex_lock(&v3d->bo_lock);
v3d->bo_stats.num_allocated++;
v3d->bo_stats.pages_allocated += obj->size >> PAGE_SHIFT;
mutex_unlock(&v3d->bo_lock);
v3d_mmu_insert_ptes(bo);
return 0;
}
struct v3d_bo *v3d_bo_create(struct drm_device *dev, struct drm_file *file_priv,
size_t unaligned_size)
{
struct drm_gem_shmem_object *shmem_obj;
struct v3d_bo *bo;
int ret;
shmem_obj = drm_gem_shmem_create(dev, unaligned_size);
if (IS_ERR(shmem_obj))
return ERR_CAST(shmem_obj);
bo = to_v3d_bo(&shmem_obj->base);
ret = v3d_bo_create_finish(&shmem_obj->base);
if (ret)
goto free_obj;
return bo;
free_obj:
drm_gem_shmem_free(shmem_obj);
return ERR_PTR(ret);
}
struct drm_gem_object *
v3d_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach,
struct sg_table *sgt)
{
struct drm_gem_object *obj;
int ret;
obj = drm_gem_shmem_prime_import_sg_table(dev, attach, sgt);
if (IS_ERR(obj))
return obj;
ret = v3d_bo_create_finish(obj);
if (ret) {
drm_gem_shmem_free(&to_v3d_bo(obj)->base);
return ERR_PTR(ret);
}
return obj;
}
int v3d_create_bo_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_v3d_create_bo *args = data;
struct v3d_bo *bo = NULL;
int ret;
if (args->flags != 0) {
DRM_INFO("unknown create_bo flags: %d\n", args->flags);
return -EINVAL;
}
bo = v3d_bo_create(dev, file_priv, PAGE_ALIGN(args->size));
if (IS_ERR(bo))
return PTR_ERR(bo);
args->offset = bo->node.start << PAGE_SHIFT;
ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
drm_gem_object_put(&bo->base.base);
return ret;
}
int v3d_mmap_bo_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_v3d_mmap_bo *args = data;
struct drm_gem_object *gem_obj;
if (args->flags != 0) {
DRM_INFO("unknown mmap_bo flags: %d\n", args->flags);
return -EINVAL;
}
gem_obj = drm_gem_object_lookup(file_priv, args->handle);
if (!gem_obj) {
DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
return -ENOENT;
}
args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node);
drm_gem_object_put(gem_obj);
return 0;
}
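/*
 * Hedged usage sketch (not driver code): the usual BO lifecycle built on
 * the two ioctls above. Names and sizes are placeholders.
 *
 *	struct drm_v3d_create_bo create = { .size = size };
 *	drmIoctl(fd, DRM_IOCTL_V3D_CREATE_BO, &create);
 *	// create.handle is the GEM handle, create.offset the GPU VA
 *
 *	struct drm_v3d_mmap_bo map = { .handle = create.handle };
 *	drmIoctl(fd, DRM_IOCTL_V3D_MMAP_BO, &map);
 *	void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			 fd, map.offset);
 */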
int v3d_get_bo_offset_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_v3d_get_bo_offset *args = data;
struct drm_gem_object *gem_obj;
struct v3d_bo *bo;
gem_obj = drm_gem_object_lookup(file_priv, args->handle);
if (!gem_obj) {
DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
return -ENOENT;
}
bo = to_v3d_bo(gem_obj);
args->offset = bo->node.start << PAGE_SHIFT;
drm_gem_object_put(gem_obj);
return 0;
}
| linux-master | drivers/gpu/drm/v3d/v3d_bo.c |
// SPDX-License-Identifier: GPL-2.0+
/* Copyright (C) 2014-2018 Broadcom */
#include <linux/circ_buf.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/string_helpers.h>
#include <drm/drm_debugfs.h>
#include "v3d_drv.h"
#include "v3d_regs.h"
#define REGDEF(reg) { reg, #reg }
struct v3d_reg_def {
u32 reg;
const char *name;
};
static const struct v3d_reg_def v3d_hub_reg_defs[] = {
REGDEF(V3D_HUB_AXICFG),
REGDEF(V3D_HUB_UIFCFG),
REGDEF(V3D_HUB_IDENT0),
REGDEF(V3D_HUB_IDENT1),
REGDEF(V3D_HUB_IDENT2),
REGDEF(V3D_HUB_IDENT3),
REGDEF(V3D_HUB_INT_STS),
REGDEF(V3D_HUB_INT_MSK_STS),
REGDEF(V3D_MMU_CTL),
REGDEF(V3D_MMU_VIO_ADDR),
REGDEF(V3D_MMU_VIO_ID),
REGDEF(V3D_MMU_DEBUG_INFO),
};
static const struct v3d_reg_def v3d_gca_reg_defs[] = {
REGDEF(V3D_GCA_SAFE_SHUTDOWN),
REGDEF(V3D_GCA_SAFE_SHUTDOWN_ACK),
};
static const struct v3d_reg_def v3d_core_reg_defs[] = {
REGDEF(V3D_CTL_IDENT0),
REGDEF(V3D_CTL_IDENT1),
REGDEF(V3D_CTL_IDENT2),
REGDEF(V3D_CTL_MISCCFG),
REGDEF(V3D_CTL_INT_STS),
REGDEF(V3D_CTL_INT_MSK_STS),
REGDEF(V3D_CLE_CT0CS),
REGDEF(V3D_CLE_CT0CA),
REGDEF(V3D_CLE_CT0EA),
REGDEF(V3D_CLE_CT1CS),
REGDEF(V3D_CLE_CT1CA),
REGDEF(V3D_CLE_CT1EA),
REGDEF(V3D_PTB_BPCA),
REGDEF(V3D_PTB_BPCS),
REGDEF(V3D_GMP_STATUS),
REGDEF(V3D_GMP_CFG),
REGDEF(V3D_GMP_VIO_ADDR),
REGDEF(V3D_ERR_FDBGO),
REGDEF(V3D_ERR_FDBGB),
REGDEF(V3D_ERR_FDBGS),
REGDEF(V3D_ERR_STAT),
};
static const struct v3d_reg_def v3d_csd_reg_defs[] = {
REGDEF(V3D_CSD_STATUS),
REGDEF(V3D_CSD_CURRENT_CFG0),
REGDEF(V3D_CSD_CURRENT_CFG1),
REGDEF(V3D_CSD_CURRENT_CFG2),
REGDEF(V3D_CSD_CURRENT_CFG3),
REGDEF(V3D_CSD_CURRENT_CFG4),
REGDEF(V3D_CSD_CURRENT_CFG5),
REGDEF(V3D_CSD_CURRENT_CFG6),
};
static int v3d_v3d_debugfs_regs(struct seq_file *m, void *unused)
{
struct drm_debugfs_entry *entry = m->private;
struct drm_device *dev = entry->dev;
struct v3d_dev *v3d = to_v3d_dev(dev);
int i, core;
for (i = 0; i < ARRAY_SIZE(v3d_hub_reg_defs); i++) {
seq_printf(m, "%s (0x%04x): 0x%08x\n",
v3d_hub_reg_defs[i].name, v3d_hub_reg_defs[i].reg,
V3D_READ(v3d_hub_reg_defs[i].reg));
}
if (v3d->ver < 41) {
for (i = 0; i < ARRAY_SIZE(v3d_gca_reg_defs); i++) {
seq_printf(m, "%s (0x%04x): 0x%08x\n",
v3d_gca_reg_defs[i].name,
v3d_gca_reg_defs[i].reg,
V3D_GCA_READ(v3d_gca_reg_defs[i].reg));
}
}
for (core = 0; core < v3d->cores; core++) {
for (i = 0; i < ARRAY_SIZE(v3d_core_reg_defs); i++) {
seq_printf(m, "core %d %s (0x%04x): 0x%08x\n",
core,
v3d_core_reg_defs[i].name,
v3d_core_reg_defs[i].reg,
V3D_CORE_READ(core,
v3d_core_reg_defs[i].reg));
}
if (v3d_has_csd(v3d)) {
for (i = 0; i < ARRAY_SIZE(v3d_csd_reg_defs); i++) {
seq_printf(m, "core %d %s (0x%04x): 0x%08x\n",
core,
v3d_csd_reg_defs[i].name,
v3d_csd_reg_defs[i].reg,
V3D_CORE_READ(core,
v3d_csd_reg_defs[i].reg));
}
}
}
return 0;
}
static int v3d_v3d_debugfs_ident(struct seq_file *m, void *unused)
{
struct drm_debugfs_entry *entry = m->private;
struct drm_device *dev = entry->dev;
struct v3d_dev *v3d = to_v3d_dev(dev);
u32 ident0, ident1, ident2, ident3, cores;
int core;
ident0 = V3D_READ(V3D_HUB_IDENT0);
ident1 = V3D_READ(V3D_HUB_IDENT1);
ident2 = V3D_READ(V3D_HUB_IDENT2);
ident3 = V3D_READ(V3D_HUB_IDENT3);
cores = V3D_GET_FIELD(ident1, V3D_HUB_IDENT1_NCORES);
seq_printf(m, "Revision: %d.%d.%d.%d\n",
V3D_GET_FIELD(ident1, V3D_HUB_IDENT1_TVER),
V3D_GET_FIELD(ident1, V3D_HUB_IDENT1_REV),
V3D_GET_FIELD(ident3, V3D_HUB_IDENT3_IPREV),
V3D_GET_FIELD(ident3, V3D_HUB_IDENT3_IPIDX));
seq_printf(m, "MMU: %s\n",
str_yes_no(ident2 & V3D_HUB_IDENT2_WITH_MMU));
seq_printf(m, "TFU: %s\n",
str_yes_no(ident1 & V3D_HUB_IDENT1_WITH_TFU));
seq_printf(m, "TSY: %s\n",
str_yes_no(ident1 & V3D_HUB_IDENT1_WITH_TSY));
seq_printf(m, "MSO: %s\n",
str_yes_no(ident1 & V3D_HUB_IDENT1_WITH_MSO));
seq_printf(m, "L3C: %s (%dkb)\n",
str_yes_no(ident1 & V3D_HUB_IDENT1_WITH_L3C),
V3D_GET_FIELD(ident2, V3D_HUB_IDENT2_L3C_NKB));
for (core = 0; core < cores; core++) {
u32 misccfg;
u32 nslc, ntmu, qups;
ident0 = V3D_CORE_READ(core, V3D_CTL_IDENT0);
ident1 = V3D_CORE_READ(core, V3D_CTL_IDENT1);
ident2 = V3D_CORE_READ(core, V3D_CTL_IDENT2);
misccfg = V3D_CORE_READ(core, V3D_CTL_MISCCFG);
nslc = V3D_GET_FIELD(ident1, V3D_IDENT1_NSLC);
ntmu = V3D_GET_FIELD(ident1, V3D_IDENT1_NTMU);
qups = V3D_GET_FIELD(ident1, V3D_IDENT1_QUPS);
seq_printf(m, "Core %d:\n", core);
seq_printf(m, " Revision: %d.%d\n",
V3D_GET_FIELD(ident0, V3D_IDENT0_VER),
V3D_GET_FIELD(ident1, V3D_IDENT1_REV));
seq_printf(m, " Slices: %d\n", nslc);
seq_printf(m, " TMUs: %d\n", nslc * ntmu);
seq_printf(m, " QPUs: %d\n", nslc * qups);
seq_printf(m, " Semaphores: %d\n",
V3D_GET_FIELD(ident1, V3D_IDENT1_NSEM));
seq_printf(m, " BCG int: %d\n",
(ident2 & V3D_IDENT2_BCG_INT) != 0);
seq_printf(m, " Override TMU: %d\n",
(misccfg & V3D_MISCCFG_OVRTMUOUT) != 0);
}
return 0;
}
static int v3d_debugfs_bo_stats(struct seq_file *m, void *unused)
{
struct drm_debugfs_entry *entry = m->private;
struct drm_device *dev = entry->dev;
struct v3d_dev *v3d = to_v3d_dev(dev);
mutex_lock(&v3d->bo_lock);
seq_printf(m, "allocated bos: %d\n",
v3d->bo_stats.num_allocated);
seq_printf(m, "allocated bo size (kb): %ld\n",
(long)v3d->bo_stats.pages_allocated << (PAGE_SHIFT - 10));
mutex_unlock(&v3d->bo_lock);
return 0;
}
static int v3d_measure_clock(struct seq_file *m, void *unused)
{
struct drm_debugfs_entry *entry = m->private;
struct drm_device *dev = entry->dev;
struct v3d_dev *v3d = to_v3d_dev(dev);
uint32_t cycles;
int core = 0;
int measure_ms = 1000;
if (v3d->ver >= 40) {
V3D_CORE_WRITE(core, V3D_V4_PCTR_0_SRC_0_3,
V3D_SET_FIELD(V3D_PCTR_CYCLE_COUNT,
V3D_PCTR_S0));
V3D_CORE_WRITE(core, V3D_V4_PCTR_0_CLR, 1);
V3D_CORE_WRITE(core, V3D_V4_PCTR_0_EN, 1);
} else {
V3D_CORE_WRITE(core, V3D_V3_PCTR_0_PCTRS0,
V3D_PCTR_CYCLE_COUNT);
V3D_CORE_WRITE(core, V3D_V3_PCTR_0_CLR, 1);
V3D_CORE_WRITE(core, V3D_V3_PCTR_0_EN,
V3D_V3_PCTR_0_EN_ENABLE |
1);
}
msleep(measure_ms);
cycles = V3D_CORE_READ(core, V3D_PCTR_0_PCTR0);
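/* The counter accumulated cycles for measure_ms milliseconds, so
 * cycles / (measure_ms * 1000) is the core clock in MHz; the last
 * printf argument below supplies the tenths digit.
 */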
seq_printf(m, "cycles: %d (%d.%d Mhz)\n",
cycles,
cycles / (measure_ms * 1000),
(cycles / (measure_ms * 100)) % 10);
return 0;
}
static const struct drm_debugfs_info v3d_debugfs_list[] = {
{"v3d_ident", v3d_v3d_debugfs_ident, 0},
{"v3d_regs", v3d_v3d_debugfs_regs, 0},
{"measure_clock", v3d_measure_clock, 0},
{"bo_stats", v3d_debugfs_bo_stats, 0},
};
void
v3d_debugfs_init(struct drm_minor *minor)
{
drm_debugfs_add_files(minor->dev, v3d_debugfs_list, ARRAY_SIZE(v3d_debugfs_list));
}
| linux-master | drivers/gpu/drm/v3d/v3d_debugfs.c |
// SPDX-License-Identifier: GPL-2.0+
/* Copyright (C) 2014-2018 Broadcom */
/**
* DOC: Interrupt management for the V3D engine
*
* When we take a bin, render, TFU done, or CSD done interrupt, we
* need to signal the fence for that job so that the scheduler can
* queue up the next one and unblock any waiters.
*
* When we take the binner out of memory interrupt, we need to
* allocate some new memory and pass it to the binner so that the
* current job can make progress.
*/
#include <linux/platform_device.h>
#include "v3d_drv.h"
#include "v3d_regs.h"
#include "v3d_trace.h"
#define V3D_CORE_IRQS ((u32)(V3D_INT_OUTOMEM | \
V3D_INT_FLDONE | \
V3D_INT_FRDONE | \
V3D_INT_CSDDONE | \
V3D_INT_GMPV))
#define V3D_HUB_IRQS ((u32)(V3D_HUB_INT_MMU_WRV | \
V3D_HUB_INT_MMU_PTI | \
V3D_HUB_INT_MMU_CAP | \
V3D_HUB_INT_TFUC))
static irqreturn_t
v3d_hub_irq(int irq, void *arg);
static void
v3d_overflow_mem_work(struct work_struct *work)
{
struct v3d_dev *v3d =
container_of(work, struct v3d_dev, overflow_mem_work);
struct drm_device *dev = &v3d->drm;
struct v3d_bo *bo = v3d_bo_create(dev, NULL /* XXX: GMP */, 256 * 1024);
struct drm_gem_object *obj;
unsigned long irqflags;
if (IS_ERR(bo)) {
DRM_ERROR("Couldn't allocate binner overflow mem\n");
return;
}
obj = &bo->base.base;
/* We lost a race, and our work task came in after the bin job
* completed and exited. This can happen because the HW
* signals OOM before it's fully OOM, so the binner might just
* barely complete.
*
* If we lose the race and our work task comes in after a new
* bin job got scheduled, that's fine. We'll just give them
* some binner pool anyway.
*/
spin_lock_irqsave(&v3d->job_lock, irqflags);
if (!v3d->bin_job) {
spin_unlock_irqrestore(&v3d->job_lock, irqflags);
goto out;
}
drm_gem_object_get(obj);
list_add_tail(&bo->unref_head, &v3d->bin_job->render->unref_list);
spin_unlock_irqrestore(&v3d->job_lock, irqflags);
V3D_CORE_WRITE(0, V3D_PTB_BPOA, bo->node.start << PAGE_SHIFT);
V3D_CORE_WRITE(0, V3D_PTB_BPOS, obj->size);
out:
drm_gem_object_put(obj);
}
static irqreturn_t
v3d_irq(int irq, void *arg)
{
struct v3d_dev *v3d = arg;
u32 intsts;
irqreturn_t status = IRQ_NONE;
intsts = V3D_CORE_READ(0, V3D_CTL_INT_STS);
/* Acknowledge the interrupts we're handling here. */
V3D_CORE_WRITE(0, V3D_CTL_INT_CLR, intsts);
if (intsts & V3D_INT_OUTOMEM) {
/* Note that the OOM status is edge signaled, so the
* interrupt won't happen again until we actually
* add more memory. Also, as of V3D 4.1, FLDONE won't
* be reported until any OOM state has been cleared.
*/
schedule_work(&v3d->overflow_mem_work);
status = IRQ_HANDLED;
}
if (intsts & V3D_INT_FLDONE) {
struct v3d_fence *fence =
to_v3d_fence(v3d->bin_job->base.irq_fence);
trace_v3d_bcl_irq(&v3d->drm, fence->seqno);
dma_fence_signal(&fence->base);
status = IRQ_HANDLED;
}
if (intsts & V3D_INT_FRDONE) {
struct v3d_fence *fence =
to_v3d_fence(v3d->render_job->base.irq_fence);
trace_v3d_rcl_irq(&v3d->drm, fence->seqno);
dma_fence_signal(&fence->base);
status = IRQ_HANDLED;
}
if (intsts & V3D_INT_CSDDONE) {
struct v3d_fence *fence =
to_v3d_fence(v3d->csd_job->base.irq_fence);
trace_v3d_csd_irq(&v3d->drm, fence->seqno);
dma_fence_signal(&fence->base);
status = IRQ_HANDLED;
}
/* We shouldn't be triggering these if we have GMP in
* always-allowed mode.
*/
if (intsts & V3D_INT_GMPV)
dev_err(v3d->drm.dev, "GMP violation\n");
/* V3D 4.2 wires the hub and core IRQs together, so if we
* didn't see the common one then check the hub for MMU IRQs.
*/
if (v3d->single_irq_line && status == IRQ_NONE)
return v3d_hub_irq(irq, arg);
return status;
}
static irqreturn_t
v3d_hub_irq(int irq, void *arg)
{
struct v3d_dev *v3d = arg;
u32 intsts;
irqreturn_t status = IRQ_NONE;
intsts = V3D_READ(V3D_HUB_INT_STS);
/* Acknowledge the interrupts we're handling here. */
V3D_WRITE(V3D_HUB_INT_CLR, intsts);
if (intsts & V3D_HUB_INT_TFUC) {
struct v3d_fence *fence =
to_v3d_fence(v3d->tfu_job->base.irq_fence);
trace_v3d_tfu_irq(&v3d->drm, fence->seqno);
dma_fence_signal(&fence->base);
status = IRQ_HANDLED;
}
if (intsts & (V3D_HUB_INT_MMU_WRV |
V3D_HUB_INT_MMU_PTI |
V3D_HUB_INT_MMU_CAP)) {
u32 axi_id = V3D_READ(V3D_MMU_VIO_ID);
u64 vio_addr = ((u64)V3D_READ(V3D_MMU_VIO_ADDR) <<
(v3d->va_width - 32));
static const char *const v3d41_axi_ids[] = {
"L2T",
"PTB",
"PSE",
"TLB",
"CLE",
"TFU",
"MMU",
"GMP",
};
const char *client = "?";
V3D_WRITE(V3D_MMU_CTL, V3D_READ(V3D_MMU_CTL));
if (v3d->ver >= 41) {
axi_id = axi_id >> 5;
if (axi_id < ARRAY_SIZE(v3d41_axi_ids))
client = v3d41_axi_ids[axi_id];
}
dev_err(v3d->drm.dev, "MMU error from client %s (%d) at 0x%llx%s%s%s\n",
client, axi_id, (long long)vio_addr,
((intsts & V3D_HUB_INT_MMU_WRV) ?
", write violation" : ""),
((intsts & V3D_HUB_INT_MMU_PTI) ?
", pte invalid" : ""),
((intsts & V3D_HUB_INT_MMU_CAP) ?
", cap exceeded" : ""));
status = IRQ_HANDLED;
}
return status;
}
int
v3d_irq_init(struct v3d_dev *v3d)
{
int irq1, ret, core;
INIT_WORK(&v3d->overflow_mem_work, v3d_overflow_mem_work);
/* Clear any pending interrupts someone might have left around
* for us.
*/
for (core = 0; core < v3d->cores; core++)
V3D_CORE_WRITE(core, V3D_CTL_INT_CLR, V3D_CORE_IRQS);
V3D_WRITE(V3D_HUB_INT_CLR, V3D_HUB_IRQS);
irq1 = platform_get_irq_optional(v3d_to_pdev(v3d), 1);
if (irq1 == -EPROBE_DEFER)
return irq1;
if (irq1 > 0) {
ret = devm_request_irq(v3d->drm.dev, irq1,
v3d_irq, IRQF_SHARED,
"v3d_core0", v3d);
if (ret)
goto fail;
ret = devm_request_irq(v3d->drm.dev,
platform_get_irq(v3d_to_pdev(v3d), 0),
v3d_hub_irq, IRQF_SHARED,
"v3d_hub", v3d);
if (ret)
goto fail;
} else {
v3d->single_irq_line = true;
ret = devm_request_irq(v3d->drm.dev,
platform_get_irq(v3d_to_pdev(v3d), 0),
v3d_irq, IRQF_SHARED,
"v3d", v3d);
if (ret)
goto fail;
}
v3d_irq_enable(v3d);
return 0;
fail:
if (ret != -EPROBE_DEFER)
dev_err(v3d->drm.dev, "IRQ setup failed: %d\n", ret);
return ret;
}
void
v3d_irq_enable(struct v3d_dev *v3d)
{
int core;
/* Enable our set of interrupts, masking out any others. */
for (core = 0; core < v3d->cores; core++) {
V3D_CORE_WRITE(core, V3D_CTL_INT_MSK_SET, ~V3D_CORE_IRQS);
V3D_CORE_WRITE(core, V3D_CTL_INT_MSK_CLR, V3D_CORE_IRQS);
}
V3D_WRITE(V3D_HUB_INT_MSK_SET, ~V3D_HUB_IRQS);
V3D_WRITE(V3D_HUB_INT_MSK_CLR, V3D_HUB_IRQS);
}
void
v3d_irq_disable(struct v3d_dev *v3d)
{
int core;
/* Disable all interrupts. */
for (core = 0; core < v3d->cores; core++)
V3D_CORE_WRITE(core, V3D_CTL_INT_MSK_SET, ~0);
V3D_WRITE(V3D_HUB_INT_MSK_SET, ~0);
/* Clear any pending interrupts we might have left. */
for (core = 0; core < v3d->cores; core++)
V3D_CORE_WRITE(core, V3D_CTL_INT_CLR, V3D_CORE_IRQS);
V3D_WRITE(V3D_HUB_INT_CLR, V3D_HUB_IRQS);
cancel_work_sync(&v3d->overflow_mem_work);
}
/** Reinitializes interrupt registers when a GPU reset is performed. */
void v3d_irq_reset(struct v3d_dev *v3d)
{
v3d_irq_enable(v3d);
}
| linux-master | drivers/gpu/drm/v3d/v3d_irq.c |
// SPDX-License-Identifier: GPL-2.0+
/* Copyright (C) 2017-2018 Broadcom */
#include "v3d_drv.h"
struct dma_fence *v3d_fence_create(struct v3d_dev *v3d, enum v3d_queue queue)
{
struct v3d_fence *fence;
fence = kzalloc(sizeof(*fence), GFP_KERNEL);
if (!fence)
return ERR_PTR(-ENOMEM);
fence->dev = &v3d->drm;
fence->queue = queue;
fence->seqno = ++v3d->queue[queue].emit_seqno;
dma_fence_init(&fence->base, &v3d_fence_ops, &v3d->job_lock,
v3d->queue[queue].fence_context, fence->seqno);
return &fence->base;
}
static const char *v3d_fence_get_driver_name(struct dma_fence *fence)
{
return "v3d";
}
static const char *v3d_fence_get_timeline_name(struct dma_fence *fence)
{
struct v3d_fence *f = to_v3d_fence(fence);
switch (f->queue) {
case V3D_BIN:
return "v3d-bin";
case V3D_RENDER:
return "v3d-render";
case V3D_TFU:
return "v3d-tfu";
case V3D_CSD:
return "v3d-csd";
default:
return NULL;
}
}
const struct dma_fence_ops v3d_fence_ops = {
.get_driver_name = v3d_fence_get_driver_name,
.get_timeline_name = v3d_fence_get_timeline_name,
};
| linux-master | drivers/gpu/drm/v3d/v3d_fence.c |
// SPDX-License-Identifier: GPL-2.0+
/* Copyright (C) 2015 Broadcom */
#include "v3d_drv.h"
#ifndef __CHECKER__
#define CREATE_TRACE_POINTS
#include "v3d_trace.h"
#endif
| linux-master | drivers/gpu/drm/v3d/v3d_trace_points.c |