python_code
stringlengths 0
1.8M
| repo_name
stringclasses 7
values | file_path
stringlengths 5
99
|
---|---|---|
// SPDX-License-Identifier: GPL-2.0+
/*
* Generic LVDS panel driver
*
* Copyright (C) 2016 Laurent Pinchart
* Copyright (C) 2016 Renesas Electronics Corporation
*
* Contact: Laurent Pinchart ([email protected])
*/
#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <video/display_timing.h>
#include <video/of_display_timing.h>
#include <video/videomode.h>
#include <drm/drm_crtc.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
/*
 * panel_lvds - driver data for a generic DT-described LVDS panel.
 *
 * All mode/bus information is parsed from the device tree at probe time
 * (see panel_lvds_parse_dt()) and exposed unchanged through the
 * drm_panel_funcs callbacks.
 */
struct panel_lvds {
	struct drm_panel panel;			/* embedded DRM panel, registered in probe */
	struct device *dev;			/* backing platform device */
	const char *label;			/* optional "label" DT property */
	unsigned int width;			/* NOTE(review): not used in the visible code */
	unsigned int height;			/* NOTE(review): not used in the visible code */
	struct drm_display_mode dmode;		/* fixed mode from "panel-timing" */
	u32 bus_flags;				/* DRM_BUS_FLAG_* derived from DT */
	unsigned int bus_format;		/* bus format from "data-mapping" */
	struct regulator *supply;		/* optional "power" supply, NULL if absent */
	struct gpio_desc *enable_gpio;		/* optional enable GPIO, NULL if absent */
	struct gpio_desc *reset_gpio;		/* optional reset GPIO, held deasserted */
	enum drm_panel_orientation orientation;	/* panel orientation from DT */
};
/* Map an embedded drm_panel back to its enclosing panel_lvds. */
static inline struct panel_lvds *to_panel_lvds(struct drm_panel *panel)
{
	struct panel_lvds *lvds = container_of(panel, struct panel_lvds, panel);

	return lvds;
}
/*
 * Power the panel down. Ordering matters: deassert the enable GPIO first,
 * then cut the supply. Both resources are optional and may be NULL.
 */
static int panel_lvds_unprepare(struct drm_panel *panel)
{
	struct panel_lvds *lvds = to_panel_lvds(panel);

	if (lvds->enable_gpio)
		gpiod_set_value_cansleep(lvds->enable_gpio, 0);

	if (lvds->supply)
		regulator_disable(lvds->supply);

	return 0;
}
/*
 * Power the panel up. Ordering matters: enable the (optional) supply
 * before asserting the (optional) enable GPIO.
 *
 * Returns 0 on success or the regulator_enable() error code.
 */
static int panel_lvds_prepare(struct drm_panel *panel)
{
	struct panel_lvds *lvds = to_panel_lvds(panel);

	if (lvds->supply) {
		int err;

		err = regulator_enable(lvds->supply);
		if (err < 0) {
			dev_err(lvds->dev, "failed to enable supply: %d\n",
				err);
			return err;
		}
	}

	if (lvds->enable_gpio)
		gpiod_set_value_cansleep(lvds->enable_gpio, 1);

	return 0;
}
/*
 * Expose the single fixed mode parsed from the device tree and propagate
 * the physical size and bus parameters to the connector.
 *
 * Returns the number of modes added (1), or 0 on allocation failure.
 */
static int panel_lvds_get_modes(struct drm_panel *panel,
				struct drm_connector *connector)
{
	struct panel_lvds *lvds = to_panel_lvds(panel);
	struct drm_display_mode *dup;

	dup = drm_mode_duplicate(connector->dev, &lvds->dmode);
	if (!dup)
		return 0;

	dup->type |= DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
	drm_mode_probed_add(connector, dup);

	connector->display_info.width_mm = lvds->dmode.width_mm;
	connector->display_info.height_mm = lvds->dmode.height_mm;
	connector->display_info.bus_flags = lvds->bus_flags;
	drm_display_info_set_bus_formats(&connector->display_info,
					 &lvds->bus_format, 1);

	/*
	 * TODO: Remove once all drm drivers call
	 * drm_connector_set_orientation_from_panel()
	 */
	drm_connector_set_panel_orientation(connector, lvds->orientation);

	return 1;
}
static enum drm_panel_orientation panel_lvds_get_orientation(struct drm_panel *panel)
{
struct panel_lvds *lvds = to_panel_lvds(panel);
return lvds->orientation;
}
/* drm_panel operations: fixed DT-described mode, simple supply/GPIO sequencing. */
static const struct drm_panel_funcs panel_lvds_funcs = {
	.unprepare = panel_lvds_unprepare,
	.prepare = panel_lvds_prepare,
	.get_modes = panel_lvds_get_modes,
	.get_orientation = panel_lvds_get_orientation,
};
/*
 * Parse all panel properties from the device tree node: orientation,
 * fixed display mode/timing, optional label, data mapping and mirroring.
 *
 * Returns 0 on success or a negative error code.
 */
static int panel_lvds_parse_dt(struct panel_lvds *lvds)
{
	struct device_node *np = lvds->dev->of_node;
	int err;

	err = of_drm_get_panel_orientation(np, &lvds->orientation);
	if (err < 0) {
		dev_err(lvds->dev, "%pOF: failed to get orientation %d\n", np, err);
		return err;
	}

	err = of_get_drm_panel_display_mode(np, &lvds->dmode, &lvds->bus_flags);
	if (err < 0) {
		dev_err(lvds->dev, "%pOF: problems parsing panel-timing (%d)\n",
			np, err);
		return err;
	}

	/* The label is optional: ignore lookup failures. */
	of_property_read_string(np, "label", &lvds->label);

	err = drm_of_lvds_get_data_mapping(np);
	if (err < 0) {
		dev_err(lvds->dev, "%pOF: invalid or missing %s DT property\n",
			np, "data-mapping");
		return err;
	}
	lvds->bus_format = err;

	if (of_property_read_bool(np, "data-mirror"))
		lvds->bus_flags |= DRM_BUS_FLAG_DATA_LSB_TO_MSB;
	else
		lvds->bus_flags |= DRM_BUS_FLAG_DATA_MSB_TO_LSB;

	return 0;
}
/*
 * Probe: allocate driver data, parse the DT, acquire the optional supply
 * and GPIOs, then register the backlight and the DRM panel.
 *
 * Uses dev_err_probe() so -EPROBE_DEFER is handled (and logged via the
 * deferred-probe infrastructure) uniformly instead of being special-cased
 * by hand.
 */
static int panel_lvds_probe(struct platform_device *pdev)
{
	struct panel_lvds *lvds;
	int ret;

	lvds = devm_kzalloc(&pdev->dev, sizeof(*lvds), GFP_KERNEL);
	if (!lvds)
		return -ENOMEM;

	lvds->dev = &pdev->dev;

	ret = panel_lvds_parse_dt(lvds);
	if (ret < 0)
		return ret;

	lvds->supply = devm_regulator_get_optional(lvds->dev, "power");
	if (IS_ERR(lvds->supply)) {
		ret = PTR_ERR(lvds->supply);
		if (ret != -ENODEV)
			return dev_err_probe(lvds->dev, ret,
					     "failed to request regulator\n");
		/* The "power" supply is optional: run without it. */
		lvds->supply = NULL;
	}

	/* Get GPIOs and backlight controller. */
	lvds->enable_gpio = devm_gpiod_get_optional(lvds->dev, "enable",
						    GPIOD_OUT_LOW);
	if (IS_ERR(lvds->enable_gpio))
		return dev_err_probe(lvds->dev, PTR_ERR(lvds->enable_gpio),
				     "failed to request %s GPIO\n", "enable");

	lvds->reset_gpio = devm_gpiod_get_optional(lvds->dev, "reset",
						   GPIOD_OUT_HIGH);
	if (IS_ERR(lvds->reset_gpio))
		return dev_err_probe(lvds->dev, PTR_ERR(lvds->reset_gpio),
				     "failed to request %s GPIO\n", "reset");

	/*
	 * TODO: Handle all power supplies specified in the DT node in a generic
	 * way for panels that don't care about power supply ordering. LVDS
	 * panels that require a specific power sequence will need a dedicated
	 * driver.
	 */

	/* Register the panel. */
	drm_panel_init(&lvds->panel, lvds->dev, &panel_lvds_funcs,
		       DRM_MODE_CONNECTOR_LVDS);

	ret = drm_panel_of_backlight(&lvds->panel);
	if (ret)
		return ret;

	drm_panel_add(&lvds->panel);

	dev_set_drvdata(lvds->dev, lvds);
	return 0;
}
/*
 * Remove: quiesce the panel hardware *before* unregistering it from DRM.
 * The original order (remove, then disable) operated on a panel that was
 * already detached from the DRM core.
 */
static void panel_lvds_remove(struct platform_device *pdev)
{
	struct panel_lvds *lvds = platform_get_drvdata(pdev);

	drm_panel_disable(&lvds->panel);
	drm_panel_remove(&lvds->panel);
}
/* Match any DT node compatible with the generic "panel-lvds" binding. */
static const struct of_device_id panel_lvds_of_table[] = {
	{ .compatible = "panel-lvds", },
	{ /* Sentinel */ },
};

MODULE_DEVICE_TABLE(of, panel_lvds_of_table);

static struct platform_driver panel_lvds_driver = {
	.probe = panel_lvds_probe,
	.remove_new = panel_lvds_remove,
	.driver = {
		.name = "panel-lvds",
		.of_match_table = panel_lvds_of_table,
	},
};

module_platform_driver(panel_lvds_driver);
MODULE_AUTHOR("Laurent Pinchart <[email protected]>");
MODULE_DESCRIPTION("LVDS Panel Driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/gpu/drm/panel/panel-lvds.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* ld9040 AMOLED LCD drm_panel driver.
*
* Copyright (c) 2014 Samsung Electronics Co., Ltd
* Derived from drivers/video/backlight/ld9040.c
*
* Andrzej Hajda <[email protected]>
*/
#include <linux/backlight.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/regulator/consumer.h>
#include <linux/spi/spi.h>
#include <video/mipi_display.h>
#include <video/of_videomode.h>
#include <video/videomode.h>
#include <drm/drm_modes.h>
#include <drm/drm_panel.h>
/* Manufacturer Command Set */
#define MCS_MANPWR 0xb0
#define MCS_ELVSS_ON 0xb1
#define MCS_USER_SETTING 0xf0
#define MCS_DISPCTL 0xf2
#define MCS_POWER_CTRL 0xf4
#define MCS_GTCON 0xf7
#define MCS_PANEL_CONDITION 0xf8
#define MCS_GAMMA_SET1 0xf9
#define MCS_GAMMA_CTRL 0xfb
/* array of gamma tables for gamma value 2.2 */
/*
 * Each of the 25 entries is a complete gamma command: the first byte is
 * MCS_GAMMA_SET1 (0xf9) followed by 21 parameter bytes. The table is
 * indexed by the backlight brightness level (0 = dimmest, 24 = brightest;
 * see ld9040_brightness_set() and ld9040_bl_props).
 */
static u8 const ld9040_gammas[25][22] = {
	{ 0xf9, 0x00, 0x13, 0xb2, 0xba, 0xd2, 0x00, 0x30, 0x00, 0xaf, 0xc0,
	  0xb8, 0xcd, 0x00, 0x3d, 0x00, 0xa8, 0xb8, 0xb7, 0xcd, 0x00, 0x44 },
	{ 0xf9, 0x00, 0x13, 0xb9, 0xb9, 0xd0, 0x00, 0x3c, 0x00, 0xaf, 0xbf,
	  0xb6, 0xcb, 0x00, 0x4b, 0x00, 0xa8, 0xb9, 0xb5, 0xcc, 0x00, 0x52 },
	{ 0xf9, 0x00, 0x13, 0xba, 0xb9, 0xcd, 0x00, 0x41, 0x00, 0xb0, 0xbe,
	  0xb5, 0xc9, 0x00, 0x51, 0x00, 0xa9, 0xb9, 0xb5, 0xca, 0x00, 0x57 },
	{ 0xf9, 0x00, 0x13, 0xb9, 0xb8, 0xcd, 0x00, 0x46, 0x00, 0xb1, 0xbc,
	  0xb5, 0xc8, 0x00, 0x56, 0x00, 0xaa, 0xb8, 0xb4, 0xc9, 0x00, 0x5d },
	{ 0xf9, 0x00, 0x13, 0xba, 0xb8, 0xcb, 0x00, 0x4b, 0x00, 0xb3, 0xbc,
	  0xb4, 0xc7, 0x00, 0x5c, 0x00, 0xac, 0xb8, 0xb4, 0xc8, 0x00, 0x62 },
	{ 0xf9, 0x00, 0x13, 0xbb, 0xb7, 0xca, 0x00, 0x4f, 0x00, 0xb4, 0xbb,
	  0xb3, 0xc7, 0x00, 0x60, 0x00, 0xad, 0xb8, 0xb4, 0xc7, 0x00, 0x67 },
	{ 0xf9, 0x00, 0x47, 0xba, 0xb6, 0xca, 0x00, 0x53, 0x00, 0xb5, 0xbb,
	  0xb3, 0xc6, 0x00, 0x65, 0x00, 0xae, 0xb8, 0xb3, 0xc7, 0x00, 0x6c },
	{ 0xf9, 0x00, 0x71, 0xbb, 0xb5, 0xc8, 0x00, 0x57, 0x00, 0xb5, 0xbb,
	  0xb0, 0xc5, 0x00, 0x6a, 0x00, 0xae, 0xb9, 0xb1, 0xc6, 0x00, 0x70 },
	{ 0xf9, 0x00, 0x7b, 0xbb, 0xb4, 0xc8, 0x00, 0x5b, 0x00, 0xb5, 0xba,
	  0xb1, 0xc4, 0x00, 0x6e, 0x00, 0xae, 0xb9, 0xb0, 0xc5, 0x00, 0x75 },
	{ 0xf9, 0x00, 0x82, 0xba, 0xb4, 0xc7, 0x00, 0x5f, 0x00, 0xb5, 0xba,
	  0xb0, 0xc3, 0x00, 0x72, 0x00, 0xae, 0xb8, 0xb0, 0xc3, 0x00, 0x7a },
	{ 0xf9, 0x00, 0x89, 0xba, 0xb3, 0xc8, 0x00, 0x62, 0x00, 0xb6, 0xba,
	  0xaf, 0xc3, 0x00, 0x76, 0x00, 0xaf, 0xb7, 0xae, 0xc4, 0x00, 0x7e },
	{ 0xf9, 0x00, 0x8b, 0xb9, 0xb3, 0xc7, 0x00, 0x65, 0x00, 0xb7, 0xb8,
	  0xaf, 0xc3, 0x00, 0x7a, 0x00, 0x80, 0xb6, 0xae, 0xc4, 0x00, 0x81 },
	{ 0xf9, 0x00, 0x93, 0xba, 0xb3, 0xc5, 0x00, 0x69, 0x00, 0xb8, 0xb9,
	  0xae, 0xc1, 0x00, 0x7f, 0x00, 0xb0, 0xb6, 0xae, 0xc3, 0x00, 0x85 },
	{ 0xf9, 0x00, 0x97, 0xba, 0xb2, 0xc5, 0x00, 0x6c, 0x00, 0xb8, 0xb8,
	  0xae, 0xc1, 0x00, 0x82, 0x00, 0xb0, 0xb6, 0xae, 0xc2, 0x00, 0x89 },
	{ 0xf9, 0x00, 0x9a, 0xba, 0xb1, 0xc4, 0x00, 0x6f, 0x00, 0xb8, 0xb8,
	  0xad, 0xc0, 0x00, 0x86, 0x00, 0xb0, 0xb7, 0xad, 0xc0, 0x00, 0x8d },
	{ 0xf9, 0x00, 0x9c, 0xb9, 0xb0, 0xc4, 0x00, 0x72, 0x00, 0xb8, 0xb8,
	  0xac, 0xbf, 0x00, 0x8a, 0x00, 0xb0, 0xb6, 0xac, 0xc0, 0x00, 0x91 },
	{ 0xf9, 0x00, 0x9e, 0xba, 0xb0, 0xc2, 0x00, 0x75, 0x00, 0xb9, 0xb8,
	  0xab, 0xbe, 0x00, 0x8e, 0x00, 0xb0, 0xb6, 0xac, 0xbf, 0x00, 0x94 },
	{ 0xf9, 0x00, 0xa0, 0xb9, 0xaf, 0xc3, 0x00, 0x77, 0x00, 0xb9, 0xb7,
	  0xab, 0xbe, 0x00, 0x90, 0x00, 0xb0, 0xb6, 0xab, 0xbf, 0x00, 0x97 },
	{ 0xf9, 0x00, 0xa2, 0xb9, 0xaf, 0xc2, 0x00, 0x7a, 0x00, 0xb9, 0xb7,
	  0xaa, 0xbd, 0x00, 0x94, 0x00, 0xb0, 0xb5, 0xab, 0xbf, 0x00, 0x9a },
	{ 0xf9, 0x00, 0xa4, 0xb9, 0xaf, 0xc1, 0x00, 0x7d, 0x00, 0xb9, 0xb6,
	  0xaa, 0xbb, 0x00, 0x97, 0x00, 0xb1, 0xb5, 0xaa, 0xbf, 0x00, 0x9d },
	{ 0xf9, 0x00, 0xa4, 0xb8, 0xb0, 0xbf, 0x00, 0x80, 0x00, 0xb8, 0xb6,
	  0xaa, 0xbc, 0x00, 0x9a, 0x00, 0xb0, 0xb5, 0xab, 0xbd, 0x00, 0xa0 },
	{ 0xf9, 0x00, 0xa8, 0xb8, 0xae, 0xbe, 0x00, 0x84, 0x00, 0xb9, 0xb7,
	  0xa8, 0xbc, 0x00, 0x9d, 0x00, 0xb2, 0xb5, 0xaa, 0xbc, 0x00, 0xa4 },
	{ 0xf9, 0x00, 0xa9, 0xb6, 0xad, 0xbf, 0x00, 0x86, 0x00, 0xb8, 0xb5,
	  0xa8, 0xbc, 0x00, 0xa0, 0x00, 0xb3, 0xb3, 0xa9, 0xbc, 0x00, 0xa7 },
	{ 0xf9, 0x00, 0xa9, 0xb7, 0xae, 0xbd, 0x00, 0x89, 0x00, 0xb7, 0xb6,
	  0xa8, 0xba, 0x00, 0xa4, 0x00, 0xb1, 0xb4, 0xaa, 0xbb, 0x00, 0xaa },
	{ 0xf9, 0x00, 0xa7, 0xb4, 0xae, 0xbf, 0x00, 0x91, 0x00, 0xb2, 0xb4,
	  0xaa, 0xbb, 0x00, 0xac, 0x00, 0xb3, 0xb1, 0xaa, 0xbc, 0x00, 0xb3 },
};
/* ld9040 - per-device driver state for the Samsung LD9040 AMOLED panel. */
struct ld9040 {
	struct device *dev;			/* underlying SPI device */
	struct drm_panel panel;			/* embedded DRM panel */

	struct regulator_bulk_data supplies[2];	/* "vdd3" and "vci" */
	struct gpio_desc *reset_gpio;		/* required reset GPIO */
	u32 power_on_delay;			/* ms, optional "power-on-delay" DT prop */
	u32 reset_delay;			/* ms, optional "reset-delay" DT prop */
	struct videomode vm;			/* display timing from DT */
	u32 width_mm;				/* physical width from DT */
	u32 height_mm;				/* physical height from DT */

	int brightness;				/* index into ld9040_gammas */

	/* This field is tested by functions directly accessing bus before
	 * transfer, transfer is skipped if it is set. In case of transfer
	 * failure or unexpected response the field is set to error value.
	 * Such construct allows to eliminate many checks in higher level
	 * functions.
	 */
	int error;
};
/* Map an embedded drm_panel back to its enclosing ld9040 context. */
static inline struct ld9040 *panel_to_ld9040(struct drm_panel *panel)
{
	struct ld9040 *ctx = container_of(panel, struct ld9040, panel);

	return ctx;
}
static int ld9040_clear_error(struct ld9040 *ctx)
{
int ret = ctx->error;
ctx->error = 0;
return ret;
}
/*
 * Send one 9-bit word to the panel; probe configures the SPI bus for
 * 9 bits-per-word (bit 8 is the command/data flag, see ld9040_dcs_write).
 *
 * NOTE(review): the word is transferred via its in-memory representation
 * (tx_buf = &data, len = 2) — this presumes the SPI controller handles
 * sub-16-bit words in host byte order; confirm on big-endian hosts.
 */
static int ld9040_spi_write_word(struct ld9040 *ctx, u16 data)
{
	struct spi_device *spi = to_spi_device(ctx->dev);
	struct spi_transfer xfer = {
		.len = 2,
		.tx_buf = &data,
	};
	struct spi_message msg;

	spi_message_init(&msg);
	spi_message_add_tail(&xfer, &msg);

	return spi_sync(spi, &msg);
}
/*
 * Send a DCS command sequence: the first byte goes out with the D/C bit
 * (bit 8) clear, every parameter byte with it set. A transfer error is
 * latched in ctx->error and silently short-circuits later writes.
 *
 * Fix: the error message used to print 'len'/'data' after they had been
 * decremented/advanced by the transmit loop, logging only the remaining
 * tail of the sequence. Log the original sequence instead.
 */
static void ld9040_dcs_write(struct ld9040 *ctx, const u8 *data, size_t len)
{
	const u8 *seq = data;
	size_t seq_len = len;
	int ret = 0;

	if (ctx->error < 0 || len == 0)
		return;

	dev_dbg(ctx->dev, "writing dcs seq: %*ph\n", (int)len, data);

	ret = ld9040_spi_write_word(ctx, *data);

	while (!ret && --len) {
		++data;
		ret = ld9040_spi_write_word(ctx, *data | 0x100);
	}

	if (ret) {
		dev_err(ctx->dev, "error %d writing dcs seq: %*ph\n", ret,
			(int)seq_len, seq);
		ctx->error = ret;
	}

	usleep_range(300, 310);
}
/*
 * Send a compile-time constant DCS sequence. The bytes are placed in a
 * function-local static array so no stack copy is made per call.
 */
#define ld9040_dcs_write_seq_static(ctx, seq...) \
({\
	static const u8 d[] = { seq };\
	ld9040_dcs_write(ctx, d, ARRAY_SIZE(d));\
})
/*
 * Push the gamma table matching the current brightness level, then latch
 * it with a gamma-control update.
 */
static void ld9040_brightness_set(struct ld9040 *ctx)
{
	ld9040_dcs_write(ctx, ld9040_gammas[ctx->brightness],
			 ARRAY_SIZE(ld9040_gammas[ctx->brightness]));

	ld9040_dcs_write_seq_static(ctx, MCS_GAMMA_CTRL, 0x02, 0x5a);
}
/*
 * Full panel initialization sequence. The register values are opaque
 * manufacturer settings; the order is significant and must not be changed.
 * Errors are latched by ld9040_dcs_write() and checked by the caller.
 */
static void ld9040_init(struct ld9040 *ctx)
{
	/* Unlock manufacturer command access. */
	ld9040_dcs_write_seq_static(ctx, MCS_USER_SETTING, 0x5a, 0x5a);
	ld9040_dcs_write_seq_static(ctx, MCS_PANEL_CONDITION,
				    0x05, 0x5e, 0x96, 0x6b, 0x7d, 0x0d, 0x3f, 0x00,
				    0x00, 0x32, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
				    0x07, 0x05, 0x1f, 0x1f, 0x1f, 0x00, 0x00);
	ld9040_dcs_write_seq_static(ctx, MCS_DISPCTL,
				    0x02, 0x06, 0x0a, 0x10, 0x10);
	ld9040_dcs_write_seq_static(ctx, MCS_MANPWR, 0x04);
	ld9040_dcs_write_seq_static(ctx, MCS_POWER_CTRL,
				    0x0a, 0x87, 0x25, 0x6a, 0x44, 0x02, 0x88);
	ld9040_dcs_write_seq_static(ctx, MCS_ELVSS_ON, 0x0f, 0x00, 0x16);
	ld9040_dcs_write_seq_static(ctx, MCS_GTCON, 0x09, 0x00, 0x00);
	/* Program the gamma table for the current brightness level. */
	ld9040_brightness_set(ctx);
	ld9040_dcs_write_seq_static(ctx, MIPI_DCS_EXIT_SLEEP_MODE);
	ld9040_dcs_write_seq_static(ctx, MIPI_DCS_SET_DISPLAY_ON);
}
/*
 * Enable both supplies, wait the DT-provided power-on delay, then pulse
 * the reset GPIO (deassert, wait, assert, wait). GPIO values are logical
 * levels; the physical polarity comes from the DT GPIO flags.
 *
 * Returns 0 on success or the regulator_bulk_enable() error code.
 */
static int ld9040_power_on(struct ld9040 *ctx)
{
	int ret;

	ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
	if (ret < 0)
		return ret;

	msleep(ctx->power_on_delay);
	gpiod_set_value(ctx->reset_gpio, 0);
	msleep(ctx->reset_delay);
	gpiod_set_value(ctx->reset_gpio, 1);
	msleep(ctx->reset_delay);

	return 0;
}
/* Cut both supplies ("vdd3", "vci") in one bulk operation. */
static int ld9040_power_off(struct ld9040 *ctx)
{
	return regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
}
/* Nothing to do on disable; the power-down sequence lives in unprepare. */
static int ld9040_disable(struct drm_panel *panel)
{
	return 0;
}
/*
 * Power-down sequence: wait, send display-off and sleep-in, wait again,
 * then cut the supplies. Any latched transfer error is discarded first so
 * the power-off always proceeds.
 *
 * NOTE(review): the 120/40 ms delays presumably follow the panel
 * datasheet — confirm before changing.
 */
static int ld9040_unprepare(struct drm_panel *panel)
{
	struct ld9040 *ctx = panel_to_ld9040(panel);

	msleep(120);
	ld9040_dcs_write_seq_static(ctx, MIPI_DCS_SET_DISPLAY_OFF);
	ld9040_dcs_write_seq_static(ctx, MIPI_DCS_ENTER_SLEEP_MODE);
	msleep(40);

	ld9040_clear_error(ctx);

	return ld9040_power_off(ctx);
}
/*
 * Power the panel up and run the init sequence. ld9040_init() latches any
 * bus error in ctx->error; it is collected afterwards via
 * ld9040_clear_error(), and on failure the panel is powered back down.
 */
static int ld9040_prepare(struct drm_panel *panel)
{
	struct ld9040 *ctx = panel_to_ld9040(panel);
	int ret;

	ret = ld9040_power_on(ctx);
	if (ret < 0)
		return ret;

	ld9040_init(ctx);

	ret = ld9040_clear_error(ctx);

	if (ret < 0)
		ld9040_unprepare(panel);

	return ret;
}
/* Nothing to do on enable; the power-up sequence lives in prepare. */
static int ld9040_enable(struct drm_panel *panel)
{
	return 0;
}
/*
 * Expose the single fixed mode built from the DT-provided videomode,
 * along with the physical panel dimensions.
 *
 * Returns the number of modes added (1), or 0 on allocation failure.
 */
static int ld9040_get_modes(struct drm_panel *panel,
			    struct drm_connector *connector)
{
	struct ld9040 *ctx = panel_to_ld9040(panel);
	struct drm_display_mode *mode = drm_mode_create(connector->dev);

	if (!mode) {
		dev_err(panel->dev, "failed to create a new display mode\n");
		return 0;
	}

	drm_display_mode_from_videomode(&ctx->vm, mode);
	mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
	mode->width_mm = ctx->width_mm;
	mode->height_mm = ctx->height_mm;

	connector->display_info.width_mm = ctx->width_mm;
	connector->display_info.height_mm = ctx->height_mm;

	drm_mode_probed_add(connector, mode);

	return 1;
}
/* drm_panel operations; enable/disable are no-ops, prepare/unprepare do the work. */
static const struct drm_panel_funcs ld9040_drm_funcs = {
	.disable = ld9040_disable,
	.unprepare = ld9040_unprepare,
	.prepare = ld9040_prepare,
	.enable = ld9040_enable,
	.get_modes = ld9040_get_modes,
};
/*
 * Read the display timing and the optional delay/size properties from
 * the device tree. Only the videomode is mandatory.
 *
 * Returns 0 on success or the of_get_videomode() error code.
 */
static int ld9040_parse_dt(struct ld9040 *ctx)
{
	struct device_node *np = ctx->dev->of_node;
	int err;

	err = of_get_videomode(np, &ctx->vm, 0);
	if (err < 0)
		return err;

	/* All of the following are optional: failures leave the zeroed defaults. */
	of_property_read_u32(np, "power-on-delay", &ctx->power_on_delay);
	of_property_read_u32(np, "reset-delay", &ctx->reset_delay);
	of_property_read_u32(np, "panel-width-mm", &ctx->width_mm);
	of_property_read_u32(np, "panel-height-mm", &ctx->height_mm);

	return 0;
}
/* Backlight callback: cache the requested level and push the matching gamma. */
static int ld9040_bl_update_status(struct backlight_device *dev)
{
	struct ld9040 *ctx = bl_get_data(dev);
	int level = backlight_get_brightness(dev);

	ctx->brightness = level;
	ld9040_brightness_set(ctx);

	return 0;
}
static const struct backlight_ops ld9040_bl_ops = {
	.update_status = ld9040_bl_update_status,
};

/* Brightness maps 1:1 onto the gamma table; default to full brightness. */
static const struct backlight_properties ld9040_bl_props = {
	.type = BACKLIGHT_RAW,
	.scale = BACKLIGHT_SCALE_NON_LINEAR,
	.max_brightness = ARRAY_SIZE(ld9040_gammas) - 1,
	.brightness = ARRAY_SIZE(ld9040_gammas) - 1,
};
/*
 * Probe: allocate driver data, parse the DT, acquire supplies and the
 * reset GPIO, configure the SPI bus for 9-bit words, then register the
 * backlight device and the DRM panel.
 */
static int ld9040_probe(struct spi_device *spi)
{
	struct backlight_device *bldev;
	struct device *dev = &spi->dev;
	struct ld9040 *ctx;
	int ret;

	ctx = devm_kzalloc(dev, sizeof(struct ld9040), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	spi_set_drvdata(spi, ctx);

	ctx->dev = dev;
	/* Start at the default (maximum) brightness defined by bl_props. */
	ctx->brightness = ld9040_bl_props.brightness;

	ret = ld9040_parse_dt(ctx);
	if (ret < 0)
		return ret;

	ctx->supplies[0].supply = "vdd3";
	ctx->supplies[1].supply = "vci";
	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(ctx->supplies),
				      ctx->supplies);
	if (ret < 0)
		return ret;

	ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
	if (IS_ERR(ctx->reset_gpio)) {
		dev_err(dev, "cannot get reset-gpios %ld\n",
			PTR_ERR(ctx->reset_gpio));
		return PTR_ERR(ctx->reset_gpio);
	}

	/* 9-bit words: D/C flag in bit 8 plus 8 data bits. */
	spi->bits_per_word = 9;
	ret = spi_setup(spi);
	if (ret < 0) {
		dev_err(dev, "spi setup failed.\n");
		return ret;
	}

	drm_panel_init(&ctx->panel, dev, &ld9040_drm_funcs,
		       DRM_MODE_CONNECTOR_DPI);

	bldev = devm_backlight_device_register(dev, dev_name(dev), dev,
					       ctx, &ld9040_bl_ops,
					       &ld9040_bl_props);
	if (IS_ERR(bldev))
		return PTR_ERR(bldev);

	drm_panel_add(&ctx->panel);

	return 0;
}
/* Remove: power the panel down, then unregister it from DRM. */
static void ld9040_remove(struct spi_device *spi)
{
	struct ld9040 *ctx = spi_get_drvdata(spi);

	ld9040_power_off(ctx);
	drm_panel_remove(&ctx->panel);
}
/* DT and legacy SPI device-id match tables. */
static const struct of_device_id ld9040_of_match[] = {
	{ .compatible = "samsung,ld9040" },
	{ }
};
MODULE_DEVICE_TABLE(of, ld9040_of_match);

static const struct spi_device_id ld9040_ids[] = {
	{ "ld9040", },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(spi, ld9040_ids);

static struct spi_driver ld9040_driver = {
	.probe = ld9040_probe,
	.remove = ld9040_remove,
	.id_table = ld9040_ids,
	.driver = {
		.name = "panel-samsung-ld9040",
		.of_match_table = ld9040_of_match,
	},
};
module_spi_driver(ld9040_driver);
MODULE_AUTHOR("Andrzej Hajda <[email protected]>");
MODULE_DESCRIPTION("ld9040 LCD Driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/gpu/drm/panel/panel-samsung-ld9040.c |
// SPDX-License-Identifier: GPL-2.0
/*
* LG.Philips LB035Q02 LCD Panel Driver
*
* Copyright (C) 2019 Texas Instruments Incorporated
*
* Based on the omapdrm-specific panel-lgphilips-lb035q02 driver
*
* Copyright (C) 2013 Texas Instruments Incorporated
* Author: Tomi Valkeinen <[email protected]>
*
* Based on a driver by: Steve Sakoman <[email protected]>
*/
#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/spi/spi.h>
#include <drm/drm_connector.h>
#include <drm/drm_modes.h>
#include <drm/drm_panel.h>
/* lb035q02_device - per-device state for the LG.Philips LB035Q02 panel. */
struct lb035q02_device {
	struct drm_panel panel;		/* embedded DRM panel */

	struct spi_device *spi;		/* control bus */
	struct gpio_desc *enable_gpio;	/* required enable GPIO */
};

/* Map an embedded drm_panel back to its enclosing lb035q02_device. */
#define to_lb035q02_device(p) container_of(p, struct lb035q02_device, panel)
/*
 * Write one 16-bit value to a panel register. A register access is two
 * 3-byte transfers in a single SPI message: a register-index write
 * (prefix 0x70) with chip-select toggled in between, then a value write
 * (prefix 0x72).
 *
 * Returns the spi_sync() result.
 */
static int lb035q02_write(struct lb035q02_device *lcd, u16 reg, u16 val)
{
	u8 buf[16];
	struct spi_transfer index_xfer = {
		.len = 3,
		.cs_change = 1,
	};
	struct spi_transfer value_xfer = {
		.len = 3,
	};
	struct spi_message msg;

	spi_message_init(&msg);

	/* Register index. */
	buf[0] = 0x70;
	buf[1] = 0x00;
	buf[2] = reg & 0x7f;
	index_xfer.tx_buf = buf;
	spi_message_add_tail(&index_xfer, &msg);

	/* Register value. */
	buf[4] = 0x72;
	buf[5] = val >> 8;
	buf[6] = val;
	value_xfer.tx_buf = buf + 4;
	spi_message_add_tail(&value_xfer, &msg);

	return spi_sync(lcd->spi, &msg);
}
static int lb035q02_init(struct lb035q02_device *lcd)
{
/* Init sequence from page 28 of the lb035q02 spec. */
static const struct {
u16 index;
u16 value;
} init_data[] = {
{ 0x01, 0x6300 },
{ 0x02, 0x0200 },
{ 0x03, 0x0177 },
{ 0x04, 0x04c7 },
{ 0x05, 0xffc0 },
{ 0x06, 0xe806 },
{ 0x0a, 0x4008 },
{ 0x0b, 0x0000 },
{ 0x0d, 0x0030 },
{ 0x0e, 0x2800 },
{ 0x0f, 0x0000 },
{ 0x16, 0x9f80 },
{ 0x17, 0x0a0f },
{ 0x1e, 0x00c1 },
{ 0x30, 0x0300 },
{ 0x31, 0x0007 },
{ 0x32, 0x0000 },
{ 0x33, 0x0000 },
{ 0x34, 0x0707 },
{ 0x35, 0x0004 },
{ 0x36, 0x0302 },
{ 0x37, 0x0202 },
{ 0x3a, 0x0a0d },
{ 0x3b, 0x0806 },
};
unsigned int i;
int ret;
for (i = 0; i < ARRAY_SIZE(init_data); ++i) {
ret = lb035q02_write(lcd, init_data[i].index,
init_data[i].value);
if (ret < 0)
return ret;
}
return 0;
}
/* Deassert the enable GPIO to blank the panel. */
static int lb035q02_disable(struct drm_panel *panel)
{
	struct lb035q02_device *lb = to_lb035q02_device(panel);

	gpiod_set_value_cansleep(lb->enable_gpio, 0);

	return 0;
}
/* Assert the enable GPIO to turn the panel output on. */
static int lb035q02_enable(struct drm_panel *panel)
{
	struct lb035q02_device *lb = to_lb035q02_device(panel);

	gpiod_set_value_cansleep(lb->enable_gpio, 1);

	return 0;
}
/* Fixed 320x240 mode; horizontal/vertical timings are display + porch + sync. */
static const struct drm_display_mode lb035q02_mode = {
	.clock = 6500,			/* kHz */
	.hdisplay = 320,
	.hsync_start = 320 + 20,
	.hsync_end = 320 + 20 + 2,
	.htotal = 320 + 20 + 2 + 68,
	.vdisplay = 240,
	.vsync_start = 240 + 4,
	.vsync_end = 240 + 4 + 2,
	.vtotal = 240 + 4 + 2 + 18,
	.type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
	.flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
	.width_mm = 70,
	.height_mm = 53,
};
/*
 * Expose the single fixed mode and the connector bus parameters.
 *
 * Returns the number of modes added (1), or -ENOMEM on allocation failure.
 */
static int lb035q02_get_modes(struct drm_panel *panel,
			      struct drm_connector *connector)
{
	struct drm_display_mode *mode;

	mode = drm_mode_duplicate(connector->dev, &lb035q02_mode);
	if (!mode)
		return -ENOMEM;

	drm_mode_set_name(mode);
	drm_mode_probed_add(connector, mode);

	connector->display_info.height_mm = lb035q02_mode.height_mm;
	connector->display_info.width_mm = lb035q02_mode.width_mm;

	/*
	 * FIXME: According to the datasheet pixel data is sampled on the
	 * rising edge of the clock, but the code running on the Gumstix Overo
	 * Palo35 indicates sampling on the negative edge. This should be
	 * tested on a real device.
	 */
	connector->display_info.bus_flags = DRM_BUS_FLAG_DE_HIGH |
					    DRM_BUS_FLAG_SYNC_SAMPLE_POSEDGE |
					    DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE;

	return 1;
}
/* drm_panel operations; init is done once at probe, enable/disable toggle a GPIO. */
static const struct drm_panel_funcs lb035q02_funcs = {
	.disable = lb035q02_disable,
	.enable = lb035q02_enable,
	.get_modes = lb035q02_get_modes,
};
/*
 * Probe: allocate driver data, acquire the enable GPIO, run the one-time
 * register init sequence over SPI, then register the DRM panel.
 */
static int lb035q02_probe(struct spi_device *spi)
{
	struct lb035q02_device *lcd;
	int ret;

	lcd = devm_kzalloc(&spi->dev, sizeof(*lcd), GFP_KERNEL);
	if (!lcd)
		return -ENOMEM;

	spi_set_drvdata(spi, lcd);
	lcd->spi = spi;

	lcd->enable_gpio = devm_gpiod_get(&spi->dev, "enable", GPIOD_OUT_LOW);
	if (IS_ERR(lcd->enable_gpio)) {
		dev_err(&spi->dev, "failed to parse enable gpio\n");
		return PTR_ERR(lcd->enable_gpio);
	}

	ret = lb035q02_init(lcd);
	if (ret < 0)
		return ret;

	drm_panel_init(&lcd->panel, &lcd->spi->dev, &lb035q02_funcs,
		       DRM_MODE_CONNECTOR_DPI);

	drm_panel_add(&lcd->panel);

	return 0;
}
/*
 * Remove: blank the panel *before* unregistering it from DRM. The
 * original order (remove, then disable) operated on a panel already
 * detached from the DRM core.
 */
static void lb035q02_remove(struct spi_device *spi)
{
	struct lb035q02_device *lcd = spi_get_drvdata(spi);

	drm_panel_disable(&lcd->panel);
	drm_panel_remove(&lcd->panel);
}
/* DT and legacy SPI device-id match tables. */
static const struct of_device_id lb035q02_of_match[] = {
	{ .compatible = "lgphilips,lb035q02", },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, lb035q02_of_match);

static const struct spi_device_id lb035q02_ids[] = {
	{ "lb035q02", 0 },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(spi, lb035q02_ids);

static struct spi_driver lb035q02_driver = {
	.probe = lb035q02_probe,
	.remove = lb035q02_remove,
	.id_table = lb035q02_ids,
	.driver = {
		.name = "panel-lg-lb035q02",
		.of_match_table = lb035q02_of_match,
	},
};
module_spi_driver(lb035q02_driver);
MODULE_AUTHOR("Tomi Valkeinen <[email protected]>");
MODULE_DESCRIPTION("LG.Philips LB035Q02 LCD Panel driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/gpu/drm/panel/panel-lg-lb035q02.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2020 BayLibre, SAS
* Author: Neil Armstrong <[email protected]>
*/
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/regulator/consumer.h>
#include <video/mipi_display.h>
#include <drm/drm_crtc.h>
#include <drm/drm_device.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_modes.h>
#include <drm/drm_panel.h>
/* khadas_ts050_panel - per-device state for the Khadas TS050 DSI panel. */
struct khadas_ts050_panel {
	struct drm_panel base;		/* embedded DRM panel */
	struct mipi_dsi_device *link;	/* DSI host link used for all commands */

	struct regulator *supply;	/* panel power supply */
	struct gpio_desc *reset_gpio;	/* reset GPIO */
	struct gpio_desc *enable_gpio;	/* enable GPIO */

	bool prepared;			/* guards against double prepare */
	bool enabled;			/* NOTE(review): not used in the visible code */
};
/* One init-sequence entry: a single-parameter DCS write (cmd, data). */
struct khadas_ts050_panel_cmd {
	u8 cmd;
	u8 data;
};
/* Only the CMD1 User Command set is documented */
static const struct khadas_ts050_panel_cmd init_code[] = {
/* Select Unknown CMD Page (Undocumented) */
{0xff, 0xee},
/* Reload CMD1: Don't reload default value to register */
{0xfb, 0x01},
{0x1f, 0x45},
{0x24, 0x4f},
{0x38, 0xc8},
{0x39, 0x27},
{0x1e, 0x77},
{0x1d, 0x0f},
{0x7e, 0x71},
{0x7c, 0x03},
{0xff, 0x00},
{0xfb, 0x01},
{0x35, 0x01},
/* Select CMD2 Page0 (Undocumented) */
{0xff, 0x01},
/* Reload CMD1: Don't reload default value to register */
{0xfb, 0x01},
{0x00, 0x01},
{0x01, 0x55},
{0x02, 0x40},
{0x05, 0x40},
{0x06, 0x4a},
{0x07, 0x24},
{0x08, 0x0c},
{0x0b, 0x7d},
{0x0c, 0x7d},
{0x0e, 0xb0},
{0x0f, 0xae},
{0x11, 0x10},
{0x12, 0x10},
{0x13, 0x03},
{0x14, 0x4a},
{0x15, 0x12},
{0x16, 0x12},
{0x18, 0x00},
{0x19, 0x77},
{0x1a, 0x55},
{0x1b, 0x13},
{0x1c, 0x00},
{0x1d, 0x00},
{0x1e, 0x13},
{0x1f, 0x00},
{0x23, 0x00},
{0x24, 0x00},
{0x25, 0x00},
{0x26, 0x00},
{0x27, 0x00},
{0x28, 0x00},
{0x35, 0x00},
{0x66, 0x00},
{0x58, 0x82},
{0x59, 0x02},
{0x5a, 0x02},
{0x5b, 0x02},
{0x5c, 0x82},
{0x5d, 0x82},
{0x5e, 0x02},
{0x5f, 0x02},
{0x72, 0x31},
/* Select CMD2 Page4 (Undocumented) */
{0xff, 0x05},
/* Reload CMD1: Don't reload default value to register */
{0xfb, 0x01},
{0x00, 0x01},
{0x01, 0x0b},
{0x02, 0x0c},
{0x03, 0x09},
{0x04, 0x0a},
{0x05, 0x00},
{0x06, 0x0f},
{0x07, 0x10},
{0x08, 0x00},
{0x09, 0x00},
{0x0a, 0x00},
{0x0b, 0x00},
{0x0c, 0x00},
{0x0d, 0x13},
{0x0e, 0x15},
{0x0f, 0x17},
{0x10, 0x01},
{0x11, 0x0b},
{0x12, 0x0c},
{0x13, 0x09},
{0x14, 0x0a},
{0x15, 0x00},
{0x16, 0x0f},
{0x17, 0x10},
{0x18, 0x00},
{0x19, 0x00},
{0x1a, 0x00},
{0x1b, 0x00},
{0x1c, 0x00},
{0x1d, 0x13},
{0x1e, 0x15},
{0x1f, 0x17},
{0x20, 0x00},
{0x21, 0x03},
{0x22, 0x01},
{0x23, 0x40},
{0x24, 0x40},
{0x25, 0xed},
{0x29, 0x58},
{0x2a, 0x12},
{0x2b, 0x01},
{0x4b, 0x06},
{0x4c, 0x11},
{0x4d, 0x20},
{0x4e, 0x02},
{0x4f, 0x02},
{0x50, 0x20},
{0x51, 0x61},
{0x52, 0x01},
{0x53, 0x63},
{0x54, 0x77},
{0x55, 0xed},
{0x5b, 0x00},
{0x5c, 0x00},
{0x5d, 0x00},
{0x5e, 0x00},
{0x5f, 0x15},
{0x60, 0x75},
{0x61, 0x00},
{0x62, 0x00},
{0x63, 0x00},
{0x64, 0x00},
{0x65, 0x00},
{0x66, 0x00},
{0x67, 0x00},
{0x68, 0x04},
{0x69, 0x00},
{0x6a, 0x00},
{0x6c, 0x40},
{0x75, 0x01},
{0x76, 0x01},
{0x7a, 0x80},
{0x7b, 0xa3},
{0x7c, 0xd8},
{0x7d, 0x60},
{0x7f, 0x15},
{0x80, 0x81},
{0x83, 0x05},
{0x93, 0x08},
{0x94, 0x10},
{0x8a, 0x00},
{0x9b, 0x0f},
{0xea, 0xff},
{0xec, 0x00},
/* Select CMD2 Page0 (Undocumented) */
{0xff, 0x01},
/* Reload CMD1: Don't reload default value to register */
{0xfb, 0x01},
{0x75, 0x00},
{0x76, 0xdf},
{0x77, 0x00},
{0x78, 0xe4},
{0x79, 0x00},
{0x7a, 0xed},
{0x7b, 0x00},
{0x7c, 0xf6},
{0x7d, 0x00},
{0x7e, 0xff},
{0x7f, 0x01},
{0x80, 0x07},
{0x81, 0x01},
{0x82, 0x10},
{0x83, 0x01},
{0x84, 0x18},
{0x85, 0x01},
{0x86, 0x20},
{0x87, 0x01},
{0x88, 0x3d},
{0x89, 0x01},
{0x8a, 0x56},
{0x8b, 0x01},
{0x8c, 0x84},
{0x8d, 0x01},
{0x8e, 0xab},
{0x8f, 0x01},
{0x90, 0xec},
{0x91, 0x02},
{0x92, 0x22},
{0x93, 0x02},
{0x94, 0x23},
{0x95, 0x02},
{0x96, 0x55},
{0x97, 0x02},
{0x98, 0x8b},
{0x99, 0x02},
{0x9a, 0xaf},
{0x9b, 0x02},
{0x9c, 0xdf},
{0x9d, 0x03},
{0x9e, 0x01},
{0x9f, 0x03},
{0xa0, 0x2c},
{0xa2, 0x03},
{0xa3, 0x39},
{0xa4, 0x03},
{0xa5, 0x47},
{0xa6, 0x03},
{0xa7, 0x56},
{0xa9, 0x03},
{0xaa, 0x66},
{0xab, 0x03},
{0xac, 0x76},
{0xad, 0x03},
{0xae, 0x85},
{0xaf, 0x03},
{0xb0, 0x90},
{0xb1, 0x03},
{0xb2, 0xcb},
{0xb3, 0x00},
{0xb4, 0xdf},
{0xb5, 0x00},
{0xb6, 0xe4},
{0xb7, 0x00},
{0xb8, 0xed},
{0xb9, 0x00},
{0xba, 0xf6},
{0xbb, 0x00},
{0xbc, 0xff},
{0xbd, 0x01},
{0xbe, 0x07},
{0xbf, 0x01},
{0xc0, 0x10},
{0xc1, 0x01},
{0xc2, 0x18},
{0xc3, 0x01},
{0xc4, 0x20},
{0xc5, 0x01},
{0xc6, 0x3d},
{0xc7, 0x01},
{0xc8, 0x56},
{0xc9, 0x01},
{0xca, 0x84},
{0xcb, 0x01},
{0xcc, 0xab},
{0xcd, 0x01},
{0xce, 0xec},
{0xcf, 0x02},
{0xd0, 0x22},
{0xd1, 0x02},
{0xd2, 0x23},
{0xd3, 0x02},
{0xd4, 0x55},
{0xd5, 0x02},
{0xd6, 0x8b},
{0xd7, 0x02},
{0xd8, 0xaf},
{0xd9, 0x02},
{0xda, 0xdf},
{0xdb, 0x03},
{0xdc, 0x01},
{0xdd, 0x03},
{0xde, 0x2c},
{0xdf, 0x03},
{0xe0, 0x39},
{0xe1, 0x03},
{0xe2, 0x47},
{0xe3, 0x03},
{0xe4, 0x56},
{0xe5, 0x03},
{0xe6, 0x66},
{0xe7, 0x03},
{0xe8, 0x76},
{0xe9, 0x03},
{0xea, 0x85},
{0xeb, 0x03},
{0xec, 0x90},
{0xed, 0x03},
{0xee, 0xcb},
{0xef, 0x00},
{0xf0, 0xbb},
{0xf1, 0x00},
{0xf2, 0xc0},
{0xf3, 0x00},
{0xf4, 0xcc},
{0xf5, 0x00},
{0xf6, 0xd6},
{0xf7, 0x00},
{0xf8, 0xe1},
{0xf9, 0x00},
{0xfa, 0xea},
/* Select CMD2 Page2 (Undocumented) */
{0xff, 0x02},
/* Reload CMD1: Don't reload default value to register */
{0xfb, 0x01},
{0x00, 0x00},
{0x01, 0xf4},
{0x02, 0x00},
{0x03, 0xef},
{0x04, 0x01},
{0x05, 0x07},
{0x06, 0x01},
{0x07, 0x28},
{0x08, 0x01},
{0x09, 0x44},
{0x0a, 0x01},
{0x0b, 0x76},
{0x0c, 0x01},
{0x0d, 0xa0},
{0x0e, 0x01},
{0x0f, 0xe7},
{0x10, 0x02},
{0x11, 0x1f},
{0x12, 0x02},
{0x13, 0x22},
{0x14, 0x02},
{0x15, 0x54},
{0x16, 0x02},
{0x17, 0x8b},
{0x18, 0x02},
{0x19, 0xaf},
{0x1a, 0x02},
{0x1b, 0xe0},
{0x1c, 0x03},
{0x1d, 0x01},
{0x1e, 0x03},
{0x1f, 0x2d},
{0x20, 0x03},
{0x21, 0x39},
{0x22, 0x03},
{0x23, 0x47},
{0x24, 0x03},
{0x25, 0x57},
{0x26, 0x03},
{0x27, 0x65},
{0x28, 0x03},
{0x29, 0x77},
{0x2a, 0x03},
{0x2b, 0x85},
{0x2d, 0x03},
{0x2f, 0x8f},
{0x30, 0x03},
{0x31, 0xcb},
{0x32, 0x00},
{0x33, 0xbb},
{0x34, 0x00},
{0x35, 0xc0},
{0x36, 0x00},
{0x37, 0xcc},
{0x38, 0x00},
{0x39, 0xd6},
{0x3a, 0x00},
{0x3b, 0xe1},
{0x3d, 0x00},
{0x3f, 0xea},
{0x40, 0x00},
{0x41, 0xf4},
{0x42, 0x00},
{0x43, 0xfe},
{0x44, 0x01},
{0x45, 0x07},
{0x46, 0x01},
{0x47, 0x28},
{0x48, 0x01},
{0x49, 0x44},
{0x4a, 0x01},
{0x4b, 0x76},
{0x4c, 0x01},
{0x4d, 0xa0},
{0x4e, 0x01},
{0x4f, 0xe7},
{0x50, 0x02},
{0x51, 0x1f},
{0x52, 0x02},
{0x53, 0x22},
{0x54, 0x02},
{0x55, 0x54},
{0x56, 0x02},
{0x58, 0x8b},
{0x59, 0x02},
{0x5a, 0xaf},
{0x5b, 0x02},
{0x5c, 0xe0},
{0x5d, 0x03},
{0x5e, 0x01},
{0x5f, 0x03},
{0x60, 0x2d},
{0x61, 0x03},
{0x62, 0x39},
{0x63, 0x03},
{0x64, 0x47},
{0x65, 0x03},
{0x66, 0x57},
{0x67, 0x03},
{0x68, 0x65},
{0x69, 0x03},
{0x6a, 0x77},
{0x6b, 0x03},
{0x6c, 0x85},
{0x6d, 0x03},
{0x6e, 0x8f},
{0x6f, 0x03},
{0x70, 0xcb},
{0x71, 0x00},
{0x72, 0x00},
{0x73, 0x00},
{0x74, 0x21},
{0x75, 0x00},
{0x76, 0x4c},
{0x77, 0x00},
{0x78, 0x6b},
{0x79, 0x00},
{0x7a, 0x85},
{0x7b, 0x00},
{0x7c, 0x9a},
{0x7d, 0x00},
{0x7e, 0xad},
{0x7f, 0x00},
{0x80, 0xbe},
{0x81, 0x00},
{0x82, 0xcd},
{0x83, 0x01},
{0x84, 0x01},
{0x85, 0x01},
{0x86, 0x29},
{0x87, 0x01},
{0x88, 0x68},
{0x89, 0x01},
{0x8a, 0x98},
{0x8b, 0x01},
{0x8c, 0xe5},
{0x8d, 0x02},
{0x8e, 0x1e},
{0x8f, 0x02},
{0x90, 0x30},
{0x91, 0x02},
{0x92, 0x52},
{0x93, 0x02},
{0x94, 0x88},
{0x95, 0x02},
{0x96, 0xaa},
{0x97, 0x02},
{0x98, 0xd7},
{0x99, 0x02},
{0x9a, 0xf7},
{0x9b, 0x03},
{0x9c, 0x21},
{0x9d, 0x03},
{0x9e, 0x2e},
{0x9f, 0x03},
{0xa0, 0x3d},
{0xa2, 0x03},
{0xa3, 0x4c},
{0xa4, 0x03},
{0xa5, 0x5e},
{0xa6, 0x03},
{0xa7, 0x71},
{0xa9, 0x03},
{0xaa, 0x86},
{0xab, 0x03},
{0xac, 0x94},
{0xad, 0x03},
{0xae, 0xfa},
{0xaf, 0x00},
{0xb0, 0x00},
{0xb1, 0x00},
{0xb2, 0x21},
{0xb3, 0x00},
{0xb4, 0x4c},
{0xb5, 0x00},
{0xb6, 0x6b},
{0xb7, 0x00},
{0xb8, 0x85},
{0xb9, 0x00},
{0xba, 0x9a},
{0xbb, 0x00},
{0xbc, 0xad},
{0xbd, 0x00},
{0xbe, 0xbe},
{0xbf, 0x00},
{0xc0, 0xcd},
{0xc1, 0x01},
{0xc2, 0x01},
{0xc3, 0x01},
{0xc4, 0x29},
{0xc5, 0x01},
{0xc6, 0x68},
{0xc7, 0x01},
{0xc8, 0x98},
{0xc9, 0x01},
{0xca, 0xe5},
{0xcb, 0x02},
{0xcc, 0x1e},
{0xcd, 0x02},
{0xce, 0x20},
{0xcf, 0x02},
{0xd0, 0x52},
{0xd1, 0x02},
{0xd2, 0x88},
{0xd3, 0x02},
{0xd4, 0xaa},
{0xd5, 0x02},
{0xd6, 0xd7},
{0xd7, 0x02},
{0xd8, 0xf7},
{0xd9, 0x03},
{0xda, 0x21},
{0xdb, 0x03},
{0xdc, 0x2e},
{0xdd, 0x03},
{0xde, 0x3d},
{0xdf, 0x03},
{0xe0, 0x4c},
{0xe1, 0x03},
{0xe2, 0x5e},
{0xe3, 0x03},
{0xe4, 0x71},
{0xe5, 0x03},
{0xe6, 0x86},
{0xe7, 0x03},
{0xe8, 0x94},
{0xe9, 0x03},
{0xea, 0xfa},
/* Select CMD2 Page0 (Undocumented) */
{0xff, 0x01},
/* Reload CMD1: Don't reload default value to register */
{0xfb, 0x01},
/* Select CMD2 Page1 (Undocumented) */
{0xff, 0x02},
/* Reload CMD1: Don't reload default value to register */
{0xfb, 0x01},
/* Select CMD2 Page3 (Undocumented) */
{0xff, 0x04},
/* Reload CMD1: Don't reload default value to register */
{0xfb, 0x01},
/* Select CMD1 */
{0xff, 0x00},
{0xd3, 0x22}, /* RGBMIPICTRL: VSYNC back porch = 34 */
{0xd4, 0x04}, /* RGBMIPICTRL: VSYNC front porch = 4 */
};
/* Map an embedded drm_panel back to its enclosing khadas_ts050_panel. */
static inline
struct khadas_ts050_panel *to_khadas_ts050_panel(struct drm_panel *panel)
{
	struct khadas_ts050_panel *p = container_of(panel,
						    struct khadas_ts050_panel,
						    base);

	return p;
}
/*
 * Power up and initialize the panel.
 *
 * Sequence: enable the supply, toggle the enable/reset GPIOs with the
 * delays the panel requires, push the vendor init table over DSI, exit
 * sleep mode and finally turn the display on.  Any failure unwinds
 * through the poweroff label, which cuts power again.
 *
 * Fix over the original: the return values of the initial three
 * mipi_dsi_dcs_write() calls (CMD2 page select, no-reload, 0xc5) and
 * of the later CMD1 re-select were ignored, so a failing DSI link went
 * undetected until much later.  They are now checked like every other
 * command in the sequence.
 */
static int khadas_ts050_panel_prepare(struct drm_panel *panel)
{
	struct khadas_ts050_panel *khadas_ts050 = to_khadas_ts050_panel(panel);
	unsigned int i;
	int err;

	if (khadas_ts050->prepared)
		return 0;

	gpiod_set_value_cansleep(khadas_ts050->enable_gpio, 0);

	err = regulator_enable(khadas_ts050->supply);
	if (err < 0)
		return err;

	gpiod_set_value_cansleep(khadas_ts050->enable_gpio, 1);
	msleep(60);

	gpiod_set_value_cansleep(khadas_ts050->reset_gpio, 1);
	usleep_range(10000, 11000);
	gpiod_set_value_cansleep(khadas_ts050->reset_gpio, 0);

	/* Select CMD2 page 4 (Undocumented) */
	err = mipi_dsi_dcs_write(khadas_ts050->link, 0xff, (u8[]){ 0x05 }, 1);
	if (err < 0) {
		dev_err(panel->dev, "failed to select CMD2 page 4: %d\n", err);
		goto poweroff;
	}

	/* Reload CMD1: Don't reload default value to register */
	err = mipi_dsi_dcs_write(khadas_ts050->link, 0xfb, (u8[]){ 0x01 }, 1);
	if (err < 0) {
		dev_err(panel->dev, "failed to disable CMD1 reload: %d\n", err);
		goto poweroff;
	}

	err = mipi_dsi_dcs_write(khadas_ts050->link, 0xc5, (u8[]){ 0x01 }, 1);
	if (err < 0) {
		dev_err(panel->dev, "failed to write register 0xc5: %d\n", err);
		goto poweroff;
	}

	msleep(100);

	/* Push the vendor-supplied register table */
	for (i = 0; i < ARRAY_SIZE(init_code); i++) {
		err = mipi_dsi_dcs_write(khadas_ts050->link,
					 init_code[i].cmd,
					 &init_code[i].data, 1);
		if (err < 0) {
			dev_err(panel->dev, "failed write cmds: %d\n", err);
			goto poweroff;
		}
	}

	err = mipi_dsi_dcs_exit_sleep_mode(khadas_ts050->link);
	if (err < 0) {
		dev_err(panel->dev, "failed to exit sleep mode: %d\n", err);
		goto poweroff;
	}

	/* Per DCS spec, allow 120ms after exit_sleep before further commands */
	msleep(120);

	/* Select CMD1 */
	err = mipi_dsi_dcs_write(khadas_ts050->link, 0xff, (u8[]){ 0x00 }, 1);
	if (err < 0) {
		dev_err(panel->dev, "failed to select CMD1: %d\n", err);
		goto poweroff;
	}

	err = mipi_dsi_dcs_set_tear_on(khadas_ts050->link,
				       MIPI_DSI_DCS_TEAR_MODE_VBLANK);
	if (err < 0) {
		dev_err(panel->dev, "failed to set tear on: %d\n", err);
		goto poweroff;
	}

	err = mipi_dsi_dcs_set_display_on(khadas_ts050->link);
	if (err < 0) {
		dev_err(panel->dev, "failed to set display on: %d\n", err);
		goto poweroff;
	}

	usleep_range(10000, 11000);

	khadas_ts050->prepared = true;

	return 0;

poweroff:
	gpiod_set_value_cansleep(khadas_ts050->enable_gpio, 0);
	gpiod_set_value_cansleep(khadas_ts050->reset_gpio, 1);

	regulator_disable(khadas_ts050->supply);

	return err;
}
/* Put the panel back into sleep mode and cut its power rails. */
static int khadas_ts050_panel_unprepare(struct drm_panel *panel)
{
	struct khadas_ts050_panel *ts050 = to_khadas_ts050_panel(panel);
	int ret;

	if (!ts050->prepared)
		return 0;

	ts050->prepared = false;

	ret = mipi_dsi_dcs_enter_sleep_mode(ts050->link);
	if (ret < 0)
		dev_err(panel->dev, "failed to enter sleep mode: %d\n", ret);

	/* Let the controller settle before removing power */
	msleep(150);

	gpiod_set_value_cansleep(ts050->enable_gpio, 0);
	gpiod_set_value_cansleep(ts050->reset_gpio, 1);

	return regulator_disable(ts050->supply);
}
/* Nothing to do at enable time; the panel was lit in prepare(). */
static int khadas_ts050_panel_enable(struct drm_panel *panel)
{
	struct khadas_ts050_panel *ts050 = to_khadas_ts050_panel(panel);

	ts050->enabled = true;

	return 0;
}
/* Turn the display off over DSI; power stays up until unprepare(). */
static int khadas_ts050_panel_disable(struct drm_panel *panel)
{
	struct khadas_ts050_panel *ts050 = to_khadas_ts050_panel(panel);
	int ret;

	if (!ts050->enabled)
		return 0;

	ret = mipi_dsi_dcs_set_display_off(ts050->link);
	if (ret < 0)
		dev_err(panel->dev, "failed to set display off: %d\n", ret);

	usleep_range(10000, 11000);

	ts050->enabled = false;

	return 0;
}
/*
 * The panel's single fixed mode: 1080x1920 portrait, 160 MHz pixel
 * clock (~60 Hz with the 1362x1958 total), negative sync polarities.
 */
static const struct drm_display_mode default_mode = {
	.clock = 160000,
	.hdisplay = 1080,
	.hsync_start = 1080 + 117,
	.hsync_end = 1080 + 117 + 5,
	.htotal = 1080 + 117 + 5 + 160,
	.vdisplay = 1920,
	.vsync_start = 1920 + 4,
	.vsync_end = 1920 + 4 + 3,
	.vtotal = 1920 + 4 + 3 + 31,
	.flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
};
/*
 * drm_panel .get_modes: register the single fixed mode on the
 * connector and fill in the physical size (64x118 mm) and color depth.
 * Returns the number of modes added (1), or -ENOMEM on allocation
 * failure.
 */
static int khadas_ts050_panel_get_modes(struct drm_panel *panel,
					struct drm_connector *connector)
{
	struct drm_display_mode *mode;

	mode = drm_mode_duplicate(connector->dev, &default_mode);
	if (!mode) {
		dev_err(panel->dev, "failed to add mode %ux%u@%u\n",
			default_mode.hdisplay, default_mode.vdisplay,
			drm_mode_vrefresh(&default_mode));
		return -ENOMEM;
	}

	drm_mode_set_name(mode);
	drm_mode_probed_add(connector, mode);

	connector->display_info.width_mm = 64;
	connector->display_info.height_mm = 118;
	connector->display_info.bpc = 8;

	return 1;
}
/* drm_panel operations for the Khadas TS050. */
static const struct drm_panel_funcs khadas_ts050_panel_funcs = {
	.prepare = khadas_ts050_panel_prepare,
	.unprepare = khadas_ts050_panel_unprepare,
	.enable = khadas_ts050_panel_enable,
	.disable = khadas_ts050_panel_disable,
	.get_modes = khadas_ts050_panel_get_modes,
};

/* Device-tree match table. */
static const struct of_device_id khadas_ts050_of_match[] = {
	{ .compatible = "khadas,ts050", },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, khadas_ts050_of_match);
/*
 * Acquire the supply, the reset/enable GPIOs and the backlight, then
 * register the DRM panel.  All resources are devm-managed.
 */
static int khadas_ts050_panel_add(struct khadas_ts050_panel *khadas_ts050)
{
	struct device *dev = &khadas_ts050->link->dev;
	int ret;

	khadas_ts050->supply = devm_regulator_get(dev, "power");
	if (IS_ERR(khadas_ts050->supply))
		return dev_err_probe(dev, PTR_ERR(khadas_ts050->supply),
				     "failed to get power supply");

	khadas_ts050->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
	if (IS_ERR(khadas_ts050->reset_gpio))
		return dev_err_probe(dev, PTR_ERR(khadas_ts050->reset_gpio),
				     "failed to get reset gpio");

	khadas_ts050->enable_gpio = devm_gpiod_get(dev, "enable",
						   GPIOD_OUT_HIGH);
	if (IS_ERR(khadas_ts050->enable_gpio))
		return dev_err_probe(dev, PTR_ERR(khadas_ts050->enable_gpio),
				     "failed to get enable gpio");

	drm_panel_init(&khadas_ts050->base, dev,
		       &khadas_ts050_panel_funcs, DRM_MODE_CONNECTOR_DSI);

	ret = drm_panel_of_backlight(&khadas_ts050->base);
	if (ret)
		return ret;

	drm_panel_add(&khadas_ts050->base);

	return 0;
}
/*
 * DSI probe: configure the link parameters, allocate driver state,
 * register the panel and attach to the DSI host.
 */
static int khadas_ts050_panel_probe(struct mipi_dsi_device *dsi)
{
	struct khadas_ts050_panel *ts050;
	int ret;

	dsi->lanes = 4;
	dsi->format = MIPI_DSI_FMT_RGB888;
	dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
			  MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_NO_EOT_PACKET;

	ts050 = devm_kzalloc(&dsi->dev, sizeof(*ts050), GFP_KERNEL);
	if (!ts050)
		return -ENOMEM;

	mipi_dsi_set_drvdata(dsi, ts050);
	ts050->link = dsi;

	ret = khadas_ts050_panel_add(ts050);
	if (ret < 0)
		return ret;

	ret = mipi_dsi_attach(dsi);
	if (ret)
		drm_panel_remove(&ts050->base);

	return ret;
}
/*
 * Driver unbind: detach from the DSI host, unregister the panel and
 * make a best-effort attempt to power it down.  The panel is removed
 * from the DRM panel list before being disabled/unprepared, matching
 * the pattern used by other panel drivers.
 */
static void khadas_ts050_panel_remove(struct mipi_dsi_device *dsi)
{
	struct khadas_ts050_panel *khadas_ts050 = mipi_dsi_get_drvdata(dsi);
	int err;

	err = mipi_dsi_detach(dsi);
	if (err < 0)
		dev_err(&dsi->dev, "failed to detach from DSI host: %d\n", err);

	drm_panel_remove(&khadas_ts050->base);
	drm_panel_disable(&khadas_ts050->base);
	drm_panel_unprepare(&khadas_ts050->base);
}

/* System shutdown: just turn the panel off; no unregistration needed. */
static void khadas_ts050_panel_shutdown(struct mipi_dsi_device *dsi)
{
	struct khadas_ts050_panel *khadas_ts050 = mipi_dsi_get_drvdata(dsi);

	drm_panel_disable(&khadas_ts050->base);
	drm_panel_unprepare(&khadas_ts050->base);
}
/* MIPI DSI driver registration and module metadata. */
static struct mipi_dsi_driver khadas_ts050_panel_driver = {
	.driver = {
		.name = "panel-khadas-ts050",
		.of_match_table = khadas_ts050_of_match,
	},
	.probe = khadas_ts050_panel_probe,
	.remove = khadas_ts050_panel_remove,
	.shutdown = khadas_ts050_panel_shutdown,
};
module_mipi_dsi_driver(khadas_ts050_panel_driver);

MODULE_AUTHOR("Neil Armstrong <[email protected]>");
MODULE_DESCRIPTION("Khadas TS050 panel driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/gpu/drm/panel/panel-khadas-ts050.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2017 NXP Semiconductors.
* Author: Marco Franchi <[email protected]>
*
* Based on Panel Simple driver by Thierry Reding <[email protected]>
*/
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/media-bus-format.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <video/display_timing.h>
#include <video/videomode.h>
#include <drm/drm_crtc.h>
#include <drm/drm_device.h>
#include <drm/drm_panel.h>
/*
 * Static description of a supported Seiko panel: either fixed DRM
 * modes or display timings, plus physical size and bus parameters.
 */
struct seiko_panel_desc {
	const struct drm_display_mode *modes;	/* fixed modes, if any */
	unsigned int num_modes;
	const struct display_timing *timings;	/* timing ranges, if any */
	unsigned int num_timings;

	unsigned int bpc;	/* bits per color component */

	/**
	 * @width: width (in millimeters) of the panel's active display area
	 * @height: height (in millimeters) of the panel's active display area
	 */
	struct {
		unsigned int width;
		unsigned int height;
	} size;

	u32 bus_format;
	u32 bus_flags;
};

/* Per-device driver state. */
struct seiko_panel {
	struct drm_panel base;
	bool prepared;	/* power rails are up */
	bool enabled;	/* DRM considers the panel enabled */
	const struct seiko_panel_desc *desc;
	struct regulator *dvdd;	/* digital supply */
	struct regulator *avdd;	/* analog supply */
	struct gpio_desc *enable_gpio;
};

/* Convert a generic drm_panel pointer back to our driver state. */
static inline struct seiko_panel *to_seiko_panel(struct drm_panel *panel)
{
	return container_of(panel, struct seiko_panel, base);
}
/*
 * Populate the connector with the panel's hard-coded modes: first any
 * display timings (converted through videomode), then any fixed DRM
 * modes, and finally the display_info fields (bpc, size, bus format
 * and flags).  Returns the number of modes added.
 */
static int seiko_panel_get_fixed_modes(struct seiko_panel *panel,
				       struct drm_connector *connector)
{
	struct drm_display_mode *mode;
	unsigned int i, num = 0;

	if (!panel->desc)
		return 0;

	for (i = 0; i < panel->desc->num_timings; i++) {
		const struct display_timing *dt = &panel->desc->timings[i];
		struct videomode vm;

		videomode_from_timing(dt, &vm);
		mode = drm_mode_create(connector->dev);
		if (!mode) {
			dev_err(panel->base.dev, "failed to add mode %ux%u\n",
				dt->hactive.typ, dt->vactive.typ);
			/* skip this timing but keep adding the others */
			continue;
		}

		drm_display_mode_from_videomode(&vm, mode);

		mode->type |= DRM_MODE_TYPE_DRIVER;

		/* a single timing is implicitly the preferred mode */
		if (panel->desc->num_timings == 1)
			mode->type |= DRM_MODE_TYPE_PREFERRED;

		drm_mode_probed_add(connector, mode);
		num++;
	}

	for (i = 0; i < panel->desc->num_modes; i++) {
		const struct drm_display_mode *m = &panel->desc->modes[i];

		mode = drm_mode_duplicate(connector->dev, m);
		if (!mode) {
			dev_err(panel->base.dev, "failed to add mode %ux%u@%u\n",
				m->hdisplay, m->vdisplay,
				drm_mode_vrefresh(m));
			continue;
		}

		mode->type |= DRM_MODE_TYPE_DRIVER;

		if (panel->desc->num_modes == 1)
			mode->type |= DRM_MODE_TYPE_PREFERRED;

		drm_mode_set_name(mode);

		drm_mode_probed_add(connector, mode);
		num++;
	}

	connector->display_info.bpc = panel->desc->bpc;
	connector->display_info.width_mm = panel->desc->size.width;
	connector->display_info.height_mm = panel->desc->size.height;
	if (panel->desc->bus_format)
		drm_display_info_set_bus_formats(&connector->display_info,
						 &panel->desc->bus_format, 1);
	connector->display_info.bus_flags = panel->desc->bus_flags;

	return num;
}
/* Only bookkeeping: the panel has no disable command of its own. */
static int seiko_panel_disable(struct drm_panel *panel)
{
	struct seiko_panel *p = to_seiko_panel(panel);

	p->enabled = false;

	return 0;
}
/*
 * Power down: drop the enable GPIO, then disable avdd and dvdd in
 * the reverse order of prepare(), with the datasheet-mandated delay
 * between the two rails.
 */
static int seiko_panel_unprepare(struct drm_panel *panel)
{
	struct seiko_panel *p = to_seiko_panel(panel);

	if (!p->prepared)
		return 0;

	gpiod_set_value_cansleep(p->enable_gpio, 0);

	regulator_disable(p->avdd);

	/* Add a 100ms delay as per the panel datasheet */
	msleep(100);

	regulator_disable(p->dvdd);

	p->prepared = false;

	return 0;
}
/*
 * Power up: enable dvdd, wait the datasheet-mandated 100ms, enable
 * avdd, then assert the enable GPIO.  On avdd failure, dvdd is rolled
 * back via the goto label.
 */
static int seiko_panel_prepare(struct drm_panel *panel)
{
	struct seiko_panel *p = to_seiko_panel(panel);
	int err;

	if (p->prepared)
		return 0;

	err = regulator_enable(p->dvdd);
	if (err < 0) {
		dev_err(panel->dev, "failed to enable dvdd: %d\n", err);
		return err;
	}

	/* Add a 100ms delay as per the panel datasheet */
	msleep(100);

	err = regulator_enable(p->avdd);
	if (err < 0) {
		dev_err(panel->dev, "failed to enable avdd: %d\n", err);
		goto disable_dvdd;
	}

	gpiod_set_value_cansleep(p->enable_gpio, 1);

	p->prepared = true;

	return 0;

disable_dvdd:
	regulator_disable(p->dvdd);
	return err;
}
/* Only bookkeeping: power sequencing already happened in prepare(). */
static int seiko_panel_enable(struct drm_panel *panel)
{
	struct seiko_panel *p = to_seiko_panel(panel);

	p->enabled = true;

	return 0;
}
/*
 * drm_panel .get_modes: the panel only has hard-coded modes, so just
 * delegate to the fixed-mode helper.
 */
static int seiko_panel_get_modes(struct drm_panel *panel,
				 struct drm_connector *connector)
{
	struct seiko_panel *p = to_seiko_panel(panel);

	/* add hard-coded panel modes */
	return seiko_panel_get_fixed_modes(p, connector);
}
/*
 * Copy up to @num_timings panel timings into @timings (when non-NULL)
 * and return the total number of timings the panel provides.
 */
static int seiko_panel_get_timings(struct drm_panel *panel,
				   unsigned int num_timings,
				   struct display_timing *timings)
{
	struct seiko_panel *p = to_seiko_panel(panel);
	unsigned int count = num_timings;
	unsigned int idx;

	if (count > p->desc->num_timings)
		count = p->desc->num_timings;

	if (timings) {
		for (idx = 0; idx < count; idx++)
			timings[idx] = p->desc->timings[idx];
	}

	return p->desc->num_timings;
}
/* drm_panel operations shared by all Seiko panel variants. */
static const struct drm_panel_funcs seiko_panel_funcs = {
	.disable = seiko_panel_disable,
	.unprepare = seiko_panel_unprepare,
	.prepare = seiko_panel_prepare,
	.enable = seiko_panel_enable,
	.get_modes = seiko_panel_get_modes,
	.get_timings = seiko_panel_get_timings,
};
/*
 * Common probe: allocate state, grab the dvdd/avdd regulators and the
 * optional enable GPIO, hook up the backlight and register the panel.
 * All resources are devm-managed.
 */
static int seiko_panel_probe(struct device *dev,
			     const struct seiko_panel_desc *desc)
{
	struct seiko_panel *panel;
	int err;

	panel = devm_kzalloc(dev, sizeof(*panel), GFP_KERNEL);
	if (!panel)
		return -ENOMEM;

	panel->enabled = false;
	panel->prepared = false;
	panel->desc = desc;

	panel->dvdd = devm_regulator_get(dev, "dvdd");
	if (IS_ERR(panel->dvdd))
		return PTR_ERR(panel->dvdd);

	panel->avdd = devm_regulator_get(dev, "avdd");
	if (IS_ERR(panel->avdd))
		return PTR_ERR(panel->avdd);

	panel->enable_gpio = devm_gpiod_get_optional(dev, "enable",
						     GPIOD_OUT_LOW);
	if (IS_ERR(panel->enable_gpio))
		return dev_err_probe(dev, PTR_ERR(panel->enable_gpio),
				     "failed to request GPIO\n");

	drm_panel_init(&panel->base, dev, &seiko_panel_funcs,
		       DRM_MODE_CONNECTOR_DPI);

	err = drm_panel_of_backlight(&panel->base);
	if (err)
		return err;

	drm_panel_add(&panel->base);

	dev_set_drvdata(dev, panel);

	return 0;
}
/* Driver unbind: unregister the panel, then best-effort disable it. */
static void seiko_panel_remove(struct platform_device *pdev)
{
	struct seiko_panel *panel = platform_get_drvdata(pdev);

	drm_panel_remove(&panel->base);
	drm_panel_disable(&panel->base);
}

/* System shutdown: just turn the panel off. */
static void seiko_panel_shutdown(struct platform_device *pdev)
{
	struct seiko_panel *panel = platform_get_drvdata(pdev);

	drm_panel_disable(&panel->base);
}
/* Timing for the Seiko 43WVF1G: 800x480 at a 33.5 MHz pixel clock. */
static const struct display_timing seiko_43wvf1g_timing = {
	.pixelclock = { 33500000, 33500000, 33500000 },
	.hactive = { 800, 800, 800 },
	.hfront_porch = {  164, 164, 164 },
	.hback_porch = { 89, 89, 89 },
	.hsync_len = { 10, 10, 10 },
	.vactive = { 480, 480, 480 },
	.vfront_porch = { 10, 10, 10 },
	.vback_porch = { 23, 23, 23 },
	.vsync_len = { 10, 10, 10 },
	.flags = DISPLAY_FLAGS_DE_LOW,
};

/* 4.3" WVGA panel, 93x57 mm active area, RGB888 parallel bus. */
static const struct seiko_panel_desc seiko_43wvf1g = {
	.timings = &seiko_43wvf1g_timing,
	.num_timings = 1,
	.bpc = 8,
	.size = {
		.width = 93,
		.height = 57,
	},
	.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
	.bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE,
};
/* Device-tree match table; .data points at the panel description. */
static const struct of_device_id platform_of_match[] = {
	{
		.compatible = "sii,43wvf1g",
		.data = &seiko_43wvf1g,
	}, {
		/* sentinel */
	}
};
MODULE_DEVICE_TABLE(of, platform_of_match);
/* Look up the matching panel description and hand off to the core probe. */
static int seiko_panel_platform_probe(struct platform_device *pdev)
{
	const struct of_device_id *match;

	match = of_match_node(platform_of_match, pdev->dev.of_node);
	if (!match)
		return -ENODEV;

	return seiko_panel_probe(&pdev->dev, match->data);
}
/* Platform driver registration and module metadata. */
static struct platform_driver seiko_panel_platform_driver = {
	.driver = {
		.name = "seiko_panel",
		.of_match_table = platform_of_match,
	},
	.probe = seiko_panel_platform_probe,
	.remove_new = seiko_panel_remove,
	.shutdown = seiko_panel_shutdown,
};
module_platform_driver(seiko_panel_platform_driver);

MODULE_AUTHOR("Marco Franchi <[email protected]>");
MODULE_DESCRIPTION("Seiko 43WVF1G panel driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/gpu/drm/panel/panel-seiko-43wvf1g.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2018, The Linux Foundation. All rights reserved.
*/
#include <linux/backlight.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_graph.h>
#include <linux/pinctrl/consumer.h>
#include <linux/regulator/consumer.h>
#include <video/mipi_display.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_modes.h>
#include <drm/drm_panel.h>
/*
 * Panel supply rails; the two load tables below are indexed in the
 * same order and give the current (uA) requested while the panel is
 * enabled resp. disabled.
 */
static const char * const regulator_names[] = {
	"vdda",
	"vdispp",
	"vdispn",
};

static unsigned long const regulator_enable_loads[] = {
	62000,
	100000,
	100000,
};

static unsigned long const regulator_disable_loads[] = {
	80,
	100,
	100,
};
/* One raw DCS write: up to 4 command/payload bytes plus actual length. */
struct cmd_set {
	u8 commands[4];
	u8 size;
};

/* Per-variant description: geometry, init sequence and display mode. */
struct nt35597_config {
	u32 width_mm;
	u32 height_mm;
	const char *panel_name;
	const struct cmd_set *panel_on_cmds;	/* power-on init table */
	u32 num_on_cmds;
	const struct drm_display_mode *dm;
};

/* Per-device driver state; the panel is driven over two DSI links. */
struct truly_nt35597 {
	struct device *dev;
	struct drm_panel panel;

	struct regulator_bulk_data supplies[ARRAY_SIZE(regulator_names)];

	struct gpio_desc *reset_gpio;
	struct gpio_desc *mode_gpio;	/* selects single/dual port mode */

	struct backlight_device *backlight;

	struct mipi_dsi_device *dsi[2];	/* [0] = master, [1] = slave link */

	const struct nt35597_config *config;
	bool prepared;
	bool enabled;
};

/* Convert a generic drm_panel pointer back to our driver state. */
static inline struct truly_nt35597 *panel_to_ctx(struct drm_panel *panel)
{
	return container_of(panel, struct truly_nt35597, panel);
}
/*
 * Vendor-supplied initialization sequence for the 2K (1440x2560)
 * NT35597 panel.  Each entry is sent verbatim as one DCS write; the
 * register semantics beyond the inline notes are undocumented and
 * come straight from the vendor.
 */
static const struct cmd_set qcom_2k_panel_magic_cmds[] = {
	/* CMD2_P0 */
	{ { 0xff, 0x20 }, 2 },
	{ { 0xfb, 0x01 }, 2 },
	{ { 0x00, 0x01 }, 2 },
	{ { 0x01, 0x55 }, 2 },
	{ { 0x02, 0x45 }, 2 },
	{ { 0x05, 0x40 }, 2 },
	{ { 0x06, 0x19 }, 2 },
	{ { 0x07, 0x1e }, 2 },
	{ { 0x0b, 0x73 }, 2 },
	{ { 0x0c, 0x73 }, 2 },
	{ { 0x0e, 0xb0 }, 2 },
	{ { 0x0f, 0xae }, 2 },
	{ { 0x11, 0xb8 }, 2 },
	{ { 0x13, 0x00 }, 2 },
	{ { 0x58, 0x80 }, 2 },
	{ { 0x59, 0x01 }, 2 },
	{ { 0x5a, 0x00 }, 2 },
	{ { 0x5b, 0x01 }, 2 },
	{ { 0x5c, 0x80 }, 2 },
	{ { 0x5d, 0x81 }, 2 },
	{ { 0x5e, 0x00 }, 2 },
	{ { 0x5f, 0x01 }, 2 },
	{ { 0x72, 0x11 }, 2 },
	{ { 0x68, 0x03 }, 2 },
	/* CMD2_P4 */
	{ { 0xFF, 0x24 }, 2 },
	{ { 0xFB, 0x01 }, 2 },
	{ { 0x00, 0x1C }, 2 },
	{ { 0x01, 0x0B }, 2 },
	{ { 0x02, 0x0C }, 2 },
	{ { 0x03, 0x01 }, 2 },
	{ { 0x04, 0x0F }, 2 },
	{ { 0x05, 0x10 }, 2 },
	{ { 0x06, 0x10 }, 2 },
	{ { 0x07, 0x10 }, 2 },
	{ { 0x08, 0x89 }, 2 },
	{ { 0x09, 0x8A }, 2 },
	{ { 0x0A, 0x13 }, 2 },
	{ { 0x0B, 0x13 }, 2 },
	{ { 0x0C, 0x15 }, 2 },
	{ { 0x0D, 0x15 }, 2 },
	{ { 0x0E, 0x17 }, 2 },
	{ { 0x0F, 0x17 }, 2 },
	{ { 0x10, 0x1C }, 2 },
	{ { 0x11, 0x0B }, 2 },
	{ { 0x12, 0x0C }, 2 },
	{ { 0x13, 0x01 }, 2 },
	{ { 0x14, 0x0F }, 2 },
	{ { 0x15, 0x10 }, 2 },
	{ { 0x16, 0x10 }, 2 },
	{ { 0x17, 0x10 }, 2 },
	{ { 0x18, 0x89 }, 2 },
	{ { 0x19, 0x8A }, 2 },
	{ { 0x1A, 0x13 }, 2 },
	{ { 0x1B, 0x13 }, 2 },
	{ { 0x1C, 0x15 }, 2 },
	{ { 0x1D, 0x15 }, 2 },
	{ { 0x1E, 0x17 }, 2 },
	{ { 0x1F, 0x17 }, 2 },
	/* STV */
	{ { 0x20, 0x40 }, 2 },
	{ { 0x21, 0x01 }, 2 },
	{ { 0x22, 0x00 }, 2 },
	{ { 0x23, 0x40 }, 2 },
	{ { 0x24, 0x40 }, 2 },
	{ { 0x25, 0x6D }, 2 },
	{ { 0x26, 0x40 }, 2 },
	{ { 0x27, 0x40 }, 2 },
	/* Vend */
	{ { 0xE0, 0x00 }, 2 },
	{ { 0xDC, 0x21 }, 2 },
	{ { 0xDD, 0x22 }, 2 },
	{ { 0xDE, 0x07 }, 2 },
	{ { 0xDF, 0x07 }, 2 },
	{ { 0xE3, 0x6D }, 2 },
	{ { 0xE1, 0x07 }, 2 },
	{ { 0xE2, 0x07 }, 2 },
	/* UD */
	{ { 0x29, 0xD8 }, 2 },
	{ { 0x2A, 0x2A }, 2 },
	/* CLK */
	{ { 0x4B, 0x03 }, 2 },
	{ { 0x4C, 0x11 }, 2 },
	{ { 0x4D, 0x10 }, 2 },
	{ { 0x4E, 0x01 }, 2 },
	{ { 0x4F, 0x01 }, 2 },
	{ { 0x50, 0x10 }, 2 },
	{ { 0x51, 0x00 }, 2 },
	{ { 0x52, 0x80 }, 2 },
	{ { 0x53, 0x00 }, 2 },
	{ { 0x56, 0x00 }, 2 },
	{ { 0x54, 0x07 }, 2 },
	{ { 0x58, 0x07 }, 2 },
	{ { 0x55, 0x25 }, 2 },
	/* Reset XDONB */
	{ { 0x5B, 0x43 }, 2 },
	{ { 0x5C, 0x00 }, 2 },
	{ { 0x5F, 0x73 }, 2 },
	{ { 0x60, 0x73 }, 2 },
	{ { 0x63, 0x22 }, 2 },
	{ { 0x64, 0x00 }, 2 },
	{ { 0x67, 0x08 }, 2 },
	{ { 0x68, 0x04 }, 2 },
	/* Resolution:1440x2560 */
	{ { 0x72, 0x02 }, 2 },
	/* mux */
	{ { 0x7A, 0x80 }, 2 },
	{ { 0x7B, 0x91 }, 2 },
	{ { 0x7C, 0xD8 }, 2 },
	{ { 0x7D, 0x60 }, 2 },
	{ { 0x7F, 0x15 }, 2 },
	{ { 0x75, 0x15 }, 2 },
	/* ABOFF */
	{ { 0xB3, 0xC0 }, 2 },
	{ { 0xB4, 0x00 }, 2 },
	{ { 0xB5, 0x00 }, 2 },
	/* Source EQ */
	{ { 0x78, 0x00 }, 2 },
	{ { 0x79, 0x00 }, 2 },
	{ { 0x80, 0x00 }, 2 },
	{ { 0x83, 0x00 }, 2 },
	/* FP BP */
	{ { 0x93, 0x0A }, 2 },
	{ { 0x94, 0x0A }, 2 },
	/* Inversion Type */
	{ { 0x8A, 0x00 }, 2 },
	{ { 0x9B, 0xFF }, 2 },
	/* IMGSWAP =1 @PortSwap=1 */
	{ { 0x9D, 0xB0 }, 2 },
	{ { 0x9F, 0x63 }, 2 },
	{ { 0x98, 0x10 }, 2 },
	/* FRM */
	{ { 0xEC, 0x00 }, 2 },
	/* CMD1 */
	{ { 0xFF, 0x10 }, 2 },
	/* VBP+VSA=,VFP = 10H */
	{ { 0x3B, 0x03, 0x0A, 0x0A }, 4 },
	/* FTE on */
	{ { 0x35, 0x00 }, 2 },
	/* EN_BK =1(auto black) */
	{ { 0xE5, 0x01 }, 2 },
	/* CMD mode(10) VDO mode(03) */
	{ { 0xBB, 0x03 }, 2 },
	/* Non Reload MTP */
	{ { 0xFB, 0x01 }, 2 },
};
/*
 * Send a parameter-less DCS command to both DSI links.  The command is
 * always attempted on both links so that a transient failure on one
 * does not leave the other out of sync.
 *
 * Fix over the original: the return value of the first link was
 * overwritten by the second, so an error on dsi[0] was silently masked
 * when dsi[1] succeeded.  The first error is now preserved and
 * returned (0 on full success).
 */
static int truly_dcs_write(struct drm_panel *panel, u32 command)
{
	struct truly_nt35597 *ctx = panel_to_ctx(panel);
	int i, err, ret = 0;

	for (i = 0; i < ARRAY_SIZE(ctx->dsi); i++) {
		err = mipi_dsi_dcs_write(ctx->dsi[i], command, NULL, 0);
		if (err < 0) {
			dev_err(ctx->dev, "cmd 0x%x failed for dsi = %d\n",
				command, i);
			/* remember the first failure only */
			if (!ret)
				ret = err;
		}
	}

	return ret;
}
/*
 * Send a DCS command with payload to both DSI links, stopping at the
 * first failure.
 */
static int truly_dcs_write_buf(struct drm_panel *panel,
			       u32 size, const u8 *buf)
{
	struct truly_nt35597 *ctx = panel_to_ctx(panel);
	int idx;
	int ret = 0;

	for (idx = 0; idx < ARRAY_SIZE(ctx->dsi); idx++) {
		ret = mipi_dsi_dcs_write_buffer(ctx->dsi[idx], buf, size);
		if (ret < 0) {
			dev_err(ctx->dev, "failed to tx cmd [%d], err: %d\n",
				idx, ret);
			break;
		}
	}

	return ret;
}
/*
 * Bring up all supply rails with their "enabled" load levels, then run
 * the panel's reset waggle.  Returns 0 on success or a negative errno.
 */
static int truly_35597_power_on(struct truly_nt35597 *ctx)
{
	int ret, i;

	for (i = 0; i < ARRAY_SIZE(ctx->supplies); i++) {
		ret = regulator_set_load(ctx->supplies[i].consumer,
					 regulator_enable_loads[i]);
		if (ret)
			return ret;
	}

	ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
	if (ret < 0)
		return ret;

	/*
	 * Reset sequence of truly panel requires the panel to be
	 * out of reset for 10ms, followed by being held in reset
	 * for 10ms and then out again
	 */
	gpiod_set_value(ctx->reset_gpio, 0);
	usleep_range(10000, 20000);
	gpiod_set_value(ctx->reset_gpio, 1);
	usleep_range(10000, 20000);
	gpiod_set_value(ctx->reset_gpio, 0);
	usleep_range(10000, 20000);

	return 0;
}
/*
 * Put the panel into reset, drop the regulator loads to their idle
 * values and disable all supply rails.
 */
static int truly_nt35597_power_off(struct truly_nt35597 *ctx)
{
	int ret = 0;
	int i;

	gpiod_set_value(ctx->reset_gpio, 1);

	for (i = 0; i < ARRAY_SIZE(ctx->supplies); i++) {
		ret = regulator_set_load(ctx->supplies[i].consumer,
					 regulator_disable_loads[i]);
		if (ret) {
			dev_err(ctx->dev, "regulator_set_load failed %d\n",
				ret);
			return ret;
		}
	}

	ret = regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
	if (ret)
		dev_err(ctx->dev, "regulator_bulk_disable failed %d\n", ret);

	return ret;
}
/* Turn the backlight off (best effort) and mark the panel disabled. */
static int truly_nt35597_disable(struct drm_panel *panel)
{
	struct truly_nt35597 *ctx = panel_to_ctx(panel);

	if (!ctx->enabled)
		return 0;

	if (ctx->backlight) {
		int ret = backlight_disable(ctx->backlight);

		if (ret < 0)
			dev_err(ctx->dev, "backlight disable failed %d\n",
				ret);
	}

	ctx->enabled = false;

	return 0;
}
/*
 * Power-down sequence: display off, wait the DCS-mandated delay, enter
 * sleep mode, then cut power.  DCS failures are logged but do not
 * abort the sequence, so the rails are always released.
 */
static int truly_nt35597_unprepare(struct drm_panel *panel)
{
	struct truly_nt35597 *ctx = panel_to_ctx(panel);
	int ret = 0;

	if (!ctx->prepared)
		return 0;

	/* commands below must go out in low-power mode cleared */
	ctx->dsi[0]->mode_flags = 0;
	ctx->dsi[1]->mode_flags = 0;

	ret = truly_dcs_write(panel, MIPI_DCS_SET_DISPLAY_OFF);
	if (ret < 0) {
		dev_err(ctx->dev, "set_display_off cmd failed ret = %d\n", ret);
	}

	/* 120ms delay required here as per DCS spec */
	msleep(120);

	ret = truly_dcs_write(panel, MIPI_DCS_ENTER_SLEEP_MODE);
	if (ret < 0) {
		dev_err(ctx->dev, "enter_sleep cmd failed ret = %d\n", ret);
	}

	ret = truly_nt35597_power_off(ctx);
	if (ret < 0)
		dev_err(ctx->dev, "power_off failed ret = %d\n", ret);

	ctx->prepared = false;
	return ret;
}
/*
 * Power-up sequence: enable the rails and reset the panel, send the
 * variant's init table in LP mode, exit sleep, then display on — with
 * the 120ms DCS-mandated delays in between.  Any failure powers the
 * panel back off.
 */
static int truly_nt35597_prepare(struct drm_panel *panel)
{
	struct truly_nt35597 *ctx = panel_to_ctx(panel);
	int ret;
	int i;
	const struct cmd_set *panel_on_cmds;
	const struct nt35597_config *config;
	u32 num_cmds;

	if (ctx->prepared)
		return 0;

	ret = truly_35597_power_on(ctx);
	if (ret < 0)
		return ret;

	/* init commands must be sent in low-power mode */
	ctx->dsi[0]->mode_flags |= MIPI_DSI_MODE_LPM;
	ctx->dsi[1]->mode_flags |= MIPI_DSI_MODE_LPM;

	config = ctx->config;
	panel_on_cmds = config->panel_on_cmds;
	num_cmds = config->num_on_cmds;

	for (i = 0; i < num_cmds; i++) {
		ret = truly_dcs_write_buf(panel,
					  panel_on_cmds[i].size,
					  panel_on_cmds[i].commands);
		if (ret < 0) {
			dev_err(ctx->dev, "cmd set tx failed i = %d ret = %d\n", i, ret);
			goto power_off;
		}
	}

	ret = truly_dcs_write(panel, MIPI_DCS_EXIT_SLEEP_MODE);
	if (ret < 0) {
		dev_err(ctx->dev, "exit_sleep_mode cmd failed ret = %d\n", ret);
		goto power_off;
	}

	/* Per DSI spec wait 120ms after sending exit sleep DCS command */
	msleep(120);

	ret = truly_dcs_write(panel, MIPI_DCS_SET_DISPLAY_ON);
	if (ret < 0) {
		dev_err(ctx->dev, "set_display_on cmd failed ret = %d\n", ret);
		goto power_off;
	}

	/* Per DSI spec wait 120ms after sending set_display_on DCS command */
	msleep(120);

	ctx->prepared = true;

	return 0;

power_off:
	if (truly_nt35597_power_off(ctx))
		dev_err(ctx->dev, "power_off failed\n");
	return ret;
}
/* Turn the backlight on (best effort) and mark the panel enabled. */
static int truly_nt35597_enable(struct drm_panel *panel)
{
	struct truly_nt35597 *ctx = panel_to_ctx(panel);

	if (ctx->enabled)
		return 0;

	if (ctx->backlight) {
		int ret = backlight_enable(ctx->backlight);

		if (ret < 0)
			dev_err(ctx->dev, "backlight enable failed %d\n",
				ret);
	}

	ctx->enabled = true;

	return 0;
}
/*
 * drm_panel .get_modes: register the variant's fixed mode as the
 * preferred mode and fill in the physical dimensions.  Returns the
 * number of modes added (1), or 0 on allocation failure.
 */
static int truly_nt35597_get_modes(struct drm_panel *panel,
				   struct drm_connector *connector)
{
	struct truly_nt35597 *ctx = panel_to_ctx(panel);
	struct drm_display_mode *mode;
	const struct nt35597_config *config;

	config = ctx->config;
	mode = drm_mode_duplicate(connector->dev, config->dm);
	if (!mode) {
		dev_err(ctx->dev, "failed to create a new display mode\n");
		return 0;
	}

	connector->display_info.width_mm = config->width_mm;
	connector->display_info.height_mm = config->height_mm;
	mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
	drm_mode_probed_add(connector, mode);

	return 1;
}
/* drm_panel operations for the Truly NT35597. */
static const struct drm_panel_funcs truly_nt35597_drm_funcs = {
	.disable = truly_nt35597_disable,
	.unprepare = truly_nt35597_unprepare,
	.prepare = truly_nt35597_prepare,
	.enable = truly_nt35597_enable,
	.get_modes = truly_nt35597_get_modes,
};
/*
 * Acquire the supplies and GPIOs, put the panel into dual-port mode
 * and register it with DRM.  All resources are devm-managed.
 */
static int truly_nt35597_panel_add(struct truly_nt35597 *ctx)
{
	struct device *dev = ctx->dev;
	int ret, i;

	for (i = 0; i < ARRAY_SIZE(ctx->supplies); i++)
		ctx->supplies[i].supply = regulator_names[i];

	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(ctx->supplies),
				      ctx->supplies);
	if (ret < 0)
		return ret;

	ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
	if (IS_ERR(ctx->reset_gpio)) {
		dev_err(dev, "cannot get reset gpio %ld\n", PTR_ERR(ctx->reset_gpio));
		return PTR_ERR(ctx->reset_gpio);
	}

	ctx->mode_gpio = devm_gpiod_get(dev, "mode", GPIOD_OUT_LOW);
	if (IS_ERR(ctx->mode_gpio)) {
		dev_err(dev, "cannot get mode gpio %ld\n", PTR_ERR(ctx->mode_gpio));
		return PTR_ERR(ctx->mode_gpio);
	}

	/* dual port */
	gpiod_set_value(ctx->mode_gpio, 0);

	drm_panel_init(&ctx->panel, dev, &truly_nt35597_drm_funcs,
		       DRM_MODE_CONNECTOR_DSI);
	drm_panel_add(&ctx->panel);

	return 0;
}
/* Fixed 1440x2560 mode used on the SDM845 MTP board. */
static const struct drm_display_mode qcom_sdm845_mtp_2k_mode = {
	.name = "1440x2560",
	.clock = 268316,
	.hdisplay = 1440,
	.hsync_start = 1440 + 200,
	.hsync_end = 1440 + 200 + 32,
	.htotal = 1440 + 200 + 32 + 64,
	.vdisplay = 2560,
	.vsync_start = 2560 + 8,
	.vsync_end = 2560 + 8 + 1,
	.vtotal = 2560 + 8 + 1 + 7,
	.flags = 0,
};

/* 2K variant description: geometry, init table and display mode. */
static const struct nt35597_config nt35597_dir = {
	.width_mm = 74,
	.height_mm = 131,
	.panel_name = "qcom_sdm845_mtp_2k_panel",
	.dm = &qcom_sdm845_mtp_2k_mode,
	.panel_on_cmds = qcom_2k_panel_magic_cmds,
	.num_on_cmds = ARRAY_SIZE(qcom_2k_panel_magic_cmds),
};
/*
 * Probe on the master DSI link: look up the slave link through the OF
 * graph, register a second DSI device for it, add the panel and attach
 * both links.
 *
 * Fix over the original: if mipi_dsi_attach() failed for the second
 * link, the already-attached first link was never detached before the
 * panel was removed and the slave device unregistered.  The error path
 * now detaches every link that was successfully attached.
 */
static int truly_nt35597_probe(struct mipi_dsi_device *dsi)
{
	struct device *dev = &dsi->dev;
	struct truly_nt35597 *ctx;
	struct mipi_dsi_device *dsi1_device;
	struct device_node *dsi1;
	struct mipi_dsi_host *dsi1_host;
	struct mipi_dsi_device *dsi_dev;
	int ret = 0;
	int i;

	const struct mipi_dsi_device_info info = {
		.type = "trulynt35597",
		.channel = 0,
		.node = NULL,
	};

	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	/*
	 * This device represents itself as one with two input ports which are
	 * fed by the output ports of the two DSI controllers . The DSI0 is
	 * the master controller and has most of the panel related info in its
	 * child node.
	 */
	ctx->config = of_device_get_match_data(dev);
	if (!ctx->config) {
		dev_err(dev, "missing device configuration\n");
		return -ENODEV;
	}

	dsi1 = of_graph_get_remote_node(dsi->dev.of_node, 1, -1);
	if (!dsi1) {
		dev_err(dev, "failed to get remote node for dsi1_device\n");
		return -ENODEV;
	}

	dsi1_host = of_find_mipi_dsi_host_by_node(dsi1);
	of_node_put(dsi1);
	if (!dsi1_host) {
		dev_err(dev, "failed to find dsi host\n");
		return -EPROBE_DEFER;
	}

	/* register the second DSI device */
	dsi1_device = mipi_dsi_device_register_full(dsi1_host, &info);
	if (IS_ERR(dsi1_device)) {
		dev_err(dev, "failed to create dsi device\n");
		return PTR_ERR(dsi1_device);
	}

	mipi_dsi_set_drvdata(dsi, ctx);

	ctx->dev = dev;
	ctx->dsi[0] = dsi;
	ctx->dsi[1] = dsi1_device;

	ret = truly_nt35597_panel_add(ctx);
	if (ret) {
		dev_err(dev, "failed to add panel\n");
		goto err_panel_add;
	}

	for (i = 0; i < ARRAY_SIZE(ctx->dsi); i++) {
		dsi_dev = ctx->dsi[i];
		dsi_dev->lanes = 4;
		dsi_dev->format = MIPI_DSI_FMT_RGB888;
		dsi_dev->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_LPM |
			MIPI_DSI_CLOCK_NON_CONTINUOUS;
		ret = mipi_dsi_attach(dsi_dev);
		if (ret < 0) {
			dev_err(dev, "dsi attach failed i = %d\n", i);
			goto err_dsi_attach;
		}
	}

	return 0;

err_dsi_attach:
	/* detach any link that was already attached before the failure */
	while (i--)
		mipi_dsi_detach(ctx->dsi[i]);
	drm_panel_remove(&ctx->panel);
err_panel_add:
	mipi_dsi_device_unregister(dsi1_device);
	return ret;
}
/*
 * Driver unbind: detach both DSI links, unregister the slave device
 * that probe created, and remove the panel.
 */
static void truly_nt35597_remove(struct mipi_dsi_device *dsi)
{
	struct truly_nt35597 *ctx = mipi_dsi_get_drvdata(dsi);

	if (ctx->dsi[0])
		mipi_dsi_detach(ctx->dsi[0]);
	if (ctx->dsi[1]) {
		mipi_dsi_detach(ctx->dsi[1]);
		mipi_dsi_device_unregister(ctx->dsi[1]);
	}

	drm_panel_remove(&ctx->panel);
}
/* Device-tree match table; .data points at the variant description. */
static const struct of_device_id truly_nt35597_of_match[] = {
	{
		.compatible = "truly,nt35597-2K-display",
		.data = &nt35597_dir,
	},
	{ }
};
MODULE_DEVICE_TABLE(of, truly_nt35597_of_match);

/* MIPI DSI driver registration and module metadata. */
static struct mipi_dsi_driver truly_nt35597_driver = {
	.driver = {
		.name = "panel-truly-nt35597",
		.of_match_table = truly_nt35597_of_match,
	},
	.probe = truly_nt35597_probe,
	.remove = truly_nt35597_remove,
};
module_mipi_dsi_driver(truly_nt35597_driver);

MODULE_DESCRIPTION("Truly NT35597 DSI Panel Driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/gpu/drm/panel/panel-truly-nt35597.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Sony ACX565AKM LCD Panel driver
*
* Copyright (C) 2019 Texas Instruments Incorporated
*
* Based on the omapdrm-specific panel-sony-acx565akm driver
*
* Copyright (C) 2010 Nokia Corporation
* Author: Imre Deak <[email protected]>
*/
/*
* TODO (to be addressed with hardware access to test the changes):
*
* - Update backlight support to use backlight_update_status() etc.
* - Use prepare/unprepare for the basic power on/off of the backligt
*/
#include <linux/backlight.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/spi/spi.h>
#include <video/mipi_display.h>
#include <drm/drm_connector.h>
#include <drm/drm_modes.h>
#include <drm/drm_panel.h>
/*
 * Bits of the panel's display-control register (presumably the DCS
 * CTRL_DISPLAY/0x53 register — TODO confirm against the datasheet).
 */
#define CTRL_DISP_BRIGHTNESS_CTRL_ON BIT(5)
#define CTRL_DISP_AMBIENT_LIGHT_CTRL_ON BIT(4)
#define CTRL_DISP_BACKLIGHT_ON BIT(2)
#define CTRL_DISP_AUTO_BRIGHTNESS_ON BIT(1)

/* CABC (Content Adaptive Backlight Control) DCS commands */
#define MIPID_CMD_WRITE_CABC 0x55
#define MIPID_CMD_READ_CABC 0x56

/* Values read back in display_id[0] identifying the attached module */
#define MIPID_VER_LPH8923 3
#define MIPID_VER_LS041Y3 4
#define MIPID_VER_L4F00311 8
#define MIPID_VER_ACX565AKM 9
/* Per-device driver state; the panel is controlled over a 9-bit SPI bus. */
struct acx565akm_panel {
	struct drm_panel panel;

	struct spi_device *spi;
	struct gpio_desc *reset_gpio;
	struct backlight_device *backlight;

	struct mutex mutex;	/* serializes CABC/backlight accesses */

	const char *name;
	u8 display_id[3];	/* raw ID bytes read from the module */
	int model;		/* one of the MIPID_VER_* values */
	int revision;
	bool has_bc;		/* hardware brightness control present */
	bool has_cabc;		/* hardware CABC present */

	bool enabled;
	unsigned int cabc_mode;	/* index into acx565akm_cabc_modes */
	/*
	 * Next value of jiffies when we can issue the next sleep in/out
	 * command.
	 */
	unsigned long hw_guard_end;
	unsigned long hw_guard_wait; /* max guard time in jiffies */
};

#define to_acx565akm_device(p) container_of(p, struct acx565akm_panel, panel)
/*
 * Perform one command/write/read transaction on the panel's 9-bit SPI
 * bus: a 9-bit command word, optionally followed by 9-bit write data
 * or 8-bit read data.
 *
 * NOTE(review): passing &cmd (an int) as a 9/10-bit tx_buf relies on
 * the SPI core's word layout for word sizes >8 bits and on host byte
 * order — inherited from the original omapdrm driver, not re-verified
 * here.
 */
static void acx565akm_transfer(struct acx565akm_panel *lcd, int cmd,
			       const u8 *wbuf, int wlen, u8 *rbuf, int rlen)
{
	struct spi_message m;
	struct spi_transfer *x, xfer[5];
	int ret;

	spi_message_init(&m);

	memset(xfer, 0, sizeof(xfer));
	x = &xfer[0];

	cmd &= 0xff;
	x->tx_buf = &cmd;
	x->bits_per_word = 9;
	x->len = 2;

	if (rlen > 1 && wlen == 0) {
		/*
		 * Between the command and the response data there is a
		 * dummy clock cycle. Add an extra bit after the command
		 * word to account for this.
		 */
		x->bits_per_word = 10;
		cmd <<= 1;
	}
	spi_message_add_tail(x, &m);

	if (wlen) {
		x++;
		x->tx_buf = wbuf;
		x->len = wlen;
		x->bits_per_word = 9;
		spi_message_add_tail(x, &m);
	}

	if (rlen) {
		x++;
		x->rx_buf = rbuf;
		x->len = rlen;
		spi_message_add_tail(x, &m);
	}

	ret = spi_sync(lcd->spi, &m);
	if (ret < 0)
		dev_dbg(&lcd->spi->dev, "spi_sync %d\n", ret);
}

/* Send a command with no parameters. */
static inline void acx565akm_cmd(struct acx565akm_panel *lcd, int cmd)
{
	acx565akm_transfer(lcd, cmd, NULL, 0, NULL, 0);
}

/* Send a command followed by @len bytes of write data. */
static inline void acx565akm_write(struct acx565akm_panel *lcd,
				   int reg, const u8 *buf, int len)
{
	acx565akm_transfer(lcd, reg, buf, len, NULL, 0);
}

/* Send a command and read back @len bytes of response data. */
static inline void acx565akm_read(struct acx565akm_panel *lcd,
				  int reg, u8 *buf, int len)
{
	acx565akm_transfer(lcd, reg, NULL, 0, buf, len);
}
/* -----------------------------------------------------------------------------
* Auto Brightness Control Via sysfs
*/
/* Return the cached CABC mode (index into acx565akm_cabc_modes). */
static unsigned int acx565akm_get_cabc_mode(struct acx565akm_panel *lcd)
{
	return lcd->cabc_mode;
}
/*
 * Cache @mode and, if the panel is currently enabled, program it into
 * the CABC control register with a read-modify-write cycle.
 */
static void acx565akm_set_cabc_mode(struct acx565akm_panel *lcd,
				    unsigned int mode)
{
	u16 cabc_ctrl;
	lcd->cabc_mode = mode;
	if (!lcd->enabled)
		return;
	cabc_ctrl = 0;
	acx565akm_read(lcd, MIPID_CMD_READ_CABC, (u8 *)&cabc_ctrl, 1);
	/*
	 * Replace only the low two mode bits. NOTE(review): bit 8 is
	 * presumably the data/command marker for the 9-bit SPI word —
	 * confirm against the panel datasheet.
	 */
	cabc_ctrl &= ~3;
	cabc_ctrl |= (1 << 8) | (mode & 3);
	acx565akm_write(lcd, MIPID_CMD_WRITE_CABC, (u8 *)&cabc_ctrl, 2);
}
/* Read the CABC mode currently programmed in the panel hardware. */
static unsigned int acx565akm_get_hw_cabc_mode(struct acx565akm_panel *lcd)
{
	u8 cabc_ctrl;
	acx565akm_read(lcd, MIPID_CMD_READ_CABC, &cabc_ctrl, 1);
	return cabc_ctrl & 3;
}
/* Human-readable CABC mode names; index matches the hardware mode value. */
static const char * const acx565akm_cabc_modes[] = {
	"off",		/* always used when CABC is not supported */
	"ui",
	"still-image",
	"moving-image",
};
/*
 * sysfs show handler for "cabc_mode": print the name of the active CABC
 * mode, or "off" when the panel has no CABC support.
 */
static ssize_t cabc_mode_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	struct acx565akm_panel *lcd = dev_get_drvdata(dev);
	const char *name = "unknown";
	int mode = 0;

	if (lcd->has_cabc)
		mode = acx565akm_get_cabc_mode(lcd);

	if (mode >= 0 && mode < ARRAY_SIZE(acx565akm_cabc_modes))
		name = acx565akm_cabc_modes[mode];

	return sprintf(buf, "%s\n", name);
}
/*
 * sysfs store handler for "cabc_mode": match the written string (with an
 * optional trailing newline) against acx565akm_cabc_modes and apply it.
 * Modes other than "off" are rejected when the panel lacks CABC support.
 *
 * Fixes over the previous version: the trailing-newline strip used to
 * live inside the match loop, mutating 'count' on every iteration (and
 * stripping repeatedly for input like "ui\n\n"), and the decremented
 * count was returned, which makes userspace see a short write and retry
 * the leftover newline. Trim once up front and return the full count.
 */
static ssize_t cabc_mode_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t count)
{
	struct acx565akm_panel *lcd = dev_get_drvdata(dev);
	size_t len = count;
	unsigned int i;

	/* Ignore a single trailing newline from "echo". */
	if (len > 0 && buf[len - 1] == '\n')
		len--;

	for (i = 0; i < ARRAY_SIZE(acx565akm_cabc_modes); i++) {
		const char *mode_str = acx565akm_cabc_modes[i];

		if (len == strlen(mode_str) &&
		    strncmp(buf, mode_str, len) == 0)
			break;
	}

	if (i == ARRAY_SIZE(acx565akm_cabc_modes))
		return -EINVAL;

	/* Only "off" (index 0) is acceptable without CABC support. */
	if (!lcd->has_cabc && i != 0)
		return -EINVAL;

	mutex_lock(&lcd->mutex);
	acx565akm_set_cabc_mode(lcd, i);
	mutex_unlock(&lcd->mutex);

	return count;
}
/*
 * sysfs show handler for "cabc_available_modes": space-separated list of
 * supported mode names, or just "off" when CABC is unavailable.
 */
static ssize_t cabc_available_modes_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct acx565akm_panel *lcd = dev_get_drvdata(dev);
	unsigned int i;
	size_t len;

	if (!lcd->has_cabc)
		return sprintf(buf, "%s\n", acx565akm_cabc_modes[0]);

	len = 0;
	for (i = 0; i < ARRAY_SIZE(acx565akm_cabc_modes); i++) {
		if (i)
			len += sprintf(&buf[len], " ");
		len += sprintf(&buf[len], "%s", acx565akm_cabc_modes[i]);
	}

	buf[len++] = '\n';
	return len;
}
/* sysfs attributes exposed on the backlight device when CABC is present. */
static DEVICE_ATTR_RW(cabc_mode);
static DEVICE_ATTR_RO(cabc_available_modes);
static struct attribute *acx565akm_cabc_attrs[] = {
	&dev_attr_cabc_mode.attr,
	&dev_attr_cabc_available_modes.attr,
	NULL,
};
static const struct attribute_group acx565akm_cabc_attr_group = {
	.attrs = acx565akm_cabc_attrs,
};
/* -----------------------------------------------------------------------------
* Backlight Device
*/
/* Read the panel's current brightness value (one byte) via DCS. */
static int acx565akm_get_actual_brightness(struct acx565akm_panel *lcd)
{
	u8 bv;
	acx565akm_read(lcd, MIPI_DCS_GET_DISPLAY_BRIGHTNESS, &bv, 1);
	return bv;
}
/*
 * Program brightness @level and toggle the backlight/brightness-control
 * bits in the display control register accordingly.
 */
static void acx565akm_set_brightness(struct acx565akm_panel *lcd, int level)
{
	u16 ctrl;
	int bv;
	/*
	 * NOTE(review): bit 8 in the 16-bit words is presumably the
	 * data/command marker for the 9-bit SPI protocol — confirm.
	 */
	bv = level | (1 << 8);
	acx565akm_write(lcd, MIPI_DCS_SET_DISPLAY_BRIGHTNESS, (u8 *)&bv, 2);
	/* Read-modify-write the control display register. */
	acx565akm_read(lcd, MIPI_DCS_GET_CONTROL_DISPLAY, (u8 *)&ctrl, 1);
	if (level)
		ctrl |= CTRL_DISP_BRIGHTNESS_CTRL_ON |
			CTRL_DISP_BACKLIGHT_ON;
	else
		ctrl &= ~(CTRL_DISP_BRIGHTNESS_CTRL_ON |
			  CTRL_DISP_BACKLIGHT_ON);
	ctrl |= 1 << 8;
	acx565akm_write(lcd, MIPI_DCS_WRITE_CONTROL_DISPLAY, (u8 *)&ctrl, 2);
}
/* Push the backlight state to the panel; caller must hold lcd->mutex. */
static int acx565akm_bl_update_status_locked(struct backlight_device *dev)
{
	struct acx565akm_panel *lcd = dev_get_drvdata(&dev->dev);
	int level = backlight_get_brightness(dev);
	acx565akm_set_brightness(lcd, level);
	return 0;
}
/* backlight_ops.update_status: locked wrapper for the helper above. */
static int acx565akm_bl_update_status(struct backlight_device *dev)
{
	struct acx565akm_panel *lcd = dev_get_drvdata(&dev->dev);
	int ret;
	mutex_lock(&lcd->mutex);
	ret = acx565akm_bl_update_status_locked(dev);
	mutex_unlock(&lcd->mutex);
	return ret;
}
/*
 * backlight_ops.get_brightness: report the panel's actual brightness,
 * or 0 while the backlight is blanked.
 */
static int acx565akm_bl_get_intensity(struct backlight_device *dev)
{
	struct acx565akm_panel *lcd = dev_get_drvdata(&dev->dev);
	unsigned int level = 0;

	mutex_lock(&lcd->mutex);
	if (!backlight_is_blank(dev))
		level = acx565akm_get_actual_brightness(lcd);
	mutex_unlock(&lcd->mutex);

	return level;
}
/* Backlight operations for the registered backlight device. */
static const struct backlight_ops acx565akm_bl_ops = {
	.get_brightness = acx565akm_bl_get_intensity,
	.update_status  = acx565akm_bl_update_status,
};
/*
 * Register the backlight device and, when CABC is supported, its sysfs
 * attribute group. Seeds brightness and cabc_mode from current hardware
 * state, then pushes an initial update.
 */
static int acx565akm_backlight_init(struct acx565akm_panel *lcd)
{
	struct backlight_properties props = {
		.power = FB_BLANK_UNBLANK,
		.type = BACKLIGHT_RAW,
	};
	int ret;
	lcd->backlight = backlight_device_register(lcd->name, &lcd->spi->dev,
						   lcd, &acx565akm_bl_ops,
						   &props);
	if (IS_ERR(lcd->backlight)) {
		ret = PTR_ERR(lcd->backlight);
		lcd->backlight = NULL;
		return ret;
	}
	if (lcd->has_cabc) {
		ret = sysfs_create_group(&lcd->backlight->dev.kobj,
					 &acx565akm_cabc_attr_group);
		if (ret < 0) {
			dev_err(&lcd->spi->dev,
				"%s failed to create sysfs files\n", __func__);
			backlight_device_unregister(lcd->backlight);
			return ret;
		}
		lcd->cabc_mode = acx565akm_get_hw_cabc_mode(lcd);
	}
	/* Max brightness is assigned after registration, before first update. */
	lcd->backlight->props.max_brightness = 255;
	lcd->backlight->props.brightness = acx565akm_get_actual_brightness(lcd);
	acx565akm_bl_update_status_locked(lcd->backlight);
	return 0;
}
/* Tear down the sysfs group (if created) and unregister the backlight. */
static void acx565akm_backlight_cleanup(struct acx565akm_panel *lcd)
{
	if (lcd->has_cabc)
		sysfs_remove_group(&lcd->backlight->dev.kobj,
				   &acx565akm_cabc_attr_group);
	backlight_device_unregister(lcd->backlight);
}
/* -----------------------------------------------------------------------------
* DRM Bridge Operations
*/
/*
 * Enter (@on != 0) or exit sleep mode, enforcing the mandatory guard
 * interval between consecutive sleep in/out commands.
 */
static void acx565akm_set_sleep_mode(struct acx565akm_panel *lcd, int on)
{
	int cmd = on ? MIPI_DCS_ENTER_SLEEP_MODE : MIPI_DCS_EXIT_SLEEP_MODE;
	unsigned long wait;
	/*
	 * We have to keep 120msec between sleep in/out commands.
	 * (8.2.15, 8.2.16).
	 */
	wait = lcd->hw_guard_end - jiffies;
	/* Signed check handles jiffies already past hw_guard_end (wraparound). */
	if ((long)wait > 0 && wait <= lcd->hw_guard_wait) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(wait);
	}
	acx565akm_cmd(lcd, cmd);
	/* Arm the guard window for the next sleep in/out command. */
	lcd->hw_guard_wait = msecs_to_jiffies(120);
	lcd->hw_guard_end = jiffies + lcd->hw_guard_wait;
}
/* Turn the display output on or off via the matching DCS command. */
static void acx565akm_set_display_state(struct acx565akm_panel *lcd,
					int enabled)
{
	if (enabled)
		acx565akm_cmd(lcd, MIPI_DCS_SET_DISPLAY_ON);
	else
		acx565akm_cmd(lcd, MIPI_DCS_SET_DISPLAY_OFF);
}
/*
 * Bring the panel out of reset and into display-on state, then restore
 * the cached CABC mode and backlight level. Caller holds lcd->mutex.
 */
static int acx565akm_power_on(struct acx565akm_panel *lcd)
{
	/*FIXME tweak me */
	msleep(50);
	/* Deassert reset (logical 1 = released, per gpiod polarity). */
	gpiod_set_value(lcd->reset_gpio, 1);
	if (lcd->enabled) {
		dev_dbg(&lcd->spi->dev, "panel already enabled\n");
		return 0;
	}
	/*
	 * We have to meet all the following delay requirements:
	 * 1. tRW: reset pulse width 10usec (7.12.1)
	 * 2. tRT: reset cancel time 5msec (7.12.1)
	 * 3. Providing PCLK,HS,VS signals for 2 frames = ~50msec worst
	 *    case (7.6.2)
	 * 4. 120msec before the sleep out command (7.12.1)
	 */
	msleep(120);
	acx565akm_set_sleep_mode(lcd, 0);
	lcd->enabled = true;
	/* 5msec between sleep out and the next command. (8.2.16) */
	usleep_range(5000, 10000);
	acx565akm_set_display_state(lcd, 1);
	acx565akm_set_cabc_mode(lcd, lcd->cabc_mode);
	return acx565akm_bl_update_status_locked(lcd->backlight);
}
/*
 * Display-off, sleep-in and reset the panel. No-op if already disabled.
 * Caller holds lcd->mutex.
 */
static void acx565akm_power_off(struct acx565akm_panel *lcd)
{
	if (!lcd->enabled)
		return;
	acx565akm_set_display_state(lcd, 0);
	acx565akm_set_sleep_mode(lcd, 1);
	lcd->enabled = false;
	/*
	 * We have to provide PCLK,HS,VS signals for 2 frames (worst case
	 * ~50msec) after sending the sleep in command and asserting the
	 * reset signal. We probably could assert the reset w/o the delay
	 * but we still delay to avoid possible artifacts. (7.6.1)
	 */
	msleep(50);
	gpiod_set_value(lcd->reset_gpio, 0);
	/* FIXME need to tweak this delay */
	msleep(100);
}
/* drm_panel .disable handler: power the panel down under the lock. */
static int acx565akm_disable(struct drm_panel *panel)
{
	struct acx565akm_panel *lcd = to_acx565akm_device(panel);
	mutex_lock(&lcd->mutex);
	acx565akm_power_off(lcd);
	mutex_unlock(&lcd->mutex);
	return 0;
}
/*
 * drm_panel .enable handler: power the panel up under the lock.
 *
 * Fix: the return value of acx565akm_power_on() (which can fail through
 * the initial backlight update) was previously discarded and success was
 * always reported; propagate it so the DRM core can see the failure.
 */
static int acx565akm_enable(struct drm_panel *panel)
{
	struct acx565akm_panel *lcd = to_acx565akm_device(panel);
	int ret;

	mutex_lock(&lcd->mutex);
	ret = acx565akm_power_on(lcd);
	mutex_unlock(&lcd->mutex);

	return ret;
}
/* Fixed 800x480 display mode reported by this panel (24 MHz pixel clock). */
static const struct drm_display_mode acx565akm_mode = {
	.clock = 24000,
	.hdisplay = 800,
	.hsync_start = 800 + 28,
	.hsync_end = 800 + 28 + 4,
	.htotal = 800 + 28 + 4 + 24,
	.vdisplay = 480,
	.vsync_start = 480 + 3,
	.vsync_end = 480 + 3 + 3,
	.vtotal = 480 + 3 + 3 + 4,
	.type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
	.flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
	.width_mm = 77,
	.height_mm = 46,
};
/*
 * drm_panel .get_modes handler: report the single fixed mode plus the
 * physical size and bus sampling flags.
 */
static int acx565akm_get_modes(struct drm_panel *panel,
			       struct drm_connector *connector)
{
	struct drm_display_mode *mode;
	mode = drm_mode_duplicate(connector->dev, &acx565akm_mode);
	if (!mode)
		return -ENOMEM;
	drm_mode_set_name(mode);
	drm_mode_probed_add(connector, mode);
	connector->display_info.width_mm = acx565akm_mode.width_mm;
	connector->display_info.height_mm = acx565akm_mode.height_mm;
	connector->display_info.bus_flags = DRM_BUS_FLAG_DE_HIGH
					  | DRM_BUS_FLAG_SYNC_SAMPLE_POSEDGE
					  | DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE;
	/* One mode added. */
	return 1;
}
/* DRM panel operations (no prepare/unprepare steps are needed). */
static const struct drm_panel_funcs acx565akm_funcs = {
	.disable = acx565akm_disable,
	.enable = acx565akm_enable,
	.get_modes = acx565akm_get_modes,
};
/* -----------------------------------------------------------------------------
* Probe, Detect and Remove
*/
/*
 * Identify the attached panel from its MIPI display ID and record model,
 * capabilities and whether the bootloader left the panel enabled.
 * Returns -ENODEV for an unrecognized ID.
 */
static int acx565akm_detect(struct acx565akm_panel *lcd)
{
	__be32 value;
	u32 status;
	int ret = 0;
	/*
	 * After being taken out of reset the panel needs 5ms before the first
	 * command can be sent.
	 */
	gpiod_set_value(lcd->reset_gpio, 1);
	usleep_range(5000, 10000);
	acx565akm_read(lcd, MIPI_DCS_GET_DISPLAY_STATUS, (u8 *)&value, 4);
	status = __be32_to_cpu(value);
	/* Bits 17 and 10 together indicate "enabled by bootloader". */
	lcd->enabled = (status & (1 << 17)) && (status & (1 << 10));
	dev_dbg(&lcd->spi->dev,
		"LCD panel %s by bootloader (status 0x%04x)\n",
		lcd->enabled ? "enabled" : "disabled ", status);
	acx565akm_read(lcd, MIPI_DCS_GET_DISPLAY_ID, lcd->display_id, 3);
	dev_dbg(&lcd->spi->dev, "MIPI display ID: %02x%02x%02x\n",
		lcd->display_id[0], lcd->display_id[1], lcd->display_id[2]);
	/* display_id[0] selects the model; only ACX565AKM has BC/CABC. */
	switch (lcd->display_id[0]) {
	case 0x10:
		lcd->model = MIPID_VER_ACX565AKM;
		lcd->name = "acx565akm";
		lcd->has_bc = 1;
		lcd->has_cabc = 1;
		break;
	case 0x29:
		lcd->model = MIPID_VER_L4F00311;
		lcd->name = "l4f00311";
		break;
	case 0x45:
		lcd->model = MIPID_VER_LPH8923;
		lcd->name = "lph8923";
		break;
	case 0x83:
		lcd->model = MIPID_VER_LS041Y3;
		lcd->name = "ls041y3";
		break;
	default:
		lcd->name = "unknown";
		dev_err(&lcd->spi->dev, "unknown display ID\n");
		ret = -ENODEV;
		goto done;
	}
	lcd->revision = lcd->display_id[1];
	dev_info(&lcd->spi->dev, "%s rev %02x panel detected\n",
		 lcd->name, lcd->revision);
done:
	/* Re-assert reset unless the bootloader already enabled the panel. */
	if (!lcd->enabled)
		gpiod_set_value(lcd->reset_gpio, 0);
	return ret;
}
/*
 * SPI probe: acquire the reset GPIO, detect the panel model, register the
 * backlight when supported, and add the DRM panel.
 *
 * NOTE(review): spi->mode is assigned but spi_setup() is never called
 * here — confirm the SPI core applies the mode before the first transfer.
 */
static int acx565akm_probe(struct spi_device *spi)
{
	struct acx565akm_panel *lcd;
	int ret;
	lcd = devm_kzalloc(&spi->dev, sizeof(*lcd), GFP_KERNEL);
	if (!lcd)
		return -ENOMEM;
	spi_set_drvdata(spi, lcd);
	spi->mode = SPI_MODE_3;
	lcd->spi = spi;
	mutex_init(&lcd->mutex);
	lcd->reset_gpio = devm_gpiod_get(&spi->dev, "reset", GPIOD_OUT_HIGH);
	if (IS_ERR(lcd->reset_gpio)) {
		dev_err(&spi->dev, "failed to get reset GPIO\n");
		return PTR_ERR(lcd->reset_gpio);
	}
	ret = acx565akm_detect(lcd);
	if (ret < 0) {
		dev_err(&spi->dev, "panel detection failed\n");
		return ret;
	}
	if (lcd->has_bc) {
		ret = acx565akm_backlight_init(lcd);
		if (ret < 0)
			return ret;
	}
	drm_panel_init(&lcd->panel, &lcd->spi->dev, &acx565akm_funcs,
		       DRM_MODE_CONNECTOR_DPI);
	drm_panel_add(&lcd->panel);
	return 0;
}
/*
 * SPI remove: drop the panel from DRM, clean up the backlight, and make
 * sure the panel is powered down on the way out.
 */
static void acx565akm_remove(struct spi_device *spi)
{
	struct acx565akm_panel *lcd = spi_get_drvdata(spi);
	drm_panel_remove(&lcd->panel);
	if (lcd->has_bc)
		acx565akm_backlight_cleanup(lcd);
	/* Force power-down even if DRM never disabled the panel. */
	drm_panel_disable(&lcd->panel);
	drm_panel_unprepare(&lcd->panel);
}
/* Device-tree and SPI ID match tables plus driver registration. */
static const struct of_device_id acx565akm_of_match[] = {
	{ .compatible = "sony,acx565akm", },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, acx565akm_of_match);
static const struct spi_device_id acx565akm_ids[] = {
	{ "acx565akm", 0 },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(spi, acx565akm_ids);
static struct spi_driver acx565akm_driver = {
	.probe		= acx565akm_probe,
	.remove		= acx565akm_remove,
	.id_table	= acx565akm_ids,
	.driver		= {
		.name	= "panel-sony-acx565akm",
		.of_match_table = acx565akm_of_match,
	},
};
module_spi_driver(acx565akm_driver);
MODULE_AUTHOR("Nokia Corporation");
MODULE_DESCRIPTION("Sony ACX565AKM LCD Panel Driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/gpu/drm/panel/panel-sony-acx565akm.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Elida kd35t133 5.5" MIPI-DSI panel driver
* Copyright (C) 2020 Theobroma Systems Design und Consulting GmbH
*
* based on
*
* Rockteck jh057n00900 5.5" MIPI-DSI panel driver
* Copyright (C) Purism SPC 2019
*/
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/media-bus-format.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/regulator/consumer.h>
#include <video/display_timing.h>
#include <video/mipi_display.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_modes.h>
#include <drm/drm_panel.h>
/* Manufacturer specific Commands send via DSI */
#define KD35T133_CMD_INTERFACEMODECTRL 0xb0
#define KD35T133_CMD_FRAMERATECTRL 0xb1
#define KD35T133_CMD_DISPLAYINVERSIONCTRL 0xb4
#define KD35T133_CMD_DISPLAYFUNCTIONCTRL 0xb6
#define KD35T133_CMD_POWERCONTROL1 0xc0
#define KD35T133_CMD_POWERCONTROL2 0xc1
#define KD35T133_CMD_VCOMCONTROL 0xc5
#define KD35T133_CMD_POSITIVEGAMMA 0xe0
#define KD35T133_CMD_NEGATIVEGAMMA 0xe1
#define KD35T133_CMD_SETIMAGEFUNCTION 0xe9
#define KD35T133_CMD_ADJUSTCONTROL3 0xf7
/* Per-device state for the Elida KD35T133 DSI panel. */
struct kd35t133 {
	struct device *dev;			/* underlying DSI device */
	struct drm_panel panel;			/* DRM panel instance */
	struct gpio_desc *reset_gpio;		/* optional reset line */
	struct regulator *vdd;			/* panel power supply */
	struct regulator *iovcc;		/* I/O voltage supply */
	enum drm_panel_orientation orientation;	/* from device tree */
	bool prepared;				/* guards prepare/unprepare */
};
/* Resolve a drm_panel pointer back to its kd35t133 container. */
static inline struct kd35t133 *panel_to_kd35t133(struct drm_panel *panel)
{
	return container_of(panel, struct kd35t133, panel);
}
/*
 * Send the vendor-supplied register init sequence over DSI. Individual
 * write errors are not checked (macro semantics); always returns 0.
 */
static int kd35t133_init_sequence(struct kd35t133 *ctx)
{
	struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
	struct device *dev = ctx->dev;
	/*
	 * Init sequence was supplied by the panel vendor with minimal
	 * documentation.
	 */
	mipi_dsi_dcs_write_seq(dsi, KD35T133_CMD_POSITIVEGAMMA,
			       0x00, 0x13, 0x18, 0x04, 0x0f, 0x06, 0x3a, 0x56,
			       0x4d, 0x03, 0x0a, 0x06, 0x30, 0x3e, 0x0f);
	mipi_dsi_dcs_write_seq(dsi, KD35T133_CMD_NEGATIVEGAMMA,
			       0x00, 0x13, 0x18, 0x01, 0x11, 0x06, 0x38, 0x34,
			       0x4d, 0x06, 0x0d, 0x0b, 0x31, 0x37, 0x0f);
	mipi_dsi_dcs_write_seq(dsi, KD35T133_CMD_POWERCONTROL1, 0x18, 0x17);
	mipi_dsi_dcs_write_seq(dsi, KD35T133_CMD_POWERCONTROL2, 0x41);
	mipi_dsi_dcs_write_seq(dsi, KD35T133_CMD_VCOMCONTROL, 0x00, 0x1a, 0x80);
	mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_ADDRESS_MODE, 0x48);
	mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_PIXEL_FORMAT, 0x55);
	mipi_dsi_dcs_write_seq(dsi, KD35T133_CMD_INTERFACEMODECTRL, 0x00);
	mipi_dsi_dcs_write_seq(dsi, KD35T133_CMD_FRAMERATECTRL, 0xa0);
	mipi_dsi_dcs_write_seq(dsi, KD35T133_CMD_DISPLAYINVERSIONCTRL, 0x02);
	mipi_dsi_dcs_write_seq(dsi, KD35T133_CMD_DISPLAYFUNCTIONCTRL,
			       0x20, 0x02);
	mipi_dsi_dcs_write_seq(dsi, KD35T133_CMD_SETIMAGEFUNCTION, 0x00);
	mipi_dsi_dcs_write_seq(dsi, KD35T133_CMD_ADJUSTCONTROL3,
			       0xa9, 0x51, 0x2c, 0x82);
	mipi_dsi_dcs_write(dsi, MIPI_DCS_ENTER_INVERT_MODE, NULL, 0);
	dev_dbg(dev, "Panel init sequence done\n");
	return 0;
}
/*
 * drm_panel .unprepare handler: display off, sleep in, then cut power.
 *
 * Fix: a failed enter-sleep-mode used to return early with both
 * regulators still enabled and 'prepared' still set, leaking a regulator
 * enable count and desynchronizing the prepared state. Power-down must
 * proceed regardless; DSI errors are only logged.
 */
static int kd35t133_unprepare(struct drm_panel *panel)
{
	struct kd35t133 *ctx = panel_to_kd35t133(panel);
	struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
	int ret;

	if (!ctx->prepared)
		return 0;

	ret = mipi_dsi_dcs_set_display_off(dsi);
	if (ret < 0)
		dev_err(ctx->dev, "failed to set display off: %d\n", ret);

	ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
	if (ret < 0)
		dev_err(ctx->dev, "failed to enter sleep mode: %d\n", ret);

	regulator_disable(ctx->iovcc);
	regulator_disable(ctx->vdd);

	ctx->prepared = false;

	return 0;
}
/*
 * drm_panel .prepare handler: power up the supplies, pulse reset, run
 * the init sequence and turn the display on. Uses goto-based cleanup to
 * unwind the regulators on any failure.
 */
static int kd35t133_prepare(struct drm_panel *panel)
{
	struct kd35t133 *ctx = panel_to_kd35t133(panel);
	struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
	int ret;
	if (ctx->prepared)
		return 0;
	dev_dbg(ctx->dev, "Resetting the panel\n");
	ret = regulator_enable(ctx->vdd);
	if (ret < 0) {
		dev_err(ctx->dev, "Failed to enable vdd supply: %d\n", ret);
		return ret;
	}
	ret = regulator_enable(ctx->iovcc);
	if (ret < 0) {
		dev_err(ctx->dev, "Failed to enable iovcc supply: %d\n", ret);
		goto disable_vdd;
	}
	/* Reset pulse: assert briefly, then release and let the panel settle. */
	msleep(20);
	gpiod_set_value_cansleep(ctx->reset_gpio, 1);
	usleep_range(10, 20);
	gpiod_set_value_cansleep(ctx->reset_gpio, 0);
	msleep(20);
	ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
	if (ret < 0) {
		dev_err(ctx->dev, "Failed to exit sleep mode: %d\n", ret);
		goto disable_iovcc;
	}
	msleep(250);
	ret = kd35t133_init_sequence(ctx);
	if (ret < 0) {
		dev_err(ctx->dev, "Panel init sequence failed: %d\n", ret);
		goto disable_iovcc;
	}
	ret = mipi_dsi_dcs_set_display_on(dsi);
	if (ret < 0) {
		dev_err(ctx->dev, "Failed to set display on: %d\n", ret);
		goto disable_iovcc;
	}
	msleep(50);
	ctx->prepared = true;
	return 0;
disable_iovcc:
	regulator_disable(ctx->iovcc);
disable_vdd:
	regulator_disable(ctx->vdd);
	return ret;
}
/* Fixed 320x480 display mode for the KD35T133 (17 MHz pixel clock). */
static const struct drm_display_mode default_mode = {
	.hdisplay	= 320,
	.hsync_start	= 320 + 130,
	.hsync_end	= 320 + 130 + 4,
	.htotal		= 320 + 130 + 4 + 130,
	.vdisplay	= 480,
	.vsync_start	= 480 + 2,
	.vsync_end	= 480 + 2 + 1,
	.vtotal		= 480 + 2 + 1 + 2,
	.clock		= 17000,
	.width_mm	= 42,
	.height_mm	= 82,
};
/*
 * drm_panel .get_modes handler: report the single fixed mode, physical
 * size, and the panel orientation from the device tree.
 */
static int kd35t133_get_modes(struct drm_panel *panel,
			      struct drm_connector *connector)
{
	struct kd35t133 *ctx = panel_to_kd35t133(panel);
	struct drm_display_mode *mode;
	mode = drm_mode_duplicate(connector->dev, &default_mode);
	if (!mode) {
		dev_err(ctx->dev, "Failed to add mode %ux%u@%u\n",
			default_mode.hdisplay, default_mode.vdisplay,
			drm_mode_vrefresh(&default_mode));
		return -ENOMEM;
	}
	drm_mode_set_name(mode);
	mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
	connector->display_info.width_mm = mode->width_mm;
	connector->display_info.height_mm = mode->height_mm;
	drm_mode_probed_add(connector, mode);
	/*
	 * TODO: Remove once all drm drivers call
	 * drm_connector_set_orientation_from_panel()
	 */
	drm_connector_set_panel_orientation(connector, ctx->orientation);
	return 1;
}
/* drm_panel .get_orientation handler: report the DT-provided orientation. */
static enum drm_panel_orientation kd35t133_get_orientation(struct drm_panel *panel)
{
	struct kd35t133 *ctx = panel_to_kd35t133(panel);
	return ctx->orientation;
}
/* DRM panel operations for the KD35T133. */
static const struct drm_panel_funcs kd35t133_funcs = {
	.unprepare	= kd35t133_unprepare,
	.prepare	= kd35t133_prepare,
	.get_modes	= kd35t133_get_modes,
	.get_orientation = kd35t133_get_orientation,
};
/*
 * DSI probe: acquire reset GPIO, supplies and orientation, configure the
 * DSI link (1 lane, RGB888, burst video mode), then register the panel
 * and attach to the DSI host.
 */
static int kd35t133_probe(struct mipi_dsi_device *dsi)
{
	struct device *dev = &dsi->dev;
	struct kd35t133 *ctx;
	int ret;
	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;
	ctx->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
	if (IS_ERR(ctx->reset_gpio)) {
		dev_err(dev, "cannot get reset gpio\n");
		return PTR_ERR(ctx->reset_gpio);
	}
	ctx->vdd = devm_regulator_get(dev, "vdd");
	if (IS_ERR(ctx->vdd)) {
		ret = PTR_ERR(ctx->vdd);
		/* Stay quiet on probe deferral. */
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "Failed to request vdd regulator: %d\n", ret);
		return ret;
	}
	ctx->iovcc = devm_regulator_get(dev, "iovcc");
	if (IS_ERR(ctx->iovcc)) {
		ret = PTR_ERR(ctx->iovcc);
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "Failed to request iovcc regulator: %d\n", ret);
		return ret;
	}
	ret = of_drm_get_panel_orientation(dev->of_node, &ctx->orientation);
	if (ret < 0) {
		dev_err(dev, "%pOF: failed to get orientation %d\n", dev->of_node, ret);
		return ret;
	}
	mipi_dsi_set_drvdata(dsi, ctx);
	ctx->dev = dev;
	dsi->lanes = 1;
	dsi->format = MIPI_DSI_FMT_RGB888;
	dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
			  MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_NO_EOT_PACKET |
			  MIPI_DSI_CLOCK_NON_CONTINUOUS;
	drm_panel_init(&ctx->panel, &dsi->dev, &kd35t133_funcs,
		       DRM_MODE_CONNECTOR_DSI);
	ret = drm_panel_of_backlight(&ctx->panel);
	if (ret)
		return ret;
	drm_panel_add(&ctx->panel);
	ret = mipi_dsi_attach(dsi);
	if (ret < 0) {
		dev_err(dev, "mipi_dsi_attach failed: %d\n", ret);
		drm_panel_remove(&ctx->panel);
		return ret;
	}
	return 0;
}
/*
 * Shutdown handler: make sure the panel is powered off.
 * NOTE(review): unprepare is called before disable here, the reverse of
 * the usual disable-then-unprepare order — confirm this is intentional.
 */
static void kd35t133_shutdown(struct mipi_dsi_device *dsi)
{
	struct kd35t133 *ctx = mipi_dsi_get_drvdata(dsi);
	int ret;
	ret = drm_panel_unprepare(&ctx->panel);
	if (ret < 0)
		dev_err(&dsi->dev, "Failed to unprepare panel: %d\n", ret);
	ret = drm_panel_disable(&ctx->panel);
	if (ret < 0)
		dev_err(&dsi->dev, "Failed to disable panel: %d\n", ret);
}
/* DSI remove: power down, detach from the host, and drop the panel. */
static void kd35t133_remove(struct mipi_dsi_device *dsi)
{
	struct kd35t133 *ctx = mipi_dsi_get_drvdata(dsi);
	int ret;
	kd35t133_shutdown(dsi);
	ret = mipi_dsi_detach(dsi);
	if (ret < 0)
		dev_err(&dsi->dev, "Failed to detach from DSI host: %d\n", ret);
	drm_panel_remove(&ctx->panel);
}
/* Device-tree match table and DSI driver registration. */
static const struct of_device_id kd35t133_of_match[] = {
	{ .compatible = "elida,kd35t133" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, kd35t133_of_match);
static struct mipi_dsi_driver kd35t133_driver = {
	.driver = {
		.name = "panel-elida-kd35t133",
		.of_match_table = kd35t133_of_match,
	},
	.probe	= kd35t133_probe,
	.remove = kd35t133_remove,
	.shutdown = kd35t133_shutdown,
};
module_mipi_dsi_driver(kd35t133_driver);
MODULE_AUTHOR("Heiko Stuebner <[email protected]>");
MODULE_DESCRIPTION("DRM driver for Elida kd35t133 MIPI DSI panel");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/gpu/drm/panel/panel-elida-kd35t133.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Samsung S6D7AA0 MIPI-DSI TFT LCD controller drm_panel driver.
*
* Copyright (C) 2022 Artur Weber <[email protected]>
*/
#include <linux/backlight.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/regulator/consumer.h>
#include <linux/of.h>
#include <video/mipi_display.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_modes.h>
#include <drm/drm_panel.h>
/* Manufacturer command set */
#define MCS_BL_CTL 0xc3
#define MCS_OTP_RELOAD 0xd0
#define MCS_PASSWD1 0xf0
#define MCS_PASSWD2 0xf1
#define MCS_PASSWD3 0xfc
/* Per-device state for the Samsung S6D7AA0 controller. */
struct s6d7aa0 {
	struct drm_panel panel;			/* DRM panel instance */
	struct mipi_dsi_device *dsi;		/* DSI link */
	struct gpio_desc *reset_gpio;		/* panel reset line */
	struct regulator_bulk_data supplies[2];	/* "power" and "vmipi" */
	const struct s6d7aa0_panel_desc *desc;	/* per-model description */
};
/* Per-model description selected through the OF match data. */
struct s6d7aa0_panel_desc {
	unsigned int panel_type;		/* enum s6d7aa0_panels value */
	int (*init_func)(struct s6d7aa0 *ctx);	/* model init sequence */
	int (*off_func)(struct s6d7aa0 *ctx);	/* model-specific off step */
	const struct drm_display_mode *drm_mode; /* fixed display mode */
	unsigned long mode_flags;		/* extra MIPI_DSI_MODE_* flags */
	u32 bus_flags;				/* DRM_BUS_FLAG_* for the connector */
	bool has_backlight;			/* DSI-controlled backlight fallback */
	bool use_passwd3;			/* model needs the MCS_PASSWD3 lock */
};
/* Panels driven by this controller. */
enum s6d7aa0_panels {
	S6D7AA0_PANEL_LSL080AL02,
	S6D7AA0_PANEL_LSL080AL03,
	S6D7AA0_PANEL_LTL101AT01,
};
/* Resolve a drm_panel pointer back to its s6d7aa0 container. */
static inline struct s6d7aa0 *panel_to_s6d7aa0(struct drm_panel *panel)
{
	return container_of(panel, struct s6d7aa0, panel);
}
/* Pulse the reset line: assert 50 ms, release, then settle 50 ms. */
static void s6d7aa0_reset(struct s6d7aa0 *ctx)
{
	gpiod_set_value_cansleep(ctx->reset_gpio, 1);
	msleep(50);
	gpiod_set_value_cansleep(ctx->reset_gpio, 0);
	msleep(50);
}
/*
 * Lock (@lock true) or unlock the manufacturer command set via the
 * password registers; PASSWD3 is only used on models that need it.
 * Always returns 0 (write errors are not surfaced by the macro).
 */
static int s6d7aa0_lock(struct s6d7aa0 *ctx, bool lock)
{
	struct mipi_dsi_device *dsi = ctx->dsi;
	if (lock) {
		mipi_dsi_dcs_write_seq(dsi, MCS_PASSWD1, 0xa5, 0xa5);
		mipi_dsi_dcs_write_seq(dsi, MCS_PASSWD2, 0xa5, 0xa5);
		if (ctx->desc->use_passwd3)
			mipi_dsi_dcs_write_seq(dsi, MCS_PASSWD3, 0x5a, 0x5a);
	} else {
		mipi_dsi_dcs_write_seq(dsi, MCS_PASSWD1, 0x5a, 0x5a);
		mipi_dsi_dcs_write_seq(dsi, MCS_PASSWD2, 0x5a, 0x5a);
		if (ctx->desc->use_passwd3)
			mipi_dsi_dcs_write_seq(dsi, MCS_PASSWD3, 0xa5, 0xa5);
	}
	return 0;
}
/*
 * Run the model-specific init sequence and turn the display on. On init
 * failure, the panel is put back into reset.
 */
static int s6d7aa0_on(struct s6d7aa0 *ctx)
{
	struct mipi_dsi_device *dsi = ctx->dsi;
	struct device *dev = &dsi->dev;
	int ret;
	ret = ctx->desc->init_func(ctx);
	if (ret < 0) {
		dev_err(dev, "Failed to initialize panel: %d\n", ret);
		gpiod_set_value_cansleep(ctx->reset_gpio, 1);
		return ret;
	}
	ret = mipi_dsi_dcs_set_display_on(dsi);
	if (ret < 0) {
		dev_err(dev, "Failed to set display on: %d\n", ret);
		return ret;
	}
	return 0;
}
/*
 * Power-down command sequence: model-specific off step, display off,
 * then sleep in, with the datasheet-mandated delays between stages.
 */
static int s6d7aa0_off(struct s6d7aa0 *ctx)
{
	struct mipi_dsi_device *dsi = ctx->dsi;
	struct device *dev = &dsi->dev;
	int ret;
	ret = ctx->desc->off_func(ctx);
	if (ret < 0) {
		dev_err(dev, "Panel-specific off function failed: %d\n", ret);
		return ret;
	}
	ret = mipi_dsi_dcs_set_display_off(dsi);
	if (ret < 0) {
		dev_err(dev, "Failed to set display off: %d\n", ret);
		return ret;
	}
	msleep(64);
	ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
	if (ret < 0) {
		dev_err(dev, "Failed to enter sleep mode: %d\n", ret);
		return ret;
	}
	msleep(120);
	return 0;
}
/*
 * drm_panel .prepare handler: enable the supplies, reset the panel and
 * run the power-on sequence; re-assert reset on failure.
 */
static int s6d7aa0_prepare(struct drm_panel *panel)
{
	struct s6d7aa0 *ctx = panel_to_s6d7aa0(panel);
	struct device *dev = &ctx->dsi->dev;
	int ret;
	ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
	if (ret < 0) {
		dev_err(dev, "Failed to enable regulators: %d\n", ret);
		return ret;
	}
	s6d7aa0_reset(ctx);
	ret = s6d7aa0_on(ctx);
	if (ret < 0) {
		dev_err(dev, "Failed to initialize panel: %d\n", ret);
		gpiod_set_value_cansleep(ctx->reset_gpio, 1);
		return ret;
	}
	return 0;
}
/* drm_panel .disable handler: best-effort power-down sequence. */
static int s6d7aa0_disable(struct drm_panel *panel)
{
	struct s6d7aa0 *ctx = panel_to_s6d7aa0(panel);
	struct device *dev = &ctx->dsi->dev;
	int ret;
	ret = s6d7aa0_off(ctx);
	if (ret < 0)
		dev_err(dev, "Failed to un-initialize panel: %d\n", ret);
	return 0;
}
/* drm_panel .unprepare handler: assert reset and cut the supplies. */
static int s6d7aa0_unprepare(struct drm_panel *panel)
{
	struct s6d7aa0 *ctx = panel_to_s6d7aa0(panel);
	gpiod_set_value_cansleep(ctx->reset_gpio, 1);
	regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
	return 0;
}
/* Backlight control code */
/*
 * backlight_ops.update_status: forward the requested brightness to the
 * panel via the DCS set-display-brightness command.
 */
static int s6d7aa0_bl_update_status(struct backlight_device *bl)
{
	struct mipi_dsi_device *dsi = bl_get_data(bl);
	u16 level = backlight_get_brightness(bl);
	int err;

	err = mipi_dsi_dcs_set_display_brightness(dsi, level);
	if (err < 0)
		return err;

	return 0;
}
/* backlight_ops.get_brightness: read back the brightness over DSI. */
static int s6d7aa0_bl_get_brightness(struct backlight_device *bl)
{
	struct mipi_dsi_device *dsi = bl_get_data(bl);
	u16 brightness;
	int ret;
	ret = mipi_dsi_dcs_get_display_brightness(dsi, &brightness);
	if (ret < 0)
		return ret;
	/* Only the low byte is meaningful for this panel's 0-255 range. */
	return brightness & 0xff;
}
/* Backlight operations for the DSI-controlled backlight fallback. */
static const struct backlight_ops s6d7aa0_bl_ops = {
	.update_status = s6d7aa0_bl_update_status,
	.get_brightness = s6d7aa0_bl_get_brightness,
};
/* Register a devm backlight device driving brightness over DSI (0-255). */
static struct backlight_device *
s6d7aa0_create_backlight(struct mipi_dsi_device *dsi)
{
	struct device *dev = &dsi->dev;
	const struct backlight_properties props = {
		.type = BACKLIGHT_RAW,
		.brightness = 255,
		.max_brightness = 255,
	};
	return devm_backlight_device_register(dev, dev_name(dev), dev, dsi,
					      &s6d7aa0_bl_ops, &props);
}
/* Initialization code and structures for LSL080AL02 panel */
/*
 * LSL080AL02 init sequence: unlock the MCS, program vendor registers,
 * exit sleep, re-lock, and turn the display on.
 */
static int s6d7aa0_lsl080al02_init(struct s6d7aa0 *ctx)
{
	struct mipi_dsi_device *dsi = ctx->dsi;
	struct device *dev = &dsi->dev;
	int ret;
	usleep_range(20000, 25000);
	ret = s6d7aa0_lock(ctx, false);
	if (ret < 0) {
		dev_err(dev, "Failed to unlock registers: %d\n", ret);
		return ret;
	}
	mipi_dsi_dcs_write_seq(dsi, MCS_OTP_RELOAD, 0x00, 0x10);
	usleep_range(1000, 1500);
	/* SEQ_B6_PARAM_8_R01 */
	mipi_dsi_dcs_write_seq(dsi, 0xb6, 0x10);
	/* BL_CTL_ON */
	mipi_dsi_dcs_write_seq(dsi, MCS_BL_CTL, 0x40, 0x00, 0x28);
	usleep_range(5000, 6000);
	mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_ADDRESS_MODE, 0x04);
	ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
	if (ret < 0) {
		dev_err(dev, "Failed to exit sleep mode: %d\n", ret);
		return ret;
	}
	msleep(120);
	mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_ADDRESS_MODE, 0x00);
	ret = s6d7aa0_lock(ctx, true);
	if (ret < 0) {
		dev_err(dev, "Failed to lock registers: %d\n", ret);
		return ret;
	}
	ret = mipi_dsi_dcs_set_display_on(dsi);
	if (ret < 0) {
		dev_err(dev, "Failed to set display on: %d\n", ret);
		return ret;
	}
	return 0;
}
/* LSL080AL02 model-specific off step: disable the backlight control. */
static int s6d7aa0_lsl080al02_off(struct s6d7aa0 *ctx)
{
	struct mipi_dsi_device *dsi = ctx->dsi;
	/* BL_CTL_OFF */
	mipi_dsi_dcs_write_seq(dsi, MCS_BL_CTL, 0x40, 0x00, 0x20);
	return 0;
}
/* 800x1280@60 mode and panel description for the LSL080AL02. */
static const struct drm_display_mode s6d7aa0_lsl080al02_mode = {
	.clock = (800 + 16 + 4 + 140) * (1280 + 8 + 4 + 4) * 60 / 1000,
	.hdisplay = 800,
	.hsync_start = 800 + 16,
	.hsync_end = 800 + 16 + 4,
	.htotal = 800 + 16 + 4 + 140,
	.vdisplay = 1280,
	.vsync_start = 1280 + 8,
	.vsync_end = 1280 + 8 + 4,
	.vtotal = 1280 + 8 + 4 + 4,
	.width_mm = 108,
	.height_mm = 173,
};
static const struct s6d7aa0_panel_desc s6d7aa0_lsl080al02_desc = {
	.panel_type = S6D7AA0_PANEL_LSL080AL02,
	.init_func = s6d7aa0_lsl080al02_init,
	.off_func = s6d7aa0_lsl080al02_off,
	.drm_mode = &s6d7aa0_lsl080al02_mode,
	.mode_flags = MIPI_DSI_MODE_VSYNC_FLUSH | MIPI_DSI_MODE_VIDEO_NO_HFP,
	.bus_flags = DRM_BUS_FLAG_DE_HIGH,
	.has_backlight = false,
	.use_passwd3 = false,
};
/* Initialization code and structures for LSL080AL03 panel */
/*
 * Shared init sequence for LSL080AL03 and LTL101AT01 (selected via
 * desc->panel_type): unlock the MCS, program model-specific registers,
 * exit sleep, re-lock, and turn the display on.
 */
static int s6d7aa0_lsl080al03_init(struct s6d7aa0 *ctx)
{
	struct mipi_dsi_device *dsi = ctx->dsi;
	struct device *dev = &dsi->dev;
	int ret;
	usleep_range(20000, 25000);
	ret = s6d7aa0_lock(ctx, false);
	if (ret < 0) {
		dev_err(dev, "Failed to unlock registers: %d\n", ret);
		return ret;
	}
	/* Model-specific backlight/vendor register values. */
	if (ctx->desc->panel_type == S6D7AA0_PANEL_LSL080AL03) {
		mipi_dsi_dcs_write_seq(dsi, MCS_BL_CTL, 0xc7, 0x00, 0x29);
		mipi_dsi_dcs_write_seq(dsi, 0xbc, 0x01, 0x4e, 0xa0);
		mipi_dsi_dcs_write_seq(dsi, 0xfd, 0x16, 0x10, 0x11, 0x23,
				       0x09);
		mipi_dsi_dcs_write_seq(dsi, 0xfe, 0x00, 0x02, 0x03, 0x21,
				       0x80, 0x78);
	} else if (ctx->desc->panel_type == S6D7AA0_PANEL_LTL101AT01) {
		mipi_dsi_dcs_write_seq(dsi, MCS_BL_CTL, 0x40, 0x00, 0x08);
		mipi_dsi_dcs_write_seq(dsi, 0xbc, 0x01, 0x4e, 0x0b);
		mipi_dsi_dcs_write_seq(dsi, 0xfd, 0x16, 0x10, 0x11, 0x23,
				       0x09);
		mipi_dsi_dcs_write_seq(dsi, 0xfe, 0x00, 0x02, 0x03, 0x21,
				       0x80, 0x68);
	}
	mipi_dsi_dcs_write_seq(dsi, 0xb3, 0x51);
	mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x24);
	mipi_dsi_dcs_write_seq(dsi, 0xf2, 0x02, 0x08, 0x08);
	usleep_range(10000, 11000);
	mipi_dsi_dcs_write_seq(dsi, 0xc0, 0x80, 0x80, 0x30);
	mipi_dsi_dcs_write_seq(dsi, 0xcd,
			       0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e,
			       0x2e, 0x2e, 0x2e, 0x2e, 0x2e);
	mipi_dsi_dcs_write_seq(dsi, 0xce,
			       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			       0x00, 0x00, 0x00, 0x00, 0x00);
	mipi_dsi_dcs_write_seq(dsi, 0xc1, 0x03);
	ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
	if (ret < 0) {
		dev_err(dev, "Failed to exit sleep mode: %d\n", ret);
		return ret;
	}
	ret = s6d7aa0_lock(ctx, true);
	if (ret < 0) {
		dev_err(dev, "Failed to lock registers: %d\n", ret);
		return ret;
	}
	ret = mipi_dsi_dcs_set_display_on(dsi);
	if (ret < 0) {
		dev_err(dev, "Failed to set display on: %d\n", ret);
		return ret;
	}
	return 0;
}
/* LSL080AL03/LTL101AT01 model-specific off step (vendor command 0x22). */
static int s6d7aa0_lsl080al03_off(struct s6d7aa0 *ctx)
{
	struct mipi_dsi_device *dsi = ctx->dsi;
	mipi_dsi_dcs_write_seq(dsi, 0x22, 0x00);
	return 0;
}
/* 768x1024@60 mode and panel description for the LSL080AL03. */
static const struct drm_display_mode s6d7aa0_lsl080al03_mode = {
	.clock = (768 + 18 + 16 + 126) * (1024 + 8 + 2 + 6) * 60 / 1000,
	.hdisplay = 768,
	.hsync_start = 768 + 18,
	.hsync_end = 768 + 18 + 16,
	.htotal = 768 + 18 + 16 + 126,
	.vdisplay = 1024,
	.vsync_start = 1024 + 8,
	.vsync_end = 1024 + 8 + 2,
	.vtotal = 1024 + 8 + 2 + 6,
	.width_mm = 122,
	.height_mm = 163,
};
static const struct s6d7aa0_panel_desc s6d7aa0_lsl080al03_desc = {
	.panel_type = S6D7AA0_PANEL_LSL080AL03,
	.init_func = s6d7aa0_lsl080al03_init,
	.off_func = s6d7aa0_lsl080al03_off,
	.drm_mode = &s6d7aa0_lsl080al03_mode,
	.mode_flags = MIPI_DSI_MODE_NO_EOT_PACKET,
	.bus_flags = 0,
	.has_backlight = true,
	.use_passwd3 = true,
};
/* Initialization structures for LTL101AT01 panel */
/* 768x1024@60 mode and panel description for the LTL101AT01. */
static const struct drm_display_mode s6d7aa0_ltl101at01_mode = {
	.clock = (768 + 96 + 16 + 184) * (1024 + 8 + 2 + 6) * 60 / 1000,
	.hdisplay = 768,
	.hsync_start = 768 + 96,
	.hsync_end = 768 + 96 + 16,
	.htotal = 768 + 96 + 16 + 184,
	.vdisplay = 1024,
	.vsync_start = 1024 + 8,
	.vsync_end = 1024 + 8 + 2,
	.vtotal = 1024 + 8 + 2 + 6,
	.width_mm = 148,
	.height_mm = 197,
};
static const struct s6d7aa0_panel_desc s6d7aa0_ltl101at01_desc = {
	.panel_type = S6D7AA0_PANEL_LTL101AT01,
	.init_func = s6d7aa0_lsl080al03_init, /* Similar init to LSL080AL03 */
	.off_func = s6d7aa0_lsl080al03_off,
	.drm_mode = &s6d7aa0_ltl101at01_mode,
	.mode_flags = MIPI_DSI_MODE_NO_EOT_PACKET,
	.bus_flags = 0,
	.has_backlight = true,
	.use_passwd3 = true,
};
/*
 * drm_panel .get_modes handler: report the model's single fixed mode,
 * physical size and bus flags.
 *
 * Fix: the previous version NULL-checked the result of container_of(),
 * which can never be NULL for a valid panel pointer — dead code removed,
 * and the existing panel_to_s6d7aa0() helper is used for consistency.
 */
static int s6d7aa0_get_modes(struct drm_panel *panel,
			     struct drm_connector *connector)
{
	struct s6d7aa0 *ctx = panel_to_s6d7aa0(panel);
	struct drm_display_mode *mode;

	mode = drm_mode_duplicate(connector->dev, ctx->desc->drm_mode);
	if (!mode)
		return -ENOMEM;

	drm_mode_set_name(mode);

	mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
	connector->display_info.width_mm = mode->width_mm;
	connector->display_info.height_mm = mode->height_mm;
	connector->display_info.bus_flags = ctx->desc->bus_flags;

	drm_mode_probed_add(connector, mode);

	return 1;
}
/* drm_panel operations; disable/prepare/unprepare are defined earlier. */
static const struct drm_panel_funcs s6d7aa0_panel_funcs = {
	.disable = s6d7aa0_disable,
	.prepare = s6d7aa0_prepare,
	.unprepare = s6d7aa0_unprepare,
	.get_modes = s6d7aa0_get_modes,
};
/*
 * MIPI-DSI probe: look up the per-variant description matched via OF,
 * acquire the regulators and reset GPIO, configure the DSI link,
 * register the panel and attach to the DSI host.
 */
static int s6d7aa0_probe(struct mipi_dsi_device *dsi)
{
	struct device *dev = &dsi->dev;
	struct s6d7aa0 *ctx;
	int ret;

	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	/* Variant data selected by the OF compatible string. */
	ctx->desc = of_device_get_match_data(dev);
	if (!ctx->desc)
		return -ENODEV;

	ctx->supplies[0].supply = "power";
	ctx->supplies[1].supply = "vmipi";
	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(ctx->supplies),
				      ctx->supplies);
	if (ret < 0)
		return dev_err_probe(dev, ret, "Failed to get regulators\n");

	/* Request the reset line initially driven to its active level. */
	ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
	if (IS_ERR(ctx->reset_gpio))
		return dev_err_probe(dev, PTR_ERR(ctx->reset_gpio),
				     "Failed to get reset-gpios\n");

	ctx->dsi = dsi;
	mipi_dsi_set_drvdata(dsi, ctx);

	/* Fixed 4-lane RGB888 video-mode link plus per-variant flags. */
	dsi->lanes = 4;
	dsi->format = MIPI_DSI_FMT_RGB888;
	dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST
		| ctx->desc->mode_flags;

	drm_panel_init(&ctx->panel, dev, &s6d7aa0_panel_funcs,
		       DRM_MODE_CONNECTOR_DSI);
	ctx->panel.prepare_prev_first = true;

	ret = drm_panel_of_backlight(&ctx->panel);
	if (ret)
		return dev_err_probe(dev, ret, "Failed to get backlight\n");

	/* Use DSI-based backlight as fallback if available */
	if (ctx->desc->has_backlight && !ctx->panel.backlight) {
		ctx->panel.backlight = s6d7aa0_create_backlight(dsi);
		if (IS_ERR(ctx->panel.backlight))
			return dev_err_probe(dev, PTR_ERR(ctx->panel.backlight),
					     "Failed to create backlight\n");
	}

	drm_panel_add(&ctx->panel);

	ret = mipi_dsi_attach(dsi);
	if (ret < 0) {
		dev_err(dev, "Failed to attach to DSI host: %d\n", ret);
		drm_panel_remove(&ctx->panel);
		return ret;
	}

	return 0;
}
/* Driver .remove: detach from the DSI host, then unregister the panel. */
static void s6d7aa0_remove(struct mipi_dsi_device *dsi)
{
	struct s6d7aa0 *ctx = mipi_dsi_get_drvdata(dsi);
	int err;

	err = mipi_dsi_detach(dsi);
	if (err < 0)
		dev_err(&dsi->dev, "Failed to detach from DSI host: %d\n", err);

	drm_panel_remove(&ctx->panel);
}
/* OF match table: one entry per supported panel variant. */
static const struct of_device_id s6d7aa0_of_match[] = {
	{
		.compatible = "samsung,lsl080al02",
		.data = &s6d7aa0_lsl080al02_desc
	},
	{
		.compatible = "samsung,lsl080al03",
		.data = &s6d7aa0_lsl080al03_desc
	},
	{
		.compatible = "samsung,ltl101at01",
		.data = &s6d7aa0_ltl101at01_desc
	},
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, s6d7aa0_of_match);
/* MIPI-DSI driver glue: probe/remove above, matched via the OF table. */
static struct mipi_dsi_driver s6d7aa0_driver = {
	.probe = s6d7aa0_probe,
	.remove = s6d7aa0_remove,
	.driver = {
		.name = "panel-samsung-s6d7aa0",
		.of_match_table = s6d7aa0_of_match,
	},
};
module_mipi_dsi_driver(s6d7aa0_driver);

MODULE_AUTHOR("Artur Weber <[email protected]>");
MODULE_DESCRIPTION("Samsung S6D7AA0 MIPI-DSI LCD controller driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/gpu/drm/panel/panel-samsung-s6d7aa0.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2015 Heiko Schocher <[email protected]>
*
* from:
* drivers/gpu/drm/panel/panel-ld9040.c
* ld9040 AMOLED LCD drm_panel driver.
*
* Copyright (c) 2014 Samsung Electronics Co., Ltd
* Derived from drivers/video/backlight/ld9040.c
*
* Andrzej Hajda <[email protected]>
*/
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/regulator/consumer.h>
#include <linux/spi/spi.h>
#include <video/mipi_display.h>
#include <video/of_videomode.h>
#include <video/videomode.h>
#include <drm/drm_device.h>
#include <drm/drm_modes.h>
#include <drm/drm_panel.h>
/* Per-device state for the LG4573 SPI-controlled DPI panel. */
struct lg4573 {
	struct drm_panel panel;		/* embedded drm_panel (container_of) */
	struct spi_device *spi;		/* SPI link used for all commands */
	struct videomode vm;		/* unused here; kept for future use */
};
/* Map a drm_panel pointer back to its containing lg4573 state. */
static inline struct lg4573 *panel_to_lg4573(struct drm_panel *panel)
{
	return container_of(panel, struct lg4573, panel);
}
/*
 * Send one 16-bit word to the controller, big-endian on the wire
 * (command marker in the high byte, payload in the low byte).
 */
static int lg4573_spi_write_u16(struct lg4573 *ctx, u16 data)
{
	__be16 buf = cpu_to_be16(data);
	struct spi_transfer xfer = {
		.tx_buf = &buf,
		.len = 2,
	};
	struct spi_message msg;

	dev_dbg(ctx->panel.dev, "writing data: %x\n", data);

	spi_message_init_with_transfers(&msg, &xfer, 1);

	return spi_sync(ctx->spi, &msg);
}
/* Send a sequence of 16-bit words, stopping at the first SPI error. */
static int lg4573_spi_write_u16_array(struct lg4573 *ctx, const u16 *buffer,
				      unsigned int count)
{
	unsigned int idx;

	for (idx = 0; idx < count; idx++) {
		int err = lg4573_spi_write_u16(ctx, buffer[idx]);

		if (err)
			return err;
	}

	return 0;
}
/* Send a single DCS command byte, tagged with the 0x70 command marker. */
static int lg4573_spi_write_dcs(struct lg4573 *ctx, u8 dcs)
{
	return lg4573_spi_write_u16(ctx, 0x7000 | dcs);
}
/* Wake the panel from sleep mode and switch the display on. */
static int lg4573_display_on(struct lg4573 *ctx)
{
	int err;

	err = lg4573_spi_write_dcs(ctx, MIPI_DCS_EXIT_SLEEP_MODE);
	if (err)
		return err;

	/* Give the controller time to complete sleep-out. */
	msleep(5);

	return lg4573_spi_write_dcs(ctx, MIPI_DCS_SET_DISPLAY_ON);
}
/* Switch the display off, then put the controller into sleep mode. */
static int lg4573_display_off(struct lg4573 *ctx)
{
	int err;

	err = lg4573_spi_write_dcs(ctx, MIPI_DCS_SET_DISPLAY_OFF);
	if (err)
		return err;

	/* Wait before entering sleep so the display-off completes. */
	msleep(120);

	return lg4573_spi_write_dcs(ctx, MIPI_DCS_ENTER_SLEEP_MODE);
}
/*
 * Transfer the vendor-supplied display-mode register sequence.
 * The 0x70xx words are commands, 0x72xx words are parameter bytes.
 */
static int lg4573_display_mode_settings(struct lg4573 *ctx)
{
	static const u16 display_mode_settings[] = {
		0x703A, 0x7270, 0x70B1, 0x7208,
		0x723B, 0x720F, 0x70B2, 0x7200,
		0x72C8, 0x70B3, 0x7200, 0x70B4,
		0x7200, 0x70B5, 0x7242, 0x7210,
		0x7210, 0x7200, 0x7220, 0x70B6,
		0x720B, 0x720F, 0x723C, 0x7213,
		0x7213, 0x72E8, 0x70B7, 0x7246,
		0x7206, 0x720C, 0x7200, 0x7200,
	};

	dev_dbg(ctx->panel.dev, "transfer display mode settings\n");
	return lg4573_spi_write_u16_array(ctx, display_mode_settings,
					  ARRAY_SIZE(display_mode_settings));
}
/* Transfer the vendor-supplied power-control register sequence. */
static int lg4573_power_settings(struct lg4573 *ctx)
{
	static const u16 power_settings[] = {
		0x70C0, 0x7201, 0x7211, 0x70C3,
		0x7207, 0x7203, 0x7204, 0x7204,
		0x7204, 0x70C4, 0x7212, 0x7224,
		0x7218, 0x7218, 0x7202, 0x7249,
		0x70C5, 0x726F, 0x70C6, 0x7241,
		0x7263,
	};

	dev_dbg(ctx->panel.dev, "transfer power settings\n");
	return lg4573_spi_write_u16_array(ctx, power_settings,
					  ARRAY_SIZE(power_settings));
}
/* Transfer the vendor-supplied gamma tables (registers 0xD0..0xD5). */
static int lg4573_gamma_settings(struct lg4573 *ctx)
{
	static const u16 gamma_settings[] = {
		0x70D0, 0x7203, 0x7207, 0x7273,
		0x7235, 0x7200, 0x7201, 0x7220,
		0x7200, 0x7203, 0x70D1, 0x7203,
		0x7207, 0x7273, 0x7235, 0x7200,
		0x7201, 0x7220, 0x7200, 0x7203,
		0x70D2, 0x7203, 0x7207, 0x7273,
		0x7235, 0x7200, 0x7201, 0x7220,
		0x7200, 0x7203, 0x70D3, 0x7203,
		0x7207, 0x7273, 0x7235, 0x7200,
		0x7201, 0x7220, 0x7200, 0x7203,
		0x70D4, 0x7203, 0x7207, 0x7273,
		0x7235, 0x7200, 0x7201, 0x7220,
		0x7200, 0x7203, 0x70D5, 0x7203,
		0x7207, 0x7273, 0x7235, 0x7200,
		0x7201, 0x7220, 0x7200, 0x7203,
	};

	dev_dbg(ctx->panel.dev, "transfer gamma settings\n");
	return lg4573_spi_write_u16_array(ctx, gamma_settings,
					  ARRAY_SIZE(gamma_settings));
}
/* Run the full panel initialization: display mode, power, then gamma. */
static int lg4573_init(struct lg4573 *ctx)
{
	int err;

	dev_dbg(ctx->panel.dev, "initializing LCD\n");

	err = lg4573_display_mode_settings(ctx);
	if (err)
		return err;

	err = lg4573_power_settings(ctx);
	if (err)
		return err;

	return lg4573_gamma_settings(ctx);
}
/* Thin wrapper: powering on currently just means turning the display on. */
static int lg4573_power_on(struct lg4573 *ctx)
{
	return lg4573_display_on(ctx);
}
/* drm_panel .disable hook: blank the display and enter sleep mode. */
static int lg4573_disable(struct drm_panel *panel)
{
	struct lg4573 *ctx = panel_to_lg4573(panel);

	return lg4573_display_off(ctx);
}
/*
 * drm_panel .enable hook: program the init sequence, then power on.
 *
 * Fix: the return value of lg4573_init() was previously ignored, so an
 * SPI failure during initialization went unreported and the driver
 * proceeded to power on an unconfigured panel.
 */
static int lg4573_enable(struct drm_panel *panel)
{
	struct lg4573 *ctx = panel_to_lg4573(panel);
	int ret;

	ret = lg4573_init(ctx);
	if (ret)
		return ret;

	return lg4573_power_on(ctx);
}
/* Fixed 480x800 mode of the LG4573 panel; clock in kHz. */
static const struct drm_display_mode default_mode = {
	.clock = 28341,
	.hdisplay = 480,
	.hsync_start = 480 + 10,	/* hdisplay + HFP */
	.hsync_end = 480 + 10 + 59,	/* + hsync width */
	.htotal = 480 + 10 + 59 + 10,	/* + HBP */
	.vdisplay = 800,
	.vsync_start = 800 + 15,	/* vdisplay + VFP */
	.vsync_end = 800 + 15 + 15,	/* + vsync width */
	.vtotal = 800 + 15 + 15 + 15,	/* + VBP */
};
/*
 * drm_panel .get_modes hook: expose the single fixed mode and report the
 * physical panel dimensions.
 *
 * Fix: the error format string contained a stray 'x' ("%ux%ux@%u"),
 * which printed e.g. "480x800x@60"; corrected to "%ux%u@%u".
 */
static int lg4573_get_modes(struct drm_panel *panel,
			    struct drm_connector *connector)
{
	struct drm_display_mode *mode;

	mode = drm_mode_duplicate(connector->dev, &default_mode);
	if (!mode) {
		dev_err(panel->dev, "failed to add mode %ux%u@%u\n",
			default_mode.hdisplay, default_mode.vdisplay,
			drm_mode_vrefresh(&default_mode));
		return -ENOMEM;
	}

	drm_mode_set_name(mode);

	mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
	drm_mode_probed_add(connector, mode);

	/* Physical panel size in millimetres. */
	connector->display_info.width_mm = 61;
	connector->display_info.height_mm = 103;

	return 1;
}
/* drm_panel operations for the LG4573. */
static const struct drm_panel_funcs lg4573_drm_funcs = {
	.disable = lg4573_disable,
	.enable = lg4573_enable,
	.get_modes = lg4573_get_modes,
};
/*
 * SPI probe: configure the 8-bit SPI link and register the panel.
 * The controller itself is only programmed later, in .enable.
 */
static int lg4573_probe(struct spi_device *spi)
{
	struct lg4573 *ctx;
	int ret;

	ctx = devm_kzalloc(&spi->dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->spi = spi;

	spi_set_drvdata(spi, ctx);
	spi->bits_per_word = 8;

	ret = spi_setup(spi);
	if (ret < 0) {
		dev_err(&spi->dev, "SPI setup failed: %d\n", ret);
		return ret;
	}

	drm_panel_init(&ctx->panel, &spi->dev, &lg4573_drm_funcs,
		       DRM_MODE_CONNECTOR_DPI);
	drm_panel_add(&ctx->panel);

	return 0;
}
/* SPI .remove: blank the display, then unregister the panel. */
static void lg4573_remove(struct spi_device *spi)
{
	struct lg4573 *ctx = spi_get_drvdata(spi);

	lg4573_display_off(ctx);
	drm_panel_remove(&ctx->panel);
}
/* OF match table for the single supported compatible. */
static const struct of_device_id lg4573_of_match[] = {
	{ .compatible = "lg,lg4573" },
	{ }
};
MODULE_DEVICE_TABLE(of, lg4573_of_match);
/* SPI driver glue: probe/remove above, matched via the OF table. */
static struct spi_driver lg4573_driver = {
	.probe = lg4573_probe,
	.remove = lg4573_remove,
	.driver = {
		.name = "lg4573",
		.of_match_table = lg4573_of_match,
	},
};
module_spi_driver(lg4573_driver);

MODULE_AUTHOR("Heiko Schocher <[email protected]>")
MODULE_DESCRIPTION("lg4573 LCD Driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/gpu/drm/panel/panel-lg-lg4573.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Xinpeng xpp055c272 5.5" MIPI-DSI panel driver
* Copyright (C) 2019 Theobroma Systems Design und Consulting GmbH
*
* based on
*
* Rockteck jh057n00900 5.5" MIPI-DSI panel driver
* Copyright (C) Purism SPC 2019
*/
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_modes.h>
#include <drm/drm_panel.h>
#include <video/display_timing.h>
#include <video/mipi_display.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/media-bus-format.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/regulator/consumer.h>
/* Manufacturer-specific commands sent via DSI */
#define XPP055C272_CMD_ALL_PIXEL_OFF 0x22
#define XPP055C272_CMD_ALL_PIXEL_ON 0x23
#define XPP055C272_CMD_SETDISP 0xb2
#define XPP055C272_CMD_SETRGBIF 0xb3
#define XPP055C272_CMD_SETCYC 0xb4
#define XPP055C272_CMD_SETBGP 0xb5
#define XPP055C272_CMD_SETVCOM 0xb6
#define XPP055C272_CMD_SETOTP 0xb7
#define XPP055C272_CMD_SETPOWER_EXT 0xb8
#define XPP055C272_CMD_SETEXTC 0xb9
#define XPP055C272_CMD_SETMIPI 0xbA
#define XPP055C272_CMD_SETVDC 0xbc
#define XPP055C272_CMD_SETPCR 0xbf
#define XPP055C272_CMD_SETSCR 0xc0
#define XPP055C272_CMD_SETPOWER 0xc1
#define XPP055C272_CMD_SETECO 0xc6
#define XPP055C272_CMD_SETPANEL 0xcc
#define XPP055C272_CMD_SETGAMMA 0xe0
#define XPP055C272_CMD_SETEQ 0xe3
#define XPP055C272_CMD_SETGIP1 0xe9
#define XPP055C272_CMD_SETGIP2 0xea
/* Per-device state for the Xinpeng XPP055C272 DSI panel. */
struct xpp055c272 {
	struct device *dev;		/* underlying DSI device */
	struct drm_panel panel;		/* embedded drm_panel (container_of) */
	struct gpio_desc *reset_gpio;	/* optional active-low reset line */
	struct regulator *vci;		/* analog supply */
	struct regulator *iovcc;	/* I/O supply */
	bool prepared;			/* guards prepare/unprepare re-entry */
};
/* Map a drm_panel pointer back to its containing xpp055c272 state. */
static inline struct xpp055c272 *panel_to_xpp055c272(struct drm_panel *panel)
{
	return container_of(panel, struct xpp055c272, panel);
}
/*
 * Send the vendor initialization sequence over DSI. The register values
 * are opaque vendor data; command order and the embedded delays must be
 * preserved. Always returns 0 (the write macros do not report errors).
 */
static int xpp055c272_init_sequence(struct xpp055c272 *ctx)
{
	struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
	struct device *dev = ctx->dev;

	/*
	 * Init sequence was supplied by the panel vendor without much
	 * documentation.
	 */
	mipi_dsi_dcs_write_seq(dsi, XPP055C272_CMD_SETEXTC, 0xf1, 0x12, 0x83);
	mipi_dsi_dcs_write_seq(dsi, XPP055C272_CMD_SETMIPI,
			       0x33, 0x81, 0x05, 0xf9, 0x0e, 0x0e, 0x00, 0x00,
			       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x44, 0x25,
			       0x00, 0x91, 0x0a, 0x00, 0x00, 0x02, 0x4f, 0x01,
			       0x00, 0x00, 0x37);
	mipi_dsi_dcs_write_seq(dsi, XPP055C272_CMD_SETPOWER_EXT, 0x25);
	mipi_dsi_dcs_write_seq(dsi, XPP055C272_CMD_SETPCR, 0x02, 0x11, 0x00);
	mipi_dsi_dcs_write_seq(dsi, XPP055C272_CMD_SETRGBIF,
			       0x0c, 0x10, 0x0a, 0x50, 0x03, 0xff, 0x00, 0x00,
			       0x00, 0x00);
	mipi_dsi_dcs_write_seq(dsi, XPP055C272_CMD_SETSCR,
			       0x73, 0x73, 0x50, 0x50, 0x00, 0x00, 0x08, 0x70,
			       0x00);
	mipi_dsi_dcs_write_seq(dsi, XPP055C272_CMD_SETVDC, 0x46);
	mipi_dsi_dcs_write_seq(dsi, XPP055C272_CMD_SETPANEL, 0x0b);
	mipi_dsi_dcs_write_seq(dsi, XPP055C272_CMD_SETCYC, 0x80);
	mipi_dsi_dcs_write_seq(dsi, XPP055C272_CMD_SETDISP, 0xc8, 0x12, 0x30);
	mipi_dsi_dcs_write_seq(dsi, XPP055C272_CMD_SETEQ,
			       0x07, 0x07, 0x0B, 0x0B, 0x03, 0x0B, 0x00, 0x00,
			       0x00, 0x00, 0xFF, 0x00, 0xC0, 0x10);
	mipi_dsi_dcs_write_seq(dsi, XPP055C272_CMD_SETPOWER,
			       0x53, 0x00, 0x1e, 0x1e, 0x77, 0xe1, 0xcc, 0xdd,
			       0x67, 0x77, 0x33, 0x33);
	mipi_dsi_dcs_write_seq(dsi, XPP055C272_CMD_SETECO, 0x00, 0x00, 0xff,
			       0xff, 0x01, 0xff);
	mipi_dsi_dcs_write_seq(dsi, XPP055C272_CMD_SETBGP, 0x09, 0x09);

	/* Vendor-specified settle delay after the BGP setting. */
	msleep(20);

	mipi_dsi_dcs_write_seq(dsi, XPP055C272_CMD_SETVCOM, 0x87, 0x95);
	mipi_dsi_dcs_write_seq(dsi, XPP055C272_CMD_SETGIP1,
			       0xc2, 0x10, 0x05, 0x05, 0x10, 0x05, 0xa0, 0x12,
			       0x31, 0x23, 0x3f, 0x81, 0x0a, 0xa0, 0x37, 0x18,
			       0x00, 0x80, 0x01, 0x00, 0x00, 0x00, 0x00, 0x80,
			       0x01, 0x00, 0x00, 0x00, 0x48, 0xf8, 0x86, 0x42,
			       0x08, 0x88, 0x88, 0x80, 0x88, 0x88, 0x88, 0x58,
			       0xf8, 0x87, 0x53, 0x18, 0x88, 0x88, 0x81, 0x88,
			       0x88, 0x88, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00,
			       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
	mipi_dsi_dcs_write_seq(dsi, XPP055C272_CMD_SETGIP2,
			       0x00, 0x1a, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00,
			       0x00, 0x00, 0x00, 0x00, 0x1f, 0x88, 0x81, 0x35,
			       0x78, 0x88, 0x88, 0x85, 0x88, 0x88, 0x88, 0x0f,
			       0x88, 0x80, 0x24, 0x68, 0x88, 0x88, 0x84, 0x88,
			       0x88, 0x88, 0x23, 0x10, 0x00, 0x00, 0x1c, 0x00,
			       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x05,
			       0xa0, 0x00, 0x00, 0x00, 0x00);
	mipi_dsi_dcs_write_seq(dsi, XPP055C272_CMD_SETGAMMA,
			       0x00, 0x06, 0x08, 0x2a, 0x31, 0x3f, 0x38, 0x36,
			       0x07, 0x0c, 0x0d, 0x11, 0x13, 0x12, 0x13, 0x11,
			       0x18, 0x00, 0x06, 0x08, 0x2a, 0x31, 0x3f, 0x38,
			       0x36, 0x07, 0x0c, 0x0d, 0x11, 0x13, 0x12, 0x13,
			       0x11, 0x18);

	/* Vendor-specified settle delay after the gamma tables. */
	msleep(60);

	dev_dbg(dev, "Panel init sequence done\n");

	return 0;
}
/*
 * drm_panel .unprepare hook: blank the panel, enter sleep mode and cut
 * both supplies.
 *
 * Fix: the return value of mipi_dsi_dcs_enter_sleep_mode() was not
 * assigned to 'ret', so the following error check tested the stale value
 * from mipi_dsi_dcs_set_display_off() instead.
 */
static int xpp055c272_unprepare(struct drm_panel *panel)
{
	struct xpp055c272 *ctx = panel_to_xpp055c272(panel);
	struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
	int ret;

	if (!ctx->prepared)
		return 0;

	/* Best effort: log but continue so the supplies still get cut. */
	ret = mipi_dsi_dcs_set_display_off(dsi);
	if (ret < 0)
		dev_err(ctx->dev, "failed to set display off: %d\n", ret);

	ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
	if (ret < 0) {
		dev_err(ctx->dev, "failed to enter sleep mode: %d\n", ret);
		return ret;
	}

	regulator_disable(ctx->iovcc);
	regulator_disable(ctx->vci);

	ctx->prepared = false;

	return 0;
}
/*
 * drm_panel .prepare hook: power up both supplies, pulse the reset line,
 * run the vendor init sequence and bring the panel out of sleep. Timing
 * comments (T6/T8/T9) follow the vendor's power-on sequence labels.
 */
static int xpp055c272_prepare(struct drm_panel *panel)
{
	struct xpp055c272 *ctx = panel_to_xpp055c272(panel);
	struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
	int ret;

	if (ctx->prepared)
		return 0;

	dev_dbg(ctx->dev, "Resetting the panel\n");
	ret = regulator_enable(ctx->vci);
	if (ret < 0) {
		dev_err(ctx->dev, "Failed to enable vci supply: %d\n", ret);
		return ret;
	}
	ret = regulator_enable(ctx->iovcc);
	if (ret < 0) {
		dev_err(ctx->dev, "Failed to enable iovcc supply: %d\n", ret);
		goto disable_vci;
	}

	/* Pulse reset: assert briefly, then release and wait. */
	gpiod_set_value_cansleep(ctx->reset_gpio, 1);
	/* T6: 10us */
	usleep_range(10, 20);
	gpiod_set_value_cansleep(ctx->reset_gpio, 0);

	/* T8: 20ms */
	msleep(20);

	ret = xpp055c272_init_sequence(ctx);
	if (ret < 0) {
		dev_err(ctx->dev, "Panel init sequence failed: %d\n", ret);
		goto disable_iovcc;
	}

	ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
	if (ret < 0) {
		dev_err(ctx->dev, "Failed to exit sleep mode: %d\n", ret);
		goto disable_iovcc;
	}

	/* T9: 120ms */
	msleep(120);

	ret = mipi_dsi_dcs_set_display_on(dsi);
	if (ret < 0) {
		dev_err(ctx->dev, "Failed to set display on: %d\n", ret);
		goto disable_iovcc;
	}

	msleep(50);

	ctx->prepared = true;

	return 0;

disable_iovcc:
	regulator_disable(ctx->iovcc);
disable_vci:
	regulator_disable(ctx->vci);
	return ret;
}
/* Fixed 720x1280 mode of the XPP055C272; clock in kHz, size in mm. */
static const struct drm_display_mode default_mode = {
	.hdisplay = 720,
	.hsync_start = 720 + 40,	/* hdisplay + HFP */
	.hsync_end = 720 + 40 + 10,	/* + hsync width */
	.htotal = 720 + 40 + 10 + 40,	/* + HBP */
	.vdisplay = 1280,
	.vsync_start = 1280 + 22,	/* vdisplay + VFP */
	.vsync_end = 1280 + 22 + 4,	/* + vsync width */
	.vtotal = 1280 + 22 + 4 + 11,	/* + VBP */
	.clock = 64000,
	.width_mm = 68,
	.height_mm = 121,
};
/*
 * drm_panel .get_modes hook: expose the single fixed mode.
 * Returns 1 on success, -ENOMEM if the mode cannot be duplicated.
 */
static int xpp055c272_get_modes(struct drm_panel *panel,
				struct drm_connector *connector)
{
	struct xpp055c272 *ctx = panel_to_xpp055c272(panel);
	struct drm_display_mode *dup;

	dup = drm_mode_duplicate(connector->dev, &default_mode);
	if (!dup) {
		dev_err(ctx->dev, "Failed to add mode %ux%u@%u\n",
			default_mode.hdisplay, default_mode.vdisplay,
			drm_mode_vrefresh(&default_mode));
		return -ENOMEM;
	}

	dup->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
	drm_mode_set_name(dup);

	connector->display_info.width_mm = dup->width_mm;
	connector->display_info.height_mm = dup->height_mm;
	drm_mode_probed_add(connector, dup);

	return 1;
}
/* drm_panel operations for the XPP055C272. */
static const struct drm_panel_funcs xpp055c272_funcs = {
	.unprepare = xpp055c272_unprepare,
	.prepare = xpp055c272_prepare,
	.get_modes = xpp055c272_get_modes,
};
/*
 * MIPI-DSI probe: acquire the optional reset GPIO and both supplies,
 * configure the DSI link, register the panel and attach to the host.
 */
static int xpp055c272_probe(struct mipi_dsi_device *dsi)
{
	struct device *dev = &dsi->dev;
	struct xpp055c272 *ctx;
	int ret;

	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
	if (IS_ERR(ctx->reset_gpio))
		return dev_err_probe(dev, PTR_ERR(ctx->reset_gpio),
				     "cannot get reset gpio\n");

	ctx->vci = devm_regulator_get(dev, "vci");
	if (IS_ERR(ctx->vci))
		return dev_err_probe(dev, PTR_ERR(ctx->vci),
				     "Failed to request vci regulator\n");

	ctx->iovcc = devm_regulator_get(dev, "iovcc");
	if (IS_ERR(ctx->iovcc))
		return dev_err_probe(dev, PTR_ERR(ctx->iovcc),
				     "Failed to request iovcc regulator\n");

	mipi_dsi_set_drvdata(dsi, ctx);

	ctx->dev = dev;

	/* Fixed 4-lane RGB888 burst video-mode link. */
	dsi->lanes = 4;
	dsi->format = MIPI_DSI_FMT_RGB888;
	dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
			  MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_NO_EOT_PACKET;

	drm_panel_init(&ctx->panel, &dsi->dev, &xpp055c272_funcs,
		       DRM_MODE_CONNECTOR_DSI);

	ret = drm_panel_of_backlight(&ctx->panel);
	if (ret)
		return ret;

	drm_panel_add(&ctx->panel);

	ret = mipi_dsi_attach(dsi);
	if (ret < 0) {
		dev_err(dev, "mipi_dsi_attach failed: %d\n", ret);
		drm_panel_remove(&ctx->panel);
		return ret;
	}

	return 0;
}
/* Power the panel down on system shutdown (best effort, errors logged). */
static void xpp055c272_shutdown(struct mipi_dsi_device *dsi)
{
	struct xpp055c272 *ctx = mipi_dsi_get_drvdata(dsi);
	int err;

	err = drm_panel_unprepare(&ctx->panel);
	if (err < 0)
		dev_err(&dsi->dev, "Failed to unprepare panel: %d\n", err);

	err = drm_panel_disable(&ctx->panel);
	if (err < 0)
		dev_err(&dsi->dev, "Failed to disable panel: %d\n", err);
}
/* Driver .remove: power down, detach from the host, drop the panel. */
static void xpp055c272_remove(struct mipi_dsi_device *dsi)
{
	struct xpp055c272 *ctx = mipi_dsi_get_drvdata(dsi);
	int err;

	xpp055c272_shutdown(dsi);

	err = mipi_dsi_detach(dsi);
	if (err < 0)
		dev_err(&dsi->dev, "Failed to detach from DSI host: %d\n", err);

	drm_panel_remove(&ctx->panel);
}
/* OF match table for the single supported compatible. */
static const struct of_device_id xpp055c272_of_match[] = {
	{ .compatible = "xinpeng,xpp055c272" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, xpp055c272_of_match);
/* MIPI-DSI driver glue; .shutdown ensures the panel is powered down. */
static struct mipi_dsi_driver xpp055c272_driver = {
	.driver = {
		.name = "panel-xinpeng-xpp055c272",
		.of_match_table = xpp055c272_of_match,
	},
	.probe	= xpp055c272_probe,
	.remove = xpp055c272_remove,
	.shutdown = xpp055c272_shutdown,
};
module_mipi_dsi_driver(xpp055c272_driver);

MODULE_AUTHOR("Heiko Stuebner <[email protected]>");
MODULE_DESCRIPTION("DRM driver for Xinpeng xpp055c272 MIPI DSI panel");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/gpu/drm/panel/panel-xinpeng-xpp055c272.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (c) 2019 Theobroma Systems Design und Consulting GmbH
*
* base on panel-kingdisplay-kd097d04.c
* Copyright (c) 2017, Fuzhou Rockchip Electronics Co., Ltd
*/
#include <linux/backlight.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/regulator/consumer.h>
#include <video/mipi_display.h>
#include <drm/drm_crtc.h>
#include <drm/drm_device.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_modes.h>
#include <drm/drm_panel.h>
/* Per-device state for the Leadtek LTK500HD1829 DSI panel. */
struct ltk500hd1829 {
	struct device *dev;		/* underlying DSI device */
	struct drm_panel panel;		/* embedded drm_panel (container_of) */
	struct gpio_desc *reset_gpio;	/* optional reset line */
	struct regulator *vcc;		/* main supply */
	struct regulator *iovcc;	/* I/O supply */
	bool prepared;			/* guards prepare/unprepare re-entry */
};
/* One register write of the vendor init table: register, then value. */
struct ltk500hd1829_cmd {
	char cmd;
	char data;
};
/*
* There is no description in the Reference Manual about these commands.
* We received them from the vendor, so just use them as is.
*/
static const struct ltk500hd1829_cmd init_code[] = {
{ 0xE0, 0x00 },
{ 0xE1, 0x93 },
{ 0xE2, 0x65 },
{ 0xE3, 0xF8 },
{ 0x80, 0x03 },
{ 0xE0, 0x04 },
{ 0x2D, 0x03 },
{ 0xE0, 0x01 },
{ 0x00, 0x00 },
{ 0x01, 0xB6 },
{ 0x03, 0x00 },
{ 0x04, 0xC5 },
{ 0x17, 0x00 },
{ 0x18, 0xBF },
{ 0x19, 0x01 },
{ 0x1A, 0x00 },
{ 0x1B, 0xBF },
{ 0x1C, 0x01 },
{ 0x1F, 0x7C },
{ 0x20, 0x26 },
{ 0x21, 0x26 },
{ 0x22, 0x4E },
{ 0x37, 0x09 },
{ 0x38, 0x04 },
{ 0x39, 0x08 },
{ 0x3A, 0x1F },
{ 0x3B, 0x1F },
{ 0x3C, 0x78 },
{ 0x3D, 0xFF },
{ 0x3E, 0xFF },
{ 0x3F, 0x00 },
{ 0x40, 0x04 },
{ 0x41, 0xA0 },
{ 0x43, 0x0F },
{ 0x44, 0x0A },
{ 0x45, 0x24 },
{ 0x55, 0x01 },
{ 0x56, 0x01 },
{ 0x57, 0xA5 },
{ 0x58, 0x0A },
{ 0x59, 0x4A },
{ 0x5A, 0x38 },
{ 0x5B, 0x10 },
{ 0x5C, 0x19 },
{ 0x5D, 0x7C },
{ 0x5E, 0x64 },
{ 0x5F, 0x54 },
{ 0x60, 0x48 },
{ 0x61, 0x44 },
{ 0x62, 0x35 },
{ 0x63, 0x3A },
{ 0x64, 0x24 },
{ 0x65, 0x3B },
{ 0x66, 0x39 },
{ 0x67, 0x37 },
{ 0x68, 0x56 },
{ 0x69, 0x41 },
{ 0x6A, 0x47 },
{ 0x6B, 0x2F },
{ 0x6C, 0x23 },
{ 0x6D, 0x13 },
{ 0x6E, 0x02 },
{ 0x6F, 0x08 },
{ 0x70, 0x7C },
{ 0x71, 0x64 },
{ 0x72, 0x54 },
{ 0x73, 0x48 },
{ 0x74, 0x44 },
{ 0x75, 0x35 },
{ 0x76, 0x3A },
{ 0x77, 0x22 },
{ 0x78, 0x3B },
{ 0x79, 0x39 },
{ 0x7A, 0x38 },
{ 0x7B, 0x52 },
{ 0x7C, 0x41 },
{ 0x7D, 0x47 },
{ 0x7E, 0x2F },
{ 0x7F, 0x23 },
{ 0x80, 0x13 },
{ 0x81, 0x02 },
{ 0x82, 0x08 },
{ 0xE0, 0x02 },
{ 0x00, 0x57 },
{ 0x01, 0x77 },
{ 0x02, 0x44 },
{ 0x03, 0x46 },
{ 0x04, 0x48 },
{ 0x05, 0x4A },
{ 0x06, 0x4C },
{ 0x07, 0x4E },
{ 0x08, 0x50 },
{ 0x09, 0x55 },
{ 0x0A, 0x52 },
{ 0x0B, 0x55 },
{ 0x0C, 0x55 },
{ 0x0D, 0x55 },
{ 0x0E, 0x55 },
{ 0x0F, 0x55 },
{ 0x10, 0x55 },
{ 0x11, 0x55 },
{ 0x12, 0x55 },
{ 0x13, 0x40 },
{ 0x14, 0x55 },
{ 0x15, 0x55 },
{ 0x16, 0x57 },
{ 0x17, 0x77 },
{ 0x18, 0x45 },
{ 0x19, 0x47 },
{ 0x1A, 0x49 },
{ 0x1B, 0x4B },
{ 0x1C, 0x4D },
{ 0x1D, 0x4F },
{ 0x1E, 0x51 },
{ 0x1F, 0x55 },
{ 0x20, 0x53 },
{ 0x21, 0x55 },
{ 0x22, 0x55 },
{ 0x23, 0x55 },
{ 0x24, 0x55 },
{ 0x25, 0x55 },
{ 0x26, 0x55 },
{ 0x27, 0x55 },
{ 0x28, 0x55 },
{ 0x29, 0x41 },
{ 0x2A, 0x55 },
{ 0x2B, 0x55 },
{ 0x2C, 0x57 },
{ 0x2D, 0x77 },
{ 0x2E, 0x4F },
{ 0x2F, 0x4D },
{ 0x30, 0x4B },
{ 0x31, 0x49 },
{ 0x32, 0x47 },
{ 0x33, 0x45 },
{ 0x34, 0x41 },
{ 0x35, 0x55 },
{ 0x36, 0x53 },
{ 0x37, 0x55 },
{ 0x38, 0x55 },
{ 0x39, 0x55 },
{ 0x3A, 0x55 },
{ 0x3B, 0x55 },
{ 0x3C, 0x55 },
{ 0x3D, 0x55 },
{ 0x3E, 0x55 },
{ 0x3F, 0x51 },
{ 0x40, 0x55 },
{ 0x41, 0x55 },
{ 0x42, 0x57 },
{ 0x43, 0x77 },
{ 0x44, 0x4E },
{ 0x45, 0x4C },
{ 0x46, 0x4A },
{ 0x47, 0x48 },
{ 0x48, 0x46 },
{ 0x49, 0x44 },
{ 0x4A, 0x40 },
{ 0x4B, 0x55 },
{ 0x4C, 0x52 },
{ 0x4D, 0x55 },
{ 0x4E, 0x55 },
{ 0x4F, 0x55 },
{ 0x50, 0x55 },
{ 0x51, 0x55 },
{ 0x52, 0x55 },
{ 0x53, 0x55 },
{ 0x54, 0x55 },
{ 0x55, 0x50 },
{ 0x56, 0x55 },
{ 0x57, 0x55 },
{ 0x58, 0x40 },
{ 0x59, 0x00 },
{ 0x5A, 0x00 },
{ 0x5B, 0x10 },
{ 0x5C, 0x09 },
{ 0x5D, 0x30 },
{ 0x5E, 0x01 },
{ 0x5F, 0x02 },
{ 0x60, 0x30 },
{ 0x61, 0x03 },
{ 0x62, 0x04 },
{ 0x63, 0x06 },
{ 0x64, 0x6A },
{ 0x65, 0x75 },
{ 0x66, 0x0F },
{ 0x67, 0xB3 },
{ 0x68, 0x0B },
{ 0x69, 0x06 },
{ 0x6A, 0x6A },
{ 0x6B, 0x10 },
{ 0x6C, 0x00 },
{ 0x6D, 0x04 },
{ 0x6E, 0x04 },
{ 0x6F, 0x88 },
{ 0x70, 0x00 },
{ 0x71, 0x00 },
{ 0x72, 0x06 },
{ 0x73, 0x7B },
{ 0x74, 0x00 },
{ 0x75, 0xBC },
{ 0x76, 0x00 },
{ 0x77, 0x05 },
{ 0x78, 0x2E },
{ 0x79, 0x00 },
{ 0x7A, 0x00 },
{ 0x7B, 0x00 },
{ 0x7C, 0x00 },
{ 0x7D, 0x03 },
{ 0x7E, 0x7B },
{ 0xE0, 0x04 },
{ 0x09, 0x10 },
{ 0x2B, 0x2B },
{ 0x2E, 0x44 },
{ 0xE0, 0x00 },
{ 0xE6, 0x02 },
{ 0xE7, 0x02 },
{ 0x35, 0x00 },
};
/* Map a drm_panel pointer back to its containing ltk500hd1829 state. */
static inline
struct ltk500hd1829 *panel_to_ltk500hd1829(struct drm_panel *panel)
{
	return container_of(panel, struct ltk500hd1829, panel);
}
/*
 * drm_panel .unprepare hook: blank the panel, enter sleep mode and cut
 * both supplies. DCS errors are logged but do not abort the power-down.
 */
static int ltk500hd1829_unprepare(struct drm_panel *panel)
{
	struct ltk500hd1829 *ctx = panel_to_ltk500hd1829(panel);
	struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
	int err;

	if (!ctx->prepared)
		return 0;

	err = mipi_dsi_dcs_set_display_off(dsi);
	if (err < 0)
		dev_err(panel->dev, "failed to set display off: %d\n", err);

	err = mipi_dsi_dcs_enter_sleep_mode(dsi);
	if (err < 0)
		dev_err(panel->dev, "failed to enter sleep mode: %d\n", err);

	/* 120ms to enter sleep mode */
	msleep(120);

	regulator_disable(ctx->iovcc);
	regulator_disable(ctx->vcc);

	ctx->prepared = false;

	return 0;
}
/*
 * drm_panel .prepare hook: power up both supplies, pulse reset, upload
 * the vendor init table and bring the panel out of sleep. The timing
 * labels (tRW/tRT) follow the vendor's reset sequence documentation.
 */
static int ltk500hd1829_prepare(struct drm_panel *panel)
{
	struct ltk500hd1829 *ctx = panel_to_ltk500hd1829(panel);
	struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
	unsigned int i;
	int ret;

	if (ctx->prepared)
		return 0;

	ret = regulator_enable(ctx->vcc);
	if (ret < 0) {
		dev_err(ctx->dev, "Failed to enable vci supply: %d\n", ret);
		return ret;
	}
	ret = regulator_enable(ctx->iovcc);
	if (ret < 0) {
		dev_err(ctx->dev, "Failed to enable iovcc supply: %d\n", ret);
		goto disable_vcc;
	}

	gpiod_set_value_cansleep(ctx->reset_gpio, 1);
	/* tRW: 10us */
	usleep_range(10, 20);
	gpiod_set_value_cansleep(ctx->reset_gpio, 0);
	/* tRT: >= 5ms */
	usleep_range(5000, 6000);

	/* Upload each {register, value} pair of the vendor init table. */
	for (i = 0; i < ARRAY_SIZE(init_code); i++) {
		ret = mipi_dsi_generic_write(dsi, &init_code[i],
					     sizeof(struct ltk500hd1829_cmd));
		if (ret < 0) {
			dev_err(panel->dev, "failed to write init cmds: %d\n", ret);
			goto disable_iovcc;
		}
	}

	ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
	if (ret < 0) {
		dev_err(panel->dev, "failed to exit sleep mode: %d\n", ret);
		goto disable_iovcc;
	}

	/* 120ms to exit sleep mode */
	msleep(120);

	ret = mipi_dsi_dcs_set_display_on(dsi);
	if (ret < 0) {
		dev_err(panel->dev, "failed to set display on: %d\n", ret);
		goto disable_iovcc;
	}

	ctx->prepared = true;

	return 0;

disable_iovcc:
	regulator_disable(ctx->iovcc);
disable_vcc:
	regulator_disable(ctx->vcc);
	return ret;
}
/* Fixed 720x1280 mode of the LTK500HD1829; clock in kHz, size in mm. */
static const struct drm_display_mode default_mode = {
	.hdisplay = 720,
	.hsync_start = 720 + 50,	/* hdisplay + HFP */
	.hsync_end = 720 + 50 + 50,	/* + hsync width */
	.htotal = 720 + 50 + 50 + 50,	/* + HBP */
	.vdisplay = 1280,
	.vsync_start = 1280 + 30,	/* vdisplay + VFP */
	.vsync_end = 1280 + 30 + 4,	/* + vsync width */
	.vtotal = 1280 + 30 + 4 + 12,	/* + VBP */
	.clock = 69217,
	.width_mm = 62,
	.height_mm = 110,
};
/*
 * drm_panel .get_modes hook: expose the single fixed mode.
 * Returns 1 on success, -ENOMEM if the mode cannot be duplicated.
 */
static int ltk500hd1829_get_modes(struct drm_panel *panel,
				  struct drm_connector *connector)
{
	struct ltk500hd1829 *ctx = panel_to_ltk500hd1829(panel);
	struct drm_display_mode *dup;

	dup = drm_mode_duplicate(connector->dev, &default_mode);
	if (!dup) {
		dev_err(ctx->dev, "failed to add mode %ux%u@%u\n",
			default_mode.hdisplay, default_mode.vdisplay,
			drm_mode_vrefresh(&default_mode));
		return -ENOMEM;
	}

	dup->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
	drm_mode_set_name(dup);

	connector->display_info.width_mm = dup->width_mm;
	connector->display_info.height_mm = dup->height_mm;
	drm_mode_probed_add(connector, dup);

	return 1;
}
/* drm_panel operations for the LTK500HD1829. */
static const struct drm_panel_funcs ltk500hd1829_funcs = {
	.unprepare = ltk500hd1829_unprepare,
	.prepare = ltk500hd1829_prepare,
	.get_modes = ltk500hd1829_get_modes,
};
static int ltk500hd1829_probe(struct mipi_dsi_device *dsi)
{
struct ltk500hd1829 *ctx;
struct device *dev = &dsi->dev;
int ret;
ctx = devm_kzalloc(&dsi->dev, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
ctx->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(ctx->reset_gpio)) {
dev_err(dev, "cannot get reset gpio\n");
return PTR_ERR(ctx->reset_gpio);
}
ctx->vcc = devm_regulator_get(dev, "vcc");
if (IS_ERR(ctx->vcc)) {
ret = PTR_ERR(ctx->vcc);
if (ret != -EPROBE_DEFER)
dev_err(dev, "Failed to request vcc regulator: %d\n", ret);
return ret;
}
ctx->iovcc = devm_regulator_get(dev, "iovcc");
if (IS_ERR(ctx->iovcc)) {
ret = PTR_ERR(ctx->iovcc);
if (ret != -EPROBE_DEFER)
dev_err(dev, "Failed to request iovcc regulator: %d\n", ret);
return ret;
}
mipi_dsi_set_drvdata(dsi, ctx);
ctx->dev = dev;
dsi->lanes = 4;
dsi->format = MIPI_DSI_FMT_RGB888;
dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_NO_EOT_PACKET;
drm_panel_init(&ctx->panel, &dsi->dev, <k500hd1829_funcs,
DRM_MODE_CONNECTOR_DSI);
ret = drm_panel_of_backlight(&ctx->panel);
if (ret)
return ret;
drm_panel_add(&ctx->panel);
ret = mipi_dsi_attach(dsi);
if (ret < 0) {
dev_err(dev, "mipi_dsi_attach failed: %d\n", ret);
drm_panel_remove(&ctx->panel);
return ret;
}
return 0;
}
/* Power the panel down on system shutdown (best effort, errors logged). */
static void ltk500hd1829_shutdown(struct mipi_dsi_device *dsi)
{
	struct ltk500hd1829 *ctx = mipi_dsi_get_drvdata(dsi);
	int err;

	err = drm_panel_unprepare(&ctx->panel);
	if (err < 0)
		dev_err(&dsi->dev, "Failed to unprepare panel: %d\n", err);

	err = drm_panel_disable(&ctx->panel);
	if (err < 0)
		dev_err(&dsi->dev, "Failed to disable panel: %d\n", err);
}
/* Driver .remove: power down, detach from the host, drop the panel. */
static void ltk500hd1829_remove(struct mipi_dsi_device *dsi)
{
	struct ltk500hd1829 *ctx = mipi_dsi_get_drvdata(dsi);
	int err;

	ltk500hd1829_shutdown(dsi);

	err = mipi_dsi_detach(dsi);
	if (err < 0)
		dev_err(&dsi->dev, "failed to detach from DSI host: %d\n", err);

	drm_panel_remove(&ctx->panel);
}
/* OF match table for the single supported compatible. */
static const struct of_device_id ltk500hd1829_of_match[] = {
	{ .compatible = "leadtek,ltk500hd1829", },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, ltk500hd1829_of_match);
/* MIPI-DSI driver glue; .shutdown ensures the panel is powered down. */
static struct mipi_dsi_driver ltk500hd1829_driver = {
	.driver = {
		.name = "panel-leadtek-ltk500hd1829",
		.of_match_table = ltk500hd1829_of_match,
	},
	.probe = ltk500hd1829_probe,
	.remove = ltk500hd1829_remove,
	.shutdown = ltk500hd1829_shutdown,
};
module_mipi_dsi_driver(ltk500hd1829_driver);

MODULE_AUTHOR("Heiko Stuebner <[email protected]>");
MODULE_DESCRIPTION("Leadtek LTK500HD1829 panel driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/gpu/drm/panel/panel-leadtek-ltk500hd1829.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Generic DSI Command Mode panel driver
*
* Copyright (C) 2013 Texas Instruments Incorporated - https://www.ti.com/
* Author: Tomi Valkeinen <[email protected]>
*/
#include <linux/backlight.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/regulator/consumer.h>
#include <drm/drm_connector.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_modes.h>
#include <drm/drm_panel.h>
#include <video/mipi_display.h>
#define DCS_GET_ID1 0xda
#define DCS_GET_ID2 0xdb
#define DCS_GET_ID3 0xdc
#define DCS_REGULATOR_SUPPLY_NUM 2
static const struct of_device_id dsicm_of_match[];
/* Static description of one supported DSI command-mode panel variant. */
struct dsic_panel_data {
	u32 xres;		/* horizontal resolution, pixels */
	u32 yres;		/* vertical resolution, pixels */
	u32 refresh;		/* nominal refresh rate */
	u32 width_mm;		/* physical width, millimetres */
	u32 height_mm;		/* physical height, millimetres */
	u32 max_hs_rate;	/* maximum high-speed link rate */
	u32 max_lp_rate;	/* maximum low-power link rate */
	bool te_support;	/* panel provides a tearing-effect signal */
};
/* Runtime state of one command-mode panel instance. */
struct panel_drv_data {
	struct mipi_dsi_device *dsi;
	struct drm_panel panel;
	struct drm_display_mode mode;

	struct mutex lock;		/* serializes DCS access and state */

	struct backlight_device *bldev;		/* DSI-controlled backlight */
	struct backlight_device *extbldev;	/* external backlight, if any */

	unsigned long hw_guard_end;	/* next value of jiffies when we can
					 * issue the next sleep in/out command
					 */
	unsigned long hw_guard_wait;	/* max guard time in jiffies */

	const struct dsic_panel_data *panel_data;

	struct gpio_desc *reset_gpio;

	struct regulator_bulk_data supplies[DCS_REGULATOR_SUPPLY_NUM];

	bool use_dsi_backlight;

	/* runtime variables */
	bool enabled;

	bool intro_printed;
}
/* Map a drm_panel pointer back to its containing driver data. */
static inline struct panel_drv_data *panel_to_ddata(struct drm_panel *panel)
{
	return container_of(panel, struct panel_drv_data, panel);
}
/*
 * Switch whichever backlight is present (DSI-based preferred, external
 * as fallback) on or off; no-op when neither exists.
 */
static void dsicm_bl_power(struct panel_drv_data *ddata, bool enable)
{
	struct backlight_device *bd = ddata->bldev ?: ddata->extbldev;

	if (!bd)
		return;

	if (enable)
		backlight_enable(bd);
	else
		backlight_disable(bd);
}
/* Open a guard window of guard_msec during which sleep in/out must wait. */
static void hw_guard_start(struct panel_drv_data *ddata, int guard_msec)
{
	ddata->hw_guard_wait = msecs_to_jiffies(guard_msec);
	ddata->hw_guard_end = jiffies + ddata->hw_guard_wait;
}
/*
 * Sleep until the guard window opened by hw_guard_start() has elapsed.
 * The unsigned subtraction plus signed compare copes with jiffies
 * wrap-around; the upper bound check skips the wait when the window has
 * already passed.
 */
static void hw_guard_wait(struct panel_drv_data *ddata)
{
	unsigned long wait = ddata->hw_guard_end - jiffies;

	if ((long)wait > 0 && wait <= ddata->hw_guard_wait) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(wait);
	}
}
/* Read a single byte returned by the given DCS command. */
static int dsicm_dcs_read_1(struct panel_drv_data *ddata, u8 dcs_cmd, u8 *data)
{
	return mipi_dsi_dcs_read(ddata->dsi, dcs_cmd, data, 1);
}
/* Send a DCS command with a single parameter byte. */
static int dsicm_dcs_write_1(struct panel_drv_data *ddata, u8 dcs_cmd, u8 param)
{
	return mipi_dsi_dcs_write(ddata->dsi, dcs_cmd, &param, 1);
}
/* Issue DCS sleep-in, honouring the inter-command hardware guard time. */
static int dsicm_sleep_in(struct panel_drv_data *ddata)
{
	int err;

	hw_guard_wait(ddata);

	err = mipi_dsi_dcs_enter_sleep_mode(ddata->dsi);
	if (err)
		return err;

	hw_guard_start(ddata, 120);

	usleep_range(5000, 10000);

	return 0;
}
/*
 * Wake the panel from sleep mode, honouring the 120 ms guard time between
 * consecutive sleep in/out commands.
 */
static int dsicm_sleep_out(struct panel_drv_data *ddata)
{
	int r;

	hw_guard_wait(ddata);

	r = mipi_dsi_dcs_exit_sleep_mode(ddata->dsi);
	if (r)
		return r;

	hw_guard_start(ddata, 120);

	usleep_range(5000, 10000);

	return 0;
}
/* Read the three panel revision ID bytes (DCS 0xda/0xdb/0xdc). */
static int dsicm_get_id(struct panel_drv_data *ddata, u8 *id1, u8 *id2, u8 *id3)
{
	const u8 cmds[] = { DCS_GET_ID1, DCS_GET_ID2, DCS_GET_ID3 };
	u8 *dst[] = { id1, id2, id3 };
	unsigned int i;
	int ret;

	for (i = 0; i < ARRAY_SIZE(cmds); i++) {
		ret = dsicm_dcs_read_1(ddata, cmds[i], dst[i]);
		if (ret)
			return ret;
	}

	return 0;
}
static int dsicm_set_update_window(struct panel_drv_data *ddata)
{
struct mipi_dsi_device *dsi = ddata->dsi;
int r;
r = mipi_dsi_dcs_set_column_address(dsi, 0, ddata->mode.hdisplay - 1);
if (r < 0)
return r;
r = mipi_dsi_dcs_set_page_address(dsi, 0, ddata->mode.vdisplay - 1);
if (r < 0)
return r;
return 0;
}
/*
 * Backlight update_status hook: push the requested brightness to the panel
 * over DCS, but only while the panel is enabled (lock-protected).
 */
static int dsicm_bl_update_status(struct backlight_device *dev)
{
	struct panel_drv_data *ddata = dev_get_drvdata(&dev->dev);
	int r = 0;
	int level = backlight_get_brightness(dev);

	dev_dbg(&ddata->dsi->dev, "update brightness to %d\n", level);

	mutex_lock(&ddata->lock);

	if (ddata->enabled)
		r = dsicm_dcs_write_1(ddata, MIPI_DCS_SET_DISPLAY_BRIGHTNESS,
				      level);

	mutex_unlock(&ddata->lock);

	return r;
}
/* Backlight get_brightness hook; report the cached brightness level. */
static int dsicm_bl_get_intensity(struct backlight_device *dev)
{
	int level = backlight_get_brightness(dev);

	return level;
}
/* Operations for the panel's native (DSI-controlled) backlight device. */
static const struct backlight_ops dsicm_bl_ops = {
	.get_brightness = dsicm_bl_get_intensity,
	.update_status = dsicm_bl_update_status,
};
/*
 * sysfs "num_dsi_errors": read the panel's DSI error counter via DCS.
 * Returns -ENODEV when the panel is not enabled.
 */
static ssize_t num_dsi_errors_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct panel_drv_data *ddata = dev_get_drvdata(dev);
	u8 errors = 0;
	int r = -ENODEV;

	mutex_lock(&ddata->lock);

	if (ddata->enabled)
		r = dsicm_dcs_read_1(ddata, MIPI_DCS_GET_ERROR_COUNT_ON_DSI, &errors);

	mutex_unlock(&ddata->lock);

	if (r)
		return r;

	return sysfs_emit(buf, "%d\n", errors);
}
/*
 * sysfs "hw_revision": read the three panel ID bytes and format them as
 * "xx.xx.xx". Returns -ENODEV when the panel is not enabled.
 */
static ssize_t hw_revision_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct panel_drv_data *ddata = dev_get_drvdata(dev);
	u8 id1, id2, id3;
	int r = -ENODEV;

	mutex_lock(&ddata->lock);

	if (ddata->enabled)
		r = dsicm_get_id(ddata, &id1, &id2, &id3);

	mutex_unlock(&ddata->lock);

	if (r)
		return r;

	return sysfs_emit(buf, "%02x.%02x.%02x\n", id1, id2, id3);
}
/* Read-only diagnostic attributes, registered in dsicm_probe(). */
static DEVICE_ATTR_RO(num_dsi_errors);
static DEVICE_ATTR_RO(hw_revision);

static struct attribute *dsicm_attrs[] = {
	&dev_attr_num_dsi_errors.attr,
	&dev_attr_hw_revision.attr,
	NULL,
};

static const struct attribute_group dsicm_attr_group = {
	.attrs = dsicm_attrs,
};
/* Pulse the reset GPIO: release, assert, release, then settle. */
static void dsicm_hw_reset(struct panel_drv_data *ddata)
{
	gpiod_set_value(ddata->reset_gpio, 1);
	udelay(10);
	/* reset the panel */
	gpiod_set_value(ddata->reset_gpio, 0);
	/* assert reset */
	udelay(10);
	gpiod_set_value(ddata->reset_gpio, 1);
	/* wait after releasing reset */
	usleep_range(5000, 10000);
}
/*
 * Full power-on/init sequence: HW reset, sleep-out, read the panel ID,
 * configure brightness/backlight control, pixel format and update window,
 * turn the display on and optionally enable the tearing-effect line.
 * Commands are sent in LP mode; any failure triggers another HW reset.
 */
static int dsicm_power_on(struct panel_drv_data *ddata)
{
	u8 id1, id2, id3;
	int r;

	dsicm_hw_reset(ddata);

	/* Init commands must go out in low-power mode */
	ddata->dsi->mode_flags |= MIPI_DSI_MODE_LPM;

	r = dsicm_sleep_out(ddata);
	if (r)
		goto err;

	r = dsicm_get_id(ddata, &id1, &id2, &id3);
	if (r)
		goto err;

	r = dsicm_dcs_write_1(ddata, MIPI_DCS_SET_DISPLAY_BRIGHTNESS, 0xff);
	if (r)
		goto err;

	r = dsicm_dcs_write_1(ddata, MIPI_DCS_WRITE_CONTROL_DISPLAY,
			(1<<2) | (1<<5));	/* BL | BCTRL */
	if (r)
		goto err;

	r = mipi_dsi_dcs_set_pixel_format(ddata->dsi, MIPI_DCS_PIXEL_FMT_24BIT);
	if (r)
		goto err;

	r = dsicm_set_update_window(ddata);
	if (r)
		goto err;

	r = mipi_dsi_dcs_set_display_on(ddata->dsi);
	if (r)
		goto err;

	if (ddata->panel_data->te_support) {
		r = mipi_dsi_dcs_set_tear_on(ddata->dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
		if (r)
			goto err;
	}

	/* possible panel bug */
	msleep(100);

	ddata->enabled = true;

	if (!ddata->intro_printed) {
		dev_info(&ddata->dsi->dev, "panel revision %02x.%02x.%02x\n",
			id1, id2, id3);
		ddata->intro_printed = true;
	}

	/* Back to high-speed mode for normal operation */
	ddata->dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;

	return 0;
err:
	dev_err(&ddata->dsi->dev, "error while enabling panel, issuing HW reset\n");

	dsicm_hw_reset(ddata);

	return r;
}
/*
 * Power down: display off, then sleep-in. On failure, fall back to a
 * hardware reset so the panel is left in a known state.
 */
static int dsicm_power_off(struct panel_drv_data *ddata)
{
	int r;

	ddata->enabled = false;

	r = mipi_dsi_dcs_set_display_off(ddata->dsi);
	if (!r)
		r = dsicm_sleep_in(ddata);

	if (r) {
		dev_err(&ddata->dsi->dev,
				"error disabling panel, issuing HW reset\n");
		dsicm_hw_reset(ddata);
	}

	return r;
}
/* drm_panel .prepare hook: enable the vpnl/vddi regulators. */
static int dsicm_prepare(struct drm_panel *panel)
{
	struct panel_drv_data *ddata = panel_to_ddata(panel);
	int ret;

	ret = regulator_bulk_enable(ARRAY_SIZE(ddata->supplies),
				    ddata->supplies);
	if (ret)
		dev_err(&ddata->dsi->dev,
			"failed to enable supplies: %d\n", ret);

	return ret;
}
/*
 * drm_panel .enable hook: run the power-on sequence under the lock, then
 * switch the backlight on (backlight is handled outside the lock to avoid
 * recursing into dsicm_bl_update_status(), which takes the same lock).
 */
static int dsicm_enable(struct drm_panel *panel)
{
	struct panel_drv_data *ddata = panel_to_ddata(panel);
	int r;

	mutex_lock(&ddata->lock);

	r = dsicm_power_on(ddata);
	if (r)
		goto err;

	mutex_unlock(&ddata->lock);

	dsicm_bl_power(ddata, true);

	return 0;
err:
	dev_err(&ddata->dsi->dev, "enable failed (%d)\n", r);
	mutex_unlock(&ddata->lock);

	return r;
}
/* drm_panel .unprepare hook: disable the supply regulators. */
static int dsicm_unprepare(struct drm_panel *panel)
{
	struct panel_drv_data *ddata = panel_to_ddata(panel);
	int ret;

	ret = regulator_bulk_disable(ARRAY_SIZE(ddata->supplies),
				     ddata->supplies);
	if (ret)
		dev_err(&ddata->dsi->dev,
			"failed to disable supplies: %d\n", ret);

	return ret;
}
/*
 * drm_panel .disable hook: backlight off first (outside the lock), then
 * power the panel down under the lock.
 */
static int dsicm_disable(struct drm_panel *panel)
{
	struct panel_drv_data *ddata = panel_to_ddata(panel);
	int r;

	dsicm_bl_power(ddata, false);

	mutex_lock(&ddata->lock);

	r = dsicm_power_off(ddata);

	mutex_unlock(&ddata->lock);

	return r;
}
/*
 * drm_panel .get_modes hook: expose the single fixed mode built in
 * dsicm_probe_of(), plus the physical panel dimensions.
 */
static int dsicm_get_modes(struct drm_panel *panel,
			   struct drm_connector *connector)
{
	struct panel_drv_data *ddata = panel_to_ddata(panel);
	struct drm_display_mode *mode;

	mode = drm_mode_duplicate(connector->dev, &ddata->mode);
	if (!mode) {
		dev_err(&ddata->dsi->dev, "failed to add mode %ux%ux@%u kHz\n",
			ddata->mode.hdisplay, ddata->mode.vdisplay,
			ddata->mode.clock);
		return -ENOMEM;
	}

	connector->display_info.width_mm = ddata->panel_data->width_mm;
	connector->display_info.height_mm = ddata->panel_data->height_mm;

	drm_mode_probed_add(connector, mode);

	/* one mode added */
	return 1;
}
/* drm_panel operations for the DSI command-mode panel. */
static const struct drm_panel_funcs dsicm_panel_funcs = {
	.unprepare = dsicm_unprepare,
	.disable = dsicm_disable,
	.prepare = dsicm_prepare,
	.enable = dsicm_enable,
	.get_modes = dsicm_get_modes,
};
/*
 * Parse devicetree resources: reset GPIO, supplies and (optional) external
 * backlight, and build the fixed display mode from the per-compatible
 * panel data.
 */
static int dsicm_probe_of(struct mipi_dsi_device *dsi)
{
	struct backlight_device *backlight;
	struct panel_drv_data *ddata = mipi_dsi_get_drvdata(dsi);
	int err;
	struct drm_display_mode *mode = &ddata->mode;

	ddata->reset_gpio = devm_gpiod_get(&dsi->dev, "reset", GPIOD_OUT_LOW);
	if (IS_ERR(ddata->reset_gpio)) {
		err = PTR_ERR(ddata->reset_gpio);
		dev_err(&dsi->dev, "reset gpio request failed: %d", err);
		return err;
	}

	/* Command-mode panel: collapse all sync/blank timings onto the
	 * active area */
	mode->hdisplay = mode->hsync_start = mode->hsync_end = mode->htotal =
		ddata->panel_data->xres;
	mode->vdisplay = mode->vsync_start = mode->vsync_end = mode->vtotal =
		ddata->panel_data->yres;
	mode->clock = ddata->panel_data->xres * ddata->panel_data->yres *
		ddata->panel_data->refresh / 1000;
	mode->width_mm = ddata->panel_data->width_mm;
	mode->height_mm = ddata->panel_data->height_mm;
	mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
	drm_mode_set_name(mode);

	ddata->supplies[0].supply = "vpnl";
	ddata->supplies[1].supply = "vddi";
	err = devm_regulator_bulk_get(&dsi->dev, ARRAY_SIZE(ddata->supplies),
				      ddata->supplies);
	if (err)
		return err;

	backlight = devm_of_find_backlight(&dsi->dev);
	if (IS_ERR(backlight))
		return PTR_ERR(backlight);

	/* If no backlight device is found assume native backlight support */
	if (backlight)
		ddata->extbldev = backlight;
	else
		ddata->use_dsi_backlight = true;

	return 0;
}
/*
 * Probe: allocate state, parse DT, register the drm_panel plus (if no
 * external backlight was found) a native backlight device, create the
 * diagnostic sysfs group and attach to the DSI host.
 */
static int dsicm_probe(struct mipi_dsi_device *dsi)
{
	struct panel_drv_data *ddata;
	struct backlight_device *bldev = NULL;
	struct device *dev = &dsi->dev;
	int r;

	dev_dbg(dev, "probe\n");

	ddata = devm_kzalloc(dev, sizeof(*ddata), GFP_KERNEL);
	if (!ddata)
		return -ENOMEM;

	mipi_dsi_set_drvdata(dsi, ddata);
	ddata->dsi = dsi;

	ddata->panel_data = of_device_get_match_data(dev);
	if (!ddata->panel_data)
		return -ENODEV;

	r = dsicm_probe_of(dsi);
	if (r)
		return r;

	mutex_init(&ddata->lock);

	dsicm_hw_reset(ddata);

	drm_panel_init(&ddata->panel, dev, &dsicm_panel_funcs,
		       DRM_MODE_CONNECTOR_DSI);

	if (ddata->use_dsi_backlight) {
		struct backlight_properties props = { 0 };
		props.max_brightness = 255;
		props.type = BACKLIGHT_RAW;

		bldev = devm_backlight_device_register(dev, dev_name(dev),
			dev, ddata, &dsicm_bl_ops, &props);
		if (IS_ERR(bldev)) {
			r = PTR_ERR(bldev);
			goto err_bl;
		}

		ddata->bldev = bldev;
	}

	r = sysfs_create_group(&dev->kobj, &dsicm_attr_group);
	if (r) {
		dev_err(dev, "failed to create sysfs files\n");
		goto err_bl;
	}

	dsi->lanes = 2;
	dsi->format = MIPI_DSI_FMT_RGB888;
	dsi->mode_flags = MIPI_DSI_CLOCK_NON_CONTINUOUS |
			  MIPI_DSI_MODE_NO_EOT_PACKET;
	dsi->hs_rate = ddata->panel_data->max_hs_rate;
	dsi->lp_rate = ddata->panel_data->max_lp_rate;

	drm_panel_add(&ddata->panel);

	r = mipi_dsi_attach(dsi);
	if (r < 0)
		goto err_dsi_attach;

	return 0;

err_dsi_attach:
	drm_panel_remove(&ddata->panel);
	sysfs_remove_group(&dsi->dev.kobj, &dsicm_attr_group);
err_bl:
	/* extbldev was acquired with a reference in devm_of_find_backlight() */
	if (ddata->extbldev)
		put_device(&ddata->extbldev->dev);

	return r;
}
/* Unbind: detach from the DSI host and undo everything probe set up. */
static void dsicm_remove(struct mipi_dsi_device *dsi)
{
	struct panel_drv_data *ddata = mipi_dsi_get_drvdata(dsi);

	dev_dbg(&dsi->dev, "remove\n");

	mipi_dsi_detach(dsi);

	drm_panel_remove(&ddata->panel);

	sysfs_remove_group(&dsi->dev.kobj, &dsicm_attr_group);

	/* drop the reference taken on the external backlight in probe */
	if (ddata->extbldev)
		put_device(&ddata->extbldev->dev);
}
/* Per-compatible panel descriptions referenced by dsicm_of_match[]. */
static const struct dsic_panel_data taal_data = {
	.xres = 864,
	.yres = 480,
	.refresh = 60,
	.width_mm = 0,		/* physical size not specified */
	.height_mm = 0,
	.max_hs_rate = 300000000,
	.max_lp_rate = 10000000,
	.te_support = true,
};

static const struct dsic_panel_data himalaya_data = {
	.xres = 480,
	.yres = 864,
	.refresh = 60,
	.width_mm = 49,
	.height_mm = 88,
	.max_hs_rate = 300000000,
	.max_lp_rate = 10000000,
	.te_support = false,
};

static const struct dsic_panel_data droid4_data = {
	.xres = 540,
	.yres = 960,
	.refresh = 60,
	.width_mm = 50,
	.height_mm = 89,
	.max_hs_rate = 300000000,
	.max_lp_rate = 10000000,
	.te_support = false,
};
/*
 * Devicetree match table. All entries now use an explicit .data
 * designator; the himalaya/droid4 entries previously relied on
 * positional initialization after a designated initializer, which is
 * fragile with respect to struct of_device_id field ordering and
 * inconsistent with the first entry.
 */
static const struct of_device_id dsicm_of_match[] = {
	{ .compatible = "tpo,taal", .data = &taal_data },
	{ .compatible = "nokia,himalaya", .data = &himalaya_data },
	{ .compatible = "motorola,droid4-panel", .data = &droid4_data },
	{},
};

MODULE_DEVICE_TABLE(of, dsicm_of_match);
/* MIPI DSI driver registration and module metadata. */
static struct mipi_dsi_driver dsicm_driver = {
	.probe = dsicm_probe,
	.remove = dsicm_remove,
	.driver = {
		.name = "panel-dsi-cm",
		.of_match_table = dsicm_of_match,
	},
};
module_mipi_dsi_driver(dsicm_driver);

MODULE_AUTHOR("Tomi Valkeinen <[email protected]>");
MODULE_DESCRIPTION("Generic DSI Command Mode Panel Driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/gpu/drm/panel/panel-dsi-cm.c |
// SPDX-License-Identifier: GPL-2.0
/*
* AU Optronics A030JTN01.0 TFT LCD panel driver
*
* Copyright (C) 2023, Paul Cercueil <[email protected]>
* Copyright (C) 2023, Christophe Branchereau <[email protected]>
*/
#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/gpio/consumer.h>
#include <linux/media-bus-format.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/spi/spi.h>
#include <drm/drm_modes.h>
#include <drm/drm_panel.h>
#define REG05 0x05
#define REG06 0x06
#define REG07 0x07
#define REG05_STDBY BIT(0)
#define REG06_VBLK GENMASK(4, 0)
#define REG07_HBLK GENMASK(7, 0)
/* Static panel description: modes, physical size and bus parameters. */
struct a030jtn01_info {
	const struct drm_display_mode *display_modes;
	unsigned int num_modes;
	u16 width_mm, height_mm;	/* physical size in mm */
	u32 bus_format, bus_flags;	/* MEDIA_BUS_FMT_* / DRM_BUS_FLAG_* */
};

/* Per-device driver state. */
struct a030jtn01 {
	struct drm_panel panel;
	struct spi_device *spi;
	struct regmap *map;		/* SPI register map for panel config */

	const struct a030jtn01_info *panel_info;

	struct regulator *supply;
	struct gpio_desc *reset_gpio;	/* active high */
};
/* Recover the driver-private structure embedding the drm_panel. */
static inline struct a030jtn01 *to_a030jtn01(struct drm_panel *panel)
{
	struct a030jtn01 *priv;

	priv = container_of(panel, struct a030jtn01, panel);
	return priv;
}
/*
 * drm_panel .prepare hook: power the panel, pulse its reset line, then
 * program the back-porch registers. On any register failure the panel is
 * put back into reset and the supply disabled.
 */
static int a030jtn01_prepare(struct drm_panel *panel)
{
	struct a030jtn01 *priv = to_a030jtn01(panel);
	struct device *dev = &priv->spi->dev;
	unsigned int dummy;
	int err;

	err = regulator_enable(priv->supply);
	if (err) {
		dev_err(dev, "Failed to enable power supply: %d\n", err);
		return err;
	}
	usleep_range(1000, 8000);

	/* Reset the chip */
	gpiod_set_value_cansleep(priv->reset_gpio, 1);
	usleep_range(100, 8000);
	gpiod_set_value_cansleep(priv->reset_gpio, 0);
	usleep_range(2000, 8000);

	/*
	 * No idea why, but a register read (doesn't matter which) is needed to
	 * properly initialize the chip after a reset; otherwise, the colors
	 * will be wrong. It doesn't seem to be timing-related as a msleep(200)
	 * doesn't fix it.
	 */
	err = regmap_read(priv->map, REG05, &dummy);
	if (err)
		goto err_disable_regulator;

	/* Use (24 + 6) == 0x1e as the vertical back porch */
	err = regmap_write(priv->map, REG06, FIELD_PREP(REG06_VBLK, 0x1e));
	if (err)
		goto err_disable_regulator;

	/* Use (42 + 30) * 3 == 0xd8 as the horizontal back porch */
	err = regmap_write(priv->map, REG07, FIELD_PREP(REG07_HBLK, 0xd8));
	if (err)
		goto err_disable_regulator;

	return 0;

err_disable_regulator:
	gpiod_set_value_cansleep(priv->reset_gpio, 1);
	regulator_disable(priv->supply);
	return err;
}
/* drm_panel .unprepare hook: assert reset and cut the power supply. */
static int a030jtn01_unprepare(struct drm_panel *panel)
{
	struct a030jtn01 *priv = to_a030jtn01(panel);

	/* Hold the chip in reset before removing power */
	gpiod_set_value_cansleep(priv->reset_gpio, 1);

	regulator_disable(priv->supply);

	return 0;
}
/*
 * drm_panel .enable hook: take the controller out of standby; when a
 * backlight is attached, give the picture time to stabilize before the
 * core switches the backlight on.
 */
static int a030jtn01_enable(struct drm_panel *panel)
{
	struct a030jtn01 *priv = to_a030jtn01(panel);
	int ret;

	ret = regmap_set_bits(priv->map, REG05, REG05_STDBY);
	if (ret)
		return ret;

	/* Wait for the picture to be stable */
	if (panel->backlight)
		msleep(100);

	return 0;
}
/* drm_panel .disable hook: put the controller back into standby. */
static int a030jtn01_disable(struct drm_panel *panel)
{
	struct a030jtn01 *priv = to_a030jtn01(panel);
	int ret;

	ret = regmap_clear_bits(priv->map, REG05, REG05_STDBY);

	return ret;
}
/*
 * drm_panel .get_modes hook: duplicate every mode from the static panel
 * description onto the connector and fill in the display_info (bpc,
 * physical size, bus format/flags). Returns the number of modes added.
 */
static int a030jtn01_get_modes(struct drm_panel *panel,
			       struct drm_connector *connector)
{
	struct a030jtn01 *priv = to_a030jtn01(panel);
	const struct a030jtn01_info *panel_info = priv->panel_info;
	struct drm_display_mode *mode;
	unsigned int i;

	for (i = 0; i < panel_info->num_modes; i++) {
		mode = drm_mode_duplicate(connector->dev,
					  &panel_info->display_modes[i]);
		if (!mode)
			return -ENOMEM;

		drm_mode_set_name(mode);

		mode->type = DRM_MODE_TYPE_DRIVER;
		/* A single mode is implicitly the preferred one */
		if (panel_info->num_modes == 1)
			mode->type |= DRM_MODE_TYPE_PREFERRED;

		drm_mode_probed_add(connector, mode);
	}

	connector->display_info.bpc = 8;
	connector->display_info.width_mm = panel_info->width_mm;
	connector->display_info.height_mm = panel_info->height_mm;

	drm_display_info_set_bus_formats(&connector->display_info,
					 &panel_info->bus_format, 1);
	connector->display_info.bus_flags = panel_info->bus_flags;

	return panel_info->num_modes;
}
/* drm_panel operations for the AUO A030JTN01 panel. */
static const struct drm_panel_funcs a030jtn01_funcs = {
	.prepare	= a030jtn01_prepare,
	.unprepare	= a030jtn01_unprepare,
	.enable		= a030jtn01_enable,
	.disable	= a030jtn01_disable,
	.get_modes	= a030jtn01_get_modes,
};
/*
 * regmap readable/writeable callback: the mask has one bit set per
 * register address (0x00..0x1c) that exists on the chip.
 *
 * Fix: removed the stray ';' that followed the function's closing brace
 * (an extra empty top-level declaration, flagged by -Wpedantic).
 */
static bool a030jtn01_has_reg(struct device *dev, unsigned int reg)
{
	static const u32 a030jtn01_regs_mask = 0x001823f1fb;

	return a030jtn01_regs_mask & BIT(reg);
}
/* 8-bit register / 8-bit value SPI regmap; bit 6 of the address marks a read. */
static const struct regmap_config a030jtn01_regmap_config = {
	.reg_bits = 8,
	.val_bits = 8,
	.read_flag_mask = 0x40,
	.max_register = 0x1c,

	.readable_reg = a030jtn01_has_reg,
	.writeable_reg = a030jtn01_has_reg,
};
/*
 * Probe: configure the 3-wire SPI link, create the regmap, look up the
 * per-compatible panel info, acquire supply/reset/backlight resources and
 * register the drm_panel. All resources are devm-managed.
 */
static int a030jtn01_probe(struct spi_device *spi)
{
	struct device *dev = &spi->dev;
	struct a030jtn01 *priv;
	int err;

	/* The controller uses SPI mode 3 over a shared 3-wire data line */
	spi->mode |= SPI_MODE_3 | SPI_3WIRE;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->spi = spi;
	spi_set_drvdata(spi, priv);

	priv->map = devm_regmap_init_spi(spi, &a030jtn01_regmap_config);
	if (IS_ERR(priv->map))
		return dev_err_probe(dev, PTR_ERR(priv->map), "Unable to init regmap");

	priv->panel_info = spi_get_device_match_data(spi);
	if (!priv->panel_info)
		return -EINVAL;

	priv->supply = devm_regulator_get(dev, "power");
	if (IS_ERR(priv->supply))
		return dev_err_probe(dev, PTR_ERR(priv->supply), "Failed to get power supply");

	priv->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
	if (IS_ERR(priv->reset_gpio))
		return dev_err_probe(dev, PTR_ERR(priv->reset_gpio), "Failed to get reset GPIO");

	drm_panel_init(&priv->panel, dev, &a030jtn01_funcs,
		       DRM_MODE_CONNECTOR_DPI);

	err = drm_panel_of_backlight(&priv->panel);
	if (err)
		return err;

	drm_panel_add(&priv->panel);

	return 0;
}
/* Unbind: unregister the panel and force it off if it is still running. */
static void a030jtn01_remove(struct spi_device *spi)
{
	struct a030jtn01 *priv = spi_get_drvdata(spi);

	drm_panel_remove(&priv->panel);

	drm_panel_disable(&priv->panel);
	drm_panel_unprepare(&priv->panel);
}
/* 320x480 timings; the two entries differ only in pixel clock (60/50 Hz). */
static const struct drm_display_mode a030jtn01_modes[] = {
	{ /* 60 Hz */
		.clock = 14400,
		.hdisplay = 320,
		.hsync_start = 320 + 8,
		.hsync_end = 320 + 8 + 42,
		.htotal = 320 + 8 + 42 + 30,
		.vdisplay = 480,
		.vsync_start = 480 + 90,
		.vsync_end = 480 + 90 + 24,
		.vtotal = 480 + 90 + 24 + 6,
		.flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
	},
	{ /* 50 Hz */
		.clock = 12000,
		.hdisplay = 320,
		.hsync_start = 320 + 8,
		.hsync_end = 320 + 8 + 42,
		.htotal = 320 + 8 + 42 + 30,
		.vdisplay = 480,
		.vsync_start = 480 + 90,
		.vsync_end = 480 + 90 + 24,
		.vtotal = 480 + 90 + 24 + 6,
		.flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
	},
};

/* Static description matched via the SPI/OF device tables below. */
static const struct a030jtn01_info a030jtn01_info = {
	.display_modes = a030jtn01_modes,
	.num_modes = ARRAY_SIZE(a030jtn01_modes),
	.width_mm = 70,
	.height_mm = 51,
	.bus_format = MEDIA_BUS_FMT_RGB888_3X8_DELTA,
	.bus_flags = DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE,
};
/* SPI and OF match tables plus driver registration. */
static const struct spi_device_id a030jtn01_id[] = {
	{ "a030jtn01", (kernel_ulong_t) &a030jtn01_info },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(spi, a030jtn01_id);

static const struct of_device_id a030jtn01_of_match[] = {
	{ .compatible = "auo,a030jtn01" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, a030jtn01_of_match);

static struct spi_driver a030jtn01_driver = {
	.driver = {
		.name = "auo-a030jtn01",
		.of_match_table = a030jtn01_of_match,
	},
	.id_table = a030jtn01_id,
	.probe = a030jtn01_probe,
	.remove = a030jtn01_remove,
};
module_spi_driver(a030jtn01_driver);

MODULE_AUTHOR("Paul Cercueil <[email protected]>");
| linux-master | drivers/gpu/drm/panel/panel-auo-a030jtn01.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2019, The Linux Foundation. All rights reserved.
*/
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/gpio/consumer.h>
#include <linux/regulator/consumer.h>
#include <video/mipi_display.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_modes.h>
#include <drm/drm_panel.h>
/* Per-device driver state for the Visionox RM69299 DSI panel. */
struct visionox_rm69299 {
	struct drm_panel panel;
	struct regulator_bulk_data supplies[2];	/* vdda, vdd3p3 */
	struct gpio_desc *reset_gpio;
	struct mipi_dsi_device *dsi;
	bool prepared;	/* guards against double prepare */
	bool enabled;	/* NOTE(review): set nowhere in this file */
};
/* Recover the driver context embedding the given drm_panel. */
static inline struct visionox_rm69299 *panel_to_ctx(struct drm_panel *panel)
{
	struct visionox_rm69299 *ctx;

	ctx = container_of(panel, struct visionox_rm69299, panel);
	return ctx;
}
/* Enable both supplies, then run the documented reset pulse sequence. */
static int visionox_rm69299_power_on(struct visionox_rm69299 *ctx)
{
	int ret;

	ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
	if (ret < 0)
		return ret;

	/*
	 * Reset sequence of visionox panel requires the panel to be
	 * out of reset for 10ms, followed by being held in reset
	 * for 10ms and then out again
	 */
	gpiod_set_value(ctx->reset_gpio, 1);
	usleep_range(10000, 20000);
	gpiod_set_value(ctx->reset_gpio, 0);
	usleep_range(10000, 20000);
	gpiod_set_value(ctx->reset_gpio, 1);
	usleep_range(10000, 20000);

	return 0;
}
/* Assert reset and switch both supplies off. */
static int visionox_rm69299_power_off(struct visionox_rm69299 *ctx)
{
	gpiod_set_value(ctx->reset_gpio, 0);

	return regulator_bulk_disable(ARRAY_SIZE(ctx->supplies),
				      ctx->supplies);
}
/*
 * drm_panel .unprepare hook: display off, sleep in, then power down.
 * DCS failures are logged but do not abort the power-off.
 */
static int visionox_rm69299_unprepare(struct drm_panel *panel)
{
	struct visionox_rm69299 *ctx = panel_to_ctx(panel);
	int ret;

	ctx->dsi->mode_flags = 0;

	ret = mipi_dsi_dcs_write(ctx->dsi, MIPI_DCS_SET_DISPLAY_OFF, NULL, 0);
	if (ret < 0)
		dev_err(ctx->panel.dev, "set_display_off cmd failed ret = %d\n", ret);

	/* 120ms delay required here as per DCS spec */
	msleep(120);

	ret = mipi_dsi_dcs_write(ctx->dsi, MIPI_DCS_ENTER_SLEEP_MODE, NULL, 0);
	if (ret < 0) {
		dev_err(ctx->panel.dev, "enter_sleep cmd failed ret = %d\n", ret);
	}

	ret = visionox_rm69299_power_off(ctx);

	ctx->prepared = false;
	return ret;
}
/*
 * drm_panel .prepare hook: power on, send the vendor init sequence in LP
 * mode, exit sleep and turn the display on. Idempotent via ctx->prepared.
 */
static int visionox_rm69299_prepare(struct drm_panel *panel)
{
	struct visionox_rm69299 *ctx = panel_to_ctx(panel);
	int ret;

	if (ctx->prepared)
		return 0;

	ret = visionox_rm69299_power_on(ctx);
	if (ret < 0)
		return ret;

	ctx->dsi->mode_flags |= MIPI_DSI_MODE_LPM;

	/* Vendor init sequence (command-set page select, 0xc2, TE on, brightness) */
	ret = mipi_dsi_dcs_write_buffer(ctx->dsi, (u8[]) { 0xfe, 0x00 }, 2);
	if (ret < 0) {
		dev_err(ctx->panel.dev, "cmd set tx 0 failed, ret = %d\n", ret);
		goto power_off;
	}

	ret = mipi_dsi_dcs_write_buffer(ctx->dsi, (u8[]) { 0xc2, 0x08 }, 2);
	if (ret < 0) {
		dev_err(ctx->panel.dev, "cmd set tx 1 failed, ret = %d\n", ret);
		goto power_off;
	}

	ret = mipi_dsi_dcs_write_buffer(ctx->dsi, (u8[]) { 0x35, 0x00 }, 2);
	if (ret < 0) {
		dev_err(ctx->panel.dev, "cmd set tx 2 failed, ret = %d\n", ret);
		goto power_off;
	}

	ret = mipi_dsi_dcs_write_buffer(ctx->dsi, (u8[]) { 0x51, 0xff }, 2);
	if (ret < 0) {
		dev_err(ctx->panel.dev, "cmd set tx 3 failed, ret = %d\n", ret);
		goto power_off;
	}

	ret = mipi_dsi_dcs_write(ctx->dsi, MIPI_DCS_EXIT_SLEEP_MODE, NULL, 0);
	if (ret < 0) {
		dev_err(ctx->panel.dev, "exit_sleep_mode cmd failed ret = %d\n", ret);
		goto power_off;
	}

	/* Per DSI spec wait 120ms after sending exit sleep DCS command */
	msleep(120);

	ret = mipi_dsi_dcs_write(ctx->dsi, MIPI_DCS_SET_DISPLAY_ON, NULL, 0);
	if (ret < 0) {
		dev_err(ctx->panel.dev, "set_display_on cmd failed ret = %d\n", ret);
		goto power_off;
	}

	/* Per DSI spec wait 120ms after sending set_display_on DCS command */
	msleep(120);

	ctx->prepared = true;

	return 0;

power_off:
	/* NOTE(review): supplies/reset are left on here — confirm intended */
	return ret;
}
/* The panel's single supported video mode: 1080x2248 at 60 Hz. */
static const struct drm_display_mode visionox_rm69299_1080x2248_60hz = {
	.name = "1080x2248",
	.clock = 158695,
	.hdisplay = 1080,
	.hsync_start = 1080 + 26,
	.hsync_end = 1080 + 26 + 2,
	.htotal = 1080 + 26 + 2 + 36,
	.vdisplay = 2248,
	.vsync_start = 2248 + 56,
	.vsync_end = 2248 + 56 + 4,
	.vtotal = 2248 + 56 + 4 + 4,
	.flags = 0,
};
/* drm_panel .get_modes hook: expose the single fixed 1080x2248 mode. */
static int visionox_rm69299_get_modes(struct drm_panel *panel,
				      struct drm_connector *connector)
{
	struct visionox_rm69299 *ctx = panel_to_ctx(panel);
	struct drm_display_mode *mode;

	mode = drm_mode_duplicate(connector->dev,
				  &visionox_rm69299_1080x2248_60hz);
	if (!mode) {
		dev_err(ctx->panel.dev, "failed to create a new display mode\n");
		return 0;
	}

	connector->display_info.width_mm = 74;
	connector->display_info.height_mm = 131;
	mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
	drm_mode_probed_add(connector, mode);

	return 1;
}
/* drm_panel operations; no separate enable/disable hooks are needed. */
static const struct drm_panel_funcs visionox_rm69299_drm_funcs = {
	.unprepare = visionox_rm69299_unprepare,
	.prepare = visionox_rm69299_prepare,
	.get_modes = visionox_rm69299_get_modes,
};
/*
 * Probe: allocate state, acquire supplies and reset GPIO, register the
 * drm_panel, attach to the DSI host and configure supply load currents.
 *
 * Cleanup: drm_panel_init() already sets panel.dev and panel.funcs, so
 * the manual assignments the original repeated after it (and the early
 * ctx->panel.dev = dev) were redundant and have been removed.
 */
static int visionox_rm69299_probe(struct mipi_dsi_device *dsi)
{
	struct device *dev = &dsi->dev;
	struct visionox_rm69299 *ctx;
	int ret;

	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	mipi_dsi_set_drvdata(dsi, ctx);

	ctx->dsi = dsi;

	ctx->supplies[0].supply = "vdda";
	ctx->supplies[1].supply = "vdd3p3";

	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(ctx->supplies),
				      ctx->supplies);
	if (ret < 0)
		return ret;

	ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
	if (IS_ERR(ctx->reset_gpio)) {
		dev_err(dev, "cannot get reset gpio %ld\n", PTR_ERR(ctx->reset_gpio));
		return PTR_ERR(ctx->reset_gpio);
	}

	drm_panel_init(&ctx->panel, dev, &visionox_rm69299_drm_funcs,
		       DRM_MODE_CONNECTOR_DSI);
	drm_panel_add(&ctx->panel);

	dsi->lanes = 4;
	dsi->format = MIPI_DSI_FMT_RGB888;
	dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_LPM |
		MIPI_DSI_CLOCK_NON_CONTINUOUS;
	ret = mipi_dsi_attach(dsi);
	if (ret < 0) {
		dev_err(dev, "dsi attach failed ret = %d\n", ret);
		goto err_dsi_attach;
	}

	ret = regulator_set_load(ctx->supplies[0].consumer, 32000);
	if (ret) {
		dev_err(dev, "regulator set load failed for vdda supply ret = %d\n", ret);
		goto err_set_load;
	}

	ret = regulator_set_load(ctx->supplies[1].consumer, 13200);
	if (ret) {
		dev_err(dev, "regulator set load failed for vdd3p3 supply ret = %d\n", ret);
		goto err_set_load;
	}

	return 0;

err_set_load:
	mipi_dsi_detach(dsi);
err_dsi_attach:
	drm_panel_remove(&ctx->panel);
	return ret;
}
/* Unbind: detach from the DSI host and unregister the panel. */
static void visionox_rm69299_remove(struct mipi_dsi_device *dsi)
{
	struct visionox_rm69299 *ctx = mipi_dsi_get_drvdata(dsi);

	mipi_dsi_detach(ctx->dsi);
	/* NOTE(review): unregistering a device this driver did not create
	 * looks suspicious — confirm against the DSI host's expectations */
	mipi_dsi_device_unregister(ctx->dsi);

	drm_panel_remove(&ctx->panel);
}
/* OF match table and driver registration. */
static const struct of_device_id visionox_rm69299_of_match[] = {
	{ .compatible = "visionox,rm69299-1080p-display", },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, visionox_rm69299_of_match);

static struct mipi_dsi_driver visionox_rm69299_driver = {
	.driver = {
		.name = "panel-visionox-rm69299",
		.of_match_table = visionox_rm69299_of_match,
	},
	.probe = visionox_rm69299_probe,
	.remove = visionox_rm69299_remove,
};
module_mipi_dsi_driver(visionox_rm69299_driver);

MODULE_DESCRIPTION("Visionox RM69299 DSI Panel Driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/gpu/drm/panel/panel-visionox-rm69299.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2015 Red Hat
* Copyright (C) 2015 Sony Mobile Communications Inc.
* Author: Werner Johansson <[email protected]>
*
* Based on AUO panel driver by Rob Clark <[email protected]>
*/
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/regulator/consumer.h>
#include <video/mipi_display.h>
#include <drm/drm_crtc.h>
#include <drm/drm_device.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_panel.h>
/*
* When power is turned off to this panel a minimum off time of 500ms has to be
* observed before powering back on as there's no external reset pin. Keep
* track of earliest wakeup time and delay subsequent prepare call accordingly
*/
#define MIN_POFF_MS (500)
/* Per-device state for the Panasonic VVX10F034N00 panel. */
struct wuxga_nt_panel {
	struct drm_panel base;
	struct mipi_dsi_device *dsi;

	struct regulator *supply;

	bool prepared;
	bool enabled;

	ktime_t earliest_wake;	/* earliest time power may be re-applied */

	const struct drm_display_mode *mode;
};
/* Recover the driver-private panel structure from a drm_panel pointer. */
static inline struct wuxga_nt_panel *to_wuxga_nt_panel(struct drm_panel *panel)
{
	struct wuxga_nt_panel *p;

	p = container_of(panel, struct wuxga_nt_panel, base);
	return p;
}
/* Ask the DSI peripheral to turn itself on. */
static int wuxga_nt_panel_on(struct wuxga_nt_panel *wuxga_nt)
{
	struct mipi_dsi_device *dsi = wuxga_nt->dsi;

	return mipi_dsi_turn_on_peripheral(dsi);
}
/*
 * drm_panel .disable hook: shut down the DSI peripheral.
 *
 * Cleanup: the original carried a dead local "bl_ret" that was always 0
 * and only obscured the return expression; it has been removed.
 */
static int wuxga_nt_panel_disable(struct drm_panel *panel)
{
	struct wuxga_nt_panel *wuxga_nt = to_wuxga_nt_panel(panel);

	if (!wuxga_nt->enabled)
		return 0;

	wuxga_nt->enabled = false;

	return mipi_dsi_shutdown_peripheral(wuxga_nt->dsi);
}
/*
 * drm_panel .unprepare hook: cut power and record the earliest moment at
 * which the supply may be re-enabled (MIN_POFF_MS from now).
 */
static int wuxga_nt_panel_unprepare(struct drm_panel *panel)
{
	struct wuxga_nt_panel *wuxga_nt = to_wuxga_nt_panel(panel);

	if (!wuxga_nt->prepared)
		return 0;

	regulator_disable(wuxga_nt->supply);
	wuxga_nt->earliest_wake = ktime_add_ms(ktime_get_real(), MIN_POFF_MS);
	wuxga_nt->prepared = false;

	return 0;
}
/*
 * drm_panel .prepare hook: honour the mandatory off time since the last
 * power-down, enable the supply, wait for the panel to accept commands,
 * then turn the peripheral on.
 */
static int wuxga_nt_panel_prepare(struct drm_panel *panel)
{
	struct wuxga_nt_panel *wuxga_nt = to_wuxga_nt_panel(panel);
	int ret;
	s64 enablewait;

	if (wuxga_nt->prepared)
		return 0;

	/*
	 * If the user re-enabled the panel before the required off-time then
	 * we need to wait the remaining period before re-enabling regulator
	 */
	enablewait = ktime_ms_delta(wuxga_nt->earliest_wake, ktime_get_real());

	/* Sanity check, this should never happen */
	if (enablewait > MIN_POFF_MS)
		enablewait = MIN_POFF_MS;

	if (enablewait > 0)
		msleep(enablewait);

	ret = regulator_enable(wuxga_nt->supply);
	if (ret < 0)
		return ret;

	/*
	 * A minimum delay of 250ms is required after power-up until commands
	 * can be sent
	 */
	msleep(250);

	ret = wuxga_nt_panel_on(wuxga_nt);
	if (ret < 0) {
		dev_err(panel->dev, "failed to set panel on: %d\n", ret);
		goto poweroff;
	}

	wuxga_nt->prepared = true;

	return 0;

poweroff:
	regulator_disable(wuxga_nt->supply);

	return ret;
}
/* drm_panel .enable hook: just record the enabled state. */
static int wuxga_nt_panel_enable(struct drm_panel *panel)
{
	struct wuxga_nt_panel *wuxga_nt = to_wuxga_nt_panel(panel);

	if (!wuxga_nt->enabled)
		wuxga_nt->enabled = true;

	return 0;
}
/* The panel's single supported mode: 1920x1200 WUXGA. */
static const struct drm_display_mode default_mode = {
	.clock = 164402,
	.hdisplay = 1920,
	.hsync_start = 1920 + 152,
	.hsync_end = 1920 + 152 + 52,
	.htotal = 1920 + 152 + 52 + 20,
	.vdisplay = 1200,
	.vsync_start = 1200 + 24,
	.vsync_end = 1200 + 24 + 6,
	.vtotal = 1200 + 24 + 6 + 48,
};
/* drm_panel .get_modes hook: expose the single fixed WUXGA mode. */
static int wuxga_nt_panel_get_modes(struct drm_panel *panel,
				    struct drm_connector *connector)
{
	struct drm_display_mode *mode;

	mode = drm_mode_duplicate(connector->dev, &default_mode);
	if (!mode) {
		dev_err(panel->dev, "failed to add mode %ux%u@%u\n",
			default_mode.hdisplay, default_mode.vdisplay,
			drm_mode_vrefresh(&default_mode));
		return -ENOMEM;
	}

	drm_mode_set_name(mode);

	drm_mode_probed_add(connector, mode);

	connector->display_info.width_mm = 217;
	connector->display_info.height_mm = 136;

	return 1;
}
/* drm_panel operations and OF match table. */
static const struct drm_panel_funcs wuxga_nt_panel_funcs = {
	.disable = wuxga_nt_panel_disable,
	.unprepare = wuxga_nt_panel_unprepare,
	.prepare = wuxga_nt_panel_prepare,
	.enable = wuxga_nt_panel_enable,
	.get_modes = wuxga_nt_panel_get_modes,
};

static const struct of_device_id wuxga_nt_of_match[] = {
	{ .compatible = "panasonic,vvx10f034n00", },
	{ }
};
MODULE_DEVICE_TABLE(of, wuxga_nt_of_match);
/*
 * Acquire the power supply, wire up an optional DT backlight and register
 * the drm_panel.
 */
static int wuxga_nt_panel_add(struct wuxga_nt_panel *wuxga_nt)
{
	struct device *dev = &wuxga_nt->dsi->dev;
	int ret;

	wuxga_nt->mode = &default_mode;

	wuxga_nt->supply = devm_regulator_get(dev, "power");
	if (IS_ERR(wuxga_nt->supply))
		return PTR_ERR(wuxga_nt->supply);

	drm_panel_init(&wuxga_nt->base, &wuxga_nt->dsi->dev,
		       &wuxga_nt_panel_funcs, DRM_MODE_CONNECTOR_DSI);

	ret = drm_panel_of_backlight(&wuxga_nt->base);
	if (ret)
		return ret;

	drm_panel_add(&wuxga_nt->base);

	return 0;
}
/* Unregister the panel if wuxga_nt_panel_add() got far enough to add it. */
static void wuxga_nt_panel_del(struct wuxga_nt_panel *wuxga_nt)
{
	if (!wuxga_nt->base.dev)
		return;

	drm_panel_remove(&wuxga_nt->base);
}
/* Probe: configure the DSI link, register the panel, attach to the host. */
static int wuxga_nt_panel_probe(struct mipi_dsi_device *dsi)
{
	struct wuxga_nt_panel *wuxga_nt;
	int ret;

	dsi->lanes = 4;
	dsi->format = MIPI_DSI_FMT_RGB888;
	dsi->mode_flags = MIPI_DSI_MODE_VIDEO |
			  MIPI_DSI_MODE_VIDEO_HSE |
			  MIPI_DSI_CLOCK_NON_CONTINUOUS |
			  MIPI_DSI_MODE_LPM;

	wuxga_nt = devm_kzalloc(&dsi->dev, sizeof(*wuxga_nt), GFP_KERNEL);
	if (!wuxga_nt)
		return -ENOMEM;

	mipi_dsi_set_drvdata(dsi, wuxga_nt);

	wuxga_nt->dsi = dsi;

	ret = wuxga_nt_panel_add(wuxga_nt);
	if (ret < 0)
		return ret;

	ret = mipi_dsi_attach(dsi);
	if (ret < 0) {
		/* roll back the panel registration on attach failure */
		wuxga_nt_panel_del(wuxga_nt);
		return ret;
	}

	return 0;
}
/* Unbind: disable the panel, detach from the host and unregister. */
static void wuxga_nt_panel_remove(struct mipi_dsi_device *dsi)
{
	struct wuxga_nt_panel *wuxga_nt = mipi_dsi_get_drvdata(dsi);
	int ret;

	ret = drm_panel_disable(&wuxga_nt->base);
	if (ret < 0)
		dev_err(&dsi->dev, "failed to disable panel: %d\n", ret);

	ret = mipi_dsi_detach(dsi);
	if (ret < 0)
		dev_err(&dsi->dev, "failed to detach from DSI host: %d\n", ret);

	wuxga_nt_panel_del(wuxga_nt);
}
/* System shutdown hook: make sure the panel is switched off. */
static void wuxga_nt_panel_shutdown(struct mipi_dsi_device *dsi)
{
	struct wuxga_nt_panel *p = mipi_dsi_get_drvdata(dsi);

	drm_panel_disable(&p->base);
}
/* Driver registration and module metadata. */
static struct mipi_dsi_driver wuxga_nt_panel_driver = {
	.driver = {
		.name = "panel-panasonic-vvx10f034n00",
		.of_match_table = wuxga_nt_of_match,
	},
	.probe = wuxga_nt_panel_probe,
	.remove = wuxga_nt_panel_remove,
	.shutdown = wuxga_nt_panel_shutdown,
};
module_mipi_dsi_driver(wuxga_nt_panel_driver);

MODULE_AUTHOR("Werner Johansson <[email protected]>");
MODULE_DESCRIPTION("Panasonic VVX10F034N00 Novatek NT1397-based WUXGA (1920x1200) video mode panel driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c |
// SPDX-License-Identifier: GPL-2.0-only
// Copyright (c) 2023, Linaro Limited
#include <linux/backlight.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/regulator/consumer.h>
#include <linux/module.h>
#include <linux/of.h>
#include <drm/display/drm_dsc.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_modes.h>
#include <drm/drm_panel.h>
#include <video/mipi_display.h>
/* Per-device driver state for the Visionox VTDR6130 DSI panel. */
struct visionox_vtdr6130 {
	struct drm_panel panel;
	struct mipi_dsi_device *dsi;
	struct gpio_desc *reset_gpio;
	struct regulator_bulk_data supplies[3];
	bool prepared;	/* guards against double prepare */
};
static inline struct visionox_vtdr6130 *to_visionox_vtdr6130(struct drm_panel *panel)
{
return container_of(panel, struct visionox_vtdr6130, panel);
}
/*
 * Pulse the reset line: deassert, assert, deassert, with ~10ms settle
 * time between each transition.  The ordering and delays are part of the
 * panel power-up sequence; do not reorder.
 */
static void visionox_vtdr6130_reset(struct visionox_vtdr6130 *ctx)
{
	gpiod_set_value_cansleep(ctx->reset_gpio, 0);
	usleep_range(10000, 11000);
	gpiod_set_value_cansleep(ctx->reset_gpio, 1);
	usleep_range(10000, 11000);
	gpiod_set_value_cansleep(ctx->reset_gpio, 0);
	usleep_range(10000, 11000);
}
/*
 * Power-on initialization: send the vendor register sequence, exit sleep
 * mode and switch the display on.
 *
 * The raw write_seq payloads below are an opaque vendor-provided init
 * table (no public register documentation); they must be sent verbatim
 * and in this exact order.  Returns 0 on success or a negative errno.
 */
static int visionox_vtdr6130_on(struct visionox_vtdr6130 *ctx)
{
	struct mipi_dsi_device *dsi = ctx->dsi;
	struct device *dev = &dsi->dev;
	int ret;

	/* Init commands are sent in low-power mode. */
	dsi->mode_flags |= MIPI_DSI_MODE_LPM;

	ret = mipi_dsi_dcs_set_tear_on(dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
	if (ret)
		return ret;

	mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x20);
	mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_DISPLAY_BRIGHTNESS, 0x00, 0x00);
	mipi_dsi_dcs_write_seq(dsi, 0x59, 0x09);
	mipi_dsi_dcs_write_seq(dsi, 0x6c, 0x01);
	mipi_dsi_dcs_write_seq(dsi, 0x6d, 0x00);
	mipi_dsi_dcs_write_seq(dsi, 0x6f, 0x01);
	mipi_dsi_dcs_write_seq(dsi, 0x70,
			       0x12, 0x00, 0x00, 0xab, 0x30, 0x80, 0x09, 0x60, 0x04,
			       0x38, 0x00, 0x28, 0x02, 0x1c, 0x02, 0x1c, 0x02, 0x00,
			       0x02, 0x0e, 0x00, 0x20, 0x03, 0xdd, 0x00, 0x07, 0x00,
			       0x0c, 0x02, 0x77, 0x02, 0x8b, 0x18, 0x00, 0x10, 0xf0,
			       0x07, 0x10, 0x20, 0x00, 0x06, 0x0f, 0x0f, 0x33, 0x0e,
			       0x1c, 0x2a, 0x38, 0x46, 0x54, 0x62, 0x69, 0x70, 0x77,
			       0x79, 0x7b, 0x7d, 0x7e, 0x02, 0x02, 0x22, 0x00, 0x2a,
			       0x40, 0x2a, 0xbe, 0x3a, 0xfc, 0x3a, 0xfa, 0x3a, 0xf8,
			       0x3b, 0x38, 0x3b, 0x78, 0x3b, 0xb6, 0x4b, 0xb6, 0x4b,
			       0xf4, 0x4b, 0xf4, 0x6c, 0x34, 0x84, 0x74, 0x00, 0x00,
			       0x00, 0x00, 0x00, 0x00);
	mipi_dsi_dcs_write_seq(dsi, 0xf0, 0xaa, 0x10);
	mipi_dsi_dcs_write_seq(dsi, 0xb1,
			       0x01, 0x38, 0x00, 0x14, 0x00, 0x1c, 0x00, 0x01, 0x66,
			       0x00, 0x14, 0x00, 0x14, 0x00, 0x01, 0x66, 0x00, 0x14,
			       0x05, 0xcc, 0x00);
	mipi_dsi_dcs_write_seq(dsi, 0xf0, 0xaa, 0x13);
	mipi_dsi_dcs_write_seq(dsi, 0xce,
			       0x09, 0x11, 0x09, 0x11, 0x08, 0xc1, 0x07, 0xfa, 0x05,
			       0xa4, 0x00, 0x3c, 0x00, 0x34, 0x00, 0x24, 0x00, 0x0c,
			       0x00, 0x0c, 0x04, 0x00, 0x35);
	mipi_dsi_dcs_write_seq(dsi, 0xf0, 0xaa, 0x14);
	mipi_dsi_dcs_write_seq(dsi, 0xb2, 0x03, 0x33);
	mipi_dsi_dcs_write_seq(dsi, 0xb4,
			       0x00, 0x33, 0x00, 0x00, 0x00, 0x3e, 0x00, 0x00, 0x00,
			       0x3e, 0x00, 0x00);
	mipi_dsi_dcs_write_seq(dsi, 0xb5,
			       0x00, 0x09, 0x09, 0x09, 0x09, 0x09, 0x09, 0x06, 0x01);
	mipi_dsi_dcs_write_seq(dsi, 0xb9, 0x00, 0x00, 0x08, 0x09, 0x09, 0x09);
	mipi_dsi_dcs_write_seq(dsi, 0xbc,
			       0x10, 0x00, 0x00, 0x06, 0x11, 0x09, 0x3b, 0x09, 0x47,
			       0x09, 0x47, 0x00);
	mipi_dsi_dcs_write_seq(dsi, 0xbe,
			       0x10, 0x10, 0x00, 0x08, 0x22, 0x09, 0x19, 0x09, 0x25,
			       0x09, 0x25, 0x00);
	mipi_dsi_dcs_write_seq(dsi, 0xff, 0x5a, 0x80);
	mipi_dsi_dcs_write_seq(dsi, 0x65, 0x14);
	mipi_dsi_dcs_write_seq(dsi, 0xfa, 0x08, 0x08, 0x08);
	mipi_dsi_dcs_write_seq(dsi, 0xff, 0x5a, 0x81);
	mipi_dsi_dcs_write_seq(dsi, 0x65, 0x05);
	mipi_dsi_dcs_write_seq(dsi, 0xf3, 0x0f);
	mipi_dsi_dcs_write_seq(dsi, 0xf0, 0xaa, 0x00);
	mipi_dsi_dcs_write_seq(dsi, 0xff, 0x5a, 0x82);
	mipi_dsi_dcs_write_seq(dsi, 0xf9, 0x00);
	/* NOTE(review): 0x51 below breaks the 0x5a pattern of the other
	 * 0xff page selects — presumably intentional vendor data, but
	 * worth confirming against the vendor init table. */
	mipi_dsi_dcs_write_seq(dsi, 0xff, 0x51, 0x83);
	mipi_dsi_dcs_write_seq(dsi, 0x65, 0x04);
	mipi_dsi_dcs_write_seq(dsi, 0xf8, 0x00);
	mipi_dsi_dcs_write_seq(dsi, 0xff, 0x5a, 0x00);
	mipi_dsi_dcs_write_seq(dsi, 0x65, 0x01);
	mipi_dsi_dcs_write_seq(dsi, 0xf4, 0x9a);
	mipi_dsi_dcs_write_seq(dsi, 0xff, 0x5a, 0x00);

	ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
	if (ret < 0) {
		dev_err(dev, "Failed to exit sleep mode: %d\n", ret);
		return ret;
	}
	/* DCS-mandated wait after exit_sleep_mode */
	msleep(120);

	ret = mipi_dsi_dcs_set_display_on(dsi);
	if (ret < 0) {
		dev_err(dev, "Failed to set display on: %d\n", ret);
		return ret;
	}
	msleep(20);

	return 0;
}
/*
 * Power-off sequence: display off, then enter sleep mode, with the
 * DCS-mandated delays.  Returns 0 on success or a negative errno.
 */
static int visionox_vtdr6130_off(struct visionox_vtdr6130 *ctx)
{
	struct mipi_dsi_device *dsi = ctx->dsi;
	struct device *dev = &dsi->dev;
	int ret;

	dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;

	ret = mipi_dsi_dcs_set_display_off(dsi);
	if (ret < 0) {
		dev_err(dev, "Failed to set display off: %d\n", ret);
		return ret;
	}
	msleep(20);

	ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
	if (ret < 0) {
		dev_err(dev, "Failed to enter sleep mode: %d\n", ret);
		return ret;
	}
	/* DCS requires 120ms before the panel may be reset/powered down */
	msleep(120);

	return 0;
}
/*
 * drm_panel .prepare: enable supplies, pulse reset, then run the vendor
 * init sequence.  On init failure the panel is put back in reset and the
 * supplies are dropped.  Idempotent via ctx->prepared.
 */
static int visionox_vtdr6130_prepare(struct drm_panel *panel)
{
	struct visionox_vtdr6130 *ctx = to_visionox_vtdr6130(panel);
	struct device *dev = &ctx->dsi->dev;
	int ret;

	if (ctx->prepared)
		return 0;

	ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies),
				    ctx->supplies);
	if (ret < 0)
		return ret;

	visionox_vtdr6130_reset(ctx);

	ret = visionox_vtdr6130_on(ctx);
	if (ret < 0) {
		dev_err(dev, "Failed to initialize panel: %d\n", ret);
		/* leave the panel held in reset before cutting power */
		gpiod_set_value_cansleep(ctx->reset_gpio, 1);
		regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
		return ret;
	}

	ctx->prepared = true;
	return 0;
}
/*
 * drm_panel .unprepare: run the power-off sequence (best effort), assert
 * reset and disable the supplies.  Idempotent via ctx->prepared.
 */
static int visionox_vtdr6130_unprepare(struct drm_panel *panel)
{
	struct visionox_vtdr6130 *ctx = to_visionox_vtdr6130(panel);
	struct device *dev = &ctx->dsi->dev;
	int ret;

	if (!ctx->prepared)
		return 0;

	ret = visionox_vtdr6130_off(ctx);
	if (ret < 0)
		dev_err(dev, "Failed to un-initialize panel: %d\n", ret);

	gpiod_set_value_cansleep(ctx->reset_gpio, 1);

	regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);

	ctx->prepared = false;
	return 0;
}
/* Fixed native mode: 1080x2400 @ 144 Hz; clock derived from the totals. */
static const struct drm_display_mode visionox_vtdr6130_mode = {
	.clock = (1080 + 20 + 2 + 20) * (2400 + 20 + 2 + 18) * 144 / 1000,
	.hdisplay = 1080,
	.hsync_start = 1080 + 20,
	.hsync_end = 1080 + 20 + 2,
	.htotal = 1080 + 20 + 2 + 20,
	.vdisplay = 2400,
	.vsync_start = 2400 + 20,
	.vsync_end = 2400 + 20 + 2,
	.vtotal = 2400 + 20 + 2 + 18,
	.width_mm = 71,
	.height_mm = 157,
};
/*
 * drm_panel .get_modes: expose the single fixed native mode and the
 * physical panel dimensions.  Returns the number of modes added.
 */
static int visionox_vtdr6130_get_modes(struct drm_panel *panel,
				       struct drm_connector *connector)
{
	struct drm_display_mode *dup;

	dup = drm_mode_duplicate(connector->dev, &visionox_vtdr6130_mode);
	if (!dup)
		return -ENOMEM;

	drm_mode_set_name(dup);
	dup->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;

	connector->display_info.width_mm = dup->width_mm;
	connector->display_info.height_mm = dup->height_mm;

	drm_mode_probed_add(connector, dup);

	return 1;
}
/* drm_panel operations table; enable/disable hooks are not needed. */
static const struct drm_panel_funcs visionox_vtdr6130_panel_funcs = {
	.prepare = visionox_vtdr6130_prepare,
	.unprepare = visionox_vtdr6130_unprepare,
	.get_modes = visionox_vtdr6130_get_modes,
};
static int visionox_vtdr6130_bl_update_status(struct backlight_device *bl)
{
struct mipi_dsi_device *dsi = bl_get_data(bl);
u16 brightness = backlight_get_brightness(bl);
return mipi_dsi_dcs_set_display_brightness_large(dsi, brightness);
}
/* Backlight operations; brightness is pushed over DSI, nothing to read back. */
static const struct backlight_ops visionox_vtdr6130_bl_ops = {
	.update_status = visionox_vtdr6130_bl_update_status,
};

/*
 * Register a device-managed backlight for this panel.  Brightness range
 * is 0..4095 (12-bit), starting at full brightness.
 */
static struct backlight_device *
visionox_vtdr6130_create_backlight(struct mipi_dsi_device *dsi)
{
	struct device *dev = &dsi->dev;
	const struct backlight_properties props = {
		.type = BACKLIGHT_RAW,
		.brightness = 4095,
		.max_brightness = 4095,
	};

	return devm_backlight_device_register(dev, dev_name(dev), dev, dsi,
					      &visionox_vtdr6130_bl_ops, &props);
}
/*
 * MIPI-DSI probe: acquire supplies and the reset GPIO, configure the DSI
 * link (4 lanes, RGB888, video mode), register the backlight and the
 * drm_panel, then attach to the DSI host.
 */
static int visionox_vtdr6130_probe(struct mipi_dsi_device *dsi)
{
	struct device *dev = &dsi->dev;
	struct visionox_vtdr6130 *ctx;
	int ret;

	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->supplies[0].supply = "vddio";
	ctx->supplies[1].supply = "vci";
	ctx->supplies[2].supply = "vdd";

	ret = devm_regulator_bulk_get(&dsi->dev, ARRAY_SIZE(ctx->supplies),
				      ctx->supplies);
	if (ret < 0)
		return ret;

	/* keep the panel in reset until prepare() */
	ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
	if (IS_ERR(ctx->reset_gpio))
		return dev_err_probe(dev, PTR_ERR(ctx->reset_gpio),
				     "Failed to get reset-gpios\n");

	ctx->dsi = dsi;
	mipi_dsi_set_drvdata(dsi, ctx);

	dsi->lanes = 4;
	dsi->format = MIPI_DSI_FMT_RGB888;
	dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_NO_EOT_PACKET |
			  MIPI_DSI_CLOCK_NON_CONTINUOUS;

	drm_panel_init(&ctx->panel, dev, &visionox_vtdr6130_panel_funcs,
		       DRM_MODE_CONNECTOR_DSI);

	ctx->panel.backlight = visionox_vtdr6130_create_backlight(dsi);
	if (IS_ERR(ctx->panel.backlight))
		return dev_err_probe(dev, PTR_ERR(ctx->panel.backlight),
				     "Failed to create backlight\n");

	drm_panel_add(&ctx->panel);

	ret = mipi_dsi_attach(dsi);
	if (ret < 0) {
		dev_err(dev, "Failed to attach to DSI host: %d\n", ret);
		/* undo drm_panel_add() on failure */
		drm_panel_remove(&ctx->panel);
		return ret;
	}

	return 0;
}
/* Driver-unbind hook: detach from the DSI host and unregister the panel. */
static void visionox_vtdr6130_remove(struct mipi_dsi_device *dsi)
{
	struct visionox_vtdr6130 *ctx = mipi_dsi_get_drvdata(dsi);
	int err;

	err = mipi_dsi_detach(dsi);
	if (err < 0)
		dev_err(&dsi->dev, "Failed to detach from DSI host: %d\n", err);

	drm_panel_remove(&ctx->panel);
}
/* Device-tree match table. */
static const struct of_device_id visionox_vtdr6130_of_match[] = {
	{ .compatible = "visionox,vtdr6130" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, visionox_vtdr6130_of_match);

/* Hook the probe/remove callbacks into the MIPI-DSI core. */
static struct mipi_dsi_driver visionox_vtdr6130_driver = {
	.probe = visionox_vtdr6130_probe,
	.remove = visionox_vtdr6130_remove,
	.driver = {
		.name = "panel-visionox-vtdr6130",
		.of_match_table = visionox_vtdr6130_of_match,
	},
};
module_mipi_dsi_driver(visionox_vtdr6130_driver);

MODULE_AUTHOR("Neil Armstrong <[email protected]>");
MODULE_DESCRIPTION("Panel driver for the Visionox VTDR6130 AMOLED DSI panel");
MODULE_LICENSE("GPL");
/* ---- linux-master: drivers/gpu/drm/panel/panel-visionox-vtdr6130.c ---- */
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (c) 2017, Fuzhou Rockchip Electronics Co., Ltd
*/
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/regulator/consumer.h>
#include <video/mipi_display.h>
#include <drm/drm_crtc.h>
#include <drm/drm_device.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_modes.h>
#include <drm/drm_panel.h>
/* Per-panel driver state for the Kingdisplay KD097D04. */
struct kingdisplay_panel {
	struct drm_panel base;		/* embedded drm_panel */
	struct mipi_dsi_device *link;	/* DSI peripheral */
	struct regulator *supply;	/* "power" supply */
	struct gpio_desc *enable_gpio;	/* optional enable line */
	bool prepared;			/* guards prepare/unprepare re-entry */
	bool enabled;			/* guards enable/disable re-entry */
};

/* One generic-write payload: register address byte + value byte. */
struct kingdisplay_panel_cmd {
	char cmd;
	char data;
};
/*
 * According to the discussion on
 * https://review.coreboot.org/#/c/coreboot/+/22472/
 * the panel init array is not part of the panels datasheet but instead
 * just came in this form from the panel vendor.
 *
 * Sent verbatim, entry by entry, from kingdisplay_panel_prepare(); the
 * order is significant (0xB0 selects the register page for the writes
 * that follow it).
 */
static const struct kingdisplay_panel_cmd init_code[] = {
	/* voltage setting */
	{ 0xB0, 0x00 },
	{ 0xB2, 0x02 },
	{ 0xB3, 0x11 },
	{ 0xB4, 0x00 },
	{ 0xB6, 0x80 },
	/* VCOM disable */
	{ 0xB7, 0x02 },
	{ 0xB8, 0x80 },
	{ 0xBA, 0x43 },
	/* VCOM setting */
	{ 0xBB, 0x53 },
	/* VSP setting */
	{ 0xBC, 0x0A },
	/* VSN setting */
	{ 0xBD, 0x4A },
	/* VGH setting */
	{ 0xBE, 0x2F },
	/* VGL setting */
	{ 0xBF, 0x1A },
	{ 0xF0, 0x39 },
	{ 0xF1, 0x22 },
	/* Gamma setting */
	{ 0xB0, 0x02 },
	{ 0xC0, 0x00 },
	{ 0xC1, 0x01 },
	{ 0xC2, 0x0B },
	{ 0xC3, 0x15 },
	{ 0xC4, 0x22 },
	{ 0xC5, 0x11 },
	{ 0xC6, 0x15 },
	{ 0xC7, 0x19 },
	{ 0xC8, 0x1A },
	{ 0xC9, 0x16 },
	{ 0xCA, 0x18 },
	{ 0xCB, 0x13 },
	{ 0xCC, 0x18 },
	{ 0xCD, 0x13 },
	{ 0xCE, 0x1C },
	{ 0xCF, 0x19 },
	{ 0xD0, 0x21 },
	{ 0xD1, 0x2C },
	{ 0xD2, 0x2F },
	{ 0xD3, 0x30 },
	{ 0xD4, 0x19 },
	{ 0xD5, 0x1F },
	{ 0xD6, 0x00 },
	{ 0xD7, 0x01 },
	{ 0xD8, 0x0B },
	{ 0xD9, 0x15 },
	{ 0xDA, 0x22 },
	{ 0xDB, 0x11 },
	{ 0xDC, 0x15 },
	{ 0xDD, 0x19 },
	{ 0xDE, 0x1A },
	{ 0xDF, 0x16 },
	{ 0xE0, 0x18 },
	{ 0xE1, 0x13 },
	{ 0xE2, 0x18 },
	{ 0xE3, 0x13 },
	{ 0xE4, 0x1C },
	{ 0xE5, 0x19 },
	{ 0xE6, 0x21 },
	{ 0xE7, 0x2C },
	{ 0xE8, 0x2F },
	{ 0xE9, 0x30 },
	{ 0xEA, 0x19 },
	{ 0xEB, 0x1F },
	/* GOA MUX setting */
	{ 0xB0, 0x01 },
	{ 0xC0, 0x10 },
	{ 0xC1, 0x0F },
	{ 0xC2, 0x0E },
	{ 0xC3, 0x0D },
	{ 0xC4, 0x0C },
	{ 0xC5, 0x0B },
	{ 0xC6, 0x0A },
	{ 0xC7, 0x09 },
	{ 0xC8, 0x08 },
	{ 0xC9, 0x07 },
	{ 0xCA, 0x06 },
	{ 0xCB, 0x05 },
	{ 0xCC, 0x00 },
	{ 0xCD, 0x01 },
	{ 0xCE, 0x02 },
	{ 0xCF, 0x03 },
	{ 0xD0, 0x04 },
	{ 0xD6, 0x10 },
	{ 0xD7, 0x0F },
	{ 0xD8, 0x0E },
	{ 0xD9, 0x0D },
	{ 0xDA, 0x0C },
	{ 0xDB, 0x0B },
	{ 0xDC, 0x0A },
	{ 0xDD, 0x09 },
	{ 0xDE, 0x08 },
	{ 0xDF, 0x07 },
	{ 0xE0, 0x06 },
	{ 0xE1, 0x05 },
	{ 0xE2, 0x00 },
	{ 0xE3, 0x01 },
	{ 0xE4, 0x02 },
	{ 0xE5, 0x03 },
	{ 0xE6, 0x04 },
	{ 0xE7, 0x00 },
	{ 0xEC, 0xC0 },
	/* GOA timing setting */
	{ 0xB0, 0x03 },
	{ 0xC0, 0x01 },
	{ 0xC2, 0x6F },
	{ 0xC3, 0x6F },
	{ 0xC5, 0x36 },
	{ 0xC8, 0x08 },
	{ 0xC9, 0x04 },
	{ 0xCA, 0x41 },
	{ 0xCC, 0x43 },
	{ 0xCF, 0x60 },
	{ 0xD2, 0x04 },
	{ 0xD3, 0x04 },
	{ 0xD4, 0x03 },
	{ 0xD5, 0x02 },
	{ 0xD6, 0x01 },
	{ 0xD7, 0x00 },
	{ 0xDB, 0x01 },
	{ 0xDE, 0x36 },
	{ 0xE6, 0x6F },
	{ 0xE7, 0x6F },
	/* GOE setting */
	{ 0xB0, 0x06 },
	{ 0xB8, 0xA5 },
	{ 0xC0, 0xA5 },
	{ 0xD5, 0x3F },
};
/* Recover our driver state from the embedded drm_panel. */
static inline
struct kingdisplay_panel *to_kingdisplay_panel(struct drm_panel *panel)
{
	struct kingdisplay_panel *kd;

	kd = container_of(panel, struct kingdisplay_panel, base);
	return kd;
}
/*
 * drm_panel .disable: send display-off (best effort) and clear the
 * enabled flag.  Always returns 0.
 */
static int kingdisplay_panel_disable(struct drm_panel *panel)
{
	struct kingdisplay_panel *kd = to_kingdisplay_panel(panel);
	int err;

	if (!kd->enabled)
		return 0;

	err = mipi_dsi_dcs_set_display_off(kd->link);
	if (err < 0)
		dev_err(panel->dev, "failed to set display off: %d\n", err);

	kd->enabled = false;

	return 0;
}
/*
 * drm_panel .unprepare: enter sleep mode, wait the datasheet T15 delay,
 * drop the enable line and the supply.  Idempotent via ->prepared.
 */
static int kingdisplay_panel_unprepare(struct drm_panel *panel)
{
	struct kingdisplay_panel *kingdisplay = to_kingdisplay_panel(panel);
	int err;

	if (!kingdisplay->prepared)
		return 0;

	err = mipi_dsi_dcs_enter_sleep_mode(kingdisplay->link);
	if (err < 0) {
		dev_err(panel->dev, "failed to enter sleep mode: %d\n", err);
		return err;
	}

	/* T15: 120ms */
	msleep(120);

	gpiod_set_value_cansleep(kingdisplay->enable_gpio, 0);

	err = regulator_disable(kingdisplay->supply);
	if (err < 0)
		return err;

	kingdisplay->prepared = false;

	return 0;
}
/*
 * drm_panel .prepare: power up, raise the enable line, push the vendor
 * init table, exit sleep mode and turn the display on, honouring the
 * datasheet T2/T4/T6/T7 delays.  On any failure the enable line and the
 * supply are dropped again.  Idempotent via ->prepared.
 */
static int kingdisplay_panel_prepare(struct drm_panel *panel)
{
	struct kingdisplay_panel *kingdisplay = to_kingdisplay_panel(panel);
	int err, regulator_err;
	unsigned int i;

	if (kingdisplay->prepared)
		return 0;

	gpiod_set_value_cansleep(kingdisplay->enable_gpio, 0);

	err = regulator_enable(kingdisplay->supply);
	if (err < 0)
		return err;

	/* T2: 15ms */
	usleep_range(15000, 16000);

	gpiod_set_value_cansleep(kingdisplay->enable_gpio, 1);

	/* T4: 15ms */
	usleep_range(15000, 16000);

	/* send the opaque vendor init table, one entry per write */
	for (i = 0; i < ARRAY_SIZE(init_code); i++) {
		err = mipi_dsi_generic_write(kingdisplay->link, &init_code[i],
					     sizeof(struct kingdisplay_panel_cmd));
		if (err < 0) {
			dev_err(panel->dev, "failed write init cmds: %d\n", err);
			goto poweroff;
		}
	}

	err = mipi_dsi_dcs_exit_sleep_mode(kingdisplay->link);
	if (err < 0) {
		dev_err(panel->dev, "failed to exit sleep mode: %d\n", err);
		goto poweroff;
	}

	/* T6: 120ms */
	msleep(120);

	err = mipi_dsi_dcs_set_display_on(kingdisplay->link);
	if (err < 0) {
		dev_err(panel->dev, "failed to set display on: %d\n", err);
		goto poweroff;
	}

	/* T7: 10ms */
	usleep_range(10000, 11000);

	kingdisplay->prepared = true;

	return 0;

poweroff:
	gpiod_set_value_cansleep(kingdisplay->enable_gpio, 0);

	regulator_err = regulator_disable(kingdisplay->supply);
	if (regulator_err)
		dev_err(panel->dev, "failed to disable regulator: %d\n", regulator_err);

	return err;
}
/*
 * drm_panel .enable: nothing to send here; just record the state so
 * disable() knows whether to act.  Always returns 0.
 */
static int kingdisplay_panel_enable(struct drm_panel *panel)
{
	struct kingdisplay_panel *kd = to_kingdisplay_panel(panel);

	if (!kd->enabled)
		kd->enabled = true;

	return 0;
}
/* Fixed native mode: 1536x2048, 229 MHz pixel clock (~60 Hz). */
static const struct drm_display_mode default_mode = {
	.clock = 229000,
	.hdisplay = 1536,
	.hsync_start = 1536 + 100,
	.hsync_end = 1536 + 100 + 24,
	.htotal = 1536 + 100 + 24 + 100,
	.vdisplay = 2048,
	.vsync_start = 2048 + 95,
	.vsync_end = 2048 + 95 + 2,
	.vtotal = 2048 + 95 + 2 + 23,
};
/*
 * drm_panel .get_modes: expose the single fixed mode plus the physical
 * size and bit depth.  Returns the number of modes added, or -ENOMEM.
 */
static int kingdisplay_panel_get_modes(struct drm_panel *panel,
				       struct drm_connector *connector)
{
	struct drm_display_mode *mode;

	mode = drm_mode_duplicate(connector->dev, &default_mode);
	if (!mode) {
		dev_err(panel->dev, "failed to add mode %ux%u@%u\n",
			default_mode.hdisplay, default_mode.vdisplay,
			drm_mode_vrefresh(&default_mode));
		return -ENOMEM;
	}

	drm_mode_set_name(mode);

	drm_mode_probed_add(connector, mode);

	connector->display_info.width_mm = 147;
	connector->display_info.height_mm = 196;
	connector->display_info.bpc = 8;

	return 1;
}
/* drm_panel operations table. */
static const struct drm_panel_funcs kingdisplay_panel_funcs = {
	.disable = kingdisplay_panel_disable,
	.unprepare = kingdisplay_panel_unprepare,
	.prepare = kingdisplay_panel_prepare,
	.enable = kingdisplay_panel_enable,
	.get_modes = kingdisplay_panel_get_modes,
};

/* Device-tree match table. */
static const struct of_device_id kingdisplay_of_match[] = {
	{ .compatible = "kingdisplay,kd097d04", },
	{ }
};
MODULE_DEVICE_TABLE(of, kingdisplay_of_match);
/*
 * Acquire the supply and (optional) enable GPIO, wire up the backlight
 * and register the drm_panel.  The enable GPIO is treated as optional:
 * on error it is logged at debug level and simply not used.
 */
static int kingdisplay_panel_add(struct kingdisplay_panel *kingdisplay)
{
	struct device *dev = &kingdisplay->link->dev;
	int err;

	kingdisplay->supply = devm_regulator_get(dev, "power");
	if (IS_ERR(kingdisplay->supply))
		return PTR_ERR(kingdisplay->supply);

	kingdisplay->enable_gpio = devm_gpiod_get_optional(dev, "enable",
							   GPIOD_OUT_HIGH);
	if (IS_ERR(kingdisplay->enable_gpio)) {
		err = PTR_ERR(kingdisplay->enable_gpio);
		dev_dbg(dev, "failed to get enable gpio: %d\n", err);
		kingdisplay->enable_gpio = NULL;
	}

	drm_panel_init(&kingdisplay->base, &kingdisplay->link->dev,
		       &kingdisplay_panel_funcs, DRM_MODE_CONNECTOR_DSI);

	err = drm_panel_of_backlight(&kingdisplay->base);
	if (err)
		return err;

	drm_panel_add(&kingdisplay->base);

	return 0;
}
/* Counterpart of kingdisplay_panel_add(): drop the drm_panel registration. */
static void kingdisplay_panel_del(struct kingdisplay_panel *kingdisplay)
{
	drm_panel_remove(&kingdisplay->base);
}
/*
 * MIPI-DSI probe: configure the link (4 lanes, RGB888, burst video mode),
 * register the panel, then attach to the DSI host; the panel
 * registration is rolled back if the attach fails.
 */
static int kingdisplay_panel_probe(struct mipi_dsi_device *dsi)
{
	struct kingdisplay_panel *kingdisplay;
	int err;

	dsi->lanes = 4;
	dsi->format = MIPI_DSI_FMT_RGB888;
	dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
			  MIPI_DSI_MODE_LPM;

	kingdisplay = devm_kzalloc(&dsi->dev, sizeof(*kingdisplay), GFP_KERNEL);
	if (!kingdisplay)
		return -ENOMEM;

	mipi_dsi_set_drvdata(dsi, kingdisplay);
	kingdisplay->link = dsi;

	err = kingdisplay_panel_add(kingdisplay);
	if (err < 0)
		return err;

	err = mipi_dsi_attach(dsi);
	if (err < 0) {
		kingdisplay_panel_del(kingdisplay);
		return err;
	}

	return 0;
}
/*
 * Driver-unbind hook: best-effort unprepare/disable, detach from the
 * DSI host, then drop the panel registration.  Failures are logged only.
 */
static void kingdisplay_panel_remove(struct mipi_dsi_device *dsi)
{
	struct kingdisplay_panel *kingdisplay = mipi_dsi_get_drvdata(dsi);
	int err;

	err = drm_panel_unprepare(&kingdisplay->base);
	if (err < 0)
		dev_err(&dsi->dev, "failed to unprepare panel: %d\n", err);

	err = drm_panel_disable(&kingdisplay->base);
	if (err < 0)
		dev_err(&dsi->dev, "failed to disable panel: %d\n", err);

	err = mipi_dsi_detach(dsi);
	if (err < 0)
		dev_err(&dsi->dev, "failed to detach from DSI host: %d\n", err);

	kingdisplay_panel_del(kingdisplay);
}
/* System shutdown hook: power the panel down; errors are ignored. */
static void kingdisplay_panel_shutdown(struct mipi_dsi_device *dsi)
{
	struct kingdisplay_panel *kd = mipi_dsi_get_drvdata(dsi);

	drm_panel_unprepare(&kd->base);
	drm_panel_disable(&kd->base);
}
/* Hook the probe/remove/shutdown callbacks into the MIPI-DSI core. */
static struct mipi_dsi_driver kingdisplay_panel_driver = {
	.driver = {
		.name = "panel-kingdisplay-kd097d04",
		.of_match_table = kingdisplay_of_match,
	},
	.probe = kingdisplay_panel_probe,
	.remove = kingdisplay_panel_remove,
	.shutdown = kingdisplay_panel_shutdown,
};
module_mipi_dsi_driver(kingdisplay_panel_driver);

MODULE_AUTHOR("Chris Zhong <[email protected]>");
MODULE_AUTHOR("Nickey Yang <[email protected]>");
MODULE_DESCRIPTION("kingdisplay KD097D04 panel driver");
MODULE_LICENSE("GPL v2");
/* ---- linux-master: drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c ---- */
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2014 NVIDIA Corporation
*/
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/regulator/consumer.h>
#include <video/mipi_display.h>
#include <drm/drm_crtc.h>
#include <drm/drm_device.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_panel.h>
/* Per-panel driver state for the dual-link Sharp LQ101R1SX01. */
struct sharp_panel {
	struct drm_panel base;		/* embedded drm_panel */

	/* the datasheet refers to them as DSI-LINK1 and DSI-LINK2 */
	struct mipi_dsi_device *link1;	/* primary link; also carries commands */
	struct mipi_dsi_device *link2;	/* secondary link (right half) */

	struct regulator *supply;	/* "power" supply */

	bool prepared;			/* guards prepare/unprepare re-entry */
	bool enabled;			/* guards enable/disable re-entry */

	const struct drm_display_mode *mode;	/* fixed native mode */
};

/* Recover our driver state from the embedded drm_panel. */
static inline struct sharp_panel *to_sharp_panel(struct drm_panel *panel)
{
	return container_of(panel, struct sharp_panel, base);
}
/*
 * Sleep for roughly the duration of the given number of display frames
 * at the panel's fixed refresh rate.
 *
 * NOTE(review): the duration uses integer division twice
 * (1000 / (refresh / frames)), so the result is only approximate; also
 * frames == 0 would divide by zero — current callers pass 4 and 6.
 */
static void sharp_wait_frames(struct sharp_panel *sharp, unsigned int frames)
{
	unsigned int refresh = drm_mode_vrefresh(sharp->mode);

	if (WARN_ON(frames > refresh))
		return;

	msleep(1000 / (refresh / frames));
}
/*
 * Write one byte to a 16-bit panel register over DSI-LINK1, using a
 * generic write of { addr_hi, addr_lo, value } followed by a DCS NOP and
 * a short settle delay.  Returns 0 on success or a negative errno.
 */
static int sharp_panel_write(struct sharp_panel *sharp, u16 offset, u8 value)
{
	u8 payload[3] = { offset >> 8, offset & 0xff, value };
	struct mipi_dsi_device *dsi = sharp->link1;
	ssize_t err;

	err = mipi_dsi_generic_write(dsi, payload, sizeof(payload));
	if (err < 0) {
		dev_err(&dsi->dev, "failed to write %02x to %04x: %zd\n",
			value, offset, err);
		return err;
	}

	err = mipi_dsi_dcs_nop(dsi);
	if (err < 0) {
		dev_err(&dsi->dev, "failed to send DCS nop: %zd\n", err);
		return err;
	}

	usleep_range(10, 20);

	return 0;
}
/*
 * Read one byte from a 16-bit panel register over DSI-LINK1.  The offset
 * is byte-swapped in place to big-endian before being sent as the read
 * request payload.  Currently unused, kept for debugging.
 */
static __maybe_unused int sharp_panel_read(struct sharp_panel *sharp,
					   u16 offset, u8 *value)
{
	ssize_t err;

	cpu_to_be16s(&offset);

	err = mipi_dsi_generic_read(sharp->link1, &offset, sizeof(offset),
				    value, sizeof(*value));
	if (err < 0)
		dev_err(&sharp->link1->dev, "failed to read from %04x: %zd\n",
			offset, err);

	return err;
}
/*
 * drm_panel .disable: nothing to send to the hardware here; just clear
 * the enabled flag.  Always returns 0.
 */
static int sharp_panel_disable(struct drm_panel *panel)
{
	struct sharp_panel *ctx = to_sharp_panel(panel);

	ctx->enabled = false;

	return 0;
}
/*
 * drm_panel .unprepare: let a few frames scan out, send display-off and
 * enter-sleep (best effort), wait the DCS-mandated 120ms, then cut the
 * supply.  Idempotent via ->prepared.
 */
static int sharp_panel_unprepare(struct drm_panel *panel)
{
	struct sharp_panel *sharp = to_sharp_panel(panel);
	int err;

	if (!sharp->prepared)
		return 0;

	sharp_wait_frames(sharp, 4);

	err = mipi_dsi_dcs_set_display_off(sharp->link1);
	if (err < 0)
		dev_err(panel->dev, "failed to set display off: %d\n", err);

	err = mipi_dsi_dcs_enter_sleep_mode(sharp->link1);
	if (err < 0)
		dev_err(panel->dev, "failed to enter sleep mode: %d\n", err);

	msleep(120);

	regulator_disable(sharp->supply);

	sharp->prepared = false;

	return 0;
}
static int sharp_setup_symmetrical_split(struct mipi_dsi_device *left,
struct mipi_dsi_device *right,
const struct drm_display_mode *mode)
{
int err;
err = mipi_dsi_dcs_set_column_address(left, 0, mode->hdisplay / 2 - 1);
if (err < 0) {
dev_err(&left->dev, "failed to set column address: %d\n", err);
return err;
}
err = mipi_dsi_dcs_set_page_address(left, 0, mode->vdisplay - 1);
if (err < 0) {
dev_err(&left->dev, "failed to set page address: %d\n", err);
return err;
}
err = mipi_dsi_dcs_set_column_address(right, mode->hdisplay / 2,
mode->hdisplay - 1);
if (err < 0) {
dev_err(&right->dev, "failed to set column address: %d\n", err);
return err;
}
err = mipi_dsi_dcs_set_page_address(right, 0, mode->vdisplay - 1);
if (err < 0) {
dev_err(&right->dev, "failed to set page address: %d\n", err);
return err;
}
return 0;
}
/*
 * drm_panel .prepare: power up, exit sleep, configure left-right mode
 * and command mode via vendor registers, set the pixel format and the
 * symmetrical split, then turn the display on.  On any failure the
 * supply is disabled again.  Idempotent via ->prepared.
 */
static int sharp_panel_prepare(struct drm_panel *panel)
{
	struct sharp_panel *sharp = to_sharp_panel(panel);
	u8 format = MIPI_DCS_PIXEL_FMT_24BIT;
	int err;

	if (sharp->prepared)
		return 0;

	err = regulator_enable(sharp->supply);
	if (err < 0)
		return err;

	/*
	 * According to the datasheet, the panel needs around 10 ms to fully
	 * power up. At least another 120 ms is required before exiting sleep
	 * mode to make sure the panel is ready. Throw in another 20 ms for
	 * good measure.
	 */
	msleep(150);

	err = mipi_dsi_dcs_exit_sleep_mode(sharp->link1);
	if (err < 0) {
		dev_err(panel->dev, "failed to exit sleep mode: %d\n", err);
		goto poweroff;
	}

	/*
	 * The MIPI DCS specification mandates this delay only between the
	 * exit_sleep_mode and enter_sleep_mode commands, so it isn't strictly
	 * necessary here.
	 */
	/*
	msleep(120);
	*/

	/* set left-right mode */
	err = sharp_panel_write(sharp, 0x1000, 0x2a);
	if (err < 0) {
		dev_err(panel->dev, "failed to set left-right mode: %d\n", err);
		goto poweroff;
	}

	/* enable command mode */
	err = sharp_panel_write(sharp, 0x1001, 0x01);
	if (err < 0) {
		dev_err(panel->dev, "failed to enable command mode: %d\n", err);
		goto poweroff;
	}

	err = mipi_dsi_dcs_set_pixel_format(sharp->link1, format);
	if (err < 0) {
		dev_err(panel->dev, "failed to set pixel format: %d\n", err);
		goto poweroff;
	}

	/*
	 * TODO: The device supports both left-right and even-odd split
	 * configurations, but this driver currently supports only the left-
	 * right split. To support a different mode a mechanism needs to be
	 * put in place to communicate the configuration back to the DSI host
	 * controller.
	 */
	err = sharp_setup_symmetrical_split(sharp->link1, sharp->link2,
					    sharp->mode);
	if (err < 0) {
		dev_err(panel->dev, "failed to set up symmetrical split: %d\n",
			err);
		goto poweroff;
	}

	err = mipi_dsi_dcs_set_display_on(sharp->link1);
	if (err < 0) {
		dev_err(panel->dev, "failed to set display on: %d\n", err);
		goto poweroff;
	}

	sharp->prepared = true;

	/* wait for 6 frames before continuing */
	sharp_wait_frames(sharp, 6);

	return 0;

poweroff:
	regulator_disable(sharp->supply);
	return err;
}
/*
 * drm_panel .enable: no hardware interaction needed; just record the
 * state.  Always returns 0.
 */
static int sharp_panel_enable(struct drm_panel *panel)
{
	struct sharp_panel *ctx = to_sharp_panel(panel);

	if (!ctx->enabled)
		ctx->enabled = true;

	return 0;
}
/* Fixed native mode: 2560x1600, 278 MHz pixel clock (~60 Hz). */
static const struct drm_display_mode default_mode = {
	.clock = 278000,
	.hdisplay = 2560,
	.hsync_start = 2560 + 128,
	.hsync_end = 2560 + 128 + 64,
	.htotal = 2560 + 128 + 64 + 64,
	.vdisplay = 1600,
	.vsync_start = 1600 + 4,
	.vsync_end = 1600 + 4 + 8,
	.vtotal = 1600 + 4 + 8 + 32,
};
/*
 * drm_panel .get_modes: expose the single fixed mode and the physical
 * panel dimensions.  Returns the number of modes added, or -ENOMEM.
 *
 * Fix: the error format string was "%ux%ux@%u" (a stray 'x' producing
 * e.g. "2560x1600x@60"); corrected to "%ux%u@%u" to match the other
 * panel drivers in this file.
 */
static int sharp_panel_get_modes(struct drm_panel *panel,
				 struct drm_connector *connector)
{
	struct drm_display_mode *mode;

	mode = drm_mode_duplicate(connector->dev, &default_mode);
	if (!mode) {
		dev_err(panel->dev, "failed to add mode %ux%u@%u\n",
			default_mode.hdisplay, default_mode.vdisplay,
			drm_mode_vrefresh(&default_mode));
		return -ENOMEM;
	}

	drm_mode_set_name(mode);

	drm_mode_probed_add(connector, mode);

	connector->display_info.width_mm = 217;
	connector->display_info.height_mm = 136;

	return 1;
}
/* drm_panel operations table. */
static const struct drm_panel_funcs sharp_panel_funcs = {
	.disable = sharp_panel_disable,
	.unprepare = sharp_panel_unprepare,
	.prepare = sharp_panel_prepare,
	.enable = sharp_panel_enable,
	.get_modes = sharp_panel_get_modes,
};

/* Device-tree match table. */
static const struct of_device_id sharp_of_match[] = {
	{ .compatible = "sharp,lq101r1sx01", },
	{ }
};
MODULE_DEVICE_TABLE(of, sharp_of_match);
/*
 * Acquire the supply, wire up the backlight and register the drm_panel
 * on the DSI-LINK1 device.
 */
static int sharp_panel_add(struct sharp_panel *sharp)
{
	int ret;

	sharp->mode = &default_mode;

	sharp->supply = devm_regulator_get(&sharp->link1->dev, "power");
	if (IS_ERR(sharp->supply))
		return PTR_ERR(sharp->supply);

	drm_panel_init(&sharp->base, &sharp->link1->dev, &sharp_panel_funcs,
		       DRM_MODE_CONNECTOR_DSI);

	ret = drm_panel_of_backlight(&sharp->base);
	if (ret)
		return ret;

	drm_panel_add(&sharp->base);

	return 0;
}
/*
 * Counterpart of sharp_panel_add(): unregister the panel (if it was
 * registered) and drop the reference taken on the DSI-LINK2 device.
 */
static void sharp_panel_del(struct sharp_panel *sharp)
{
	if (sharp->base.dev)
		drm_panel_remove(&sharp->base);

	if (sharp->link2)
		put_device(&sharp->link2->dev);
}
/*
 * MIPI-DSI probe, called once per link.  Only the device that carries a
 * "link2" phandle (DSI-LINK1) allocates state and registers the panel;
 * the secondary device (DSI-LINK2) merely attaches to its host and
 * keeps a NULL drvdata, which remove/shutdown use to tell them apart.
 */
static int sharp_panel_probe(struct mipi_dsi_device *dsi)
{
	struct mipi_dsi_device *secondary = NULL;
	struct sharp_panel *sharp;
	struct device_node *np;
	int err;

	dsi->lanes = 4;
	dsi->format = MIPI_DSI_FMT_RGB888;
	dsi->mode_flags = MIPI_DSI_MODE_LPM;

	/* Look up the DSI-LINK2 device via the "link2" phandle */
	np = of_parse_phandle(dsi->dev.of_node, "link2", 0);
	if (np) {
		secondary = of_find_mipi_dsi_device_by_node(np);
		of_node_put(np);

		if (!secondary)
			return -EPROBE_DEFER;
	}

	/* register a panel for only the DSI-LINK1 interface */
	if (secondary) {
		sharp = devm_kzalloc(&dsi->dev, sizeof(*sharp), GFP_KERNEL);
		if (!sharp) {
			put_device(&secondary->dev);
			return -ENOMEM;
		}

		mipi_dsi_set_drvdata(dsi, sharp);
		sharp->link2 = secondary;
		sharp->link1 = dsi;

		err = sharp_panel_add(sharp);
		if (err < 0) {
			put_device(&secondary->dev);
			return err;
		}
	}

	err = mipi_dsi_attach(dsi);
	if (err < 0) {
		if (secondary)
			sharp_panel_del(sharp);

		return err;
	}

	return 0;
}
/*
 * Driver-unbind hook.  The DSI-LINK2 device has no drvdata and only
 * needs to detach; DSI-LINK1 additionally disables and unregisters the
 * panel.  Failures are logged only.
 */
static void sharp_panel_remove(struct mipi_dsi_device *dsi)
{
	struct sharp_panel *sharp = mipi_dsi_get_drvdata(dsi);
	int err;

	/* only detach from host for the DSI-LINK2 interface */
	if (!sharp) {
		mipi_dsi_detach(dsi);
		return;
	}

	err = drm_panel_disable(&sharp->base);
	if (err < 0)
		dev_err(&dsi->dev, "failed to disable panel: %d\n", err);

	err = mipi_dsi_detach(dsi);
	if (err < 0)
		dev_err(&dsi->dev, "failed to detach from DSI host: %d\n", err);

	sharp_panel_del(sharp);
}
/* System shutdown hook; the DSI-LINK2 device (NULL drvdata) is a no-op. */
static void sharp_panel_shutdown(struct mipi_dsi_device *dsi)
{
	struct sharp_panel *ctx = mipi_dsi_get_drvdata(dsi);

	/* nothing to do for DSI-LINK2 */
	if (ctx)
		drm_panel_disable(&ctx->base);
}
/* Hook the probe/remove/shutdown callbacks into the MIPI-DSI core. */
static struct mipi_dsi_driver sharp_panel_driver = {
	.driver = {
		.name = "panel-sharp-lq101r1sx01",
		.of_match_table = sharp_of_match,
	},
	.probe = sharp_panel_probe,
	.remove = sharp_panel_remove,
	.shutdown = sharp_panel_shutdown,
};
module_mipi_dsi_driver(sharp_panel_driver);

MODULE_AUTHOR("Thierry Reding <[email protected]>");
MODULE_DESCRIPTION("Sharp LQ101R1SX01 panel driver");
MODULE_LICENSE("GPL v2");
/* ---- linux-master: drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c ---- */
// SPDX-License-Identifier: GPL-2.0
/*
* Raydium RM67191 MIPI-DSI panel driver
*
* Copyright 2019 NXP
*/
#include <linux/backlight.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/media-bus-format.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/regulator/consumer.h>
#include <video/mipi_display.h>
#include <video/of_videomode.h>
#include <video/videomode.h>
#include <drm/drm_crtc.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_panel.h>
/* Panel specific color-format bits (values written via register 0x3A) */
#define COL_FMT_16BPP 0x55
#define COL_FMT_18BPP 0x66
#define COL_FMT_24BPP 0x77

/* Write Manufacture Command Set Control */
#define WRMAUCCTR 0xFE

/* Manufacturer Command Set pages (CMD2) */
/* One init-table entry: command byte + single parameter byte. */
struct cmd_set_entry {
	u8 cmd;
	u8 param;
};
/*
 * There is no description in the Reference Manual about these commands.
 * We received them from vendor, so just use them as is.
 *
 * Sent verbatim and in order by rad_panel_push_cmd_list(); the 0xFE
 * entries select the manufacturer command page for subsequent writes.
 */
static const struct cmd_set_entry manufacturer_cmd_set[] = {
	{0xFE, 0x0B},
	{0x28, 0x40},
	{0x29, 0x4F},
	{0xFE, 0x0E},
	{0x4B, 0x00},
	{0x4C, 0x0F},
	{0x4D, 0x20},
	{0x4E, 0x40},
	{0x4F, 0x60},
	{0x50, 0xA0},
	{0x51, 0xC0},
	{0x52, 0xE0},
	{0x53, 0xFF},
	{0xFE, 0x0D},
	{0x18, 0x08},
	{0x42, 0x00},
	{0x08, 0x41},
	{0x46, 0x02},
	{0x72, 0x09},
	{0xFE, 0x0A},
	{0x24, 0x17},
	{0x04, 0x07},
	{0x1A, 0x0C},
	{0x0F, 0x44},
	{0xFE, 0x04},
	{0x00, 0x0C},
	{0x05, 0x08},
	{0x06, 0x08},
	{0x08, 0x08},
	{0x09, 0x08},
	{0x0A, 0xE6},
	{0x0B, 0x8C},
	{0x1A, 0x12},
	{0x1E, 0xE0},
	{0x29, 0x93},
	{0x2A, 0x93},
	{0x2F, 0x02},
	{0x31, 0x02},
	{0x33, 0x05},
	{0x37, 0x2D},
	{0x38, 0x2D},
	{0x3A, 0x1E},
	{0x3B, 0x1E},
	{0x3D, 0x27},
	{0x3F, 0x80},
	{0x40, 0x40},
	{0x41, 0xE0},
	{0x4F, 0x2F},
	{0x50, 0x1E},
	{0xFE, 0x06},
	{0x00, 0xCC},
	{0x05, 0x05},
	{0x07, 0xA2},
	{0x08, 0xCC},
	{0x0D, 0x03},
	{0x0F, 0xA2},
	{0x32, 0xCC},
	{0x37, 0x05},
	{0x39, 0x83},
	{0x3A, 0xCC},
	{0x41, 0x04},
	{0x43, 0x83},
	{0x44, 0xCC},
	{0x49, 0x05},
	{0x4B, 0xA2},
	{0x4C, 0xCC},
	{0x51, 0x03},
	{0x53, 0xA2},
	{0x75, 0xCC},
	{0x7A, 0x03},
	{0x7C, 0x83},
	{0x7D, 0xCC},
	{0x82, 0x02},
	{0x84, 0x83},
	{0x85, 0xEC},
	{0x86, 0x0F},
	{0x87, 0xFF},
	{0x88, 0x00},
	{0x8A, 0x02},
	{0x8C, 0xA2},
	{0x8D, 0xEA},
	{0x8E, 0x01},
	{0x8F, 0xE8},
	{0xFE, 0x06},
	{0x90, 0x0A},
	{0x92, 0x06},
	{0x93, 0xA0},
	{0x94, 0xA8},
	{0x95, 0xEC},
	{0x96, 0x0F},
	{0x97, 0xFF},
	{0x98, 0x00},
	{0x9A, 0x02},
	{0x9C, 0xA2},
	{0xAC, 0x04},
	{0xFE, 0x06},
	{0xB1, 0x12},
	{0xB2, 0x17},
	{0xB3, 0x17},
	{0xB4, 0x17},
	{0xB5, 0x17},
	{0xB6, 0x11},
	{0xB7, 0x08},
	{0xB8, 0x09},
	{0xB9, 0x06},
	{0xBA, 0x07},
	{0xBB, 0x17},
	{0xBC, 0x17},
	{0xBD, 0x17},
	{0xBE, 0x17},
	{0xBF, 0x17},
	{0xC0, 0x17},
	{0xC1, 0x17},
	{0xC2, 0x17},
	{0xC3, 0x17},
	{0xC4, 0x0F},
	{0xC5, 0x0E},
	{0xC6, 0x00},
	{0xC7, 0x01},
	{0xC8, 0x10},
	{0xFE, 0x06},
	{0x95, 0xEC},
	{0x8D, 0xEE},
	{0x44, 0xEC},
	{0x4C, 0xEC},
	{0x32, 0xEC},
	{0x3A, 0xEC},
	{0x7D, 0xEC},
	{0x75, 0xEC},
	{0x00, 0xEC},
	{0x08, 0xEC},
	{0x85, 0xEC},
	{0xA6, 0x21},
	{0xA7, 0x05},
	{0xA9, 0x06},
	{0x82, 0x06},
	{0x41, 0x06},
	{0x7A, 0x07},
	{0x37, 0x07},
	{0x05, 0x06},
	{0x49, 0x06},
	{0x0D, 0x04},
	{0x51, 0x04},
};
/* Media-bus pixel formats this panel supports, preferred first. */
static const u32 rad_bus_formats[] = {
	MEDIA_BUS_FMT_RGB888_1X24,
	MEDIA_BUS_FMT_RGB666_1X18,
	MEDIA_BUS_FMT_RGB565_1X16,
};

/* Bus sampling/polarity flags advertised to the connector. */
static const u32 rad_bus_flags = DRM_BUS_FLAG_DE_LOW |
				 DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE;
/* Per-panel driver state for the Raydium RM67191. */
struct rad_panel {
	struct drm_panel panel;		/* embedded drm_panel */
	struct mipi_dsi_device *dsi;	/* DSI peripheral */

	struct gpio_desc *reset;	/* optional reset line */
	struct backlight_device *backlight;

	struct regulator_bulk_data *supplies;
	unsigned int num_supplies;

	bool prepared;			/* guards prepare/unprepare re-entry */
	bool enabled;			/* guards enable/disable re-entry */
};
/* Fixed native mode: 1080x1920, 132 MHz pixel clock, negative syncs. */
static const struct drm_display_mode default_mode = {
	.clock = 132000,
	.hdisplay = 1080,
	.hsync_start = 1080 + 20,
	.hsync_end = 1080 + 20 + 2,
	.htotal = 1080 + 20 + 2 + 34,
	.vdisplay = 1920,
	.vsync_start = 1920 + 10,
	.vsync_end = 1920 + 10 + 2,
	.vtotal = 1920 + 10 + 2 + 4,
	.width_mm = 68,
	.height_mm = 121,
	.flags = DRM_MODE_FLAG_NHSYNC |
		 DRM_MODE_FLAG_NVSYNC,
};
/* Convert an embedded drm_panel back to its containing rad_panel. */
static inline struct rad_panel *to_rad_panel(struct drm_panel *panel)
{
	return container_of(panel, struct rad_panel, panel);
}
/*
 * Push the vendor manufacturer command set (MCS) to the panel, one
 * two-byte generic write per {cmd, param} entry.
 *
 * Returns 0 on success or the first negative error code from
 * mipi_dsi_generic_write(). (The original returned the last write's
 * positive byte count on success; callers only test for < 0, so
 * returning 0 is backward compatible and less surprising.)
 */
static int rad_panel_push_cmd_list(struct mipi_dsi_device *dsi)
{
	size_t i;
	size_t count = ARRAY_SIZE(manufacturer_cmd_set);
	int ret;

	for (i = 0; i < count; i++) {
		const struct cmd_set_entry *entry = &manufacturer_cmd_set[i];
		u8 buffer[2] = { entry->cmd, entry->param };

		/* Let the array decay to u8 *; &buffer was u8 (*)[2]. */
		ret = mipi_dsi_generic_write(dsi, buffer, sizeof(buffer));
		if (ret < 0)
			return ret;
	}

	return 0;
}
/*
 * Map the DSI pixel format negotiated with the host to the panel's
 * interface color format value (COL_FMT_*) programmed via
 * mipi_dsi_dcs_set_pixel_format(). Unknown formats fall back to 24bpp.
 *
 * Note: the stray ';' after the function body was dropped.
 */
static int color_format_from_dsi_format(enum mipi_dsi_pixel_format format)
{
	switch (format) {
	case MIPI_DSI_FMT_RGB565:
		return COL_FMT_16BPP;
	case MIPI_DSI_FMT_RGB666:
	case MIPI_DSI_FMT_RGB666_PACKED:
		return COL_FMT_18BPP;
	case MIPI_DSI_FMT_RGB888:
		return COL_FMT_24BPP;
	default:
		return COL_FMT_24BPP; /* for backward compatibility */
	}
}
/*
 * Power up the panel: enable all supplies, then (if wired) pulse the
 * reset line and wait for the controller to come out of reset.
 */
static int rad_panel_prepare(struct drm_panel *panel)
{
	struct rad_panel *rad = to_rad_panel(panel);
	int err;

	if (rad->prepared)
		return 0;

	err = regulator_bulk_enable(rad->num_supplies, rad->supplies);
	if (err)
		return err;

	/* Optional reset pulse: assert briefly, then release and settle. */
	if (rad->reset) {
		gpiod_set_value_cansleep(rad->reset, 1);
		usleep_range(3000, 5000);

		gpiod_set_value_cansleep(rad->reset, 0);
		usleep_range(18000, 20000);
	}

	rad->prepared = true;

	return 0;
}
/*
 * Power down the panel: pulse reset (leaving it released, see below),
 * then drop all supplies.
 */
static int rad_panel_unprepare(struct drm_panel *panel)
{
	struct rad_panel *rad = to_rad_panel(panel);
	int err;

	if (!rad->prepared)
		return 0;

	/*
	 * Right after asserting the reset, we need to release it, so that the
	 * touch driver can have an active connection with the touch controller
	 * even after the display is turned off.
	 */
	if (rad->reset) {
		gpiod_set_value_cansleep(rad->reset, 1);
		usleep_range(15000, 17000);

		gpiod_set_value_cansleep(rad->reset, 0);
	}

	err = regulator_bulk_disable(rad->num_supplies, rad->supplies);
	if (err)
		return err;

	rad->prepared = false;

	return 0;
}
/*
 * Initialize and switch on the display: push the vendor MCS tables in
 * low-power mode, soft-reset, then configure DSI mode, tearing, scanline
 * and pixel format before exiting sleep and turning the display and
 * backlight on. On any failure the reset line is asserted so the panel
 * is left in a defined state.
 */
static int rad_panel_enable(struct drm_panel *panel)
{
	struct rad_panel *rad = to_rad_panel(panel);
	struct mipi_dsi_device *dsi = rad->dsi;
	struct device *dev = &dsi->dev;
	int color_format = color_format_from_dsi_format(dsi->format);
	int ret;
	if (rad->enabled)
		return 0;
	/* The whole init sequence is sent in low-power mode. */
	dsi->mode_flags |= MIPI_DSI_MODE_LPM;
	ret = rad_panel_push_cmd_list(dsi);
	if (ret < 0) {
		dev_err(dev, "Failed to send MCS (%d)\n", ret);
		goto fail;
	}
	/* Select User Command Set table (CMD1) */
	ret = mipi_dsi_generic_write(dsi, (u8[]){ WRMAUCCTR, 0x00 }, 2);
	if (ret < 0)
		goto fail;
	/* Software reset */
	ret = mipi_dsi_dcs_soft_reset(dsi);
	if (ret < 0) {
		dev_err(dev, "Failed to do Software Reset (%d)\n", ret);
		goto fail;
	}
	usleep_range(15000, 17000);
	/* Set DSI mode */
	ret = mipi_dsi_generic_write(dsi, (u8[]){ 0xC2, 0x0B }, 2);
	if (ret < 0) {
		dev_err(dev, "Failed to set DSI mode (%d)\n", ret);
		goto fail;
	}
	/* Set tear ON */
	ret = mipi_dsi_dcs_set_tear_on(dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
	if (ret < 0) {
		dev_err(dev, "Failed to set tear ON (%d)\n", ret);
		goto fail;
	}
	/* Set tear scanline */
	ret = mipi_dsi_dcs_set_tear_scanline(dsi, 0x380);
	if (ret < 0) {
		dev_err(dev, "Failed to set tear scanline (%d)\n", ret);
		goto fail;
	}
	/* Set pixel format (derived from the negotiated DSI format above) */
	ret = mipi_dsi_dcs_set_pixel_format(dsi, color_format);
	dev_dbg(dev, "Interface color format set to 0x%x\n", color_format);
	if (ret < 0) {
		dev_err(dev, "Failed to set pixel format (%d)\n", ret);
		goto fail;
	}
	/* Exit sleep mode */
	ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
	if (ret < 0) {
		dev_err(dev, "Failed to exit sleep mode (%d)\n", ret);
		goto fail;
	}
	usleep_range(5000, 7000);
	ret = mipi_dsi_dcs_set_display_on(dsi);
	if (ret < 0) {
		dev_err(dev, "Failed to set display ON (%d)\n", ret);
		goto fail;
	}
	backlight_enable(rad->backlight);
	rad->enabled = true;
	return 0;
fail:
	/* Assert reset so a partially initialized panel is held quiet. */
	gpiod_set_value_cansleep(rad->reset, 1);
	return ret;
}
/*
 * Switch the display off: disable the backlight, then send display-off
 * and enter-sleep in low-power mode with the delays between steps.
 */
static int rad_panel_disable(struct drm_panel *panel)
{
	struct rad_panel *rad = to_rad_panel(panel);
	struct mipi_dsi_device *dsi = rad->dsi;
	struct device *dev = &dsi->dev;
	int ret;
	if (!rad->enabled)
		return 0;
	/* Shutdown commands are sent in low-power mode. */
	dsi->mode_flags |= MIPI_DSI_MODE_LPM;
	backlight_disable(rad->backlight);
	usleep_range(10000, 12000);
	ret = mipi_dsi_dcs_set_display_off(dsi);
	if (ret < 0) {
		dev_err(dev, "Failed to set display OFF (%d)\n", ret);
		return ret;
	}
	usleep_range(5000, 10000);
	ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
	if (ret < 0) {
		dev_err(dev, "Failed to enter sleep mode (%d)\n", ret);
		return ret;
	}
	rad->enabled = false;
	return 0;
}
/*
 * Report the single fixed mode plus the panel's physical size and
 * supported wire formats. Returns the number of modes added (1) or
 * -ENOMEM if the mode could not be duplicated.
 */
static int rad_panel_get_modes(struct drm_panel *panel,
			       struct drm_connector *connector)
{
	struct drm_display_mode *mode;

	mode = drm_mode_duplicate(connector->dev, &default_mode);
	if (!mode) {
		dev_err(panel->dev, "failed to add mode %ux%u@%u\n",
			default_mode.hdisplay, default_mode.vdisplay,
			drm_mode_vrefresh(&default_mode));
		return -ENOMEM;
	}

	mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
	drm_mode_set_name(mode);
	drm_mode_probed_add(connector, mode);

	/* Physical dimensions and the supported bus formats/flags. */
	connector->display_info.width_mm = mode->width_mm;
	connector->display_info.height_mm = mode->height_mm;
	connector->display_info.bus_flags = rad_bus_flags;

	drm_display_info_set_bus_formats(&connector->display_info,
					 rad_bus_formats,
					 ARRAY_SIZE(rad_bus_formats));

	return 1;
}
/*
 * Read the current brightness back from the panel over DSI.
 *
 * Returns 0 when the panel is not yet prepared, a negative error code
 * on a failed DCS read, otherwise the brightness value.
 * NOTE(review): props.brightness is updated with the full 16-bit value
 * while only the low byte is returned — confirm this is intentional.
 */
static int rad_bl_get_brightness(struct backlight_device *bl)
{
	struct mipi_dsi_device *dsi = bl_get_data(bl);
	struct rad_panel *rad = mipi_dsi_get_drvdata(dsi);
	u16 brightness;
	int ret;
	if (!rad->prepared)
		return 0;
	/* Brightness transfers run in high-speed mode (LPM cleared). */
	dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
	ret = mipi_dsi_dcs_get_display_brightness(dsi, &brightness);
	if (ret < 0)
		return ret;
	bl->props.brightness = brightness;
	return brightness & 0xff;
}
/*
 * Push the requested backlight brightness to the panel over DSI.
 * A no-op (returning 0) while the panel is not prepared.
 */
static int rad_bl_update_status(struct backlight_device *bl)
{
	struct mipi_dsi_device *dsi = bl_get_data(bl);
	struct rad_panel *rad = mipi_dsi_get_drvdata(dsi);
	int err;

	if (!rad->prepared)
		return 0;

	/* Brightness transfers run in high-speed mode (LPM cleared). */
	dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;

	err = mipi_dsi_dcs_set_display_brightness(dsi, bl->props.brightness);
	return err < 0 ? err : 0;
}
/* Backlight operations backed by the panel's DCS brightness commands. */
static const struct backlight_ops rad_bl_ops = {
	.update_status = rad_bl_update_status,
	.get_brightness = rad_bl_get_brightness,
};
/* DRM panel callbacks. */
static const struct drm_panel_funcs rad_panel_funcs = {
	.prepare = rad_panel_prepare,
	.unprepare = rad_panel_unprepare,
	.enable = rad_panel_enable,
	.disable = rad_panel_disable,
	.get_modes = rad_panel_get_modes,
};
/* Supply rails to request, in regulator_bulk order. */
static const char * const rad_supply_names[] = {
	"v3p3",
	"v1p8",
};
/*
 * Allocate and acquire the panel's supply regulators listed in
 * rad_supply_names[]. Returns 0 on success or a negative error code.
 *
 * Fixes: loop index made unsigned to match num_supplies (avoids a
 * signed/unsigned comparison) and the stray ';' after the function
 * body was dropped.
 */
static int rad_init_regulators(struct rad_panel *rad)
{
	struct device *dev = &rad->dsi->dev;
	unsigned int i;

	rad->num_supplies = ARRAY_SIZE(rad_supply_names);
	rad->supplies = devm_kcalloc(dev, rad->num_supplies,
				     sizeof(*rad->supplies), GFP_KERNEL);
	if (!rad->supplies)
		return -ENOMEM;

	for (i = 0; i < rad->num_supplies; i++)
		rad->supplies[i].supply = rad_supply_names[i];

	return devm_regulator_bulk_get(dev, rad->num_supplies, rad->supplies);
}
/*
 * Probe one RM67191 panel: parse the DT properties ("video-mode",
 * "dsi-lanes"), acquire the optional reset GPIO and supply regulators,
 * register a backlight device, then add the DRM panel and attach to
 * the DSI host (removing the panel again if the attach fails).
 *
 * Fix: video_mode is a u32, so the invalid-mode warning now uses %u
 * instead of %d.
 */
static int rad_panel_probe(struct mipi_dsi_device *dsi)
{
	struct device *dev = &dsi->dev;
	struct device_node *np = dev->of_node;
	struct rad_panel *panel;
	struct backlight_properties bl_props;
	int ret;
	u32 video_mode;

	panel = devm_kzalloc(&dsi->dev, sizeof(*panel), GFP_KERNEL);
	if (!panel)
		return -ENOMEM;

	mipi_dsi_set_drvdata(dsi, panel);
	panel->dsi = dsi;

	dsi->format = MIPI_DSI_FMT_RGB888;
	dsi->mode_flags = MIPI_DSI_MODE_VIDEO_HSE | MIPI_DSI_MODE_VIDEO;

	/* "video-mode" is optional; default is non-burst with sync event. */
	ret = of_property_read_u32(np, "video-mode", &video_mode);
	if (!ret) {
		switch (video_mode) {
		case 0:
			/* burst mode */
			dsi->mode_flags |= MIPI_DSI_MODE_VIDEO_BURST;
			break;
		case 1:
			/* non-burst mode with sync event */
			break;
		case 2:
			/* non-burst mode with sync pulse */
			dsi->mode_flags |= MIPI_DSI_MODE_VIDEO_SYNC_PULSE;
			break;
		default:
			dev_warn(dev, "invalid video mode %u\n", video_mode);
			break;
		}
	}

	ret = of_property_read_u32(np, "dsi-lanes", &dsi->lanes);
	if (ret) {
		dev_err(dev, "Failed to get dsi-lanes property (%d)\n", ret);
		return ret;
	}

	panel->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
	if (IS_ERR(panel->reset))
		return PTR_ERR(panel->reset);

	memset(&bl_props, 0, sizeof(bl_props));
	bl_props.type = BACKLIGHT_RAW;
	bl_props.brightness = 255;
	bl_props.max_brightness = 255;

	panel->backlight = devm_backlight_device_register(dev, dev_name(dev),
							  dev, dsi, &rad_bl_ops,
							  &bl_props);
	if (IS_ERR(panel->backlight)) {
		ret = PTR_ERR(panel->backlight);
		dev_err(dev, "Failed to register backlight (%d)\n", ret);
		return ret;
	}

	ret = rad_init_regulators(panel);
	if (ret)
		return ret;

	drm_panel_init(&panel->panel, dev, &rad_panel_funcs,
		       DRM_MODE_CONNECTOR_DSI);
	dev_set_drvdata(dev, panel);

	drm_panel_add(&panel->panel);

	ret = mipi_dsi_attach(dsi);
	if (ret)
		drm_panel_remove(&panel->panel);

	return ret;
}
/* Detach from the DSI host (logging, not propagating, any error) and
 * unregister the panel.
 */
static void rad_panel_remove(struct mipi_dsi_device *dsi)
{
	struct rad_panel *rad = mipi_dsi_get_drvdata(dsi);
	struct device *dev = &dsi->dev;
	int ret;
	ret = mipi_dsi_detach(dsi);
	if (ret)
		dev_err(dev, "Failed to detach from host (%d)\n", ret);
	drm_panel_remove(&rad->panel);
}
/* Quiesce the panel on system shutdown: display off, then power off. */
static void rad_panel_shutdown(struct mipi_dsi_device *dsi)
{
	struct rad_panel *rad = mipi_dsi_get_drvdata(dsi);
	rad_panel_disable(&rad->panel);
	rad_panel_unprepare(&rad->panel);
}
/* Device-tree match table. */
static const struct of_device_id rad_of_match[] = {
	{ .compatible = "raydium,rm67191", },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, rad_of_match);
/* MIPI DSI driver registration. */
static struct mipi_dsi_driver rad_panel_driver = {
	.driver = {
		.name = "panel-raydium-rm67191",
		.of_match_table = rad_of_match,
	},
	.probe = rad_panel_probe,
	.remove = rad_panel_remove,
	.shutdown = rad_panel_shutdown,
};
module_mipi_dsi_driver(rad_panel_driver);
MODULE_AUTHOR("Robert Chiras <[email protected]>");
MODULE_DESCRIPTION("DRM Driver for Raydium RM67191 MIPI DSI panel");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/gpu/drm/panel/panel-raydium-rm67191.c |
// SPDX-License-Identifier: GPL-2.0
/*
* S6E63M0 AMOLED LCD drm_panel driver.
*
* Copyright (C) 2019 Paweł Chmiel <[email protected]>
* Derived from drivers/gpu/drm/panel-samsung-ld9040.c
*
* Andrzej Hajda <[email protected]>
*/
#include <drm/drm_modes.h>
#include <drm/drm_panel.h>
#include <linux/backlight.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/regulator/consumer.h>
#include <linux/media-bus-format.h>
#include <video/mipi_display.h>
#include "panel-samsung-s6e63m0.h"
#define S6E63M0_LCD_ID_VALUE_M2 0xA4
#define S6E63M0_LCD_ID_VALUE_SM2 0xB4
#define S6E63M0_LCD_ID_VALUE_SM2_1 0xB6
#define NUM_GAMMA_LEVELS 28
#define GAMMA_TABLE_COUNT 23
#define MAX_BRIGHTNESS (NUM_GAMMA_LEVELS - 1)
/*
 * Array of gamma adjustment tables for gamma value 2.2: one curve
 * (MCS_PGAMMACTL command, 0x02 sub-parameter, then 21 data bytes) per
 * brightness level from 30 cd/m2 to 300 cd/m2, indexed by backlight
 * brightness in s6e63m0_set_brightness(). Values come from the vendor
 * driver.
 */
static u8 const s6e63m0_gamma_22[NUM_GAMMA_LEVELS][GAMMA_TABLE_COUNT] = {
	/* 30 cd */
	{ MCS_PGAMMACTL, 0x02,
	  0x18, 0x08, 0x24, 0xA1, 0x51, 0x7B, 0xCE,
	  0xCB, 0xC2, 0xC7, 0xCB, 0xBC, 0xDA, 0xDD,
	  0xD3, 0x00, 0x53, 0x00, 0x52, 0x00, 0x6F, },
	/* 40 cd */
	{ MCS_PGAMMACTL, 0x02,
	  0x18, 0x08, 0x24, 0x97, 0x58, 0x71, 0xCC,
	  0xCB, 0xC0, 0xC5, 0xC9, 0xBA, 0xD9, 0xDC,
	  0xD1, 0x00, 0x5B, 0x00, 0x5A, 0x00, 0x7A, },
	/* 50 cd */
	{ MCS_PGAMMACTL, 0x02,
	  0x18, 0x08, 0x24, 0x96, 0x58, 0x72, 0xCB,
	  0xCA, 0xBF, 0xC6, 0xC9, 0xBA, 0xD6, 0xD9,
	  0xCD, 0x00, 0x61, 0x00, 0x61, 0x00, 0x83, },
	/* 60 cd */
	{ MCS_PGAMMACTL, 0x02,
	  0x18, 0x08, 0x24, 0x91, 0x5E, 0x6E, 0xC9,
	  0xC9, 0xBD, 0xC4, 0xC9, 0xB8, 0xD3, 0xD7,
	  0xCA, 0x00, 0x69, 0x00, 0x67, 0x00, 0x8D, },
	/* 70 cd */
	{ MCS_PGAMMACTL, 0x02,
	  0x18, 0x08, 0x24, 0x8E, 0x62, 0x6B, 0xC7,
	  0xC9, 0xBB, 0xC3, 0xC7, 0xB7, 0xD3, 0xD7,
	  0xCA, 0x00, 0x6E, 0x00, 0x6C, 0x00, 0x94, },
	/* 80 cd */
	{ MCS_PGAMMACTL, 0x02,
	  0x18, 0x08, 0x24, 0x89, 0x68, 0x65, 0xC9,
	  0xC9, 0xBC, 0xC1, 0xC5, 0xB6, 0xD2, 0xD5,
	  0xC9, 0x00, 0x73, 0x00, 0x72, 0x00, 0x9A, },
	/* 90 cd */
	{ MCS_PGAMMACTL, 0x02,
	  0x18, 0x08, 0x24, 0x89, 0x69, 0x64, 0xC7,
	  0xC8, 0xBB, 0xC0, 0xC5, 0xB4, 0xD2, 0xD5,
	  0xC9, 0x00, 0x77, 0x00, 0x76, 0x00, 0xA0, },
	/* 100 cd */
	{ MCS_PGAMMACTL, 0x02,
	  0x18, 0x08, 0x24, 0x86, 0x69, 0x60, 0xC6,
	  0xC8, 0xBA, 0xBF, 0xC4, 0xB4, 0xD0, 0xD4,
	  0xC6, 0x00, 0x7C, 0x00, 0x7A, 0x00, 0xA7, },
	/* 110 cd */
	{ MCS_PGAMMACTL, 0x02,
	  0x18, 0x08, 0x24, 0x86, 0x6A, 0x60, 0xC5,
	  0xC7, 0xBA, 0xBD, 0xC3, 0xB2, 0xD0, 0xD4,
	  0xC5, 0x00, 0x80, 0x00, 0x7E, 0x00, 0xAD, },
	/* 120 cd */
	{ MCS_PGAMMACTL, 0x02,
	  0x18, 0x08, 0x24, 0x82, 0x6B, 0x5E, 0xC4,
	  0xC8, 0xB9, 0xBD, 0xC2, 0xB1, 0xCE, 0xD2,
	  0xC4, 0x00, 0x85, 0x00, 0x82, 0x00, 0xB3, },
	/* 130 cd */
	{ MCS_PGAMMACTL, 0x02,
	  0x18, 0x08, 0x24, 0x8C, 0x6C, 0x60, 0xC3,
	  0xC7, 0xB9, 0xBC, 0xC1, 0xAF, 0xCE, 0xD2,
	  0xC3, 0x00, 0x88, 0x00, 0x86, 0x00, 0xB8, },
	/* 140 cd */
	{ MCS_PGAMMACTL, 0x02,
	  0x18, 0x08, 0x24, 0x80, 0x6C, 0x5F, 0xC1,
	  0xC6, 0xB7, 0xBC, 0xC1, 0xAE, 0xCD, 0xD0,
	  0xC2, 0x00, 0x8C, 0x00, 0x8A, 0x00, 0xBE, },
	/* 150 cd */
	{ MCS_PGAMMACTL, 0x02,
	  0x18, 0x08, 0x24, 0x80, 0x6E, 0x5F, 0xC1,
	  0xC6, 0xB6, 0xBC, 0xC0, 0xAE, 0xCC, 0xD0,
	  0xC2, 0x00, 0x8F, 0x00, 0x8D, 0x00, 0xC2, },
	/* 160 cd */
	{ MCS_PGAMMACTL, 0x02,
	  0x18, 0x08, 0x24, 0x7F, 0x6E, 0x5F, 0xC0,
	  0xC6, 0xB5, 0xBA, 0xBF, 0xAD, 0xCB, 0xCF,
	  0xC0, 0x00, 0x94, 0x00, 0x91, 0x00, 0xC8, },
	/* 170 cd */
	{ MCS_PGAMMACTL, 0x02,
	  0x18, 0x08, 0x24, 0x7C, 0x6D, 0x5C, 0xC0,
	  0xC6, 0xB4, 0xBB, 0xBE, 0xAD, 0xCA, 0xCF,
	  0xC0, 0x00, 0x96, 0x00, 0x94, 0x00, 0xCC, },
	/* 180 cd */
	{ MCS_PGAMMACTL, 0x02,
	  0x18, 0x08, 0x24, 0x7B, 0x6D, 0x5B, 0xC0,
	  0xC5, 0xB3, 0xBA, 0xBE, 0xAD, 0xCA, 0xCE,
	  0xBF, 0x00, 0x99, 0x00, 0x97, 0x00, 0xD0, },
	/* 190 cd */
	{ MCS_PGAMMACTL, 0x02,
	  0x18, 0x08, 0x24, 0x7A, 0x6D, 0x59, 0xC1,
	  0xC5, 0xB4, 0xB8, 0xBD, 0xAC, 0xC9, 0xCE,
	  0xBE, 0x00, 0x9D, 0x00, 0x9A, 0x00, 0xD5, },
	/* 200 cd */
	{ MCS_PGAMMACTL, 0x02,
	  0x18, 0x08, 0x24, 0x79, 0x6D, 0x58, 0xC1,
	  0xC4, 0xB4, 0xB6, 0xBD, 0xAA, 0xCA, 0xCD,
	  0xBE, 0x00, 0x9F, 0x00, 0x9D, 0x00, 0xD9, },
	/* 210 cd */
	{ MCS_PGAMMACTL, 0x02,
	  0x18, 0x08, 0x24, 0x79, 0x6D, 0x57, 0xC0,
	  0xC4, 0xB4, 0xB7, 0xBD, 0xAA, 0xC8, 0xCC,
	  0xBD, 0x00, 0xA2, 0x00, 0xA0, 0x00, 0xDD, },
	/* 220 cd */
	{ MCS_PGAMMACTL, 0x02,
	  0x18, 0x08, 0x24, 0x78, 0x6F, 0x58, 0xBF,
	  0xC4, 0xB3, 0xB5, 0xBB, 0xA9, 0xC8, 0xCC,
	  0xBC, 0x00, 0xA6, 0x00, 0xA3, 0x00, 0xE2, },
	/* 230 cd */
	{ MCS_PGAMMACTL, 0x02,
	  0x18, 0x08, 0x24, 0x75, 0x6F, 0x56, 0xBF,
	  0xC3, 0xB2, 0xB6, 0xBB, 0xA8, 0xC7, 0xCB,
	  0xBC, 0x00, 0xA8, 0x00, 0xA6, 0x00, 0xE6, },
	/* 240 cd */
	{ MCS_PGAMMACTL, 0x02,
	  0x18, 0x08, 0x24, 0x76, 0x6F, 0x56, 0xC0,
	  0xC3, 0xB2, 0xB5, 0xBA, 0xA8, 0xC6, 0xCB,
	  0xBB, 0x00, 0xAA, 0x00, 0xA8, 0x00, 0xE9, },
	/* 250 cd */
	{ MCS_PGAMMACTL, 0x02,
	  0x18, 0x08, 0x24, 0x74, 0x6D, 0x54, 0xBF,
	  0xC3, 0xB2, 0xB4, 0xBA, 0xA7, 0xC6, 0xCA,
	  0xBA, 0x00, 0xAD, 0x00, 0xAB, 0x00, 0xED, },
	/* 260 cd */
	{ MCS_PGAMMACTL, 0x02,
	  0x18, 0x08, 0x24, 0x74, 0x6E, 0x54, 0xBD,
	  0xC2, 0xB0, 0xB5, 0xBA, 0xA7, 0xC5, 0xC9,
	  0xBA, 0x00, 0xB0, 0x00, 0xAE, 0x00, 0xF1, },
	/* 270 cd */
	{ MCS_PGAMMACTL, 0x02,
	  0x18, 0x08, 0x24, 0x71, 0x6C, 0x50, 0xBD,
	  0xC3, 0xB0, 0xB4, 0xB8, 0xA6, 0xC6, 0xC9,
	  0xBB, 0x00, 0xB2, 0x00, 0xB1, 0x00, 0xF4, },
	/* 280 cd */
	{ MCS_PGAMMACTL, 0x02,
	  0x18, 0x08, 0x24, 0x6E, 0x6C, 0x4D, 0xBE,
	  0xC3, 0xB1, 0xB3, 0xB8, 0xA5, 0xC6, 0xC8,
	  0xBB, 0x00, 0xB4, 0x00, 0xB3, 0x00, 0xF7, },
	/* 290 cd */
	{ MCS_PGAMMACTL, 0x02,
	  0x18, 0x08, 0x24, 0x71, 0x70, 0x50, 0xBD,
	  0xC1, 0xB0, 0xB2, 0xB8, 0xA4, 0xC6, 0xC7,
	  0xBB, 0x00, 0xB6, 0x00, 0xB6, 0x00, 0xFA, },
	/* 300 cd */
	{ MCS_PGAMMACTL, 0x02,
	  0x18, 0x08, 0x24, 0x70, 0x6E, 0x4E, 0xBC,
	  0xC0, 0xAF, 0xB3, 0xB8, 0xA5, 0xC5, 0xC7,
	  0xBB, 0x00, 0xB9, 0x00, 0xB8, 0x00, 0xFC, },
};
#define NUM_ACL_LEVELS 7
#define ACL_TABLE_COUNT 28
/*
 * Adaptive Control of Luminance (ACL) parameter tables (MCS_BCMODE
 * command plus 27 data bytes): an all-zero "off" entry and vendor
 * curves named 40P..50P. The entry used for a given brightness is
 * selected through s6e63m0_acl_per_gamma[].
 */
static u8 const s6e63m0_acl[NUM_ACL_LEVELS][ACL_TABLE_COUNT] = {
	/* NULL ACL */
	{ MCS_BCMODE,
	  0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	  0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	  0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	  0x00, 0x00, 0x00 },
	/* 40P ACL */
	{ MCS_BCMODE,
	  0x4D, 0x96, 0x1D, 0x00, 0x00, 0x01, 0xDF, 0x00,
	  0x00, 0x03, 0x1F, 0x00, 0x00, 0x00, 0x00, 0x00,
	  0x01, 0x06, 0x0C, 0x11, 0x16, 0x1C, 0x21, 0x26,
	  0x2B, 0x31, 0x36 },
	/* 43P ACL */
	{ MCS_BCMODE,
	  0x4D, 0x96, 0x1D, 0x00, 0x00, 0x01, 0xDF, 0x00,
	  0x00, 0x03, 0x1F, 0x00, 0x00, 0x00, 0x00, 0x00,
	  0x01, 0x07, 0x0C, 0x12, 0x18, 0x1E, 0x23, 0x29,
	  0x2F, 0x34, 0x3A },
	/* 45P ACL */
	{ MCS_BCMODE,
	  0x4D, 0x96, 0x1D, 0x00, 0x00, 0x01, 0xDF, 0x00,
	  0x00, 0x03, 0x1F, 0x00, 0x00, 0x00, 0x00, 0x00,
	  0x01, 0x07, 0x0D, 0x13, 0x19, 0x1F, 0x25, 0x2B,
	  0x31, 0x37, 0x3D },
	/* 47P ACL */
	{ MCS_BCMODE,
	  0x4D, 0x96, 0x1D, 0x00, 0x00, 0x01, 0xDF, 0x00,
	  0x00, 0x03, 0x1F, 0x00, 0x00, 0x00, 0x00, 0x00,
	  0x01, 0x07, 0x0E, 0x14, 0x1B, 0x21, 0x27, 0x2E,
	  0x34, 0x3B, 0x41 },
	/* 48P ACL */
	{ MCS_BCMODE,
	  0x4D, 0x96, 0x1D, 0x00, 0x00, 0x01, 0xDF, 0x00,
	  0x00, 0x03, 0x1F, 0x00, 0x00, 0x00, 0x00, 0x00,
	  0x01, 0x08, 0x0E, 0x15, 0x1B, 0x22, 0x29, 0x2F,
	  0x36, 0x3C, 0x43 },
	/* 50P ACL */
	{ MCS_BCMODE,
	  0x4D, 0x96, 0x1D, 0x00, 0x00, 0x01, 0xDF, 0x00,
	  0x00, 0x03, 0x1F, 0x00, 0x00, 0x00, 0x00, 0x00,
	  0x01, 0x08, 0x0F, 0x16, 0x1D, 0x24, 0x2A, 0x31,
	  0x38, 0x3F, 0x46 },
};
/* Index into s6e63m0_acl[] for each gamma/brightness level. */
static u8 const s6e63m0_acl_per_gamma[NUM_GAMMA_LEVELS] = {
	/* 30 - 60 cd: ACL off/NULL */
	0, 0, 0, 0,
	/* 70 - 250 cd: 40P ACL */
	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
	/* 260 - 300 cd: 50P ACL */
	6, 6, 6, 6, 6,
};
/* The ELVSS backlight regulator has 5 levels */
#define S6E63M0_ELVSS_LEVELS 5
/* Offset added to the panel's ELVSS pulse for each ELVSS level. */
static u8 const s6e63m0_elvss_offsets[S6E63M0_ELVSS_LEVELS] = {
	0x00, /* not set */
	0x0D, /* 30 cd - 100 cd */
	0x09, /* 110 cd - 160 cd */
	0x07, /* 170 cd - 200 cd */
	0x00, /* 210 cd - 300 cd */
};
/* Index into s6e63m0_elvss_offsets[] for each gamma/brightness level. */
static u8 const s6e63m0_elvss_per_gamma[NUM_GAMMA_LEVELS] = {
	/* 30 - 100 cd */
	1, 1, 1, 1, 1, 1, 1, 1,
	/* 110 - 160 cd */
	2, 2, 2, 2, 2, 2,
	/* 170 - 200 cd */
	3, 3, 3, 3,
	/* 210 - 300 cd */
	4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
};
/* Per-device state, shared by the SPI and DSI transport front-ends. */
struct s6e63m0 {
	struct device *dev;
	void *transport_data;	/* opaque cookie for the hooks below */
	int (*dcs_read)(struct device *dev, void *trsp, const u8 cmd, u8 *val);
	int (*dcs_write)(struct device *dev, void *trsp, const u8 *data, size_t len);
	struct drm_panel panel;
	struct backlight_device *bl_dev;
	u8 lcd_type;		/* ID2 byte read from the panel MTP */
	u8 elvss_pulse;		/* panel-specific ELVSS base pulse */
	bool dsi_mode;		/* true when driven over DSI */
	struct regulator_bulk_data supplies[2];	/* vdd3, vci */
	struct gpio_desc *reset_gpio;
	bool prepared;
	bool enabled;
	/*
	 * This field is tested by functions directly accessing bus before
	 * transfer, transfer is skipped if it is set. In case of transfer
	 * failure or unexpected response the field is set to error value.
	 * Such construct allows to eliminate many checks in higher level
	 * functions.
	 */
	int error;
};
/* Fixed 480x800 portrait mode (~60 Hz with the blanking below). */
static const struct drm_display_mode default_mode = {
	.clock = 25628,
	.hdisplay = 480,
	.hsync_start = 480 + 16,
	.hsync_end = 480 + 16 + 2,
	.htotal = 480 + 16 + 2 + 16,
	.vdisplay = 800,
	.vsync_start = 800 + 28,
	.vsync_end = 800 + 28 + 2,
	.vtotal = 800 + 28 + 2 + 1,
	.width_mm = 53,
	.height_mm = 89,
	.flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
};
/* Convert an embedded drm_panel back to its containing s6e63m0. */
static inline struct s6e63m0 *panel_to_s6e63m0(struct drm_panel *panel)
{
	return container_of(panel, struct s6e63m0, panel);
}
/* Return the latched transfer error (0 if none) and reset the latch. */
static int s6e63m0_clear_error(struct s6e63m0 *ctx)
{
	int ret = ctx->error;
	ctx->error = 0;
	return ret;
}
/* Read one byte via the transport hook; no-op once an error is latched. */
static void s6e63m0_dcs_read(struct s6e63m0 *ctx, const u8 cmd, u8 *data)
{
	if (ctx->error < 0)
		return;
	ctx->error = ctx->dcs_read(ctx->dev, ctx->transport_data, cmd, data);
}
/* Write a command buffer via the transport hook; no-op on latched error
 * or empty buffer.
 */
static void s6e63m0_dcs_write(struct s6e63m0 *ctx, const u8 *data, size_t len)
{
	if (ctx->error < 0 || len == 0)
		return;
	ctx->error = ctx->dcs_write(ctx->dev, ctx->transport_data, data, len);
}
/*
 * Transmit a fixed command sequence. The bytes are placed in a static
 * const array so repeated calls reuse the same storage; the write is
 * skipped if a previous transfer already latched an error.
 */
#define s6e63m0_dcs_write_seq_static(ctx, seq ...) \
	({ \
		static const u8 d[] = { seq }; \
		s6e63m0_dcs_write(ctx, d, ARRAY_SIZE(d)); \
	})
/*
 * Read the three MTP ID bytes and identify the mounted panel from ID2;
 * ID3 carries the panel-specific ELVSS pulse on the known panels, with
 * 0x16 used as a default for unrecognized ones. Returns 0 on success
 * or the negative error latched by the ID reads.
 */
static int s6e63m0_check_lcd_type(struct s6e63m0 *ctx)
{
	u8 id1, id2, id3;
	int ret;
	s6e63m0_dcs_read(ctx, MCS_READ_ID1, &id1);
	s6e63m0_dcs_read(ctx, MCS_READ_ID2, &id2);
	s6e63m0_dcs_read(ctx, MCS_READ_ID3, &id3);
	ret = s6e63m0_clear_error(ctx);
	if (ret) {
		dev_err(ctx->dev, "error checking LCD type (%d)\n", ret);
		ctx->lcd_type = 0x00;
		return ret;
	}
	dev_info(ctx->dev, "MTP ID: %02x %02x %02x\n", id1, id2, id3);
	/*
	 * We attempt to detect what panel is mounted on the controller.
	 * The third ID byte represents the desired ELVSS pulse for
	 * some displays.
	 */
	switch (id2) {
	case S6E63M0_LCD_ID_VALUE_M2:
		dev_info(ctx->dev, "detected LCD panel AMS397GE MIPI M2\n");
		ctx->elvss_pulse = id3;
		break;
	case S6E63M0_LCD_ID_VALUE_SM2:
	case S6E63M0_LCD_ID_VALUE_SM2_1:
		dev_info(ctx->dev, "detected LCD panel AMS397GE MIPI SM2\n");
		ctx->elvss_pulse = id3;
		break;
	default:
		dev_info(ctx->dev, "unknown LCD panel type %02x\n", id2);
		/* Default ELVSS pulse level */
		ctx->elvss_pulse = 0x16;
		break;
	}
	ctx->lcd_type = id2;
	return 0;
}
/*
 * Send the vendor initialization sequence: panel condition set (which
 * differs between the DSI and non-DSI variants), display/interface
 * control, an initial gamma curve, source control, Pentile layout,
 * per-channel gamma delta tables, backlight control mode, temperature
 * swire defaults and ELVSS enable. Any transfer failure is latched in
 * ctx->error rather than returned.
 */
static void s6e63m0_init(struct s6e63m0 *ctx)
{
	/*
	 * We do not know why there is a difference in the DSI mode.
	 * (No datasheet.)
	 *
	 * In the vendor driver this sequence is called
	 * "SEQ_PANEL_CONDITION_SET" or "DCS_CMD_SEQ_PANEL_COND_SET".
	 */
	if (ctx->dsi_mode)
		s6e63m0_dcs_write_seq_static(ctx, MCS_PANELCTL,
					     0x01, 0x2c, 0x2c, 0x07, 0x07, 0x5f, 0xb3,
					     0x6d, 0x97, 0x1d, 0x3a, 0x0f, 0x00, 0x00);
	else
		s6e63m0_dcs_write_seq_static(ctx, MCS_PANELCTL,
					     0x01, 0x27, 0x27, 0x07, 0x07, 0x54, 0x9f,
					     0x63, 0x8f, 0x1a, 0x33, 0x0d, 0x00, 0x00);
	s6e63m0_dcs_write_seq_static(ctx, MCS_DISCTL,
				     0x02, 0x03, 0x1c, 0x10, 0x10);
	s6e63m0_dcs_write_seq_static(ctx, MCS_IFCTL,
				     0x03, 0x00, 0x00);
	s6e63m0_dcs_write_seq_static(ctx, MCS_PGAMMACTL,
				     0x00, 0x18, 0x08, 0x24, 0x64, 0x56, 0x33,
				     0xb6, 0xba, 0xa8, 0xac, 0xb1, 0x9d, 0xc1,
				     0xc1, 0xb7, 0x00, 0x9c, 0x00, 0x9f, 0x00,
				     0xd6);
	s6e63m0_dcs_write_seq_static(ctx, MCS_PGAMMACTL,
				     0x01);
	s6e63m0_dcs_write_seq_static(ctx, MCS_SRCCTL,
				     0x00, 0x8e, 0x07);
	s6e63m0_dcs_write_seq_static(ctx, MCS_PENTILE_1, 0x6c);
	s6e63m0_dcs_write_seq_static(ctx, MCS_GAMMA_DELTA_Y_RED,
				     0x2c, 0x12, 0x0c, 0x0a, 0x10, 0x0e, 0x17,
				     0x13, 0x1f, 0x1a, 0x2a, 0x24, 0x1f, 0x1b,
				     0x1a, 0x17, 0x2b, 0x26, 0x22, 0x20, 0x3a,
				     0x34, 0x30, 0x2c, 0x29, 0x26, 0x25, 0x23,
				     0x21, 0x20, 0x1e, 0x1e);
	s6e63m0_dcs_write_seq_static(ctx, MCS_GAMMA_DELTA_X_RED,
				     0x00, 0x00, 0x11, 0x22, 0x33, 0x44, 0x44,
				     0x44, 0x55, 0x55, 0x66, 0x66, 0x66, 0x66,
				     0x66, 0x66);
	s6e63m0_dcs_write_seq_static(ctx, MCS_GAMMA_DELTA_Y_GREEN,
				     0x2c, 0x12, 0x0c, 0x0a, 0x10, 0x0e, 0x17,
				     0x13, 0x1f, 0x1a, 0x2a, 0x24, 0x1f, 0x1b,
				     0x1a, 0x17, 0x2b, 0x26, 0x22, 0x20, 0x3a,
				     0x34, 0x30, 0x2c, 0x29, 0x26, 0x25, 0x23,
				     0x21, 0x20, 0x1e, 0x1e);
	s6e63m0_dcs_write_seq_static(ctx, MCS_GAMMA_DELTA_X_GREEN,
				     0x00, 0x00, 0x11, 0x22, 0x33, 0x44, 0x44,
				     0x44, 0x55, 0x55, 0x66, 0x66, 0x66, 0x66,
				     0x66, 0x66);
	s6e63m0_dcs_write_seq_static(ctx, MCS_GAMMA_DELTA_Y_BLUE,
				     0x2c, 0x12, 0x0c, 0x0a, 0x10, 0x0e, 0x17,
				     0x13, 0x1f, 0x1a, 0x2a, 0x24, 0x1f, 0x1b,
				     0x1a, 0x17, 0x2b, 0x26, 0x22, 0x20, 0x3a,
				     0x34, 0x30, 0x2c, 0x29, 0x26, 0x25, 0x23,
				     0x21, 0x20, 0x1e, 0x1e);
	s6e63m0_dcs_write_seq_static(ctx, MCS_GAMMA_DELTA_X_BLUE,
				     0x00, 0x00, 0x11, 0x22, 0x33, 0x44, 0x44,
				     0x44, 0x55, 0x55, 0x66, 0x66, 0x66, 0x66,
				     0x66, 0x66);
	s6e63m0_dcs_write_seq_static(ctx, MCS_BCMODE,
				     0x4d, 0x96, 0x1d, 0x00, 0x00, 0x01, 0xdf,
				     0x00, 0x00, 0x03, 0x1f, 0x00, 0x00, 0x00,
				     0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x06,
				     0x09, 0x0d, 0x0f, 0x12, 0x15, 0x18);
	s6e63m0_dcs_write_seq_static(ctx, MCS_TEMP_SWIRE,
				     0x10, 0x10, 0x0b, 0x05);
	s6e63m0_dcs_write_seq_static(ctx, MCS_MIECTL1,
				     0x01);
	s6e63m0_dcs_write_seq_static(ctx, MCS_ELVSS_ON,
				     0x0b);
}
/*
 * Enable the supply rails, let them settle, then pulse the reset line
 * and wait 120 ms for the controller to come up.
 */
static int s6e63m0_power_on(struct s6e63m0 *ctx)
{
	int ret;
	ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
	if (ret < 0)
		return ret;
	msleep(25);
	/* Be sure to send a reset pulse */
	gpiod_set_value(ctx->reset_gpio, 1);
	msleep(5);
	gpiod_set_value(ctx->reset_gpio, 0);
	msleep(120);
	return 0;
}
/*
 * Assert reset, wait for the panel to settle, then drop the supply
 * rails. Returns 0 on success or the regulator error code.
 */
static int s6e63m0_power_off(struct s6e63m0 *ctx)
{
	/* Hold the controller in reset while the rails go down. */
	gpiod_set_value(ctx->reset_gpio, 1);
	msleep(120);

	return regulator_bulk_disable(ARRAY_SIZE(ctx->supplies),
				      ctx->supplies);
}
/*
 * Turn the display off: disable the backlight, then send display-off
 * and enter-sleep with the required delays. Always returns 0.
 */
static int s6e63m0_disable(struct drm_panel *panel)
{
	struct s6e63m0 *ctx = panel_to_s6e63m0(panel);
	if (!ctx->enabled)
		return 0;
	backlight_disable(ctx->bl_dev);
	s6e63m0_dcs_write_seq_static(ctx, MIPI_DCS_SET_DISPLAY_OFF);
	msleep(10);
	s6e63m0_dcs_write_seq_static(ctx, MIPI_DCS_ENTER_SLEEP_MODE);
	msleep(120);
	ctx->enabled = false;
	return 0;
}
/*
 * Power the panel down. Any latched transfer error is discarded first
 * so a stale error does not block future transfers.
 */
static int s6e63m0_unprepare(struct drm_panel *panel)
{
	struct s6e63m0 *ctx = panel_to_s6e63m0(panel);
	int ret;
	if (!ctx->prepared)
		return 0;
	s6e63m0_clear_error(ctx);
	ret = s6e63m0_power_off(ctx);
	if (ret < 0)
		return ret;
	ctx->prepared = false;
	return 0;
}
/*
 * Power up and initialize the panel: enable the rails and reset pulse,
 * unlock level-2 and MTP access, identify the panel type and run the
 * vendor init sequence.
 *
 * Fixes two error-path leaks in the original: a failed
 * s6e63m0_check_lcd_type() returned with the regulators still enabled,
 * and a failed init called s6e63m0_unprepare(), which was a no-op
 * because ctx->prepared had not been set yet. Both paths now power the
 * panel off directly.
 */
static int s6e63m0_prepare(struct drm_panel *panel)
{
	struct s6e63m0 *ctx = panel_to_s6e63m0(panel);
	int ret;

	if (ctx->prepared)
		return 0;

	ret = s6e63m0_power_on(ctx);
	if (ret < 0)
		return ret;

	/* Magic to unlock level 2 control of the display */
	s6e63m0_dcs_write_seq_static(ctx, MCS_LEVEL_2_KEY, 0x5a, 0x5a);

	/* Magic to unlock MTP reading */
	s6e63m0_dcs_write_seq_static(ctx, MCS_MTP_KEY, 0x5a, 0x5a);

	ret = s6e63m0_check_lcd_type(ctx);
	if (ret < 0)
		goto err_power_off;

	s6e63m0_init(ctx);

	ret = s6e63m0_clear_error(ctx);
	if (ret < 0)
		goto err_power_off;

	ctx->prepared = true;

	return 0;

err_power_off:
	/*
	 * s6e63m0_unprepare() would be a no-op here since ctx->prepared
	 * is still false, so power off directly.
	 */
	s6e63m0_power_off(ctx);
	return ret;
}
/*
 * Bring the display out of sleep, turn it on, program the vendor
 * error-check registers and enable the backlight. Always returns 0;
 * transfer failures are latched in ctx->error.
 */
static int s6e63m0_enable(struct drm_panel *panel)
{
	struct s6e63m0 *ctx = panel_to_s6e63m0(panel);
	if (ctx->enabled)
		return 0;
	s6e63m0_dcs_write_seq_static(ctx, MIPI_DCS_EXIT_SLEEP_MODE);
	msleep(120);
	s6e63m0_dcs_write_seq_static(ctx, MIPI_DCS_SET_DISPLAY_ON);
	msleep(10);
	/* Vendor error-check parameters (no public documentation). */
	s6e63m0_dcs_write_seq_static(ctx, MCS_ERROR_CHECK,
				     0xE7, 0x14, 0x60, 0x17, 0x0A, 0x49, 0xC3,
				     0x8F, 0x19, 0x64, 0x91, 0x84, 0x76, 0x20,
				     0x0F, 0x00);
	backlight_enable(ctx->bl_dev);
	ctx->enabled = true;
	return 0;
}
/*
 * Report the single fixed 480x800 mode along with the physical size
 * and the RGB888 parallel bus format (DE low, negative-edge drive).
 * Returns 1 on success or -ENOMEM.
 */
static int s6e63m0_get_modes(struct drm_panel *panel,
			     struct drm_connector *connector)
{
	static const u32 bus_format = MEDIA_BUS_FMT_RGB888_1X24;
	struct drm_display_mode *mode;

	mode = drm_mode_duplicate(connector->dev, &default_mode);
	if (!mode) {
		dev_err(panel->dev, "failed to add mode %ux%u@%u\n",
			default_mode.hdisplay, default_mode.vdisplay,
			drm_mode_vrefresh(&default_mode));
		return -ENOMEM;
	}

	drm_mode_set_name(mode);
	mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;

	connector->display_info.width_mm = mode->width_mm;
	connector->display_info.height_mm = mode->height_mm;
	drm_display_info_set_bus_formats(&connector->display_info,
					 &bus_format, 1);
	connector->display_info.bus_flags = DRM_BUS_FLAG_DE_LOW |
		DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE;

	drm_mode_probed_add(connector, mode);

	return 1;
}
/* DRM panel callbacks. */
static const struct drm_panel_funcs s6e63m0_drm_funcs = {
	.disable = s6e63m0_disable,
	.unprepare = s6e63m0_unprepare,
	.prepare = s6e63m0_prepare,
	.enable = s6e63m0_enable,
	.get_modes = s6e63m0_get_modes,
};
/*
 * Apply a backlight level: program the matching ELVSS voltage (clamped
 * to 0x1f), the matching ACL table and the gamma curve, then latch the
 * new gamma with the 0x03 apply command. The brightness index is safe
 * as an array subscript because probe clamps max_brightness to
 * MAX_BRIGHTNESS (NUM_GAMMA_LEVELS - 1).
 */
static int s6e63m0_set_brightness(struct backlight_device *bd)
{
	struct s6e63m0 *ctx = bl_get_data(bd);
	int brightness = bd->props.brightness;
	u8 elvss_val;
	u8 elvss_cmd_set[5];
	int i;
	/* Adjust ELVSS to candela level */
	i = s6e63m0_elvss_per_gamma[brightness];
	elvss_val = ctx->elvss_pulse + s6e63m0_elvss_offsets[i];
	if (elvss_val > 0x1f)
		elvss_val = 0x1f;
	elvss_cmd_set[0] = MCS_TEMP_SWIRE;
	elvss_cmd_set[1] = elvss_val;
	elvss_cmd_set[2] = elvss_val;
	elvss_cmd_set[3] = elvss_val;
	elvss_cmd_set[4] = elvss_val;
	s6e63m0_dcs_write(ctx, elvss_cmd_set, 5);
	/* Update the ACL per gamma value */
	i = s6e63m0_acl_per_gamma[brightness];
	s6e63m0_dcs_write(ctx, s6e63m0_acl[i],
			  ARRAY_SIZE(s6e63m0_acl[i]));
	/* Update gamma table */
	s6e63m0_dcs_write(ctx, s6e63m0_gamma_22[brightness],
			  ARRAY_SIZE(s6e63m0_gamma_22[brightness]));
	s6e63m0_dcs_write_seq_static(ctx, MCS_PGAMMACTL, 0x03);
	return s6e63m0_clear_error(ctx);
}
/* Backlight operations; brightness maps to gamma/ACL/ELVSS tables. */
static const struct backlight_ops s6e63m0_backlight_ops = {
	.update_status	= s6e63m0_set_brightness,
};
/*
 * Register the device-managed backlight, starting at full brightness.
 * Returns 0 on success or the registration error code.
 */
static int s6e63m0_backlight_register(struct s6e63m0 *ctx, u32 max_brightness)
{
	struct device *dev = ctx->dev;
	struct backlight_properties props = {
		.type = BACKLIGHT_RAW,
		.brightness = max_brightness,
		.max_brightness = max_brightness,
	};

	ctx->bl_dev = devm_backlight_device_register(dev, "panel", dev, ctx,
						     &s6e63m0_backlight_ops,
						     &props);
	if (IS_ERR(ctx->bl_dev)) {
		int ret = PTR_ERR(ctx->bl_dev);

		dev_err(dev, "error registering backlight device (%d)\n", ret);
		return ret;
	}

	return 0;
}
/**
 * s6e63m0_probe() - common probe shared by the transport front-ends
 * @dev: underlying device
 * @trsp: opaque transport cookie handed back to the hooks
 * @dcs_read: transport-specific single-byte command read
 * @dcs_write: transport-specific command write
 * @dsi_mode: true when driven over DSI (selects connector type and the
 *            DSI panel condition set)
 *
 * Allocates driver state, reads the optional "max-brightness" property
 * (clamped to MAX_BRIGHTNESS), acquires the vdd3/vci supplies and the
 * reset GPIO, registers the backlight and adds the DRM panel.
 *
 * Return: 0 on success or a negative error code.
 */
int s6e63m0_probe(struct device *dev, void *trsp,
		  int (*dcs_read)(struct device *dev, void *trsp, const u8 cmd, u8 *val),
		  int (*dcs_write)(struct device *dev, void *trsp, const u8 *data, size_t len),
		  bool dsi_mode)
{
	struct s6e63m0 *ctx;
	u32 max_brightness;
	int ret;
	ctx = devm_kzalloc(dev, sizeof(struct s6e63m0), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;
	ctx->transport_data = trsp;
	ctx->dsi_mode = dsi_mode;
	ctx->dcs_read = dcs_read;
	ctx->dcs_write = dcs_write;
	dev_set_drvdata(dev, ctx);
	ctx->dev = dev;
	ctx->enabled = false;
	ctx->prepared = false;
	/* Optional property; fall back to (and never exceed) the table size. */
	ret = device_property_read_u32(dev, "max-brightness", &max_brightness);
	if (ret)
		max_brightness = MAX_BRIGHTNESS;
	if (max_brightness > MAX_BRIGHTNESS) {
		dev_err(dev, "illegal max brightness specified\n");
		max_brightness = MAX_BRIGHTNESS;
	}
	ctx->supplies[0].supply = "vdd3";
	ctx->supplies[1].supply = "vci";
	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(ctx->supplies),
				      ctx->supplies);
	if (ret < 0) {
		dev_err(dev, "failed to get regulators: %d\n", ret);
		return ret;
	}
	/* Start with reset asserted (GPIOD_OUT_HIGH). */
	ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
	if (IS_ERR(ctx->reset_gpio)) {
		dev_err(dev, "cannot get reset-gpios %ld\n", PTR_ERR(ctx->reset_gpio));
		return PTR_ERR(ctx->reset_gpio);
	}
	drm_panel_init(&ctx->panel, dev, &s6e63m0_drm_funcs,
		       dsi_mode ? DRM_MODE_CONNECTOR_DSI :
		       DRM_MODE_CONNECTOR_DPI);
	ret = s6e63m0_backlight_register(ctx, max_brightness);
	if (ret < 0)
		return ret;
	drm_panel_add(&ctx->panel);
	return 0;
}
EXPORT_SYMBOL_GPL(s6e63m0_probe);
/* Common remove: unregister the DRM panel (everything else is devm). */
void s6e63m0_remove(struct device *dev)
{
	struct s6e63m0 *ctx = dev_get_drvdata(dev);
	drm_panel_remove(&ctx->panel);
}
EXPORT_SYMBOL_GPL(s6e63m0_remove);
MODULE_AUTHOR("Paweł Chmiel <[email protected]>");
MODULE_DESCRIPTION("s6e63m0 LCD Driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/gpu/drm/panel/panel-samsung-s6e63m0.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2021 Linaro Ltd.
* Generated with linux-mdss-dsi-panel-driver-generator from vendor device tree:
* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
*/
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/regulator/consumer.h>
#include <video/mipi_display.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_modes.h>
#include <drm/drm_panel.h>
/* Per-device state for one Sharp LS060 panel. */
struct sharp_ls060 {
	struct drm_panel panel;			/* embedded; see to_sharp_ls060() */
	struct mipi_dsi_device *dsi;
	struct regulator *vddi_supply;		/* I/O rail, enabled first */
	struct regulator *vddh_supply;		/* enabled last */
	struct regulator *avdd_supply;
	struct regulator *avee_supply;
	struct gpio_desc *reset_gpio;		/* 1 = asserted */
	bool prepared;
};
/* Convert an embedded drm_panel back to its containing sharp_ls060. */
static inline struct sharp_ls060 *to_sharp_ls060(struct drm_panel *panel)
{
	return container_of(panel, struct sharp_ls060, panel);
}
/* Release, assert, then release the reset line with 10 ms settling
 * between each transition, leaving the panel out of reset.
 */
static void sharp_ls060_reset(struct sharp_ls060 *ctx)
{
	gpiod_set_value_cansleep(ctx->reset_gpio, 0);
	usleep_range(10000, 11000);
	gpiod_set_value_cansleep(ctx->reset_gpio, 1);
	usleep_range(10000, 11000);
	gpiod_set_value_cansleep(ctx->reset_gpio, 0);
	usleep_range(10000, 11000);
}
/*
 * Initialize the panel in low-power mode: an undocumented vendor write
 * (0xbb, 0x13 — no datasheet available), write-memory-start, then exit
 * sleep and switch the display on with the required delays.
 */
static int sharp_ls060_on(struct sharp_ls060 *ctx)
{
	struct mipi_dsi_device *dsi = ctx->dsi;
	struct device *dev = &dsi->dev;
	int ret;
	dsi->mode_flags |= MIPI_DSI_MODE_LPM;
	mipi_dsi_dcs_write_seq(dsi, 0xbb, 0x13);
	mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_MEMORY_START);
	ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
	if (ret < 0) {
		dev_err(dev, "Failed to exit sleep mode: %d\n", ret);
		return ret;
	}
	msleep(120);
	ret = mipi_dsi_dcs_set_display_on(dsi);
	if (ret < 0) {
		dev_err(dev, "Failed to set display on: %d\n", ret);
		return ret;
	}
	msleep(50);
	return 0;
}
/*
 * Shut the display down in high-speed mode: display off, short delay,
 * then enter sleep and wait for the panel to settle.
 */
static int sharp_ls060_off(struct sharp_ls060 *ctx)
{
	struct mipi_dsi_device *dsi = ctx->dsi;
	struct device *dev = &dsi->dev;
	int ret;
	dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
	ret = mipi_dsi_dcs_set_display_off(dsi);
	if (ret < 0) {
		dev_err(dev, "Failed to set display off: %d\n", ret);
		return ret;
	}
	usleep_range(2000, 3000);
	ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
	if (ret < 0) {
		dev_err(dev, "Failed to enter sleep mode: %d\n", ret);
		return ret;
	}
	msleep(121);
	return 0;
}
/*
 * Power-up sequence: vddi, avdd, avee, vddh (with settling delays),
 * then reset and the init commands. On failure the rails already
 * enabled are unwound in reverse via the goto ladder, with reset
 * asserted before vddi is dropped.
 */
static int sharp_ls060_prepare(struct drm_panel *panel)
{
	struct sharp_ls060 *ctx = to_sharp_ls060(panel);
	struct device *dev = &ctx->dsi->dev;
	int ret;
	if (ctx->prepared)
		return 0;
	ret = regulator_enable(ctx->vddi_supply);
	if (ret < 0)
		return ret;
	ret = regulator_enable(ctx->avdd_supply);
	if (ret < 0)
		goto err_avdd;
	usleep_range(1000, 2000);
	ret = regulator_enable(ctx->avee_supply);
	if (ret < 0)
		goto err_avee;
	usleep_range(10000, 11000);
	ret = regulator_enable(ctx->vddh_supply);
	if (ret < 0)
		goto err_vddh;
	usleep_range(10000, 11000);
	sharp_ls060_reset(ctx);
	ret = sharp_ls060_on(ctx);
	if (ret < 0) {
		dev_err(dev, "Failed to initialize panel: %d\n", ret);
		goto err_on;
	}
	ctx->prepared = true;
	return 0;
err_on:
	regulator_disable(ctx->vddh_supply);
	usleep_range(10000, 11000);
err_vddh:
	regulator_disable(ctx->avee_supply);
err_avee:
	regulator_disable(ctx->avdd_supply);
	/* Keep the panel in reset once its analog rails are gone. */
	gpiod_set_value_cansleep(ctx->reset_gpio, 1);
err_avdd:
	regulator_disable(ctx->vddi_supply);
	return ret;
}
/*
 * Power the panel down: send the off sequence (best effort), then release
 * the supplies in reverse of the prepare order and assert reset.
 */
static int sharp_ls060_unprepare(struct drm_panel *panel)
{
	struct sharp_ls060 *ctx = to_sharp_ls060(panel);
	struct device *dev = &ctx->dsi->dev;
	int ret;

	if (!ctx->prepared)
		return 0;

	ret = sharp_ls060_off(ctx);
	if (ret < 0)
		dev_err(dev, "Failed to un-initialize panel: %d\n", ret);

	regulator_disable(ctx->vddh_supply);

	usleep_range(10000, 11000);

	regulator_disable(ctx->avee_supply);
	regulator_disable(ctx->avdd_supply);

	gpiod_set_value_cansleep(ctx->reset_gpio, 1);

	regulator_disable(ctx->vddi_supply);

	ctx->prepared = false;
	return 0;
}
/* Single fixed mode: 1080x1920 @ 60 Hz, timings spelled as active+fp+sync+bp. */
static const struct drm_display_mode sharp_ls060_mode = {
	.clock = (1080 + 96 + 16 + 64) * (1920 + 4 + 1 + 16) * 60 / 1000,
	.hdisplay = 1080,
	.hsync_start = 1080 + 96,
	.hsync_end = 1080 + 96 + 16,
	.htotal = 1080 + 96 + 16 + 64,
	.vdisplay = 1920,
	.vsync_start = 1920 + 4,
	.vsync_end = 1920 + 4 + 1,
	.vtotal = 1920 + 4 + 1 + 16,
	.width_mm = 75,		/* physical size of the active area */
	.height_mm = 132,
};
/*
 * Report the single supported mode to the connector.
 * Returns the number of modes added (1) or -ENOMEM.
 */
static int sharp_ls060_get_modes(struct drm_panel *panel,
				 struct drm_connector *connector)
{
	struct drm_display_mode *mode;

	mode = drm_mode_duplicate(connector->dev, &sharp_ls060_mode);
	if (!mode)
		return -ENOMEM;

	drm_mode_set_name(mode);

	mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
	connector->display_info.width_mm = mode->width_mm;
	connector->display_info.height_mm = mode->height_mm;
	drm_mode_probed_add(connector, mode);

	return 1;
}
/* drm_panel operations; enable/disable are not needed for this panel. */
static const struct drm_panel_funcs sharp_ls060_panel_funcs = {
	.prepare = sharp_ls060_prepare,
	.unprepare = sharp_ls060_unprepare,
	.get_modes = sharp_ls060_get_modes,
};
/*
 * Probe: acquire supplies and reset GPIO, configure the DSI link
 * (4 lanes, RGB888, burst video mode), register the panel and attach
 * to the DSI host.  The panel is removed again if attach fails.
 */
static int sharp_ls060_probe(struct mipi_dsi_device *dsi)
{
	struct device *dev = &dsi->dev;
	struct sharp_ls060 *ctx;
	int ret;

	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->vddi_supply = devm_regulator_get(dev, "vddi");
	if (IS_ERR(ctx->vddi_supply))
		return PTR_ERR(ctx->vddi_supply);

	ctx->vddh_supply = devm_regulator_get(dev, "vddh");
	if (IS_ERR(ctx->vddh_supply))
		return PTR_ERR(ctx->vddh_supply);

	ctx->avdd_supply = devm_regulator_get(dev, "avdd");
	if (IS_ERR(ctx->avdd_supply))
		return PTR_ERR(ctx->avdd_supply);

	ctx->avee_supply = devm_regulator_get(dev, "avee");
	if (IS_ERR(ctx->avee_supply))
		return PTR_ERR(ctx->avee_supply);

	/* Request the reset line already asserted. */
	ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
	if (IS_ERR(ctx->reset_gpio))
		return dev_err_probe(dev, PTR_ERR(ctx->reset_gpio),
				     "Failed to get reset-gpios\n");

	ctx->dsi = dsi;
	mipi_dsi_set_drvdata(dsi, ctx);

	dsi->lanes = 4;
	dsi->format = MIPI_DSI_FMT_RGB888;
	dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
			  MIPI_DSI_MODE_NO_EOT_PACKET |
			  MIPI_DSI_CLOCK_NON_CONTINUOUS;

	drm_panel_init(&ctx->panel, dev, &sharp_ls060_panel_funcs,
		       DRM_MODE_CONNECTOR_DSI);

	ret = drm_panel_of_backlight(&ctx->panel);
	if (ret)
		return dev_err_probe(dev, ret, "Failed to get backlight\n");

	drm_panel_add(&ctx->panel);

	ret = mipi_dsi_attach(dsi);
	if (ret < 0) {
		dev_err(dev, "Failed to attach to DSI host: %d\n", ret);
		drm_panel_remove(&ctx->panel);
		return ret;
	}

	return 0;
}
/* Detach from the DSI host (log-only on failure) and unregister the panel. */
static void sharp_ls060_remove(struct mipi_dsi_device *dsi)
{
	struct sharp_ls060 *ctx = mipi_dsi_get_drvdata(dsi);
	int ret;

	ret = mipi_dsi_detach(dsi);
	if (ret < 0)
		dev_err(&dsi->dev, "Failed to detach from DSI host: %d\n", ret);

	drm_panel_remove(&ctx->panel);
}
/* Device-tree match table. */
static const struct of_device_id sharp_ls060t1sx01_of_match[] = {
	{ .compatible = "sharp,ls060t1sx01" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sharp_ls060t1sx01_of_match);
/* MIPI DSI driver registration and module metadata. */
static struct mipi_dsi_driver sharp_ls060_driver = {
	.probe = sharp_ls060_probe,
	.remove = sharp_ls060_remove,
	.driver = {
		.name = "panel-sharp-ls060t1sx01",
		.of_match_table = sharp_ls060t1sx01_of_match,
	},
};
module_mipi_dsi_driver(sharp_ls060_driver);

MODULE_AUTHOR("Dmitry Baryshkov <[email protected]>");
MODULE_DESCRIPTION("DRM driver for Sharp LS060T1SX01 1080p video mode dsi panel");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/gpu/drm/panel/panel-sharp-ls060t1sx01.c |
// SPDX-License-Identifier: GPL-2.0-only
// Copyright (C) 2019, Michael Srba
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/regulator/consumer.h>
#include <video/mipi_display.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_modes.h>
#include <drm/drm_panel.h>
/* Per-device state for the Samsung AMS452EF01 (S6E88A0 controller) panel. */
struct s6e88a0_ams452ef01 {
	struct drm_panel panel;			/* embedded base panel */
	struct mipi_dsi_device *dsi;		/* DSI peripheral handle */
	struct regulator_bulk_data supplies[2];	/* "vdd3" and "vci" supplies */
	struct gpio_desc *reset_gpio;		/* reset line */
	bool prepared;				/* guards double prepare/unprepare */
};
/* Map an embedded drm_panel back to its enclosing s6e88a0_ams452ef01. */
static inline struct s6e88a0_ams452ef01 *
to_s6e88a0_ams452ef01(struct drm_panel *panel)
{
	return container_of(panel, struct s6e88a0_ams452ef01, panel);
}
/*
 * Pulse the reset line 1 -> 0 -> 1 (logical values; physical polarity comes
 * from the DT GPIO flags), ending with a ~10 ms settle delay.
 */
static void s6e88a0_ams452ef01_reset(struct s6e88a0_ams452ef01 *ctx)
{
	gpiod_set_value_cansleep(ctx->reset_gpio, 1);
	usleep_range(5000, 6000);
	gpiod_set_value_cansleep(ctx->reset_gpio, 0);
	usleep_range(1000, 2000);
	gpiod_set_value_cansleep(ctx->reset_gpio, 1);
	usleep_range(10000, 11000);
}
/*
 * Initialize the controller and switch the display on.  The register
 * values come from the vendor init sequence; order and delays matter.
 */
static int s6e88a0_ams452ef01_on(struct s6e88a0_ams452ef01 *ctx)
{
	struct mipi_dsi_device *dsi = ctx->dsi;
	struct device *dev = &dsi->dev;
	int ret;

	/* Send the init sequence in low-power mode. */
	dsi->mode_flags |= MIPI_DSI_MODE_LPM;

	mipi_dsi_dcs_write_seq(dsi, 0xf0, 0x5a, 0x5a); // enable LEVEL2 commands
	mipi_dsi_dcs_write_seq(dsi, 0xcc, 0x4c); // set Pixel Clock Divider polarity

	ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
	if (ret < 0) {
		dev_err(dev, "Failed to exit sleep mode: %d\n", ret);
		return ret;
	}
	msleep(120);	/* standard DCS wait after exit_sleep_mode */

	// set default brightness/gama
	mipi_dsi_dcs_write_seq(dsi, 0xca,
			       0x01, 0x00, 0x01, 0x00, 0x01, 0x00,	// V255 RR,GG,BB
			       0x80, 0x80, 0x80,			// V203 R,G,B
			       0x80, 0x80, 0x80,			// V151 R,G,B
			       0x80, 0x80, 0x80,			// V87 R,G,B
			       0x80, 0x80, 0x80,			// V51 R,G,B
			       0x80, 0x80, 0x80,			// V35 R,G,B
			       0x80, 0x80, 0x80,			// V23 R,G,B
			       0x80, 0x80, 0x80,			// V11 R,G,B
			       0x6b, 0x68, 0x71,			// V3 R,G,B
			       0x00, 0x00, 0x00);			// V1 R,G,B
	// set default Amoled Off Ratio
	mipi_dsi_dcs_write_seq(dsi, 0xb2, 0x40, 0x0a, 0x17, 0x00, 0x0a);
	mipi_dsi_dcs_write_seq(dsi, 0xb6, 0x2c, 0x0b); // set default elvss voltage
	mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_POWER_SAVE, 0x00);
	mipi_dsi_dcs_write_seq(dsi, 0xf7, 0x03); // gamma/aor update
	mipi_dsi_dcs_write_seq(dsi, 0xf0, 0xa5, 0xa5); // disable LEVEL2 commands

	ret = mipi_dsi_dcs_set_display_on(dsi);
	if (ret < 0) {
		dev_err(dev, "Failed to set display on: %d\n", ret);
		return ret;
	}

	return 0;
}
/*
 * Switch the display off and enter sleep mode.  The shutdown commands
 * are deliberately sent in high-speed mode (LPM flag cleared).
 */
static int s6e88a0_ams452ef01_off(struct s6e88a0_ams452ef01 *ctx)
{
	struct mipi_dsi_device *dsi = ctx->dsi;
	struct device *dev = &dsi->dev;
	int ret;

	dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;

	ret = mipi_dsi_dcs_set_display_off(dsi);
	if (ret < 0) {
		dev_err(dev, "Failed to set display off: %d\n", ret);
		return ret;
	}
	msleep(35);

	ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
	if (ret < 0) {
		dev_err(dev, "Failed to enter sleep mode: %d\n", ret);
		return ret;
	}
	msleep(120);	/* sleep-in settle time */

	return 0;
}
/*
 * Power up the panel: enable both supplies, pulse reset, run the init
 * sequence.  On init failure the panel is put back in reset and the
 * supplies are released.
 */
static int s6e88a0_ams452ef01_prepare(struct drm_panel *panel)
{
	struct s6e88a0_ams452ef01 *ctx = to_s6e88a0_ams452ef01(panel);
	struct device *dev = &ctx->dsi->dev;
	int ret;

	if (ctx->prepared)
		return 0;

	ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
	if (ret < 0) {
		dev_err(dev, "Failed to enable regulators: %d\n", ret);
		return ret;
	}

	s6e88a0_ams452ef01_reset(ctx);

	ret = s6e88a0_ams452ef01_on(ctx);
	if (ret < 0) {
		dev_err(dev, "Failed to initialize panel: %d\n", ret);
		gpiod_set_value_cansleep(ctx->reset_gpio, 0);
		regulator_bulk_disable(ARRAY_SIZE(ctx->supplies),
				       ctx->supplies);
		return ret;
	}

	ctx->prepared = true;
	return 0;
}
/*
 * Power the panel down: send the off sequence (best effort), assert
 * reset and release the supplies.
 */
static int s6e88a0_ams452ef01_unprepare(struct drm_panel *panel)
{
	struct s6e88a0_ams452ef01 *ctx = to_s6e88a0_ams452ef01(panel);
	struct device *dev = &ctx->dsi->dev;
	int ret;

	if (!ctx->prepared)
		return 0;

	ret = s6e88a0_ams452ef01_off(ctx);
	if (ret < 0)
		dev_err(dev, "Failed to un-initialize panel: %d\n", ret);

	gpiod_set_value_cansleep(ctx->reset_gpio, 0);
	regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);

	ctx->prepared = false;
	return 0;
}
/* Single fixed mode: 540x960 @ 60 Hz, timings spelled as active+fp+sync+bp. */
static const struct drm_display_mode s6e88a0_ams452ef01_mode = {
	.clock = (540 + 88 + 4 + 20) * (960 + 14 + 2 + 8) * 60 / 1000,
	.hdisplay = 540,
	.hsync_start = 540 + 88,
	.hsync_end = 540 + 88 + 4,
	.htotal = 540 + 88 + 4 + 20,
	.vdisplay = 960,
	.vsync_start = 960 + 14,
	.vsync_end = 960 + 14 + 2,
	.vtotal = 960 + 14 + 2 + 8,
	.width_mm = 56,		/* physical size of the active area */
	.height_mm = 100,
};
/*
 * Report the single supported mode to the connector.
 * Returns the number of modes added (1) or -ENOMEM.
 */
static int s6e88a0_ams452ef01_get_modes(struct drm_panel *panel,
					struct drm_connector *connector)
{
	struct drm_display_mode *mode;

	mode = drm_mode_duplicate(connector->dev, &s6e88a0_ams452ef01_mode);
	if (!mode)
		return -ENOMEM;

	drm_mode_set_name(mode);

	mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
	connector->display_info.width_mm = mode->width_mm;
	connector->display_info.height_mm = mode->height_mm;
	drm_mode_probed_add(connector, mode);

	return 1;
}
/* drm_panel operations; enable/disable are not needed for this panel. */
static const struct drm_panel_funcs s6e88a0_ams452ef01_panel_funcs = {
	.unprepare = s6e88a0_ams452ef01_unprepare,
	.prepare = s6e88a0_ams452ef01_prepare,
	.get_modes = s6e88a0_ams452ef01_get_modes,
};
/*
 * Probe: acquire supplies and reset GPIO, configure the DSI link
 * (2 lanes, RGB888, burst video mode), register the panel and attach
 * to the DSI host.  The panel is removed again if attach fails.
 *
 * Resource-acquisition failures are reported through dev_err_probe() so
 * that -EPROBE_DEFER does not spam the log and the deferral reason is
 * recorded (matching the other panel drivers in this file set).
 */
static int s6e88a0_ams452ef01_probe(struct mipi_dsi_device *dsi)
{
	struct device *dev = &dsi->dev;
	struct s6e88a0_ams452ef01 *ctx;
	int ret;

	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->supplies[0].supply = "vdd3";
	ctx->supplies[1].supply = "vci";
	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(ctx->supplies),
				      ctx->supplies);
	if (ret < 0)
		return dev_err_probe(dev, ret, "Failed to get regulators\n");

	ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
	if (IS_ERR(ctx->reset_gpio))
		return dev_err_probe(dev, PTR_ERR(ctx->reset_gpio),
				     "Failed to get reset-gpios\n");

	ctx->dsi = dsi;
	mipi_dsi_set_drvdata(dsi, ctx);

	dsi->lanes = 2;
	dsi->format = MIPI_DSI_FMT_RGB888;
	dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST;

	drm_panel_init(&ctx->panel, dev, &s6e88a0_ams452ef01_panel_funcs,
		       DRM_MODE_CONNECTOR_DSI);

	drm_panel_add(&ctx->panel);

	ret = mipi_dsi_attach(dsi);
	if (ret < 0) {
		dev_err(dev, "Failed to attach to DSI host: %d\n", ret);
		drm_panel_remove(&ctx->panel);
		return ret;
	}

	return 0;
}
/* Detach from the DSI host (log-only on failure) and unregister the panel. */
static void s6e88a0_ams452ef01_remove(struct mipi_dsi_device *dsi)
{
	struct s6e88a0_ams452ef01 *ctx = mipi_dsi_get_drvdata(dsi);
	int ret;

	ret = mipi_dsi_detach(dsi);
	if (ret < 0)
		dev_err(&dsi->dev, "Failed to detach from DSI host: %d\n", ret);

	drm_panel_remove(&ctx->panel);
}
/* Device-tree match table. */
static const struct of_device_id s6e88a0_ams452ef01_of_match[] = {
	{ .compatible = "samsung,s6e88a0-ams452ef01" },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, s6e88a0_ams452ef01_of_match);
/* MIPI DSI driver registration and module metadata. */
static struct mipi_dsi_driver s6e88a0_ams452ef01_driver = {
	.probe = s6e88a0_ams452ef01_probe,
	.remove = s6e88a0_ams452ef01_remove,
	.driver = {
		.name = "panel-s6e88a0-ams452ef01",
		.of_match_table = s6e88a0_ams452ef01_of_match,
	},
};
module_mipi_dsi_driver(s6e88a0_ams452ef01_driver);

MODULE_AUTHOR("Michael Srba <[email protected]>")
MODULE_DESCRIPTION("MIPI-DSI based Panel Driver for AMS452EF01 AMOLED LCD with a S6E88A0 controller");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams452ef01.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Toppoly TD043MTEA1 Panel Driver
*
* Copyright (C) 2019 Texas Instruments Incorporated
*
* Based on the omapdrm-specific panel-tpo-td043mtea1 driver
*
* Author: Gražvydas Ignotas <[email protected]>
*/
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/regulator/consumer.h>
#include <linux/spi/spi.h>
#include <drm/drm_connector.h>
#include <drm/drm_modes.h>
#include <drm/drm_panel.h>
/* Register 2: input mode select and clock/sync polarity. */
#define TPO_R02_MODE(x)		((x) & 7)
#define TPO_R02_MODE_800x480	7
#define TPO_R02_NCLK_RISING	BIT(3)
#define TPO_R02_HSYNC_HIGH	BIT(4)
#define TPO_R02_VSYNC_HIGH	BIT(5)

/* Register 3: power / standby control bits. */
#define TPO_R03_NSTANDBY	BIT(0)
#define TPO_R03_EN_CP_CLK	BIT(1)
#define TPO_R03_EN_VGL_PUMP	BIT(2)
#define TPO_R03_EN_PWM		BIT(3)
#define TPO_R03_DRIVING_CAP_100	BIT(4)
#define TPO_R03_EN_PRE_CHARGE	BIT(6)
#define TPO_R03_SOFTWARE_CTL	BIT(7)

/* Register 4: scan direction and charge-pump/VGL frequency. */
#define TPO_R04_NFLIP_H		BIT(0)
#define TPO_R04_NFLIP_V		BIT(1)
#define TPO_R04_CP_CLK_FREQ_1H	BIT(2)
#define TPO_R04_VGL_FREQ_1H	BIT(4)

/* Composite register-3 values for normal operation and standby. */
#define TPO_R03_VAL_NORMAL \
	(TPO_R03_NSTANDBY | TPO_R03_EN_CP_CLK | TPO_R03_EN_VGL_PUMP | \
	 TPO_R03_EN_PWM | TPO_R03_DRIVING_CAP_100 | TPO_R03_EN_PRE_CHARGE | \
	 TPO_R03_SOFTWARE_CTL)

#define TPO_R03_VAL_STANDBY \
	(TPO_R03_DRIVING_CAP_100 | TPO_R03_EN_PRE_CHARGE | \
	 TPO_R03_SOFTWARE_CTL)
/* Default 12-point gamma curve, 10-bit values (written via regs 0x11-0x1f). */
static const u16 td043mtea1_def_gamma[12] = {
	105, 315, 381, 431, 490, 537, 579, 686, 780, 837, 880, 1023
};
/* Per-device state for the TPO TD043MTEA1 SPI-controlled panel. */
struct td043mtea1_panel {
	struct drm_panel panel;		/* embedded base panel */
	struct spi_device *spi;		/* control bus */
	struct regulator *vcc_reg;	/* "vcc" supply */
	struct gpio_desc *reset_gpio;	/* reset line */
	unsigned int mode;		/* register-2 input mode (3 bits) */
	u16 gamma[12];			/* current gamma curve, user-tunable via sysfs */
	bool vmirror;			/* vertical mirror enabled (sysfs) */
	bool powered_on;		/* panel currently powered */
	bool spi_suspended;		/* SPI bus suspended; defer programming to resume */
	bool power_on_resume;		/* NOTE(review): appears unused in this chunk */
};

#define to_td043mtea1_device(p) container_of(p, struct td043mtea1_panel, panel)
/* -----------------------------------------------------------------------------
* Hardware Access
*/
/*
 * Write one 8-bit value to an LCD register over SPI.  The 16-bit frame is
 * addr[5:0] in bits 15:10, a write marker in bit 8, and the value in the
 * low byte.  A failure is logged but otherwise only reported to the caller.
 */
static int td043mtea1_write(struct td043mtea1_panel *lcd, u8 addr, u8 value)
{
	struct spi_message msg;
	struct spi_transfer xfer;
	u16 data;
	int ret;

	spi_message_init(&msg);

	memset(&xfer, 0, sizeof(xfer));

	data = ((u16)addr << 10) | (1 << 8) | value;
	xfer.tx_buf = &data;
	xfer.bits_per_word = 16;
	xfer.len = 2;
	spi_message_add_tail(&xfer, &msg);

	ret = spi_sync(lcd->spi, &msg);
	if (ret < 0)
		dev_warn(&lcd->spi->dev, "failed to write to LCD reg (%d)\n",
			 ret);

	return ret;
}
/*
 * Program the 12-entry gamma table.  Each entry is 10 bits: the top two
 * bits of four entries are packed into one of registers 0x11-0x13, the
 * low bytes go into registers 0x14-0x1f.
 */
static void td043mtea1_write_gamma(struct td043mtea1_panel *lcd)
{
	const u16 *gamma = lcd->gamma;
	unsigned int i;
	u8 val;

	/* gamma bits [9:8] */
	for (val = i = 0; i < 4; i++)
		val |= (gamma[i] & 0x300) >> ((i + 1) * 2);
	td043mtea1_write(lcd, 0x11, val);

	for (val = i = 0; i < 4; i++)
		val |= (gamma[i + 4] & 0x300) >> ((i + 1) * 2);
	td043mtea1_write(lcd, 0x12, val);

	for (val = i = 0; i < 4; i++)
		val |= (gamma[i + 8] & 0x300) >> ((i + 1) * 2);
	td043mtea1_write(lcd, 0x13, val);

	/* gamma bits [7:0] */
	for (i = 0; i < 12; i++)
		td043mtea1_write(lcd, 0x14 + i, gamma[i] & 0xff);
}
/*
 * Program register 4: default is no flip in either direction; clearing
 * NFLIP_V enables the vertical mirror requested via sysfs.
 */
static int td043mtea1_write_mirror(struct td043mtea1_panel *lcd)
{
	u8 reg4 = TPO_R04_NFLIP_H | TPO_R04_NFLIP_V |
		TPO_R04_CP_CLK_FREQ_1H | TPO_R04_VGL_FREQ_1H;

	if (lcd->vmirror)
		reg4 &= ~TPO_R04_NFLIP_V;

	return td043mtea1_write(lcd, 4, reg4);
}
/*
 * Power the panel up: enable VCC, wait for it to stabilize, release
 * reset and program mode, power, brightness (0x20/0x21), mirror and
 * gamma registers.  Idempotent via lcd->powered_on.
 */
static int td043mtea1_power_on(struct td043mtea1_panel *lcd)
{
	int ret;

	if (lcd->powered_on)
		return 0;

	ret = regulator_enable(lcd->vcc_reg);
	if (ret < 0)
		return ret;

	/* Wait for the panel to stabilize. */
	msleep(160);

	gpiod_set_value(lcd->reset_gpio, 0);

	td043mtea1_write(lcd, 2, TPO_R02_MODE(lcd->mode) | TPO_R02_NCLK_RISING);
	td043mtea1_write(lcd, 3, TPO_R03_VAL_NORMAL);
	td043mtea1_write(lcd, 0x20, 0xf0);
	td043mtea1_write(lcd, 0x21, 0xf0);
	td043mtea1_write_mirror(lcd);
	td043mtea1_write_gamma(lcd);

	lcd->powered_on = true;
	return 0;
}
/*
 * Power the panel down: enter standby with the PWM still enabled, assert
 * reset, wait a couple of frames, then fully stand by and cut VCC.
 */
static void td043mtea1_power_off(struct td043mtea1_panel *lcd)
{
	if (!lcd->powered_on)
		return;

	td043mtea1_write(lcd, 3, TPO_R03_VAL_STANDBY | TPO_R03_EN_PWM);

	gpiod_set_value(lcd->reset_gpio, 1);

	/* wait for at least 2 vsyncs before cutting off power */
	msleep(50);

	td043mtea1_write(lcd, 3, TPO_R03_VAL_STANDBY);

	regulator_disable(lcd->vcc_reg);

	lcd->powered_on = false;
}
/* -----------------------------------------------------------------------------
* sysfs
*/
/* sysfs read: report the vertical-mirror flag as 0 or 1. */
static ssize_t vmirror_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct td043mtea1_panel *lcd = dev_get_drvdata(dev);
	int enabled = lcd->vmirror ? 1 : 0;

	return sysfs_emit(buf, "%d\n", enabled);
}
/* sysfs write: set the vertical-mirror flag and push it to register 4. */
static ssize_t vmirror_store(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct td043mtea1_panel *lcd = dev_get_drvdata(dev);
	int val;
	int ret;

	ret = kstrtoint(buf, 0, &val);
	if (ret < 0)
		return ret;

	lcd->vmirror = !!val;

	ret = td043mtea1_write_mirror(lcd);
	if (ret < 0)
		return ret;

	return count;
}
/* sysfs read: report the current register-2 input mode. */
static ssize_t mode_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct td043mtea1_panel *lcd = dev_get_drvdata(dev);
	unsigned int mode = lcd->mode;

	return sysfs_emit(buf, "%d\n", mode);
}
/*
 * sysfs write: set the input mode (3 bits, anything else rejected) and
 * write register 2 with the fixed NCLK_RISING polarity.
 */
static ssize_t mode_store(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t count)
{
	struct td043mtea1_panel *lcd = dev_get_drvdata(dev);
	long val;
	int ret;

	ret = kstrtol(buf, 0, &val);
	if (ret != 0 || val & ~7)
		return -EINVAL;

	lcd->mode = val;

	val |= TPO_R02_NCLK_RISING;
	td043mtea1_write(lcd, 2, val);

	return count;
}
/*
 * sysfs read: emit the 12 gamma values space-separated on one line.
 *
 * Use sysfs_emit_at() rather than raw snprintf(): it bounds the write to
 * PAGE_SIZE for us and matches the sysfs_emit() usage in the other show
 * callbacks of this driver.  The old snprintf() error check was dead code
 * (it cannot return a negative value for this format and buffer).
 */
static ssize_t gamma_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct td043mtea1_panel *lcd = dev_get_drvdata(dev);
	ssize_t len = 0;
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(lcd->gamma); i++)
		len += sysfs_emit_at(buf, len, "%u ", lcd->gamma[i]);

	/* Replace the trailing space with a newline. */
	buf[len - 1] = '\n';

	return len;
}
/*
 * sysfs write: parse exactly 12 unsigned gamma values, store them and
 * reprogram the gamma registers.
 */
static ssize_t gamma_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	struct td043mtea1_panel *lcd = dev_get_drvdata(dev);
	unsigned int g[12];
	unsigned int i;
	int ret;

	ret = sscanf(buf, "%u %u %u %u %u %u %u %u %u %u %u %u",
		     &g[0], &g[1], &g[2], &g[3], &g[4], &g[5],
		     &g[6], &g[7], &g[8], &g[9], &g[10], &g[11]);
	if (ret != 12)
		return -EINVAL;

	for (i = 0; i < 12; i++)
		lcd->gamma[i] = g[i];

	td043mtea1_write_gamma(lcd);

	return count;
}
/* sysfs attributes: vmirror, mode and gamma, registered as one group. */
static DEVICE_ATTR_RW(vmirror);
static DEVICE_ATTR_RW(mode);
static DEVICE_ATTR_RW(gamma);

static struct attribute *td043mtea1_attrs[] = {
	&dev_attr_vmirror.attr,
	&dev_attr_mode.attr,
	&dev_attr_gamma.attr,
	NULL,
};

static const struct attribute_group td043mtea1_attr_group = {
	.attrs = td043mtea1_attrs,
};
/* -----------------------------------------------------------------------------
* Panel Operations
*/
/*
 * Panel unprepare: power off unless the SPI bus is already suspended,
 * in which case the PM suspend callback has handled it.
 */
static int td043mtea1_unprepare(struct drm_panel *panel)
{
	struct td043mtea1_panel *lcd = to_td043mtea1_device(panel);

	if (!lcd->spi_suspended)
		td043mtea1_power_off(lcd);

	return 0;
}
/* Panel prepare: power the panel on unless the SPI bus is suspended. */
static int td043mtea1_prepare(struct drm_panel *panel)
{
	struct td043mtea1_panel *lcd = to_td043mtea1_device(panel);
	int ret;

	/*
	 * If we are resuming from system suspend, SPI might not be enabled
	 * yet, so we'll program the LCD from SPI PM resume callback.
	 */
	if (lcd->spi_suspended)
		return 0;

	ret = td043mtea1_power_on(lcd);
	if (ret) {
		dev_err(&lcd->spi->dev, "%s: power on failed (%d)\n",
			__func__, ret);
		return ret;
	}

	return 0;
}
/* Single fixed mode: 800x480 at a 36 MHz pixel clock, negative syncs. */
static const struct drm_display_mode td043mtea1_mode = {
	.clock = 36000,
	.hdisplay = 800,
	.hsync_start = 800 + 68,
	.hsync_end = 800 + 68 + 1,
	.htotal = 800 + 68 + 1 + 214,
	.vdisplay = 480,
	.vsync_start = 480 + 39,
	.vsync_end = 480 + 39 + 1,
	.vtotal = 480 + 39 + 1 + 34,
	.type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
	.flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
	.width_mm = 94,		/* physical size of the active area */
	.height_mm = 56,
};
/*
 * Report the single supported mode and the DPI bus sampling flags.
 * Returns the number of modes added (1) or -ENOMEM.
 */
static int td043mtea1_get_modes(struct drm_panel *panel,
				struct drm_connector *connector)
{
	struct drm_display_mode *mode;

	mode = drm_mode_duplicate(connector->dev, &td043mtea1_mode);
	if (!mode)
		return -ENOMEM;

	drm_mode_set_name(mode);
	drm_mode_probed_add(connector, mode);

	connector->display_info.width_mm = td043mtea1_mode.width_mm;
	connector->display_info.height_mm = td043mtea1_mode.height_mm;

	/*
	 * FIXME: According to the datasheet sync signals are sampled on the
	 * rising edge of the clock, but the code running on the OMAP3 Pandora
	 * indicates sampling on the falling edge. This should be tested on a
	 * real device.
	 */
	connector->display_info.bus_flags = DRM_BUS_FLAG_DE_HIGH
					  | DRM_BUS_FLAG_SYNC_SAMPLE_NEGEDGE
					  | DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE;

	return 1;
}
/* drm_panel operations; enable/disable are not needed for this panel. */
static const struct drm_panel_funcs td043mtea1_funcs = {
	.unprepare = td043mtea1_unprepare,
	.prepare = td043mtea1_prepare,
	.get_modes = td043mtea1_get_modes,
};
/* -----------------------------------------------------------------------------
* Power Management, Probe and Remove
*/
/*
 * System suspend: power the panel off while the SPI bus is still usable.
 * powered_on is deliberately set back to true afterwards so that resume
 * knows it has to re-power the panel.
 */
static int __maybe_unused td043mtea1_suspend(struct device *dev)
{
	struct td043mtea1_panel *lcd = dev_get_drvdata(dev);

	if (lcd->powered_on) {
		td043mtea1_power_off(lcd);
		lcd->powered_on = true;
	}

	lcd->spi_suspended = true;
	return 0;
}
/*
 * System resume: if the panel was on before suspend (powered_on kept
 * true by the suspend handler), re-power and reprogram it now that the
 * SPI bus is available again.
 */
static int __maybe_unused td043mtea1_resume(struct device *dev)
{
	struct td043mtea1_panel *lcd = dev_get_drvdata(dev);
	int ret;

	lcd->spi_suspended = false;

	if (lcd->powered_on) {
		/* Clear the flag so power_on() does not short-circuit. */
		lcd->powered_on = false;
		ret = td043mtea1_power_on(lcd);
		if (ret)
			return ret;
	}

	return 0;
}

static SIMPLE_DEV_PM_OPS(td043mtea1_pm_ops, td043mtea1_suspend,
			 td043mtea1_resume);
/*
 * Probe: fetch VCC and reset GPIO, configure the 16-bit mode-0 SPI link,
 * create the tuning sysfs group and register the panel.
 */
static int td043mtea1_probe(struct spi_device *spi)
{
	struct td043mtea1_panel *lcd;
	int ret;

	lcd = devm_kzalloc(&spi->dev, sizeof(*lcd), GFP_KERNEL);
	if (lcd == NULL)
		return -ENOMEM;

	spi_set_drvdata(spi, lcd);
	lcd->spi = spi;
	lcd->mode = TPO_R02_MODE_800x480;
	memcpy(lcd->gamma, td043mtea1_def_gamma, sizeof(lcd->gamma));

	lcd->vcc_reg = devm_regulator_get(&spi->dev, "vcc");
	if (IS_ERR(lcd->vcc_reg))
		return dev_err_probe(&spi->dev, PTR_ERR(lcd->vcc_reg),
				     "failed to get VCC regulator\n");

	/* Request the reset line already asserted. */
	lcd->reset_gpio = devm_gpiod_get(&spi->dev, "reset", GPIOD_OUT_HIGH);
	if (IS_ERR(lcd->reset_gpio))
		return dev_err_probe(&spi->dev, PTR_ERR(lcd->reset_gpio),
				     "failed to get reset GPIO\n");

	spi->bits_per_word = 16;
	spi->mode = SPI_MODE_0;

	ret = spi_setup(spi);
	if (ret < 0) {
		dev_err(&spi->dev, "failed to setup SPI: %d\n", ret);
		return ret;
	}

	ret = sysfs_create_group(&spi->dev.kobj, &td043mtea1_attr_group);
	if (ret < 0) {
		dev_err(&spi->dev, "failed to create sysfs files\n");
		return ret;
	}

	drm_panel_init(&lcd->panel, &lcd->spi->dev, &td043mtea1_funcs,
		       DRM_MODE_CONNECTOR_DPI);

	drm_panel_add(&lcd->panel);

	return 0;
}
/*
 * Remove: unregister the panel, shut it down in case it is still active,
 * and drop the sysfs group.
 */
static void td043mtea1_remove(struct spi_device *spi)
{
	struct td043mtea1_panel *lcd = spi_get_drvdata(spi);

	drm_panel_remove(&lcd->panel);
	drm_panel_disable(&lcd->panel);
	drm_panel_unprepare(&lcd->panel);

	sysfs_remove_group(&spi->dev.kobj, &td043mtea1_attr_group);
}
/* Device-tree match table. */
static const struct of_device_id td043mtea1_of_match[] = {
	{ .compatible = "tpo,td043mtea1", },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, td043mtea1_of_match);
/* Legacy (non-DT) SPI device-id table. */
static const struct spi_device_id td043mtea1_ids[] = {
	{ "td043mtea1", 0 },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(spi, td043mtea1_ids);
/* SPI driver registration and module metadata. */
static struct spi_driver td043mtea1_driver = {
	.probe		= td043mtea1_probe,
	.remove		= td043mtea1_remove,
	.id_table	= td043mtea1_ids,
	.driver		= {
		.name = "panel-tpo-td043mtea1",
		.pm = &td043mtea1_pm_ops,
		.of_match_table = td043mtea1_of_match,
	},
};
module_spi_driver(td043mtea1_driver);

MODULE_AUTHOR("Gražvydas Ignotas <[email protected]>");
MODULE_DESCRIPTION("TPO TD043MTEA1 Panel Driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/gpu/drm/panel/panel-tpo-td043mtea1.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* NEC NL8048HL11 Panel Driver
*
* Copyright (C) 2019 Texas Instruments Incorporated
*
* Based on the omapdrm-specific panel-nec-nl8048hl11 driver
*
* Copyright (C) 2010 Texas Instruments Incorporated
* Author: Erik Gilling <[email protected]>
*/
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/pm.h>
#include <linux/spi/spi.h>
#include <drm/drm_connector.h>
#include <drm/drm_modes.h>
#include <drm/drm_panel.h>
/* Per-device state for the NEC NL8048HL11 SPI-controlled panel. */
struct nl8048_panel {
	struct drm_panel panel;		/* embedded base panel */

	struct spi_device *spi;		/* control bus */
	struct gpio_desc *reset_gpio;	/* reset line */
};

#define to_nl8048_device(p) container_of(p, struct nl8048_panel, panel)
/*
 * Write one register over SPI.  The controller takes a single 32-bit
 * word (spi->bits_per_word == 32) laid out here as four bytes
 * { value, 0x01, addr, 0x00 }; 0x01 presumably marks a write — no
 * public datasheet to confirm.
 */
static int nl8048_write(struct nl8048_panel *lcd, unsigned char addr,
			unsigned char value)
{
	u8 data[4] = { value, 0x01, addr, 0x00 };
	int ret;

	ret = spi_write(lcd->spi, data, sizeof(data));
	if (ret)
		dev_err(&lcd->spi->dev, "SPI write to %u failed: %d\n",
			addr, ret);

	return ret;
}
/*
 * Program the vendor-provided register init table, then release the
 * software standby via register 2 after a short delay.  Stops at the
 * first failed write.
 */
static int nl8048_init(struct nl8048_panel *lcd)
{
	static const struct {
		unsigned char addr;
		unsigned char data;
	} nl8048_init_seq[] = {
		{ 3, 0x01 }, { 0, 0x00 }, { 1, 0x01 }, { 4, 0x00 },
		{ 5, 0x14 }, { 6, 0x24 }, { 16, 0xd7 }, { 17, 0x00 },
		{ 18, 0x00 }, { 19, 0x55 }, { 20, 0x01 }, { 21, 0x70 },
		{ 22, 0x1e }, { 23, 0x25 }, { 24, 0x25 }, { 25, 0x02 },
		{ 26, 0x02 }, { 27, 0xa0 }, { 32, 0x2f }, { 33, 0x0f },
		{ 34, 0x0f }, { 35, 0x0f }, { 36, 0x0f }, { 37, 0x0f },
		{ 38, 0x0f }, { 39, 0x00 }, { 40, 0x02 }, { 41, 0x02 },
		{ 42, 0x02 }, { 43, 0x0f }, { 44, 0x0f }, { 45, 0x0f },
		{ 46, 0x0f }, { 47, 0x0f }, { 48, 0x0f }, { 49, 0x0f },
		{ 50, 0x00 }, { 51, 0x02 }, { 52, 0x02 }, { 53, 0x02 },
		{ 80, 0x0c }, { 83, 0x42 }, { 84, 0x42 }, { 85, 0x41 },
		{ 86, 0x14 }, { 89, 0x88 }, { 90, 0x01 }, { 91, 0x00 },
		{ 92, 0x02 }, { 93, 0x0c }, { 94, 0x1c }, { 95, 0x27 },
		{ 98, 0x49 }, { 99, 0x27 }, { 102, 0x76 }, { 103, 0x27 },
		{ 112, 0x01 }, { 113, 0x0e }, { 114, 0x02 }, { 115, 0x0c },
		{ 118, 0x0c }, { 121, 0x30 }, { 130, 0x00 }, { 131, 0x00 },
		{ 132, 0xfc }, { 134, 0x00 }, { 136, 0x00 }, { 138, 0x00 },
		{ 139, 0x00 }, { 140, 0x00 }, { 141, 0xfc }, { 143, 0x00 },
		{ 145, 0x00 }, { 147, 0x00 }, { 148, 0x00 }, { 149, 0x00 },
		{ 150, 0xfc }, { 152, 0x00 }, { 154, 0x00 }, { 156, 0x00 },
		{ 157, 0x00 },
	};

	unsigned int i;
	int ret;

	for (i = 0; i < ARRAY_SIZE(nl8048_init_seq); ++i) {
		ret = nl8048_write(lcd, nl8048_init_seq[i].addr,
				   nl8048_init_seq[i].data);
		if (ret < 0)
			return ret;
	}

	udelay(20);

	return nl8048_write(lcd, 2, 0x00);
}
/* Panel disable: pull the reset line low. */
static int nl8048_disable(struct drm_panel *panel)
{
	struct nl8048_panel *lcd;

	lcd = to_nl8048_device(panel);
	gpiod_set_value_cansleep(lcd->reset_gpio, 0);

	return 0;
}
/* Panel enable: drive the reset line high. */
static int nl8048_enable(struct drm_panel *panel)
{
	struct nl8048_panel *lcd;

	lcd = to_nl8048_device(panel);
	gpiod_set_value_cansleep(lcd->reset_gpio, 1);

	return 0;
}
/* Single fixed 800x480 mode with the typical 23.8 MHz pixel clock. */
static const struct drm_display_mode nl8048_mode = {
	/* NEC PIX Clock Ratings MIN:21.8MHz TYP:23.8MHz MAX:25.7MHz */
	.clock = 23800,
	.hdisplay = 800,
	.hsync_start = 800 + 6,
	.hsync_end = 800 + 6 + 1,
	.htotal = 800 + 6 + 1 + 4,
	.vdisplay = 480,
	.vsync_start = 480 + 3,
	.vsync_end = 480 + 3 + 1,
	.vtotal = 480 + 3 + 1 + 4,
	.type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
	.flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
	.width_mm = 89,		/* physical size of the active area */
	.height_mm = 53,
};
/*
 * Report the single supported mode and the DPI bus sampling flags.
 * Returns the number of modes added (1) or -ENOMEM.
 */
static int nl8048_get_modes(struct drm_panel *panel,
			    struct drm_connector *connector)
{
	struct drm_display_mode *mode;

	mode = drm_mode_duplicate(connector->dev, &nl8048_mode);
	if (!mode)
		return -ENOMEM;

	drm_mode_set_name(mode);
	drm_mode_probed_add(connector, mode);

	connector->display_info.width_mm = nl8048_mode.width_mm;
	connector->display_info.height_mm = nl8048_mode.height_mm;
	connector->display_info.bus_flags = DRM_BUS_FLAG_DE_HIGH
					  | DRM_BUS_FLAG_SYNC_SAMPLE_NEGEDGE
					  | DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE;

	return 1;
}
/* drm_panel operations; prepare/unprepare are not needed for this panel. */
static const struct drm_panel_funcs nl8048_funcs = {
	.disable = nl8048_disable,
	.enable = nl8048_enable,
	.get_modes = nl8048_get_modes,
};
/* System suspend: enter software standby via register 2 and let it settle. */
static int __maybe_unused nl8048_suspend(struct device *dev)
{
	struct nl8048_panel *lcd = dev_get_drvdata(dev);

	nl8048_write(lcd, 2, 0x01);
	msleep(40);

	return 0;
}
/*
 * System resume: reconfigure the SPI link, leave standby and replay the
 * full register init sequence.
 */
static int __maybe_unused nl8048_resume(struct device *dev)
{
	struct nl8048_panel *lcd = dev_get_drvdata(dev);

	/* Reinitialize the panel. */
	spi_setup(lcd->spi);
	nl8048_write(lcd, 2, 0x00);
	nl8048_init(lcd);

	return 0;
}

static SIMPLE_DEV_PM_OPS(nl8048_pm_ops, nl8048_suspend, nl8048_resume);
/*
 * Probe: fetch the reset GPIO, configure the 32-bit mode-0 SPI link,
 * run the register init sequence and register the panel.
 */
static int nl8048_probe(struct spi_device *spi)
{
	struct nl8048_panel *lcd;
	int ret;

	lcd = devm_kzalloc(&spi->dev, sizeof(*lcd), GFP_KERNEL);
	if (!lcd)
		return -ENOMEM;

	spi_set_drvdata(spi, lcd);
	lcd->spi = spi;

	lcd->reset_gpio = devm_gpiod_get(&spi->dev, "reset", GPIOD_OUT_LOW);
	if (IS_ERR(lcd->reset_gpio)) {
		dev_err(&spi->dev, "failed to parse reset gpio\n");
		return PTR_ERR(lcd->reset_gpio);
	}

	spi->mode = SPI_MODE_0;
	spi->bits_per_word = 32;

	ret = spi_setup(spi);
	if (ret < 0) {
		dev_err(&spi->dev, "failed to setup SPI: %d\n", ret);
		return ret;
	}

	ret = nl8048_init(lcd);
	if (ret < 0)
		return ret;

	drm_panel_init(&lcd->panel, &lcd->spi->dev, &nl8048_funcs,
		       DRM_MODE_CONNECTOR_DPI);

	drm_panel_add(&lcd->panel);

	return 0;
}
/* Remove: unregister the panel and shut it down in case it is still active. */
static void nl8048_remove(struct spi_device *spi)
{
	struct nl8048_panel *lcd = spi_get_drvdata(spi);

	drm_panel_remove(&lcd->panel);
	drm_panel_disable(&lcd->panel);
	drm_panel_unprepare(&lcd->panel);
}
/* Device-tree and legacy SPI device-id match tables. */
static const struct of_device_id nl8048_of_match[] = {
	{ .compatible = "nec,nl8048hl11", },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, nl8048_of_match);

static const struct spi_device_id nl8048_ids[] = {
	{ "nl8048hl11", 0 },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(spi, nl8048_ids);
/* SPI driver registration and module metadata. */
static struct spi_driver nl8048_driver = {
	.probe		= nl8048_probe,
	.remove		= nl8048_remove,
	.id_table	= nl8048_ids,
	.driver		= {
		.name = "panel-nec-nl8048hl11",
		.pm = &nl8048_pm_ops,
		.of_match_table = nl8048_of_match,
	},
};
module_spi_driver(nl8048_driver);

MODULE_AUTHOR("Erik Gilling <[email protected]>");
MODULE_DESCRIPTION("NEC-NL8048HL11 Driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/gpu/drm/panel/panel-nec-nl8048hl11.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Novatek NT36523 DriverIC panels driver
*
* Copyright (c) 2022, 2023 Jianhua Lu <[email protected]>
*/
#include <linux/backlight.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_graph.h>
#include <linux/regulator/consumer.h>
#include <video/mipi_display.h>
#include <drm/drm_connector.h>
#include <drm/drm_crtc.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_modes.h>
#include <drm/drm_panel.h>
/* Minimum number of DSI links a panel variant must provide. */
#define DSI_NUM_MIN 1

/*
 * Send the same DCS write to both links of a dual-DSI panel.
 * Note: "seq" is expanded twice, so arguments must be side-effect free
 * (all call sites use constants).
 */
#define mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, cmd, seq...)        \
	do {                                                        \
		mipi_dsi_dcs_write_seq(dsi0, cmd, seq);             \
		mipi_dsi_dcs_write_seq(dsi1, cmd, seq);             \
	} while (0)
/* Runtime state for one NT36523-based panel (single or dual DSI). */
struct panel_info {
	struct drm_panel panel;			/* embedded base panel */
	struct mipi_dsi_device *dsi[2];		/* dsi[1] only used when desc->is_dual_dsi */
	const struct panel_desc *desc;		/* static per-variant description */
	enum drm_panel_orientation orientation;	/* mounting orientation */

	struct gpio_desc *reset_gpio;		/* reset line */
	struct backlight_device *backlight;	/* optional backlight device */
	struct regulator *vddio;		/* "vddio" supply */

	bool prepared;				/* guards double prepare/unprepare */
};
/* Static description of one supported panel variant. */
struct panel_desc {
	unsigned int width_mm;			/* physical active-area size */
	unsigned int height_mm;

	unsigned int bpc;			/* bits per color component */
	unsigned int lanes;			/* DSI lanes per link */
	unsigned long mode_flags;		/* MIPI_DSI_MODE_* for the link(s) */
	enum mipi_dsi_pixel_format format;

	const struct drm_display_mode *modes;	/* supported mode list */
	unsigned int num_modes;
	const struct mipi_dsi_device_info dsi_info;	/* info for the second DSI device */
	int (*init_sequence)(struct panel_info *pinfo);	/* variant init commands */

	bool is_dual_dsi;			/* panel is driven over two DSI links */
	bool has_dcs_backlight;			/* backlight controlled via DCS commands */
};
/* Map an embedded drm_panel back to its enclosing panel_info. */
static inline struct panel_info *to_panel_info(struct drm_panel *panel)
{
	struct panel_info *pinfo;

	pinfo = container_of(panel, struct panel_info, panel);
	return pinfo;
}
/*
 * Vendor init sequence for the BOE variant of the Xiaomi "elish" panel
 * (dual DSI).  The register values are opaque: there is no datasheet, so
 * the sequence is replayed verbatim and must not be reordered.  Writes to
 * 0xff select a command page; 0xfb, 0x01 appears after each page switch.
 * Returns 0 (the dual-write macro does not propagate errors).
 */
static int elish_boe_init_sequence(struct panel_info *pinfo)
{
	struct mipi_dsi_device *dsi0 = pinfo->dsi[0];
	struct mipi_dsi_device *dsi1 = pinfo->dsi[1];
	/* No datasheet, so write magic init sequence directly */
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x10);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xb9, 0x05);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x20);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x18, 0x40);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x10);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xb9, 0x02);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x23);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x00, 0x80);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x01, 0x84);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x05, 0x2d);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x06, 0x00);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x07, 0x00);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x08, 0x01);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x09, 0x45);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x11, 0x02);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x12, 0x80);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x15, 0x83);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x16, 0x0c);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x29, 0x0a);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x30, 0xff);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x31, 0xfe);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x32, 0xfd);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x33, 0xfb);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x34, 0xf8);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x35, 0xf5);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x36, 0xf3);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x37, 0xf2);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x38, 0xf2);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x39, 0xf2);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x3a, 0xef);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x3b, 0xec);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x3d, 0xe9);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x3f, 0xe5);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x40, 0xe5);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x41, 0xe5);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x2a, 0x13);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x45, 0xff);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x46, 0xf4);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x47, 0xe7);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x48, 0xda);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x49, 0xcd);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4a, 0xc0);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4b, 0xb3);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4c, 0xb2);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4d, 0xb2);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4e, 0xb2);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4f, 0x99);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x50, 0x80);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x51, 0x68);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x52, 0x66);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x53, 0x66);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x54, 0x66);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x2b, 0x0e);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x58, 0xff);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x59, 0xfb);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x5a, 0xf7);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x5b, 0xf3);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x5c, 0xef);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x5d, 0xe3);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x5e, 0xda);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x5f, 0xd8);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x60, 0xd8);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x61, 0xd8);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x62, 0xcb);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x63, 0xbf);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x64, 0xb3);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x65, 0xb2);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x66, 0xb2);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x67, 0xb2);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x2a);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x25, 0x47);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x30, 0x47);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x39, 0x47);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x26);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x19, 0x10);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x1a, 0xe0);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x1b, 0x10);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x1c, 0x00);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x2a, 0x10);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x2b, 0xe0);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x10);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0xf0);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x84, 0x08);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x85, 0x0c);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x20);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x51, 0x00);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x25);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x91, 0x1f);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x92, 0x0f);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x93, 0x01);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x94, 0x18);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x95, 0x03);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x96, 0x01);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x10);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xb0, 0x01);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x25);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x19, 0x1f);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x1b, 0x1b);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x24);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xb8, 0x28);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x27);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd0, 0x31);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd1, 0x20);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd2, 0x30);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd4, 0x08);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xde, 0x80);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xdf, 0x02);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x26);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x00, 0x81);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x01, 0xb0);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x22);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x9f, 0x50);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x6f, 0x01);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x70, 0x11);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x73, 0x01);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x74, 0x49);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x76, 0x01);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x77, 0x49);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xa0, 0x3f);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xa9, 0x50);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xaa, 0x28);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xab, 0x28);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xad, 0x10);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xb8, 0x00);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xb9, 0x49);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xba, 0x49);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xbb, 0x49);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xbe, 0x04);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xbf, 0x49);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xc0, 0x04);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xc1, 0x59);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xc2, 0x00);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xc5, 0x00);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xc6, 0x01);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xc7, 0x48);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xca, 0x43);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xcb, 0x3c);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xce, 0x00);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xcf, 0x43);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd0, 0x3c);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd3, 0x43);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd4, 0x3c);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd7, 0x00);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xdc, 0x43);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xdd, 0x3c);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xe1, 0x43);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xe2, 0x3c);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xf2, 0x00);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xf3, 0x01);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xf4, 0x48);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x25);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x13, 0x01);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x14, 0x23);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xbc, 0x01);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xbd, 0x23);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x2a);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x97, 0x3c);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x98, 0x02);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x99, 0x95);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x9a, 0x03);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x9b, 0x00);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x9c, 0x0b);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x9d, 0x0a);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x9e, 0x90);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x22);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x9f, 0x50);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x23);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xa3, 0x50);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0xe0);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x14, 0x60);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x16, 0xc0);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4f, 0x02);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0xf0);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x3a, 0x08);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0xd0);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x02, 0xaf);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x09, 0xee);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x1c, 0x99);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x1d, 0x09);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x10);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x51, 0x0f, 0xff);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x53, 0x2c);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x35, 0x00);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xbb, 0x13);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x3b, 0x03, 0xac, 0x1a, 0x04, 0x04);
	/* 0x11 = standard DCS exit-sleep-mode command */
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x11);
	msleep(70);
	/* 0x29 = standard DCS set-display-on command */
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x29);
	return 0;
}
/*
 * Vendor init sequence for the CSOT variant of the Xiaomi "elish" panel
 * (dual DSI).  Like the BOE variant: undocumented register values replayed
 * verbatim — ordering is significant, do not reorder.  Writes to 0xff
 * select a command page; 0xfb, 0x01 follows each page switch.
 * Returns 0 (the dual-write macro does not propagate errors).
 */
static int elish_csot_init_sequence(struct panel_info *pinfo)
{
	struct mipi_dsi_device *dsi0 = pinfo->dsi[0];
	struct mipi_dsi_device *dsi1 = pinfo->dsi[1];
	/* No datasheet, so write magic init sequence directly */
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x10);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xb9, 0x05);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x20);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x18, 0x40);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x10);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xb9, 0x02);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0xd0);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x02, 0xaf);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x00, 0x30);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x09, 0xee);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x1c, 0x99);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x1d, 0x09);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0xf0);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x3a, 0x08);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0xe0);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4f, 0x02);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x20);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x58, 0x40);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x10);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x35, 0x00);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x23);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x00, 0x80);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x01, 0x84);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x05, 0x2d);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x06, 0x00);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x07, 0x00);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x08, 0x01);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x09, 0x45);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x11, 0x02);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x12, 0x80);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x15, 0x83);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x16, 0x0c);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x29, 0x0a);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x30, 0xff);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x31, 0xfe);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x32, 0xfd);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x33, 0xfb);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x34, 0xf8);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x35, 0xf5);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x36, 0xf3);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x37, 0xf2);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x38, 0xf2);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x39, 0xf2);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x3a, 0xef);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x3b, 0xec);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x3d, 0xe9);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x3f, 0xe5);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x40, 0xe5);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x41, 0xe5);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x2a, 0x13);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x45, 0xff);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x46, 0xf4);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x47, 0xe7);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x48, 0xda);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x49, 0xcd);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4a, 0xc0);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4b, 0xb3);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4c, 0xb2);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4d, 0xb2);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4e, 0xb2);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4f, 0x99);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x50, 0x80);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x51, 0x68);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x52, 0x66);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x53, 0x66);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x54, 0x66);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x2b, 0x0e);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x58, 0xff);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x59, 0xfb);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x5a, 0xf7);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x5b, 0xf3);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x5c, 0xef);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x5d, 0xe3);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x5e, 0xda);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x5f, 0xd8);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x60, 0xd8);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x61, 0xd8);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x62, 0xcb);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x63, 0xbf);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x64, 0xb3);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x65, 0xb2);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x66, 0xb2);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x67, 0xb2);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x10);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x51, 0x0f, 0xff);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x53, 0x2c);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x55, 0x00);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xbb, 0x13);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x3b, 0x03, 0xac, 0x1a, 0x04, 0x04);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x2a);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x25, 0x46);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x30, 0x46);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x39, 0x46);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x26);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x01, 0xb0);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x19, 0x10);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x1a, 0xe0);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x1b, 0x10);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x1c, 0x00);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x2a, 0x10);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x2b, 0xe0);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0xf0);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x84, 0x08);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x85, 0x0c);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x20);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x51, 0x00);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x25);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x91, 0x1f);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x92, 0x0f);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x93, 0x01);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x94, 0x18);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x95, 0x03);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x96, 0x01);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x10);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xb0, 0x01);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x25);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x19, 0x1f);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x1b, 0x1b);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x24);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xb8, 0x28);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x27);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd0, 0x31);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd1, 0x20);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd4, 0x08);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xde, 0x80);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xdf, 0x02);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x26);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x00, 0x81);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x01, 0xb0);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x22);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x6f, 0x01);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x70, 0x11);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x73, 0x01);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x74, 0x4d);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xa0, 0x3f);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xa9, 0x50);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xaa, 0x28);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xab, 0x28);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xad, 0x10);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xb8, 0x00);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xb9, 0x4b);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xba, 0x96);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xbb, 0x4b);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xbe, 0x07);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xbf, 0x4b);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xc0, 0x07);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xc1, 0x5c);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xc2, 0x00);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xc5, 0x00);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xc6, 0x3f);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xc7, 0x00);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xca, 0x08);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xcb, 0x40);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xce, 0x00);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xcf, 0x08);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd0, 0x40);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd3, 0x08);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd4, 0x40);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x25);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xbc, 0x01);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xbd, 0x1c);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x2a);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x9a, 0x03);
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x10);
	/* 0x11 = standard DCS exit-sleep-mode command */
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x11);
	msleep(70);
	/* 0x29 = standard DCS set-display-on command */
	mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x29);
	return 0;
}
static int j606f_boe_init_sequence(struct panel_info *pinfo)
{
struct mipi_dsi_device *dsi = pinfo->dsi[0];
struct device *dev = &dsi->dev;
int ret;
mipi_dsi_dcs_write_seq(dsi, 0xff, 0x20);
mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
mipi_dsi_dcs_write_seq(dsi, 0x05, 0xd9);
mipi_dsi_dcs_write_seq(dsi, 0x07, 0x78);
mipi_dsi_dcs_write_seq(dsi, 0x08, 0x5a);
mipi_dsi_dcs_write_seq(dsi, 0x0d, 0x63);
mipi_dsi_dcs_write_seq(dsi, 0x0e, 0x91);
mipi_dsi_dcs_write_seq(dsi, 0x0f, 0x73);
mipi_dsi_dcs_write_seq(dsi, 0x95, 0xeb);
mipi_dsi_dcs_write_seq(dsi, 0x96, 0xeb);
mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_PARTIAL_ROWS, 0x11);
mipi_dsi_dcs_write_seq(dsi, 0x6d, 0x66);
mipi_dsi_dcs_write_seq(dsi, 0x75, 0xa2);
mipi_dsi_dcs_write_seq(dsi, 0x77, 0xb3);
mipi_dsi_dcs_write_seq(dsi, 0xb0, 0x00, 0x08, 0x00, 0x23, 0x00, 0x4d, 0x00, 0x6d, 0x00,
0x89, 0x00, 0xa1, 0x00, 0xb6, 0x00, 0xc9);
mipi_dsi_dcs_write_seq(dsi, 0xb1, 0x00, 0xda, 0x01, 0x13, 0x01, 0x3c, 0x01, 0x7e, 0x01,
0xab, 0x01, 0xf7, 0x02, 0x2f, 0x02, 0x31);
mipi_dsi_dcs_write_seq(dsi, 0xb2, 0x02, 0x67, 0x02, 0xa6, 0x02, 0xd1, 0x03, 0x08, 0x03,
0x2e, 0x03, 0x5b, 0x03, 0x6b, 0x03, 0x7b);
mipi_dsi_dcs_write_seq(dsi, 0xb3, 0x03, 0x8e, 0x03, 0xa2, 0x03, 0xb7, 0x03, 0xe7, 0x03,
0xfd, 0x03, 0xff);
mipi_dsi_dcs_write_seq(dsi, 0xb4, 0x00, 0x08, 0x00, 0x23, 0x00, 0x4d, 0x00, 0x6d, 0x00,
0x89, 0x00, 0xa1, 0x00, 0xb6, 0x00, 0xc9);
mipi_dsi_dcs_write_seq(dsi, 0xb5, 0x00, 0xda, 0x01, 0x13, 0x01, 0x3c, 0x01, 0x7e, 0x01,
0xab, 0x01, 0xf7, 0x02, 0x2f, 0x02, 0x31);
mipi_dsi_dcs_write_seq(dsi, 0xb6, 0x02, 0x67, 0x02, 0xa6, 0x02, 0xd1, 0x03, 0x08, 0x03,
0x2e, 0x03, 0x5b, 0x03, 0x6b, 0x03, 0x7b);
mipi_dsi_dcs_write_seq(dsi, 0xb7, 0x03, 0x8e, 0x03, 0xa2, 0x03, 0xb7, 0x03, 0xe7, 0x03,
0xfd, 0x03, 0xff);
mipi_dsi_dcs_write_seq(dsi, 0xb8, 0x00, 0x08, 0x00, 0x23, 0x00, 0x4d, 0x00, 0x6d, 0x00,
0x89, 0x00, 0xa1, 0x00, 0xb6, 0x00, 0xc9);
mipi_dsi_dcs_write_seq(dsi, 0xb9, 0x00, 0xda, 0x01, 0x13, 0x01, 0x3c, 0x01, 0x7e, 0x01,
0xab, 0x01, 0xf7, 0x02, 0x2f, 0x02, 0x31);
mipi_dsi_dcs_write_seq(dsi, 0xba, 0x02, 0x67, 0x02, 0xa6, 0x02, 0xd1, 0x03, 0x08, 0x03,
0x2e, 0x03, 0x5b, 0x03, 0x6b, 0x03, 0x7b);
mipi_dsi_dcs_write_seq(dsi, 0xbb, 0x03, 0x8e, 0x03, 0xa2, 0x03, 0xb7, 0x03, 0xe7, 0x03,
0xfd, 0x03, 0xff);
mipi_dsi_dcs_write_seq(dsi, 0xff, 0x21);
mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
mipi_dsi_dcs_write_seq(dsi, 0xb0, 0x00, 0x00, 0x00, 0x1b, 0x00, 0x45, 0x00, 0x65, 0x00,
0x81, 0x00, 0x99, 0x00, 0xae, 0x00, 0xc1);
mipi_dsi_dcs_write_seq(dsi, 0xb1, 0x00, 0xd2, 0x01, 0x0b, 0x01, 0x34, 0x01, 0x76, 0x01,
0xa3, 0x01, 0xef, 0x02, 0x27, 0x02, 0x29);
mipi_dsi_dcs_write_seq(dsi, 0xb2, 0x02, 0x5f, 0x02, 0x9e, 0x02, 0xc9, 0x03, 0x00, 0x03,
0x26, 0x03, 0x53, 0x03, 0x63, 0x03, 0x73);
mipi_dsi_dcs_write_seq(dsi, 0xb3, 0x03, 0x86, 0x03, 0x9a, 0x03, 0xaf, 0x03, 0xdf, 0x03,
0xf5, 0x03, 0xf7);
mipi_dsi_dcs_write_seq(dsi, 0xb4, 0x00, 0x00, 0x00, 0x1b, 0x00, 0x45, 0x00, 0x65, 0x00,
0x81, 0x00, 0x99, 0x00, 0xae, 0x00, 0xc1);
mipi_dsi_dcs_write_seq(dsi, 0xb5, 0x00, 0xd2, 0x01, 0x0b, 0x01, 0x34, 0x01, 0x76, 0x01,
0xa3, 0x01, 0xef, 0x02, 0x27, 0x02, 0x29);
mipi_dsi_dcs_write_seq(dsi, 0xb6, 0x02, 0x5f, 0x02, 0x9e, 0x02, 0xc9, 0x03, 0x00, 0x03,
0x26, 0x03, 0x53, 0x03, 0x63, 0x03, 0x73);
mipi_dsi_dcs_write_seq(dsi, 0xb7, 0x03, 0x86, 0x03, 0x9a, 0x03, 0xaf, 0x03, 0xdf, 0x03,
0xf5, 0x03, 0xf7);
mipi_dsi_dcs_write_seq(dsi, 0xb8, 0x00, 0x00, 0x00, 0x1b, 0x00, 0x45, 0x00, 0x65, 0x00,
0x81, 0x00, 0x99, 0x00, 0xae, 0x00, 0xc1);
mipi_dsi_dcs_write_seq(dsi, 0xb9, 0x00, 0xd2, 0x01, 0x0b, 0x01, 0x34, 0x01, 0x76, 0x01,
0xa3, 0x01, 0xef, 0x02, 0x27, 0x02, 0x29);
mipi_dsi_dcs_write_seq(dsi, 0xba, 0x02, 0x5f, 0x02, 0x9e, 0x02, 0xc9, 0x03, 0x00, 0x03,
0x26, 0x03, 0x53, 0x03, 0x63, 0x03, 0x73);
mipi_dsi_dcs_write_seq(dsi, 0xbb, 0x03, 0x86, 0x03, 0x9a, 0x03, 0xaf, 0x03, 0xdf, 0x03,
0xf5, 0x03, 0xf7);
mipi_dsi_dcs_write_seq(dsi, 0xff, 0x23);
mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
mipi_dsi_dcs_write_seq(dsi, 0x00, 0x80);
mipi_dsi_dcs_write_seq(dsi, 0x07, 0x00);
mipi_dsi_dcs_write_seq(dsi, 0x11, 0x01);
mipi_dsi_dcs_write_seq(dsi, 0x12, 0x77);
mipi_dsi_dcs_write_seq(dsi, 0x15, 0x07);
mipi_dsi_dcs_write_seq(dsi, 0x16, 0x07);
mipi_dsi_dcs_write_seq(dsi, 0xff, 0x24);
mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
mipi_dsi_dcs_write_seq(dsi, 0x00, 0x00);
mipi_dsi_dcs_write_seq(dsi, 0x01, 0x00);
mipi_dsi_dcs_write_seq(dsi, 0x02, 0x1c);
mipi_dsi_dcs_write_seq(dsi, 0x03, 0x1c);
mipi_dsi_dcs_write_seq(dsi, 0x04, 0x1d);
mipi_dsi_dcs_write_seq(dsi, 0x05, 0x1d);
mipi_dsi_dcs_write_seq(dsi, 0x06, 0x04);
mipi_dsi_dcs_write_seq(dsi, 0x07, 0x04);
mipi_dsi_dcs_write_seq(dsi, 0x08, 0x0f);
mipi_dsi_dcs_write_seq(dsi, 0x09, 0x0f);
mipi_dsi_dcs_write_seq(dsi, 0x0a, 0x0e);
mipi_dsi_dcs_write_seq(dsi, 0x0b, 0x0e);
mipi_dsi_dcs_write_seq(dsi, 0x0c, 0x0d);
mipi_dsi_dcs_write_seq(dsi, 0x0d, 0x0d);
mipi_dsi_dcs_write_seq(dsi, 0x0e, 0x0c);
mipi_dsi_dcs_write_seq(dsi, 0x0f, 0x0c);
mipi_dsi_dcs_write_seq(dsi, 0x10, 0x08);
mipi_dsi_dcs_write_seq(dsi, 0x11, 0x08);
mipi_dsi_dcs_write_seq(dsi, 0x12, 0x00);
mipi_dsi_dcs_write_seq(dsi, 0x13, 0x00);
mipi_dsi_dcs_write_seq(dsi, 0x14, 0x00);
mipi_dsi_dcs_write_seq(dsi, 0x15, 0x00);
mipi_dsi_dcs_write_seq(dsi, 0x16, 0x00);
mipi_dsi_dcs_write_seq(dsi, 0x17, 0x00);
mipi_dsi_dcs_write_seq(dsi, 0x18, 0x1c);
mipi_dsi_dcs_write_seq(dsi, 0x19, 0x1c);
mipi_dsi_dcs_write_seq(dsi, 0x1a, 0x1d);
mipi_dsi_dcs_write_seq(dsi, 0x1b, 0x1d);
mipi_dsi_dcs_write_seq(dsi, 0x1c, 0x04);
mipi_dsi_dcs_write_seq(dsi, 0x1d, 0x04);
mipi_dsi_dcs_write_seq(dsi, 0x1e, 0x0f);
mipi_dsi_dcs_write_seq(dsi, 0x1f, 0x0f);
mipi_dsi_dcs_write_seq(dsi, 0x20, 0x0e);
mipi_dsi_dcs_write_seq(dsi, 0x21, 0x0e);
mipi_dsi_dcs_write_seq(dsi, 0x22, 0x0d);
mipi_dsi_dcs_write_seq(dsi, 0x23, 0x0d);
mipi_dsi_dcs_write_seq(dsi, 0x24, 0x0c);
mipi_dsi_dcs_write_seq(dsi, 0x25, 0x0c);
mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_GAMMA_CURVE, 0x08);
mipi_dsi_dcs_write_seq(dsi, 0x27, 0x08);
mipi_dsi_dcs_write_seq(dsi, 0x28, 0x00);
mipi_dsi_dcs_write_seq(dsi, 0x29, 0x00);
mipi_dsi_dcs_write_seq(dsi, 0x2a, 0x00);
mipi_dsi_dcs_write_seq(dsi, 0x2b, 0x00);
mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_LUT, 0x20);
mipi_dsi_dcs_write_seq(dsi, 0x2f, 0x0a);
mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_PARTIAL_ROWS, 0x44);
mipi_dsi_dcs_write_seq(dsi, 0x33, 0x0c);
mipi_dsi_dcs_write_seq(dsi, 0x34, 0x32);
mipi_dsi_dcs_write_seq(dsi, 0x37, 0x44);
mipi_dsi_dcs_write_seq(dsi, 0x38, 0x40);
mipi_dsi_dcs_write_seq(dsi, 0x39, 0x00);
ret = mipi_dsi_dcs_set_pixel_format(dsi, 0x9a);
if (ret < 0) {
dev_err(dev, "Failed to set pixel format: %d\n", ret);
return ret;
}
mipi_dsi_dcs_write_seq(dsi, 0x3b, 0xa0);
mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_3D_CONTROL, 0x42);
mipi_dsi_dcs_write_seq(dsi, 0x3f, 0x06);
mipi_dsi_dcs_write_seq(dsi, 0x43, 0x06);
mipi_dsi_dcs_write_seq(dsi, 0x47, 0x66);
mipi_dsi_dcs_write_seq(dsi, 0x4a, 0x9a);
mipi_dsi_dcs_write_seq(dsi, 0x4b, 0xa0);
mipi_dsi_dcs_write_seq(dsi, 0x4c, 0x91);
mipi_dsi_dcs_write_seq(dsi, 0x4d, 0x21);
mipi_dsi_dcs_write_seq(dsi, 0x4e, 0x43);
ret = mipi_dsi_dcs_set_display_brightness(dsi, 18);
if (ret < 0) {
dev_err(dev, "Failed to set display brightness: %d\n", ret);
return ret;
}
mipi_dsi_dcs_write_seq(dsi, 0x52, 0x34);
mipi_dsi_dcs_write_seq(dsi, 0x55, 0x82, 0x02);
mipi_dsi_dcs_write_seq(dsi, 0x56, 0x04);
mipi_dsi_dcs_write_seq(dsi, 0x58, 0x21);
mipi_dsi_dcs_write_seq(dsi, 0x59, 0x30);
mipi_dsi_dcs_write_seq(dsi, 0x5a, 0xba);
mipi_dsi_dcs_write_seq(dsi, 0x5b, 0xa0);
mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_CABC_MIN_BRIGHTNESS, 0x00, 0x06);
mipi_dsi_dcs_write_seq(dsi, 0x5f, 0x00);
mipi_dsi_dcs_write_seq(dsi, 0x65, 0x82);
mipi_dsi_dcs_write_seq(dsi, 0x7e, 0x20);
mipi_dsi_dcs_write_seq(dsi, 0x7f, 0x3c);
mipi_dsi_dcs_write_seq(dsi, 0x82, 0x04);
mipi_dsi_dcs_write_seq(dsi, 0x97, 0xc0);
mipi_dsi_dcs_write_seq(dsi, 0xb6,
0x05, 0x00, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05,
0x05, 0x00, 0x00);
mipi_dsi_dcs_write_seq(dsi, 0x92, 0xc4);
mipi_dsi_dcs_write_seq(dsi, 0x93, 0x1a);
mipi_dsi_dcs_write_seq(dsi, 0x94, 0x5f);
mipi_dsi_dcs_write_seq(dsi, 0xd7, 0x55);
mipi_dsi_dcs_write_seq(dsi, 0xda, 0x0a);
mipi_dsi_dcs_write_seq(dsi, 0xde, 0x08);
mipi_dsi_dcs_write_seq(dsi, 0xdb, 0x05);
mipi_dsi_dcs_write_seq(dsi, 0xdc, 0xc4);
mipi_dsi_dcs_write_seq(dsi, 0xdd, 0x22);
mipi_dsi_dcs_write_seq(dsi, 0xdf, 0x05);
mipi_dsi_dcs_write_seq(dsi, 0xe0, 0xc4);
mipi_dsi_dcs_write_seq(dsi, 0xe1, 0x05);
mipi_dsi_dcs_write_seq(dsi, 0xe2, 0xc4);
mipi_dsi_dcs_write_seq(dsi, 0xe3, 0x05);
mipi_dsi_dcs_write_seq(dsi, 0xe4, 0xc4);
mipi_dsi_dcs_write_seq(dsi, 0xe5, 0x05);
mipi_dsi_dcs_write_seq(dsi, 0xe6, 0xc4);
mipi_dsi_dcs_write_seq(dsi, 0x5c, 0x88);
mipi_dsi_dcs_write_seq(dsi, 0x5d, 0x08);
mipi_dsi_dcs_write_seq(dsi, 0x8d, 0x88);
mipi_dsi_dcs_write_seq(dsi, 0x8e, 0x08);
mipi_dsi_dcs_write_seq(dsi, 0xb5, 0x90);
mipi_dsi_dcs_write_seq(dsi, 0xff, 0x25);
mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
mipi_dsi_dcs_write_seq(dsi, 0x05, 0x00);
mipi_dsi_dcs_write_seq(dsi, 0x19, 0x07);
mipi_dsi_dcs_write_seq(dsi, 0x1f, 0xba);
mipi_dsi_dcs_write_seq(dsi, 0x20, 0xa0);
mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_GAMMA_CURVE, 0xba);
mipi_dsi_dcs_write_seq(dsi, 0x27, 0xa0);
mipi_dsi_dcs_write_seq(dsi, 0x33, 0xba);
mipi_dsi_dcs_write_seq(dsi, 0x34, 0xa0);
mipi_dsi_dcs_write_seq(dsi, 0x3f, 0xe0);
mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_VSYNC_TIMING, 0x00);
mipi_dsi_dcs_write_seq(dsi, 0x44, 0x00);
mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_GET_SCANLINE, 0x40);
mipi_dsi_dcs_write_seq(dsi, 0x48, 0xba);
mipi_dsi_dcs_write_seq(dsi, 0x49, 0xa0);
mipi_dsi_dcs_write_seq(dsi, 0x5b, 0x00);
mipi_dsi_dcs_write_seq(dsi, 0x5c, 0x00);
mipi_dsi_dcs_write_seq(dsi, 0x5d, 0x00);
mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_CABC_MIN_BRIGHTNESS, 0xd0);
mipi_dsi_dcs_write_seq(dsi, 0x61, 0xba);
mipi_dsi_dcs_write_seq(dsi, 0x62, 0xa0);
mipi_dsi_dcs_write_seq(dsi, 0xf1, 0x10);
mipi_dsi_dcs_write_seq(dsi, 0xff, 0x2a);
mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
mipi_dsi_dcs_write_seq(dsi, 0x64, 0x16);
mipi_dsi_dcs_write_seq(dsi, 0x67, 0x16);
mipi_dsi_dcs_write_seq(dsi, 0x6a, 0x16);
mipi_dsi_dcs_write_seq(dsi, 0x70, 0x30);
mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_READ_PPS_START, 0xf3);
mipi_dsi_dcs_write_seq(dsi, 0xa3, 0xff);
mipi_dsi_dcs_write_seq(dsi, 0xa4, 0xff);
mipi_dsi_dcs_write_seq(dsi, 0xa5, 0xff);
mipi_dsi_dcs_write_seq(dsi, 0xd6, 0x08);
mipi_dsi_dcs_write_seq(dsi, 0xff, 0x26);
mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
mipi_dsi_dcs_write_seq(dsi, 0x00, 0xa1);
mipi_dsi_dcs_write_seq(dsi, 0x0a, 0xf2);
mipi_dsi_dcs_write_seq(dsi, 0x04, 0x28);
mipi_dsi_dcs_write_seq(dsi, 0x06, 0x30);
mipi_dsi_dcs_write_seq(dsi, 0x0c, 0x13);
mipi_dsi_dcs_write_seq(dsi, 0x0d, 0x0a);
mipi_dsi_dcs_write_seq(dsi, 0x0f, 0x0a);
mipi_dsi_dcs_write_seq(dsi, 0x11, 0x00);
mipi_dsi_dcs_write_seq(dsi, 0x12, 0x50);
mipi_dsi_dcs_write_seq(dsi, 0x13, 0x51);
mipi_dsi_dcs_write_seq(dsi, 0x14, 0x65);
mipi_dsi_dcs_write_seq(dsi, 0x15, 0x00);
mipi_dsi_dcs_write_seq(dsi, 0x16, 0x10);
mipi_dsi_dcs_write_seq(dsi, 0x17, 0xa0);
mipi_dsi_dcs_write_seq(dsi, 0x18, 0x86);
mipi_dsi_dcs_write_seq(dsi, 0x19, 0x11);
mipi_dsi_dcs_write_seq(dsi, 0x1a, 0x7b);
mipi_dsi_dcs_write_seq(dsi, 0x1b, 0x10);
mipi_dsi_dcs_write_seq(dsi, 0x1c, 0xbb);
mipi_dsi_dcs_write_seq(dsi, 0x22, 0x00);
mipi_dsi_dcs_write_seq(dsi, 0x23, 0x00);
mipi_dsi_dcs_write_seq(dsi, 0x2a, 0x11);
mipi_dsi_dcs_write_seq(dsi, 0x2b, 0x7b);
mipi_dsi_dcs_write_seq(dsi, 0x1d, 0x00);
mipi_dsi_dcs_write_seq(dsi, 0x1e, 0xc3);
mipi_dsi_dcs_write_seq(dsi, 0x1f, 0xc3);
mipi_dsi_dcs_write_seq(dsi, 0x24, 0x00);
mipi_dsi_dcs_write_seq(dsi, 0x25, 0xc3);
mipi_dsi_dcs_write_seq(dsi, 0x2f, 0x05);
mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_PARTIAL_ROWS, 0xc3);
mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_PARTIAL_COLUMNS, 0x00);
mipi_dsi_dcs_write_seq(dsi, 0x32, 0xc3);
mipi_dsi_dcs_write_seq(dsi, 0x39, 0x00);
ret = mipi_dsi_dcs_set_pixel_format(dsi, 0xc3);
if (ret < 0) {
dev_err(dev, "Failed to set pixel format: %d\n", ret);
return ret;
}
mipi_dsi_dcs_write_seq(dsi, 0x20, 0x01);
mipi_dsi_dcs_write_seq(dsi, 0x33, 0x11);
mipi_dsi_dcs_write_seq(dsi, 0x34, 0x78);
mipi_dsi_dcs_write_seq(dsi, 0x35, 0x16);
mipi_dsi_dcs_write_seq(dsi, 0xc8, 0x04);
mipi_dsi_dcs_write_seq(dsi, 0xc9, 0x82);
mipi_dsi_dcs_write_seq(dsi, 0xca, 0x4e);
mipi_dsi_dcs_write_seq(dsi, 0xcb, 0x00);
mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_READ_PPS_CONTINUE, 0x4c);
mipi_dsi_dcs_write_seq(dsi, 0xaa, 0x47);
mipi_dsi_dcs_write_seq(dsi, 0xff, 0x27);
mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
mipi_dsi_dcs_write_seq(dsi, 0x56, 0x06);
mipi_dsi_dcs_write_seq(dsi, 0x58, 0x80);
mipi_dsi_dcs_write_seq(dsi, 0x59, 0x53);
mipi_dsi_dcs_write_seq(dsi, 0x5a, 0x00);
mipi_dsi_dcs_write_seq(dsi, 0x5b, 0x14);
mipi_dsi_dcs_write_seq(dsi, 0x5c, 0x00);
mipi_dsi_dcs_write_seq(dsi, 0x5d, 0x01);
mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_CABC_MIN_BRIGHTNESS, 0x20);
mipi_dsi_dcs_write_seq(dsi, 0x5f, 0x10);
mipi_dsi_dcs_write_seq(dsi, 0x60, 0x00);
mipi_dsi_dcs_write_seq(dsi, 0x61, 0x1d);
mipi_dsi_dcs_write_seq(dsi, 0x62, 0x00);
mipi_dsi_dcs_write_seq(dsi, 0x63, 0x01);
mipi_dsi_dcs_write_seq(dsi, 0x64, 0x24);
mipi_dsi_dcs_write_seq(dsi, 0x65, 0x1c);
mipi_dsi_dcs_write_seq(dsi, 0x66, 0x00);
mipi_dsi_dcs_write_seq(dsi, 0x67, 0x01);
mipi_dsi_dcs_write_seq(dsi, 0x68, 0x25);
mipi_dsi_dcs_write_seq(dsi, 0x00, 0x00);
mipi_dsi_dcs_write_seq(dsi, 0x78, 0x00);
mipi_dsi_dcs_write_seq(dsi, 0xc3, 0x00);
mipi_dsi_dcs_write_seq(dsi, 0xd1, 0x24);
mipi_dsi_dcs_write_seq(dsi, 0xd2, 0x30);
mipi_dsi_dcs_write_seq(dsi, 0xff, 0x2a);
mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
mipi_dsi_dcs_write_seq(dsi, 0x22, 0x2f);
mipi_dsi_dcs_write_seq(dsi, 0x23, 0x08);
mipi_dsi_dcs_write_seq(dsi, 0x24, 0x00);
mipi_dsi_dcs_write_seq(dsi, 0x25, 0xc3);
mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_GAMMA_CURVE, 0xf8);
mipi_dsi_dcs_write_seq(dsi, 0x27, 0x00);
mipi_dsi_dcs_write_seq(dsi, 0x28, 0x1a);
mipi_dsi_dcs_write_seq(dsi, 0x29, 0x00);
mipi_dsi_dcs_write_seq(dsi, 0x2a, 0x1a);
mipi_dsi_dcs_write_seq(dsi, 0x2b, 0x00);
mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_LUT, 0x1a);
mipi_dsi_dcs_write_seq(dsi, 0xff, 0xe0);
mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
mipi_dsi_dcs_write_seq(dsi, 0x14, 0x60);
mipi_dsi_dcs_write_seq(dsi, 0x16, 0xc0);
mipi_dsi_dcs_write_seq(dsi, 0xff, 0xf0);
mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
ret = mipi_dsi_dcs_set_pixel_format(dsi, 0x08);
if (ret < 0) {
dev_err(dev, "Failed to set pixel format: %d\n", ret);
return ret;
}
mipi_dsi_dcs_write_seq(dsi, 0xff, 0x24);
mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
ret = mipi_dsi_dcs_set_pixel_format(dsi, 0x5d);
if (ret < 0) {
dev_err(dev, "Failed to set pixel format: %d\n", ret);
return ret;
}
mipi_dsi_dcs_write_seq(dsi, 0x3b, 0x60);
mipi_dsi_dcs_write_seq(dsi, 0x4a, 0x5d);
mipi_dsi_dcs_write_seq(dsi, 0x4b, 0x60);
mipi_dsi_dcs_write_seq(dsi, 0x5a, 0x70);
mipi_dsi_dcs_write_seq(dsi, 0x5b, 0x60);
mipi_dsi_dcs_write_seq(dsi, 0x91, 0x44);
mipi_dsi_dcs_write_seq(dsi, 0x92, 0x75);
mipi_dsi_dcs_write_seq(dsi, 0xdb, 0x05);
mipi_dsi_dcs_write_seq(dsi, 0xdc, 0x75);
mipi_dsi_dcs_write_seq(dsi, 0xdd, 0x22);
mipi_dsi_dcs_write_seq(dsi, 0xdf, 0x05);
mipi_dsi_dcs_write_seq(dsi, 0xe0, 0x75);
mipi_dsi_dcs_write_seq(dsi, 0xe1, 0x05);
mipi_dsi_dcs_write_seq(dsi, 0xe2, 0x75);
mipi_dsi_dcs_write_seq(dsi, 0xe3, 0x05);
mipi_dsi_dcs_write_seq(dsi, 0xe4, 0x75);
mipi_dsi_dcs_write_seq(dsi, 0xe5, 0x05);
mipi_dsi_dcs_write_seq(dsi, 0xe6, 0x75);
mipi_dsi_dcs_write_seq(dsi, 0x5c, 0x00);
mipi_dsi_dcs_write_seq(dsi, 0x5d, 0x00);
mipi_dsi_dcs_write_seq(dsi, 0x8d, 0x00);
mipi_dsi_dcs_write_seq(dsi, 0x8e, 0x00);
mipi_dsi_dcs_write_seq(dsi, 0xff, 0x25);
mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
mipi_dsi_dcs_write_seq(dsi, 0x1f, 0x70);
mipi_dsi_dcs_write_seq(dsi, 0x20, 0x60);
mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_GAMMA_CURVE, 0x70);
mipi_dsi_dcs_write_seq(dsi, 0x27, 0x60);
mipi_dsi_dcs_write_seq(dsi, 0x33, 0x70);
mipi_dsi_dcs_write_seq(dsi, 0x34, 0x60);
mipi_dsi_dcs_write_seq(dsi, 0x48, 0x70);
mipi_dsi_dcs_write_seq(dsi, 0x49, 0x60);
mipi_dsi_dcs_write_seq(dsi, 0x5b, 0x00);
mipi_dsi_dcs_write_seq(dsi, 0x61, 0x70);
mipi_dsi_dcs_write_seq(dsi, 0x62, 0x60);
mipi_dsi_dcs_write_seq(dsi, 0xff, 0x26);
mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
mipi_dsi_dcs_write_seq(dsi, 0x02, 0x31);
mipi_dsi_dcs_write_seq(dsi, 0x19, 0x0a);
mipi_dsi_dcs_write_seq(dsi, 0x1a, 0x7f);
mipi_dsi_dcs_write_seq(dsi, 0x1b, 0x0a);
mipi_dsi_dcs_write_seq(dsi, 0x1c, 0x0c);
mipi_dsi_dcs_write_seq(dsi, 0x2a, 0x0a);
mipi_dsi_dcs_write_seq(dsi, 0x2b, 0x7f);
mipi_dsi_dcs_write_seq(dsi, 0x1e, 0x75);
mipi_dsi_dcs_write_seq(dsi, 0x1f, 0x75);
mipi_dsi_dcs_write_seq(dsi, 0x25, 0x75);
mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_PARTIAL_ROWS, 0x75);
mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_PARTIAL_COLUMNS, 0x05);
mipi_dsi_dcs_write_seq(dsi, 0x32, 0x8d);
ret = mipi_dsi_dcs_set_pixel_format(dsi, 0x75);
if (ret < 0) {
dev_err(dev, "Failed to set pixel format: %d\n", ret);
return ret;
}
mipi_dsi_dcs_write_seq(dsi, 0xff, 0x2a);
mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
mipi_dsi_dcs_write_seq(dsi, 0x25, 0x75);
mipi_dsi_dcs_write_seq(dsi, 0xff, 0x10);
mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
mipi_dsi_dcs_write_seq(dsi, 0xb9, 0x01);
mipi_dsi_dcs_write_seq(dsi, 0xff, 0x20);
mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
mipi_dsi_dcs_write_seq(dsi, 0x18, 0x40);
mipi_dsi_dcs_write_seq(dsi, 0xff, 0x10);
mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
mipi_dsi_dcs_write_seq(dsi, 0xb9, 0x02);
ret = mipi_dsi_dcs_set_tear_on(dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
if (ret < 0) {
dev_err(dev, "Failed to set tear on: %d\n", ret);
return ret;
}
mipi_dsi_dcs_write_seq(dsi, 0xbb, 0x13);
mipi_dsi_dcs_write_seq(dsi, 0x3b, 0x03, 0x5f, 0x1a, 0x04, 0x04);
mipi_dsi_dcs_write_seq(dsi, 0xff, 0x10);
usleep_range(10000, 11000);
mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
ret = mipi_dsi_dcs_set_display_brightness(dsi, 0);
if (ret < 0) {
dev_err(dev, "Failed to set display brightness: %d\n", ret);
return ret;
}
mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x2c);
mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_POWER_SAVE, 0x00);
mipi_dsi_dcs_write_seq(dsi, 0x68, 0x05, 0x01);
ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
if (ret < 0) {
dev_err(dev, "Failed to exit sleep mode: %d\n", ret);
return ret;
}
msleep(100);
ret = mipi_dsi_dcs_set_display_on(dsi);
if (ret < 0) {
dev_err(dev, "Failed to set display on: %d\n", ret);
return ret;
}
msleep(30);
return 0;
}
/*
 * 1600x2560 timing for the BOE variant of the "elish" panel.
 * .clock is in kHz: htotal * vtotal * refresh / 1000.
 */
static const struct drm_display_mode elish_boe_modes[] = {
	{
		/* There is only one 120 Hz timing, but it doesn't work perfectly, 104 Hz preferred */
		.clock = (1600 + 60 + 8 + 60) * (2560 + 26 + 4 + 168) * 104 / 1000,
		.hdisplay = 1600,
		.hsync_start = 1600 + 60,	/* hdisplay + hfp */
		.hsync_end = 1600 + 60 + 8,	/* + hsync */
		.htotal = 1600 + 60 + 8 + 60,	/* + hbp */
		.vdisplay = 2560,
		.vsync_start = 2560 + 26,	/* vdisplay + vfp */
		.vsync_end = 2560 + 26 + 4,	/* + vsync */
		.vtotal = 2560 + 26 + 4 + 168,	/* + vbp */
	},
};
/*
 * 1600x2560 timing for the CSOT variant of the "elish" panel.
 * Same active area as the BOE variant, different horizontal blanking.
 */
static const struct drm_display_mode elish_csot_modes[] = {
	{
		/* There is only one 120 Hz timing, but it doesn't work perfectly, 104 Hz preferred */
		.clock = (1600 + 200 + 40 + 52) * (2560 + 26 + 4 + 168) * 104 / 1000,
		.hdisplay = 1600,
		.hsync_start = 1600 + 200,	/* hdisplay + hfp */
		.hsync_end = 1600 + 200 + 40,	/* + hsync */
		.htotal = 1600 + 200 + 40 + 52,	/* + hbp */
		.vdisplay = 2560,
		.vsync_start = 2560 + 26,	/* vdisplay + vfp */
		.vsync_end = 2560 + 26 + 4,	/* + vsync */
		.vtotal = 2560 + 26 + 4 + 168,	/* + vbp */
	},
};
/* 1200x2000 @ 60 Hz timing for the BOE panel used with the NT36523W (j606f). */
static const struct drm_display_mode j606f_boe_modes[] = {
	{
		.clock = (1200 + 58 + 2 + 60) * (2000 + 26 + 2 + 93) * 60 / 1000,
		.hdisplay = 1200,
		.hsync_start = 1200 + 58,	/* hdisplay + hfp */
		.hsync_end = 1200 + 58 + 2,	/* + hsync */
		.htotal = 1200 + 58 + 2 + 60,	/* + hbp */
		.vdisplay = 2000,
		.vsync_start = 2000 + 26,	/* vdisplay + vfp */
		.vsync_end = 2000 + 26 + 2,	/* + vsync */
		.vtotal = 2000 + 26 + 2 + 93,	/* + vbp */
		.width_mm = 143,
		.height_mm = 235,
	},
};
/*
 * Panel description for the BOE "elish" panel.
 * Dual-DSI: probe registers a second DSI device using .dsi_info.
 */
static const struct panel_desc elish_boe_desc = {
	.modes = elish_boe_modes,
	.num_modes = ARRAY_SIZE(elish_boe_modes),
	.dsi_info = {
		.type = "BOE-elish",
		.channel = 0,
		.node = NULL,
	},
	.width_mm = 127,
	.height_mm = 203,
	.bpc = 8,
	.lanes = 3,	/* 3 lanes per link, two links in total */
	.format = MIPI_DSI_FMT_RGB888,
	.mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_CLOCK_NON_CONTINUOUS | MIPI_DSI_MODE_LPM,
	.init_sequence = elish_boe_init_sequence,
	.is_dual_dsi = true,
};
/*
 * Panel description for the CSOT "elish" panel.
 * Identical configuration to the BOE variant apart from timings/init.
 */
static const struct panel_desc elish_csot_desc = {
	.modes = elish_csot_modes,
	.num_modes = ARRAY_SIZE(elish_csot_modes),
	.dsi_info = {
		.type = "CSOT-elish",
		.channel = 0,
		.node = NULL,
	},
	.width_mm = 127,
	.height_mm = 203,
	.bpc = 8,
	.lanes = 3,	/* 3 lanes per link, two links in total */
	.format = MIPI_DSI_FMT_RGB888,
	.mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_CLOCK_NON_CONTINUOUS | MIPI_DSI_MODE_LPM,
	.init_sequence = elish_csot_init_sequence,
	.is_dual_dsi = true,
};
/*
 * Panel description for the j606f BOE panel: single-link DSI with a
 * DCS-controlled backlight (see nt36523_create_backlight()).
 */
static const struct panel_desc j606f_boe_desc = {
	.modes = j606f_boe_modes,
	.num_modes = ARRAY_SIZE(j606f_boe_modes),
	.width_mm = 143,
	.height_mm = 235,
	.bpc = 8,
	.lanes = 4,
	.format = MIPI_DSI_FMT_RGB888,
	.mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
		      MIPI_DSI_CLOCK_NON_CONTINUOUS | MIPI_DSI_MODE_LPM,
	.init_sequence = j606f_boe_init_sequence,
	.has_dcs_backlight = true,
};
/* Pulse the panel reset line twice, with 12 ms settling after every edge. */
static void nt36523_reset(struct panel_info *pinfo)
{
	unsigned int cycle;

	for (cycle = 0; cycle < 2; cycle++) {
		gpiod_set_value_cansleep(pinfo->reset_gpio, 1);
		usleep_range(12000, 13000);
		gpiod_set_value_cansleep(pinfo->reset_gpio, 0);
		usleep_range(12000, 13000);
	}
}
/*
 * drm_panel .prepare hook: power up vddio, reset the controller and run
 * the per-panel init sequence. Idempotent via the 'prepared' flag.
 */
static int nt36523_prepare(struct drm_panel *panel)
{
	struct panel_info *pinfo = to_panel_info(panel);
	int err;

	/* Nothing to do if the panel has already been brought up. */
	if (pinfo->prepared)
		return 0;

	err = regulator_enable(pinfo->vddio);
	if (err) {
		dev_err(panel->dev, "failed to enable vddio regulator: %d\n", err);
		return err;
	}

	nt36523_reset(pinfo);

	err = pinfo->desc->init_sequence(pinfo);
	if (err < 0) {
		/* Roll back the supply so unbalanced enables don't accumulate. */
		regulator_disable(pinfo->vddio);
		dev_err(panel->dev, "failed to initialize panel: %d\n", err);
		return err;
	}

	pinfo->prepared = true;

	return 0;
}
/*
 * drm_panel .disable hook: blank every DSI link first, then put each one
 * into sleep mode. Errors are logged but do not abort the teardown.
 */
static int nt36523_disable(struct drm_panel *panel)
{
	struct panel_info *pinfo = to_panel_info(panel);
	const int num_dsi = DSI_NUM_MIN + pinfo->desc->is_dual_dsi;
	int i, err;

	for (i = 0; i < num_dsi; i++) {
		err = mipi_dsi_dcs_set_display_off(pinfo->dsi[i]);
		if (err < 0)
			dev_err(&pinfo->dsi[i]->dev, "failed to set display off: %d\n", err);
	}

	for (i = 0; i < num_dsi; i++) {
		err = mipi_dsi_dcs_enter_sleep_mode(pinfo->dsi[i]);
		if (err < 0)
			dev_err(&pinfo->dsi[i]->dev, "failed to enter sleep mode: %d\n", err);
	}

	/* Give the panel time to settle after sleep-in before power removal. */
	msleep(70);

	return 0;
}
/* drm_panel .unprepare hook: assert reset and cut the vddio supply. */
static int nt36523_unprepare(struct drm_panel *panel)
{
	struct panel_info *info = to_panel_info(panel);

	/* Already powered down: nothing to do. */
	if (!info->prepared)
		return 0;

	/* Hold the controller in reset while the rail goes away. */
	gpiod_set_value_cansleep(info->reset_gpio, 1);
	regulator_disable(info->vddio);

	info->prepared = false;

	return 0;
}
/*
 * Driver .remove hook: detach both DSI links (the secondary one is also
 * unregistered, since probe created it) and drop the DRM panel.
 */
static void nt36523_remove(struct mipi_dsi_device *dsi)
{
	struct panel_info *pinfo = mipi_dsi_get_drvdata(dsi);
	int err;

	err = mipi_dsi_detach(pinfo->dsi[0]);
	if (err < 0)
		dev_err(&dsi->dev, "failed to detach from DSI0 host: %d\n", err);

	if (pinfo->desc->is_dual_dsi) {
		err = mipi_dsi_detach(pinfo->dsi[1]);
		if (err < 0)
			dev_err(&pinfo->dsi[1]->dev, "failed to detach from DSI1 host: %d\n", err);
		mipi_dsi_device_unregister(pinfo->dsi[1]);
	}

	drm_panel_remove(&pinfo->panel);
}
/*
 * drm_panel .get_modes hook: duplicate every mode from the panel
 * description into the connector. The first table entry is marked
 * preferred. Returns the number of modes added, or -ENOMEM.
 */
static int nt36523_get_modes(struct drm_panel *panel,
			     struct drm_connector *connector)
{
	struct panel_info *pinfo = to_panel_info(panel);
	unsigned int idx;

	for (idx = 0; idx < pinfo->desc->num_modes; idx++) {
		const struct drm_display_mode *src = &pinfo->desc->modes[idx];
		struct drm_display_mode *dup = drm_mode_duplicate(connector->dev, src);

		if (!dup) {
			dev_err(panel->dev, "failed to add mode %ux%u@%u\n",
				src->hdisplay, src->vdisplay, drm_mode_vrefresh(src));
			return -ENOMEM;
		}

		dup->type = DRM_MODE_TYPE_DRIVER;
		if (idx == 0)
			dup->type |= DRM_MODE_TYPE_PREFERRED;

		drm_mode_set_name(dup);
		drm_mode_probed_add(connector, dup);
	}

	connector->display_info.width_mm = pinfo->desc->width_mm;
	connector->display_info.height_mm = pinfo->desc->height_mm;
	connector->display_info.bpc = pinfo->desc->bpc;

	return pinfo->desc->num_modes;
}
/* drm_panel .get_orientation hook: orientation was parsed from DT at probe. */
static enum drm_panel_orientation nt36523_get_orientation(struct drm_panel *panel)
{
	struct panel_info *pinfo = to_panel_info(panel);

	return pinfo->orientation;
}
/* DRM panel operations: power sequencing, mode enumeration and orientation. */
static const struct drm_panel_funcs nt36523_panel_funcs = {
	.disable = nt36523_disable,
	.prepare = nt36523_prepare,
	.unprepare = nt36523_unprepare,
	.get_modes = nt36523_get_modes,
	.get_orientation = nt36523_get_orientation,
};
/*
 * Backlight .update_status hook: push the requested brightness with a
 * 16-bit DCS write. MIPI_DSI_MODE_LPM is dropped around the access so
 * the command is sent in HS mode (vendor requirement, presumably).
 *
 * Fix: the original left MIPI_DSI_MODE_LPM cleared when the DCS write
 * failed, permanently altering the link's mode flags; restore it on
 * every path.
 */
static int nt36523_bl_update_status(struct backlight_device *bl)
{
	struct mipi_dsi_device *dsi = bl_get_data(bl);
	u16 brightness = backlight_get_brightness(bl);
	int ret;

	dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
	ret = mipi_dsi_dcs_set_display_brightness_large(dsi, brightness);
	dsi->mode_flags |= MIPI_DSI_MODE_LPM;

	return ret < 0 ? ret : 0;
}
/*
 * Backlight .get_brightness hook: read back the 16-bit brightness via DCS.
 * Returns the brightness value, or a negative error code.
 *
 * Fix: restore MIPI_DSI_MODE_LPM even when the DCS read fails (the
 * original returned early with the flag still cleared).
 */
static int nt36523_bl_get_brightness(struct backlight_device *bl)
{
	struct mipi_dsi_device *dsi = bl_get_data(bl);
	u16 brightness;
	int ret;

	dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
	ret = mipi_dsi_dcs_get_display_brightness_large(dsi, &brightness);
	dsi->mode_flags |= MIPI_DSI_MODE_LPM;
	if (ret < 0)
		return ret;

	return brightness;
}
/* Backlight operations for panels with a DCS-controlled backlight. */
static const struct backlight_ops nt36523_bl_ops = {
	.update_status = nt36523_bl_update_status,
	.get_brightness = nt36523_bl_get_brightness,
};
/*
 * Register a raw backlight device driven over DCS on the given DSI link.
 * Range is 0..4095 with an initial level of 512; the scale is declared
 * non-linear. Returns the device or an ERR_PTR on failure.
 */
static struct backlight_device *nt36523_create_backlight(struct mipi_dsi_device *dsi)
{
	struct device *dev = &dsi->dev;
	const struct backlight_properties props = {
		.type = BACKLIGHT_RAW,
		.brightness = 512,
		.max_brightness = 4095,
		.scale = BACKLIGHT_SCALE_NON_LINEAR,
	};

	return devm_backlight_device_register(dev, dev_name(dev), dev, dsi,
					      &nt36523_bl_ops, &props);
}
/*
 * Driver .probe hook: acquire resources, optionally register the
 * secondary DSI device for dual-link panels, set up the DRM panel and
 * attach all links.
 *
 * Fixes over the original:
 *  - mipi_dsi_device_register_full() returns an ERR_PTR on failure, not
 *    NULL, so check with IS_ERR() and propagate PTR_ERR().
 *  - Failure paths after the secondary DSI device is registered now
 *    unregister it, and an attach failure detaches already-attached
 *    links and removes the panel, instead of leaking both.
 */
static int nt36523_probe(struct mipi_dsi_device *dsi)
{
	struct device *dev = &dsi->dev;
	struct device_node *dsi1;
	struct mipi_dsi_host *dsi1_host;
	struct panel_info *pinfo;
	const struct mipi_dsi_device_info *info;
	int i, ret;

	pinfo = devm_kzalloc(dev, sizeof(*pinfo), GFP_KERNEL);
	if (!pinfo)
		return -ENOMEM;

	pinfo->vddio = devm_regulator_get(dev, "vddio");
	if (IS_ERR(pinfo->vddio))
		return dev_err_probe(dev, PTR_ERR(pinfo->vddio), "failed to get vddio regulator\n");

	pinfo->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
	if (IS_ERR(pinfo->reset_gpio))
		return dev_err_probe(dev, PTR_ERR(pinfo->reset_gpio), "failed to get reset gpio\n");

	pinfo->desc = of_device_get_match_data(dev);
	if (!pinfo->desc)
		return -ENODEV;

	/* If the panel is dual dsi, register DSI1 */
	if (pinfo->desc->is_dual_dsi) {
		info = &pinfo->desc->dsi_info;

		dsi1 = of_graph_get_remote_node(dsi->dev.of_node, 1, -1);
		if (!dsi1) {
			dev_err(dev, "cannot get secondary DSI node.\n");
			return -ENODEV;
		}

		dsi1_host = of_find_mipi_dsi_host_by_node(dsi1);
		of_node_put(dsi1);
		if (!dsi1_host)
			return dev_err_probe(dev, -EPROBE_DEFER, "cannot get secondary DSI host\n");

		pinfo->dsi[1] = mipi_dsi_device_register_full(dsi1_host, info);
		if (IS_ERR(pinfo->dsi[1])) {
			dev_err(dev, "cannot get secondary DSI device\n");
			return PTR_ERR(pinfo->dsi[1]);
		}
	}

	pinfo->dsi[0] = dsi;
	mipi_dsi_set_drvdata(dsi, pinfo);
	drm_panel_init(&pinfo->panel, dev, &nt36523_panel_funcs, DRM_MODE_CONNECTOR_DSI);

	ret = of_drm_get_panel_orientation(dev->of_node, &pinfo->orientation);
	if (ret < 0) {
		dev_err(dev, "%pOF: failed to get orientation %d\n", dev->of_node, ret);
		goto err_unregister_dsi1;
	}

	if (pinfo->desc->has_dcs_backlight) {
		pinfo->panel.backlight = nt36523_create_backlight(dsi);
		if (IS_ERR(pinfo->panel.backlight)) {
			ret = dev_err_probe(dev, PTR_ERR(pinfo->panel.backlight),
					    "Failed to create backlight\n");
			goto err_unregister_dsi1;
		}
	} else {
		ret = drm_panel_of_backlight(&pinfo->panel);
		if (ret) {
			ret = dev_err_probe(dev, ret, "Failed to get backlight\n");
			goto err_unregister_dsi1;
		}
	}

	drm_panel_add(&pinfo->panel);

	for (i = 0; i < DSI_NUM_MIN + pinfo->desc->is_dual_dsi; i++) {
		pinfo->dsi[i]->lanes = pinfo->desc->lanes;
		pinfo->dsi[i]->format = pinfo->desc->format;
		pinfo->dsi[i]->mode_flags = pinfo->desc->mode_flags;

		ret = mipi_dsi_attach(pinfo->dsi[i]);
		if (ret < 0) {
			dev_err_probe(dev, ret, "cannot attach to DSI%d host.\n", i);
			goto err_detach;
		}
	}

	return 0;

err_detach:
	/* Undo any links that were already attached before the failure. */
	while (--i >= 0)
		mipi_dsi_detach(pinfo->dsi[i]);
	drm_panel_remove(&pinfo->panel);
err_unregister_dsi1:
	if (pinfo->desc->is_dual_dsi)
		mipi_dsi_device_unregister(pinfo->dsi[1]);
	return ret;
}
/* Device-tree match table: one entry per supported panel variant. */
static const struct of_device_id nt36523_of_match[] = {
	{
		.compatible = "lenovo,j606f-boe-nt36523w",
		.data = &j606f_boe_desc,
	},
	{
		.compatible = "xiaomi,elish-boe-nt36523",
		.data = &elish_boe_desc,
	},
	{
		.compatible = "xiaomi,elish-csot-nt36523",
		.data = &elish_csot_desc,
	},
	{},
};
MODULE_DEVICE_TABLE(of, nt36523_of_match);
/* MIPI DSI driver registration. */
static struct mipi_dsi_driver nt36523_driver = {
	.probe = nt36523_probe,
	.remove = nt36523_remove,
	.driver = {
		.name = "panel-novatek-nt36523",
		.of_match_table = nt36523_of_match,
	},
};
module_mipi_dsi_driver(nt36523_driver);
MODULE_AUTHOR("Jianhua Lu <[email protected]>");
MODULE_DESCRIPTION("DRM driver for Novatek NT36523 based MIPI DSI panels");
MODULE_LICENSE("GPL");
/* linux-master | drivers/gpu/drm/panel/panel-novatek-nt36523.c */
// SPDX-License-Identifier: GPL-2.0-only
/*
* Ilitek ILI9341 TFT LCD drm_panel driver.
*
* This panel can be configured to support:
* - 16-bit parallel RGB interface
* - 18-bit parallel RGB interface
* - 4-line serial spi interface
*
* Copyright (C) 2021 Dillon Min <[email protected]>
*
* For dbi+dpi part:
* Derived from drivers/drm/gpu/panel/panel-ilitek-ili9322.c
* the reuse of DBI abstraction part referred from Linus's patch
* "drm/panel: s6e63m0: Switch to DBI abstraction for SPI"
*
* For only-dbi part, copy from David's code (drm/tiny/ili9341.c)
* Copyright 2018 David Lechner <[email protected]>
*/
#include <linux/backlight.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/regulator/consumer.h>
#include <linux/spi/spi.h>
#include <video/mipi_display.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_generic.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_mipi_dbi.h>
#include <drm/drm_modes.h>
#include <drm/drm_panel.h>
#include <drm/drm_print.h>
/* ILI9341 extended (non-DCS) command opcodes */
#define ILI9341_RGB_INTERFACE  0xb0   /* RGB Interface Signal Control */
#define ILI9341_FRC            0xb1   /* Frame Rate Control register */
#define ILI9341_DFC            0xb6   /* Display Function Control register */
#define ILI9341_POWER1         0xc0   /* Power Control 1 register */
#define ILI9341_POWER2         0xc1   /* Power Control 2 register */
#define ILI9341_VCOM1          0xc5   /* VCOM Control 1 register */
#define ILI9341_VCOM2          0xc7   /* VCOM Control 2 register */
#define ILI9341_POWERA         0xcb   /* Power control A register */
#define ILI9341_POWERB         0xcf   /* Power control B register */
#define ILI9341_PGAMMA         0xe0   /* Positive Gamma Correction register */
#define ILI9341_NGAMMA         0xe1   /* Negative Gamma Correction register */
#define ILI9341_DTCA           0xe8   /* Driver timing control A */
#define ILI9341_DTCB           0xea   /* Driver timing control B */
#define ILI9341_POWER_SEQ      0xed   /* Power on sequence register */
#define ILI9341_3GAMMA_EN      0xf2   /* 3 Gamma enable register */
#define ILI9341_INTERFACE      0xf6   /* Interface control register */
#define ILI9341_PRC            0xf7   /* Pump ratio control register */
#define ILI9341_ETMOD	       0xb7   /* Entry mode set */

/* Memory Access Control (MADCTL, 36h) bit fields */
#define ILI9341_MADCTL_BGR	BIT(3)
#define ILI9341_MADCTL_MV	BIT(5)
#define ILI9341_MADCTL_MX	BIT(6)
#define ILI9341_MADCTL_MY	BIT(7)

/* Parameter byte counts for the multi-byte commands above */
#define ILI9341_POWER_B_LEN	3
#define ILI9341_POWER_SEQ_LEN	4
#define ILI9341_DTCA_LEN	3
#define ILI9341_DTCB_LEN	2
#define ILI9341_POWER_A_LEN	5
#define ILI9341_DFC_1_LEN	2
#define ILI9341_FRC_LEN		2
#define ILI9341_VCOM_1_LEN	2
#define ILI9341_DFC_2_LEN	4
#define ILI9341_COLUMN_ADDR_LEN	4
#define ILI9341_PAGE_ADDR_LEN	4
#define ILI9341_INTERFACE_LEN	3
#define ILI9341_PGAMMA_LEN	15
#define ILI9341_NGAMMA_LEN	15
#define ILI9341_CA_LEN		3

/* Register field values used by the tables and init code below */
#define ILI9341_PIXEL_DPI_16_BITS	(BIT(6) | BIT(4))
#define ILI9341_PIXEL_DPI_18_BITS	(BIT(6) | BIT(5))
#define ILI9341_GAMMA_CURVE_1		BIT(0)
#define ILI9341_IF_WE_MODE		BIT(0)
#define ILI9341_IF_BIG_ENDIAN		0x00
#define ILI9341_IF_DM_RGB		BIT(2)
#define ILI9341_IF_DM_INTERNAL		0x00
#define ILI9341_IF_DM_VSYNC		BIT(3)
#define ILI9341_IF_RM_RGB		BIT(1)
#define ILI9341_IF_RIM_RGB		0x00

/* Last addressable column (239) and page (319) of the 240x320 frame */
#define ILI9341_COLUMN_ADDR		0x00ef
#define ILI9341_PAGE_ADDR		0x013f

/* RGB Interface Signal Control (B0h) bit fields */
#define ILI9341_RGB_EPL			BIT(0)
#define ILI9341_RGB_DPL			BIT(1)
#define ILI9341_RGB_HSPL		BIT(2)
#define ILI9341_RGB_VSPL		BIT(3)
#define ILI9341_RGB_DE_MODE		BIT(6)
#define ILI9341_RGB_DISP_PATH_MEM	BIT(7)

/* Fixed register values used by the DBI (SPI-only) init path */
#define ILI9341_DBI_VCOMH_4P6V		0x23
#define ILI9341_DBI_PWR_2_DEFAULT	0x10
#define ILI9341_DBI_PRC_NORMAL		0x20
#define ILI9341_DBI_VCOM_1_VMH_4P25V	0x3e
#define ILI9341_DBI_VCOM_1_VML_1P5V	0x28
#define ILI9341_DBI_VCOM_2_DEC_58	0x86
#define ILI9341_DBI_FRC_DIVA		0x00
#define ILI9341_DBI_FRC_RTNA		0x1b
#define ILI9341_DBI_EMS_GAS		BIT(0)
#define ILI9341_DBI_EMS_DTS		BIT(1)
#define ILI9341_DBI_EMS_GON		BIT(2)
/*
 * struct ili9341_config - the system specific ILI9341 configuration:
 * per-board register values sent verbatim by ili9341_dpi_init().
 */
struct ili9341_config {
	/* max_spi_speed: maximum SPI clock for this board (Hz) */
	u32 max_spi_speed;
	/* mode: the drm display mode */
	const struct drm_display_mode mode;
	/* ca: parameters for vendor command 0xca (purpose undocumented) */
	u8 ca[ILI9341_CA_LEN];
	/* power_b: Power control B (CFh) */
	u8 power_b[ILI9341_POWER_B_LEN];
	/* power_seq: Power on sequence control (EDh) */
	u8 power_seq[ILI9341_POWER_SEQ_LEN];
	/* dtca: Driver timing control A (E8h) */
	u8 dtca[ILI9341_DTCA_LEN];
	/* dtcb: Driver timing control B (EAh) */
	u8 dtcb[ILI9341_DTCB_LEN];
	/* power_a: Power control A (CBh) */
	u8 power_a[ILI9341_POWER_A_LEN];
	/* frc: Frame Rate Control (In Normal Mode/Full Colors) (B1h) */
	u8 frc[ILI9341_FRC_LEN];
	/* prc: Pump ratio control (F7h) */
	u8 prc;
	/* dfc_1: B6h DISCTRL (Display Function Control) */
	u8 dfc_1[ILI9341_DFC_1_LEN];
	/* power_1: Power Control 1 (C0h) */
	u8 power_1;
	/* power_2: Power Control 2 (C1h) */
	u8 power_2;
	/* vcom_1: VCOM Control 1(C5h) */
	u8 vcom_1[ILI9341_VCOM_1_LEN];
	/* vcom_2: VCOM Control 2(C7h) */
	u8 vcom_2;
	/* address_mode: Memory Access Control (36h) */
	u8 address_mode;
	/* g3amma_en: Enable 3G (F2h); field name typo kept for compatibility */
	u8 g3amma_en;
	/* rgb_interface: RGB Interface Signal Control (B0h) */
	u8 rgb_interface;
	/* dfc_2: refer to dfc_1 */
	u8 dfc_2[ILI9341_DFC_2_LEN];
	/* column_addr: Column Address Set (2Ah) */
	u8 column_addr[ILI9341_COLUMN_ADDR_LEN];
	/* page_addr: Page Address Set (2Bh) */
	u8 page_addr[ILI9341_PAGE_ADDR_LEN];
	/* interface: Interface Control (F6h) */
	u8 interface[ILI9341_INTERFACE_LEN];
	/*
	 * pixel_format: Pixel Format Set (3Ah) - the pixel format for the
	 * RGB image data used by the RGB interface.
	 */
	u8 pixel_format;
	/*
	 * gamma_curve: Gamma Set (26h) - selects the desired Gamma curve
	 * for the display.
	 */
	u8 gamma_curve;
	/* pgamma: Positive Gamma Correction (E0h) */
	u8 pgamma[ILI9341_PGAMMA_LEN];
	/* ngamma: Negative Gamma Correction (E1h) */
	u8 ngamma[ILI9341_NGAMMA_LEN];
};
/* Per-panel instance state, shared by the DPI and DBI code paths. */
struct ili9341 {
	struct device *dev;
	const struct ili9341_config *conf;	/* board-specific register values */
	struct drm_panel panel;
	struct gpio_desc *reset_gpio;
	struct gpio_desc *dc_gpio;		/* data/command select line */
	struct mipi_dbi *dbi;
	u32 max_spi_speed;
	struct regulator_bulk_data supplies[3];
};
/*
 * The Stm32f429-disco board has a panel ili9341 connected to ltdc controller
 *
 * Fix: the end-address high byte of the Column/Page Address Set commands
 * (DCS 2Ah/2Bh) is bits [15:8] of the address; the previous ">> 4" shift
 * produced 0x0e/0x13 instead of the intended 0x00/0x01.
 */
static const struct ili9341_config ili9341_stm32f429_disco_data = {
	.max_spi_speed = 10000000,
	.mode = {
		.clock = 6100,
		.hdisplay = 240,
		.hsync_start = 240 + 10,/* hfp 10 */
		.hsync_end = 240 + 10 + 10,/* hsync 10 */
		.htotal = 240 + 10 + 10 + 20,/* hbp 20 */
		.vdisplay = 320,
		.vsync_start = 320 + 4,/* vfp 4 */
		.vsync_end = 320 + 4 + 2,/* vsync 2 */
		.vtotal = 320 + 4 + 2 + 2,/* vbp 2 */
		.flags = 0,
		.width_mm = 65,
		.height_mm = 50,
		.type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
	},
	.ca = {0xc3, 0x08, 0x50},
	.power_b = {0x00, 0xc1, 0x30},
	.power_seq = {0x64, 0x03, 0x12, 0x81},
	.dtca = {0x85, 0x00, 0x78},
	.power_a = {0x39, 0x2c, 0x00, 0x34, 0x02},
	.prc = 0x20,
	.dtcb = {0x00, 0x00},
	/* 0x00 fosc, 0x1b 70hz */
	.frc = {0x00, 0x1b},
	/*
	 * 0x0a Interval scan, AGND AGND AGND AGND
	 * 0xa2 Normally white, G1 -> G320, S720 -> S1,
	 * Scan Cycle 5 frames,85ms
	 */
	.dfc_1 = {0x0a, 0xa2},
	/* 0x10 3.65v */
	.power_1 = 0x10,
	/* 0x10 AVDD=vci*2, VGH=vci*7, VGL=-vci*4 */
	.power_2 = 0x10,
	/* 0x45 VCOMH 4.425v, 0x15 VCOML -1.975*/
	.vcom_1 = {0x45, 0x15},
	/* 0x90 offset voltage, VMH-48, VML-48 */
	.vcom_2 = 0x90,
	/*
	 * 0xc8 Row Address Order, Column Address Order
	 * BGR 1
	 */
	.address_mode = 0xc8,
	.g3amma_en = 0x00,
	/*
	 * 0xc2
	 * Display Data Path: Memory
	 * RGB: DE mode
	 * DOTCLK polarity set (data fetched at the falling time)
	 */
	.rgb_interface = ILI9341_RGB_DISP_PATH_MEM |
			ILI9341_RGB_DE_MODE |
			ILI9341_RGB_DPL,
	/*
	 * 0x0a
	 * Gate outputs in non-display area: Interval scan
	 * Determine source/VCOM output in a non-display area in the partial
	 * display mode: AGND AGND AGND AGND
	 *
	 * 0xa7
	 * Scan Cycle: 15 frames
	 * fFLM = 60Hz: 255ms
	 * Liquid crystal type: Normally white
	 * Gate Output Scan Direction: G1 -> G320
	 * Source Output Scan Direction: S720 -> S1
	 *
	 * 0x27
	 * LCD Driver Line: 320 lines
	 *
	 * 0x04
	 * PCDIV: 4
	 */
	.dfc_2 = {0x0a, 0xa7, 0x27, 0x04},
	/* column address: start 0, end 239 (240 columns) */
	.column_addr = {0x00, 0x00, (ILI9341_COLUMN_ADDR >> 8) & 0xff,
				ILI9341_COLUMN_ADDR & 0xff},
	/* page address: start 0, end 319 (320 pages) */
	.page_addr = {0x00, 0x00, (ILI9341_PAGE_ADDR >> 8) & 0xff,
				ILI9341_PAGE_ADDR & 0xff},
	/*
	 * Memory write control: When the transfer number of data exceeds
	 * (EC-SC+1)*(EP-SP+1), the column and page number will be
	 * reset, and the exceeding data will be written into the following
	 * column and page.
	 * Display Operation Mode: RGB Interface Mode
	 * Interface for RAM Access: RGB interface
	 * 16- bit RGB interface (1 transfer/pixel)
	 */
	.interface = {ILI9341_IF_WE_MODE, 0x00,
			ILI9341_IF_DM_RGB | ILI9341_IF_RM_RGB},
	/* DPI: 16 bits / pixel */
	.pixel_format = ILI9341_PIXEL_DPI_16_BITS,
	/* Curve Selected: Gamma curve 1 (G2.2) */
	.gamma_curve = ILI9341_GAMMA_CURVE_1,
	.pgamma = {0x0f, 0x29, 0x24, 0x0c, 0x0e,
			0x09, 0x4e, 0x78, 0x3c, 0x09,
			0x13, 0x05, 0x17, 0x11, 0x00},
	.ngamma = {0x00, 0x16, 0x1b, 0x04, 0x11,
			0x07, 0x31, 0x33, 0x42, 0x05,
			0x0c, 0x0a, 0x28, 0x2f, 0x0f},
};
/* Map an embedded drm_panel back to its containing ili9341 instance. */
static inline struct ili9341 *panel_to_ili9341(struct drm_panel *panel)
{
	return container_of(panel, struct ili9341, panel);
}
static void ili9341_dpi_init(struct ili9341 *ili)
{
struct device *dev = (&ili->panel)->dev;
struct mipi_dbi *dbi = ili->dbi;
struct ili9341_config *cfg = (struct ili9341_config *)ili->conf;
/* Power Control */
mipi_dbi_command_stackbuf(dbi, 0xca, cfg->ca, ILI9341_CA_LEN);
mipi_dbi_command_stackbuf(dbi, ILI9341_POWERB, cfg->power_b,
ILI9341_POWER_B_LEN);
mipi_dbi_command_stackbuf(dbi, ILI9341_POWER_SEQ, cfg->power_seq,
ILI9341_POWER_SEQ_LEN);
mipi_dbi_command_stackbuf(dbi, ILI9341_DTCA, cfg->dtca,
ILI9341_DTCA_LEN);
mipi_dbi_command_stackbuf(dbi, ILI9341_POWERA, cfg->power_a,
ILI9341_POWER_A_LEN);
mipi_dbi_command(ili->dbi, ILI9341_PRC, cfg->prc);
mipi_dbi_command_stackbuf(dbi, ILI9341_DTCB, cfg->dtcb,
ILI9341_DTCB_LEN);
mipi_dbi_command_stackbuf(dbi, ILI9341_FRC, cfg->frc, ILI9341_FRC_LEN);
mipi_dbi_command_stackbuf(dbi, ILI9341_DFC, cfg->dfc_1,
ILI9341_DFC_1_LEN);
mipi_dbi_command(dbi, ILI9341_POWER1, cfg->power_1);
mipi_dbi_command(dbi, ILI9341_POWER2, cfg->power_2);
/* VCOM */
mipi_dbi_command_stackbuf(dbi, ILI9341_VCOM1, cfg->vcom_1,
ILI9341_VCOM_1_LEN);
mipi_dbi_command(dbi, ILI9341_VCOM2, cfg->vcom_2);
mipi_dbi_command(dbi, MIPI_DCS_SET_ADDRESS_MODE, cfg->address_mode);
/* Gamma */
mipi_dbi_command(dbi, ILI9341_3GAMMA_EN, cfg->g3amma_en);
mipi_dbi_command(dbi, ILI9341_RGB_INTERFACE, cfg->rgb_interface);
mipi_dbi_command_stackbuf(dbi, ILI9341_DFC, cfg->dfc_2,
ILI9341_DFC_2_LEN);
/* Colomn address set */
mipi_dbi_command_stackbuf(dbi, MIPI_DCS_SET_COLUMN_ADDRESS,
cfg->column_addr, ILI9341_COLUMN_ADDR_LEN);
/* Page address set */
mipi_dbi_command_stackbuf(dbi, MIPI_DCS_SET_PAGE_ADDRESS,
cfg->page_addr, ILI9341_PAGE_ADDR_LEN);
mipi_dbi_command_stackbuf(dbi, ILI9341_INTERFACE, cfg->interface,
ILI9341_INTERFACE_LEN);
/* Format */
mipi_dbi_command(dbi, MIPI_DCS_SET_PIXEL_FORMAT, cfg->pixel_format);
mipi_dbi_command(dbi, MIPI_DCS_WRITE_MEMORY_START);
msleep(200);
mipi_dbi_command(dbi, MIPI_DCS_SET_GAMMA_CURVE, cfg->gamma_curve);
mipi_dbi_command_stackbuf(dbi, ILI9341_PGAMMA, cfg->pgamma,
ILI9341_PGAMMA_LEN);
mipi_dbi_command_stackbuf(dbi, ILI9341_NGAMMA, cfg->ngamma,
ILI9341_NGAMMA_LEN);
mipi_dbi_command(dbi, MIPI_DCS_EXIT_SLEEP_MODE);
msleep(200);
mipi_dbi_command(dbi, MIPI_DCS_SET_DISPLAY_ON);
mipi_dbi_command(dbi, MIPI_DCS_WRITE_MEMORY_START);
dev_info(dev, "Initialized display rgb interface\n");
}
/*
 * Power up the panel: enable the supply rails while reset is asserted,
 * then release reset. Returns 0 or a negative errno from the regulator
 * framework.
 */
static int ili9341_dpi_power_on(struct ili9341 *ili)
{
	struct device *dev = ili->panel.dev;
	int err;

	/* Hold the controller in reset while the rails come up. */
	gpiod_set_value(ili->reset_gpio, 1);

	err = regulator_bulk_enable(ARRAY_SIZE(ili->supplies), ili->supplies);
	if (err < 0) {
		dev_err(dev, "unable to enable vcc\n");
		return err;
	}
	msleep(20);

	/* Release reset and give the chip time to come out of it. */
	gpiod_set_value(ili->reset_gpio, 0);
	msleep(20);

	return 0;
}
/* Power down: assert reset, then disable all supply rails. */
static int ili9341_dpi_power_off(struct ili9341 *ili)
{
	/* Assert RESET */
	gpiod_set_value(ili->reset_gpio, 1);

	/* Disable power */
	return regulator_bulk_disable(ARRAY_SIZE(ili->supplies),
				      ili->supplies);
}
/* drm_panel .disable hook: blank the display (DCS 28h). */
static int ili9341_dpi_disable(struct drm_panel *panel)
{
	struct ili9341 *ili = panel_to_ili9341(panel);

	mipi_dbi_command(ili->dbi, MIPI_DCS_SET_DISPLAY_OFF);
	return 0;
}
/* drm_panel .unprepare hook: cut power to the panel. */
static int ili9341_dpi_unprepare(struct drm_panel *panel)
{
	struct ili9341 *ili = panel_to_ili9341(panel);

	return ili9341_dpi_power_off(ili);
}
/*
 * drm_panel .prepare hook: power the panel, then push the full register
 * initialization over SPI. Returns 0 or a negative errno from power-on.
 */
static int ili9341_dpi_prepare(struct drm_panel *panel)
{
	struct ili9341 *ili = panel_to_ili9341(panel);
	int err;

	err = ili9341_dpi_power_on(ili);
	if (err < 0)
		return err;

	ili9341_dpi_init(ili);

	return 0;
}
/* drm_panel .enable hook: turn the display output on (DCS 29h). */
static int ili9341_dpi_enable(struct drm_panel *panel)
{
	struct ili9341 *ili = panel_to_ili9341(panel);

	mipi_dbi_command(ili->dbi, MIPI_DCS_SET_DISPLAY_ON);
	return 0;
}
static int ili9341_dpi_get_modes(struct drm_panel *panel,
struct drm_connector *connector)
{
struct ili9341 *ili = panel_to_ili9341(panel);
struct drm_device *drm = connector->dev;
struct drm_display_mode *mode;
struct drm_display_info *info;
info = &connector->display_info;
info->width_mm = ili->conf->mode.width_mm;
info->height_mm = ili->conf->mode.height_mm;
if (ili->conf->rgb_interface & ILI9341_RGB_DPL)
info->bus_flags |= DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE;
else
info->bus_flags |= DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE;
if (ili->conf->rgb_interface & ILI9341_RGB_EPL)
info->bus_flags |= DRM_BUS_FLAG_DE_LOW;
else
info->bus_flags |= DRM_BUS_FLAG_DE_HIGH;
mode = drm_mode_duplicate(drm, &ili->conf->mode);
if (!mode) {
drm_err(drm, "bad mode or failed to add mode\n");
return -EINVAL;
}
drm_mode_set_name(mode);
/* Set up the polarity */
if (ili->conf->rgb_interface & ILI9341_RGB_HSPL)
mode->flags |= DRM_MODE_FLAG_PHSYNC;
else
mode->flags |= DRM_MODE_FLAG_NHSYNC;
if (ili->conf->rgb_interface & ILI9341_RGB_VSPL)
mode->flags |= DRM_MODE_FLAG_PVSYNC;
else
mode->flags |= DRM_MODE_FLAG_NVSYNC;
drm_mode_probed_add(connector, mode);
return 1; /* Number of modes */
}
/* DRM panel operations for the DPI (parallel RGB) usage of the chip. */
static const struct drm_panel_funcs ili9341_dpi_funcs = {
	.disable = ili9341_dpi_disable,
	.unprepare = ili9341_dpi_unprepare,
	.prepare = ili9341_dpi_prepare,
	.enable = ili9341_dpi_enable,
	.get_modes = ili9341_dpi_get_modes,
};
/*
 * Display-pipe .enable hook for the DBI (SPI framebuffer) variant.
 *
 * Performs a conditional power-on reset and, when the controller actually
 * needs it, replays the full register initialization sequence (power,
 * VCOM, pixel format, frame rate, gamma, entry mode, display function)
 * before enabling the display.  Register values come from the vendor
 * init sequence.  Finally programs the address mode for the configured
 * rotation and flushes the first frame.
 */
static void ili9341_dbi_enable(struct drm_simple_display_pipe *pipe,
			       struct drm_crtc_state *crtc_state,
			       struct drm_plane_state *plane_state)
{
	struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(pipe->crtc.dev);
	struct mipi_dbi *dbi = &dbidev->dbi;
	u8 addr_mode;
	int ret, idx;

	if (!drm_dev_enter(pipe->crtc.dev, &idx))
		return;

	ret = mipi_dbi_poweron_conditional_reset(dbidev);
	if (ret < 0)
		goto out_exit;
	/* ret == 1 means the controller kept its state: skip re-init. */
	if (ret == 1)
		goto out_enable;

	mipi_dbi_command(dbi, MIPI_DCS_SET_DISPLAY_OFF);

	mipi_dbi_command(dbi, ILI9341_POWERB, 0x00, 0xc1, 0x30);
	mipi_dbi_command(dbi, ILI9341_POWER_SEQ, 0x64, 0x03, 0x12, 0x81);
	mipi_dbi_command(dbi, ILI9341_DTCA, 0x85, 0x00, 0x78);
	mipi_dbi_command(dbi, ILI9341_POWERA, 0x39, 0x2c, 0x00, 0x34, 0x02);
	mipi_dbi_command(dbi, ILI9341_PRC, ILI9341_DBI_PRC_NORMAL);
	mipi_dbi_command(dbi, ILI9341_DTCB, 0x00, 0x00);

	/* Power Control */
	mipi_dbi_command(dbi, ILI9341_POWER1, ILI9341_DBI_VCOMH_4P6V);
	mipi_dbi_command(dbi, ILI9341_POWER2, ILI9341_DBI_PWR_2_DEFAULT);
	/* VCOM */
	mipi_dbi_command(dbi, ILI9341_VCOM1, ILI9341_DBI_VCOM_1_VMH_4P25V,
			 ILI9341_DBI_VCOM_1_VML_1P5V);
	mipi_dbi_command(dbi, ILI9341_VCOM2, ILI9341_DBI_VCOM_2_DEC_58);

	/* Memory Access Control */
	mipi_dbi_command(dbi, MIPI_DCS_SET_PIXEL_FORMAT,
			 MIPI_DCS_PIXEL_FMT_16BIT);

	/* Frame Rate */
	mipi_dbi_command(dbi, ILI9341_FRC, ILI9341_DBI_FRC_DIVA & 0x03,
			 ILI9341_DBI_FRC_RTNA & 0x1f);

	/* Gamma */
	mipi_dbi_command(dbi, ILI9341_3GAMMA_EN, 0x00);
	mipi_dbi_command(dbi, MIPI_DCS_SET_GAMMA_CURVE, ILI9341_GAMMA_CURVE_1);
	mipi_dbi_command(dbi, ILI9341_PGAMMA,
			 0x0f, 0x31, 0x2b, 0x0c, 0x0e, 0x08, 0x4e, 0xf1,
			 0x37, 0x07, 0x10, 0x03, 0x0e, 0x09, 0x00);
	mipi_dbi_command(dbi, ILI9341_NGAMMA,
			 0x00, 0x0e, 0x14, 0x03, 0x11, 0x07, 0x31, 0xc1,
			 0x48, 0x08, 0x0f, 0x0c, 0x31, 0x36, 0x0f);

	/* DDRAM */
	mipi_dbi_command(dbi, ILI9341_ETMOD, ILI9341_DBI_EMS_GAS |
			 ILI9341_DBI_EMS_DTS |
			 ILI9341_DBI_EMS_GON);

	/* Display */
	mipi_dbi_command(dbi, ILI9341_DFC, 0x08, 0x82, 0x27, 0x00);
	mipi_dbi_command(dbi, MIPI_DCS_EXIT_SLEEP_MODE);
	msleep(100);

	mipi_dbi_command(dbi, MIPI_DCS_SET_DISPLAY_ON);
	msleep(100);

out_enable:
	/* Map the DT "rotation" property onto MADCTL row/column swap bits. */
	switch (dbidev->rotation) {
	default:
		addr_mode = ILI9341_MADCTL_MX;
		break;
	case 90:
		addr_mode = ILI9341_MADCTL_MV;
		break;
	case 180:
		addr_mode = ILI9341_MADCTL_MY;
		break;
	case 270:
		addr_mode = ILI9341_MADCTL_MV | ILI9341_MADCTL_MY |
			ILI9341_MADCTL_MX;
		break;
	}
	addr_mode |= ILI9341_MADCTL_BGR;
	mipi_dbi_command(dbi, MIPI_DCS_SET_ADDRESS_MODE, addr_mode);
	mipi_dbi_enable_flush(dbidev, crtc_state, plane_state);
	drm_info(&dbidev->drm, "Initialized display serial interface\n");
out_exit:
	drm_dev_exit(idx);
}
/* Simple display pipe, fixed mode and fops for the DBI variant. */
static const struct drm_simple_display_pipe_funcs ili9341_dbi_funcs = {
	DRM_MIPI_DBI_SIMPLE_DISPLAY_PIPE_FUNCS(ili9341_dbi_enable),
};

/* 240x320 portrait panel, 37x49 mm active area. */
static const struct drm_display_mode ili9341_dbi_mode = {
	DRM_SIMPLE_MODE(240, 320, 37, 49),
};

DEFINE_DRM_GEM_DMA_FOPS(ili9341_dbi_fops);
/* DRM driver description used only by the DBI (tinydrm-style) variant. */
static struct drm_driver ili9341_dbi_driver = {
	.driver_features	= DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
	.fops			= &ili9341_dbi_fops,
	DRM_GEM_DMA_DRIVER_OPS_VMAP,
	.debugfs_init		= mipi_dbi_debugfs_init,
	.name			= "ili9341",
	.desc			= "Ilitek ILI9341",
	.date			= "20210716",
	.major			= 1,
	.minor			= 0,
};
static int ili9341_dbi_probe(struct spi_device *spi, struct gpio_desc *dc,
struct gpio_desc *reset)
{
struct device *dev = &spi->dev;
struct mipi_dbi_dev *dbidev;
struct mipi_dbi *dbi;
struct drm_device *drm;
struct regulator *vcc;
u32 rotation = 0;
int ret;
vcc = devm_regulator_get_optional(dev, "vcc");
if (IS_ERR(vcc)) {
dev_err(dev, "get optional vcc failed\n");
vcc = NULL;
}
dbidev = devm_drm_dev_alloc(dev, &ili9341_dbi_driver,
struct mipi_dbi_dev, drm);
if (IS_ERR(dbidev))
return PTR_ERR(dbidev);
dbi = &dbidev->dbi;
drm = &dbidev->drm;
dbi->reset = reset;
dbidev->regulator = vcc;
drm_mode_config_init(drm);
dbidev->backlight = devm_of_find_backlight(dev);
if (IS_ERR(dbidev->backlight))
return PTR_ERR(dbidev->backlight);
device_property_read_u32(dev, "rotation", &rotation);
ret = mipi_dbi_spi_init(spi, dbi, dc);
if (ret)
return ret;
ret = mipi_dbi_dev_init(dbidev, &ili9341_dbi_funcs,
&ili9341_dbi_mode, rotation);
if (ret)
return ret;
drm_mode_config_reset(drm);
ret = drm_dev_register(drm, 0);
if (ret)
return ret;
spi_set_drvdata(spi, drm);
drm_fbdev_generic_setup(drm, 0);
return 0;
}
/*
 * Probe path for the DPI variant: the panel is scanned out over parallel
 * RGB while this SPI link only carries configuration commands, so only a
 * drm_panel is registered here (no DRM device).
 *
 * Returns 0 on success or a negative error code.
 */
static int ili9341_dpi_probe(struct spi_device *spi, struct gpio_desc *dc,
			     struct gpio_desc *reset)
{
	struct device *dev = &spi->dev;
	struct ili9341 *ili;
	int ret;

	ili = devm_kzalloc(dev, sizeof(struct ili9341), GFP_KERNEL);
	if (!ili)
		return -ENOMEM;

	ili->dbi = devm_kzalloc(dev, sizeof(struct mipi_dbi),
				GFP_KERNEL);
	if (!ili->dbi)
		return -ENOMEM;

	/* All three supplies are mandatory for this panel. */
	ili->supplies[0].supply = "vci";
	ili->supplies[1].supply = "vddi";
	ili->supplies[2].supply = "vddi-led";
	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(ili->supplies),
				      ili->supplies);
	if (ret < 0) {
		dev_err(dev, "failed to get regulators: %d\n", ret);
		return ret;
	}

	ret = mipi_dbi_spi_init(spi, ili->dbi, dc);
	if (ret)
		return ret;

	spi_set_drvdata(spi, ili);
	ili->reset_gpio = reset;

	/*
	 * Every new incarnation of this display must have a unique
	 * data entry for the system in this driver.
	 */
	ili->conf = of_device_get_match_data(dev);
	if (!ili->conf) {
		dev_err(dev, "missing device configuration\n");
		return -ENODEV;
	}

	ili->max_spi_speed = ili->conf->max_spi_speed;
	drm_panel_init(&ili->panel, dev, &ili9341_dpi_funcs,
		       DRM_MODE_CONNECTOR_DPI);
	drm_panel_add(&ili->panel);

	return 0;
}
static int ili9341_probe(struct spi_device *spi)
{
struct device *dev = &spi->dev;
struct gpio_desc *dc;
struct gpio_desc *reset;
const struct spi_device_id *id = spi_get_device_id(spi);
reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
if (IS_ERR(reset))
dev_err(dev, "Failed to get gpio 'reset'\n");
dc = devm_gpiod_get_optional(dev, "dc", GPIOD_OUT_LOW);
if (IS_ERR(dc))
dev_err(dev, "Failed to get gpio 'dc'\n");
if (!strcmp(id->name, "sf-tc240t-9370-t"))
return ili9341_dpi_probe(spi, dc, reset);
else if (!strcmp(id->name, "yx240qv29"))
return ili9341_dbi_probe(spi, dc, reset);
return -1;
}
/*
 * SPI .remove hook.
 *
 * The drvdata type depends on the variant: the DPI path stored a
 * struct ili9341 *, the DBI path stored a struct drm_device *.  Both
 * interpretations are read up front and only the one matching the
 * device-id name is actually used.
 */
static void ili9341_remove(struct spi_device *spi)
{
	const struct spi_device_id *id = spi_get_device_id(spi);
	struct ili9341 *ili = spi_get_drvdata(spi);
	struct drm_device *drm = spi_get_drvdata(spi);

	if (!strcmp(id->name, "sf-tc240t-9370-t")) {
		/* DPI panel: power down and unregister the drm_panel. */
		ili9341_dpi_power_off(ili);
		drm_panel_remove(&ili->panel);
	} else if (!strcmp(id->name, "yx240qv29")) {
		/* DBI display: unplug and shut down the full DRM device. */
		drm_dev_unplug(drm);
		drm_atomic_helper_shutdown(drm);
	}
}
/* SPI .shutdown hook: only the DBI variant owns a DRM device to quiesce. */
static void ili9341_shutdown(struct spi_device *spi)
{
	const struct spi_device_id *id = spi_get_device_id(spi);

	if (strcmp(id->name, "yx240qv29") == 0)
		drm_atomic_helper_shutdown(spi_get_drvdata(spi));
}
/* Device-tree match table; .data selects the per-panel configuration. */
static const struct of_device_id ili9341_of_match[] = {
	{
		.compatible = "st,sf-tc240t-9370-t",
		.data = &ili9341_stm32f429_disco_data,
	},
	{
		/* Ported from tiny/ili9341.c for the original
		 * MIPI DBI compatible; no extra config data needed.
		 */
		.compatible = "adafruit,yx240qv29",
		.data = NULL,
	},
	{ }
};
MODULE_DEVICE_TABLE(of, ili9341_of_match);
/* SPI id table; the name string selects the DBI vs DPI probe path. */
static const struct spi_device_id ili9341_id[] = {
	{ "yx240qv29", 0 },
	{ "sf-tc240t-9370-t", 0 },
	{ }
};
MODULE_DEVICE_TABLE(spi, ili9341_id);
/* SPI driver glue and module metadata. */
static struct spi_driver ili9341_driver = {
	.probe = ili9341_probe,
	.remove = ili9341_remove,
	.shutdown = ili9341_shutdown,
	.id_table = ili9341_id,
	.driver = {
		.name = "panel-ilitek-ili9341",
		.of_match_table = ili9341_of_match,
	},
};
module_spi_driver(ili9341_driver);

MODULE_AUTHOR("Dillon Min <[email protected]>");
MODULE_DESCRIPTION("ILI9341 LCD panel driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/gpu/drm/panel/panel-ilitek-ili9341.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2019, Amarula Solutions.
* Author: Jagan Teki <[email protected]>
*/
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_modes.h>
#include <drm/drm_panel.h>
#include <linux/bitfield.h>
#include <linux/gpio/consumer.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/regulator/consumer.h>
#include <video/mipi_display.h>
/* Command2 BKx selection command */
#define DSI_CMD2BKX_SEL 0xFF
#define DSI_CMD1 0
#define DSI_CMD2 BIT(4)
#define DSI_CMD2BK_MASK GENMASK(3, 0)
/* Command2, BK0 commands */
#define DSI_CMD2_BK0_PVGAMCTRL 0xB0 /* Positive Voltage Gamma Control */
#define DSI_CMD2_BK0_NVGAMCTRL 0xB1 /* Negative Voltage Gamma Control */
#define DSI_CMD2_BK0_LNESET 0xC0 /* Display Line setting */
#define DSI_CMD2_BK0_PORCTRL 0xC1 /* Porch control */
#define DSI_CMD2_BK0_INVSEL 0xC2 /* Inversion selection, Frame Rate Control */
/* Command2, BK1 commands */
#define DSI_CMD2_BK1_VRHS 0xB0 /* Vop amplitude setting */
#define DSI_CMD2_BK1_VCOM 0xB1 /* VCOM amplitude setting */
#define DSI_CMD2_BK1_VGHSS 0xB2 /* VGH Voltage setting */
#define DSI_CMD2_BK1_TESTCMD 0xB3 /* TEST Command Setting */
#define DSI_CMD2_BK1_VGLS 0xB5 /* VGL Voltage setting */
#define DSI_CMD2_BK1_PWCTLR1 0xB7 /* Power Control 1 */
#define DSI_CMD2_BK1_PWCTLR2 0xB8 /* Power Control 2 */
#define DSI_CMD2_BK1_SPD1 0xC1 /* Source pre_drive timing set1 */
#define DSI_CMD2_BK1_SPD2 0xC2 /* Source EQ2 Setting */
#define DSI_CMD2_BK1_MIPISET1 0xD0 /* MIPI Setting 1 */
/* Command2, BK0 bytes */
#define DSI_CMD2_BK0_GAMCTRL_AJ_MASK GENMASK(7, 6)
#define DSI_CMD2_BK0_GAMCTRL_VC0_MASK GENMASK(3, 0)
#define DSI_CMD2_BK0_GAMCTRL_VC4_MASK GENMASK(5, 0)
#define DSI_CMD2_BK0_GAMCTRL_VC8_MASK GENMASK(5, 0)
#define DSI_CMD2_BK0_GAMCTRL_VC16_MASK GENMASK(4, 0)
#define DSI_CMD2_BK0_GAMCTRL_VC24_MASK GENMASK(4, 0)
#define DSI_CMD2_BK0_GAMCTRL_VC52_MASK GENMASK(3, 0)
#define DSI_CMD2_BK0_GAMCTRL_VC80_MASK GENMASK(5, 0)
#define DSI_CMD2_BK0_GAMCTRL_VC108_MASK GENMASK(3, 0)
#define DSI_CMD2_BK0_GAMCTRL_VC147_MASK GENMASK(3, 0)
#define DSI_CMD2_BK0_GAMCTRL_VC175_MASK GENMASK(5, 0)
#define DSI_CMD2_BK0_GAMCTRL_VC203_MASK GENMASK(3, 0)
#define DSI_CMD2_BK0_GAMCTRL_VC231_MASK GENMASK(4, 0)
#define DSI_CMD2_BK0_GAMCTRL_VC239_MASK GENMASK(4, 0)
#define DSI_CMD2_BK0_GAMCTRL_VC247_MASK GENMASK(5, 0)
#define DSI_CMD2_BK0_GAMCTRL_VC251_MASK GENMASK(5, 0)
#define DSI_CMD2_BK0_GAMCTRL_VC255_MASK GENMASK(4, 0)
#define DSI_CMD2_BK0_LNESET_LINE_MASK GENMASK(6, 0)
#define DSI_CMD2_BK0_LNESET_LDE_EN BIT(7)
#define DSI_CMD2_BK0_LNESET_LINEDELTA GENMASK(1, 0)
#define DSI_CMD2_BK0_PORCTRL_VBP_MASK GENMASK(7, 0)
#define DSI_CMD2_BK0_PORCTRL_VFP_MASK GENMASK(7, 0)
#define DSI_CMD2_BK0_INVSEL_ONES_MASK GENMASK(5, 4)
#define DSI_CMD2_BK0_INVSEL_NLINV_MASK GENMASK(2, 0)
#define DSI_CMD2_BK0_INVSEL_RTNI_MASK GENMASK(4, 0)
/* Command2, BK1 bytes */
#define DSI_CMD2_BK1_VRHA_MASK GENMASK(7, 0)
#define DSI_CMD2_BK1_VCOM_MASK GENMASK(7, 0)
#define DSI_CMD2_BK1_VGHSS_MASK GENMASK(3, 0)
#define DSI_CMD2_BK1_TESTCMD_VAL BIT(7)
#define DSI_CMD2_BK1_VGLS_ONES BIT(6)
#define DSI_CMD2_BK1_VGLS_MASK GENMASK(3, 0)
#define DSI_CMD2_BK1_PWRCTRL1_AP_MASK GENMASK(7, 6)
#define DSI_CMD2_BK1_PWRCTRL1_APIS_MASK GENMASK(3, 2)
#define DSI_CMD2_BK1_PWRCTRL1_APOS_MASK GENMASK(1, 0)
#define DSI_CMD2_BK1_PWRCTRL2_AVDD_MASK GENMASK(5, 4)
#define DSI_CMD2_BK1_PWRCTRL2_AVCL_MASK GENMASK(1, 0)
#define DSI_CMD2_BK1_SPD1_ONES_MASK GENMASK(6, 4)
#define DSI_CMD2_BK1_SPD1_T2D_MASK GENMASK(3, 0)
#define DSI_CMD2_BK1_SPD2_ONES_MASK GENMASK(6, 4)
#define DSI_CMD2_BK1_SPD2_T3D_MASK GENMASK(3, 0)
#define DSI_CMD2_BK1_MIPISET1_ONES BIT(7)
#define DSI_CMD2_BK1_MIPISET1_EOT_EN BIT(3)
/*
 * Constant-expression variant of FIELD_PREP(), so the per-panel gamma
 * tables below can be built in static initializers.  Presumably the
 * kernel's FIELD_PREP() was avoided here because of its compile-time
 * checks in constant-initializer context - TODO confirm.
 */
#define CFIELD_PREP(_mask, _val) \
	(((typeof(_mask))(_val) << (__builtin_ffsll(_mask) - 1)) & (_mask))
/* Operational-amplifier bias current selection used in Power Control 1. */
enum op_bias {
	OP_BIAS_OFF = 0,
	OP_BIAS_MIN,
	OP_BIAS_MIDDLE,
	OP_BIAS_MAX
};
struct st7701;

/*
 * Per-panel configuration: the fixed display mode, DSI link parameters,
 * and the electrical/timing values programmed into the Command2 BK0/BK1
 * register banks by st7701_init_sequence().
 */
struct st7701_panel_desc {
	const struct drm_display_mode *mode;
	unsigned int lanes;
	enum mipi_dsi_pixel_format format;
	unsigned int panel_sleep_delay;

	/* TFT matrix driver configuration, panel specific. */
	const u8 pv_gamma[16];	/* Positive voltage gamma control */
	const u8 nv_gamma[16];	/* Negative voltage gamma control */
	const u8 nlinv;		/* Inversion selection */
	const u32 vop_uv;	/* Vop in uV */
	const u32 vcom_uv;	/* Vcom in uV */
	const u16 vgh_mv;	/* Vgh in mV */
	const s16 vgl_mv;	/* Vgl in mV */
	const u16 avdd_mv;	/* Avdd in mV */
	const s16 avcl_mv;	/* Avcl in mV */
	const enum op_bias gamma_op_bias;
	const enum op_bias input_op_bias;
	const enum op_bias output_op_bias;
	const u16 t2d_ns;	/* T2D in ns */
	const u16 t3d_ns;	/* T3D in ns */
	const bool eot_en;

	/* GIP sequence, fully custom and undocumented. */
	void (*gip_sequence)(struct st7701 *st7701);
};
/* Driver instance state for one ST7701-based DSI panel. */
struct st7701 {
	struct drm_panel panel;
	struct mipi_dsi_device *dsi;
	const struct st7701_panel_desc *desc;

	struct regulator_bulk_data supplies[2];
	struct gpio_desc *reset;
	/* Sleep-out delay in ms: 120 ms base plus a per-panel extra. */
	unsigned int sleep_delay;
	enum drm_panel_orientation orientation;
};
/* Recover the driver state from the embedded drm_panel. */
static inline struct st7701 *panel_to_st7701(struct drm_panel *panel)
{
	return container_of(panel, struct st7701, panel);
}
/* Send a raw DCS byte sequence over the panel's DSI link. */
static inline int st7701_dsi_write(struct st7701 *st7701, const void *seq,
				   size_t len)
{
	return mipi_dsi_dcs_write_buffer(st7701->dsi, seq, len);
}
/*
 * Send the literal byte sequence @seq as a single DCS write.
 *
 * Wrapped in do { } while (0) so the macro behaves like a single
 * statement - the previous bare-brace form would mis-parse inside an
 * unbraced if/else.  The write's return value is deliberately ignored,
 * as before.
 */
#define ST7701_DSI(st7701, seq...)				\
	do {							\
		const u8 d[] = { seq };				\
		st7701_dsi_write(st7701, d, ARRAY_SIZE(d));	\
	} while (0)
/*
 * Translate the panel's VGL voltage (mV) into the non-linear VGLS
 * register code from the ST7701 datasheet.  Returns 0 (the lowest
 * magnitude, -7.06 V) when the configured voltage has no exact entry.
 *
 * The table is now static const: the old version built the array on the
 * stack on every call for no benefit.
 */
static u8 st7701_vgls_map(struct st7701 *st7701)
{
	const struct st7701_panel_desc *desc = st7701->desc;
	static const struct {
		s32 vgl;
		u8 val;
	} map[16] = {
		{ -7060, 0x0 }, { -7470, 0x1 },
		{ -7910, 0x2 }, { -8140, 0x3 },
		{ -8650, 0x4 }, { -8920, 0x5 },
		{ -9210, 0x6 }, { -9510, 0x7 },
		{ -9830, 0x8 }, { -10170, 0x9 },
		{ -10530, 0xa }, { -10910, 0xb },
		{ -11310, 0xc }, { -11730, 0xd },
		{ -12200, 0xe }, { -12690, 0xf }
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(map); i++)
		if (desc->vgl_mv == map[i].vgl)
			return map[i].val;

	return 0;
}
/*
 * Select the active command bank: Command2 bank @bkx when @cmd2 is true,
 * otherwise return to the plain Command1 set.  The 0x77/0x01/0x00/0x00
 * prefix bytes are the fixed BKxSEL key from the datasheet.
 */
static void st7701_switch_cmd_bkx(struct st7701 *st7701, bool cmd2, u8 bkx)
{
	u8 val;

	if (cmd2)
		val = DSI_CMD2 | FIELD_PREP(DSI_CMD2BK_MASK, bkx);
	else
		val = DSI_CMD1;

	ST7701_DSI(st7701, DSI_CMD2BKX_SEL, 0x77, 0x01, 0x00, 0x00, val);
}
/*
 * Program the panel's Command2 BK0 (display timing, gamma) and BK1
 * (voltages, source timing) register banks from the per-panel desc.
 * All conversions between physical units (uV/mV/ns) and register codes
 * follow the formulas given in the inline comments, which come from the
 * ST7701 datasheet.  Must run after reset/sleep-out; the caller is
 * expected to deselect Command2 afterwards.
 */
static void st7701_init_sequence(struct st7701 *st7701)
{
	const struct st7701_panel_desc *desc = st7701->desc;
	const struct drm_display_mode *mode = desc->mode;
	const u8 linecount8 = mode->vdisplay / 8;
	const u8 linecountrem2 = (mode->vdisplay % 8) / 2;

	ST7701_DSI(st7701, MIPI_DCS_SOFT_RESET, 0x00);

	/* We need to wait 5ms before sending new commands */
	msleep(5);

	ST7701_DSI(st7701, MIPI_DCS_EXIT_SLEEP_MODE, 0x00);

	msleep(st7701->sleep_delay);

	/* Command2, BK0 */
	st7701_switch_cmd_bkx(st7701, true, 0);

	mipi_dsi_dcs_write(st7701->dsi, DSI_CMD2_BK0_PVGAMCTRL,
			   desc->pv_gamma, ARRAY_SIZE(desc->pv_gamma));
	mipi_dsi_dcs_write(st7701->dsi, DSI_CMD2_BK0_NVGAMCTRL,
			   desc->nv_gamma, ARRAY_SIZE(desc->nv_gamma));
	/*
	 * Vertical line count configuration:
	 * Line[6:0]: select number of vertical lines of the TFT matrix in
	 *            multiples of 8 lines
	 * LDE_EN: enable sub-8-line granularity line count
	 * Line_delta[1:0]: add 0/2/4/6 extra lines to line count selected
	 *                  using Line[6:0]
	 *
	 * Total number of vertical lines:
	 * LN = ((Line[6:0] + 1) * 8) + (LDE_EN ? Line_delta[1:0] * 2 : 0)
	 */
	ST7701_DSI(st7701, DSI_CMD2_BK0_LNESET,
		   FIELD_PREP(DSI_CMD2_BK0_LNESET_LINE_MASK, linecount8 - 1) |
		   (linecountrem2 ? DSI_CMD2_BK0_LNESET_LDE_EN : 0),
		   FIELD_PREP(DSI_CMD2_BK0_LNESET_LINEDELTA, linecountrem2));
	/* Vertical back porch and front porch, straight from the mode. */
	ST7701_DSI(st7701, DSI_CMD2_BK0_PORCTRL,
		   FIELD_PREP(DSI_CMD2_BK0_PORCTRL_VBP_MASK,
			      mode->vtotal - mode->vsync_end),
		   FIELD_PREP(DSI_CMD2_BK0_PORCTRL_VFP_MASK,
			      mode->vsync_start - mode->vdisplay));
	/*
	 * Horizontal pixel count configuration:
	 * PCLK = 512 + (RTNI[4:0] * 16)
	 * The PCLK is number of pixel clock per line, which matches
	 * mode htotal. The minimum is 512 PCLK.
	 */
	ST7701_DSI(st7701, DSI_CMD2_BK0_INVSEL,
		   DSI_CMD2_BK0_INVSEL_ONES_MASK |
		   FIELD_PREP(DSI_CMD2_BK0_INVSEL_NLINV_MASK, desc->nlinv),
		   FIELD_PREP(DSI_CMD2_BK0_INVSEL_RTNI_MASK,
			      (clamp((u32)mode->htotal, 512U, 1008U) - 512) / 16));

	/* Command2, BK1 */
	st7701_switch_cmd_bkx(st7701, true, 1);

	/* Vop = 3.5375V + (VRHA[7:0] * 0.0125V) */
	ST7701_DSI(st7701, DSI_CMD2_BK1_VRHS,
		   FIELD_PREP(DSI_CMD2_BK1_VRHA_MASK,
			      DIV_ROUND_CLOSEST(desc->vop_uv - 3537500, 12500)));

	/* Vcom = 0.1V + (VCOM[7:0] * 0.0125V) */
	ST7701_DSI(st7701, DSI_CMD2_BK1_VCOM,
		   FIELD_PREP(DSI_CMD2_BK1_VCOM_MASK,
			      DIV_ROUND_CLOSEST(desc->vcom_uv - 100000, 12500)));

	/* Vgh = 11.5V + (VGHSS[7:0] * 0.5V) */
	ST7701_DSI(st7701, DSI_CMD2_BK1_VGHSS,
		   FIELD_PREP(DSI_CMD2_BK1_VGHSS_MASK,
			      DIV_ROUND_CLOSEST(clamp(desc->vgh_mv,
						      (u16)11500,
						      (u16)17000) - 11500,
						500)));

	ST7701_DSI(st7701, DSI_CMD2_BK1_TESTCMD, DSI_CMD2_BK1_TESTCMD_VAL);

	/* Vgl is non-linear */
	ST7701_DSI(st7701, DSI_CMD2_BK1_VGLS,
		   DSI_CMD2_BK1_VGLS_ONES |
		   FIELD_PREP(DSI_CMD2_BK1_VGLS_MASK, st7701_vgls_map(st7701)));

	ST7701_DSI(st7701, DSI_CMD2_BK1_PWCTLR1,
		   FIELD_PREP(DSI_CMD2_BK1_PWRCTRL1_AP_MASK,
			      desc->gamma_op_bias) |
		   FIELD_PREP(DSI_CMD2_BK1_PWRCTRL1_APIS_MASK,
			      desc->input_op_bias) |
		   FIELD_PREP(DSI_CMD2_BK1_PWRCTRL1_APOS_MASK,
			      desc->output_op_bias));

	/* Avdd = 6.2V + (AVDD[1:0] * 0.2V) , Avcl = -4.4V - (AVCL[1:0] * 0.2V) */
	ST7701_DSI(st7701, DSI_CMD2_BK1_PWCTLR2,
		   FIELD_PREP(DSI_CMD2_BK1_PWRCTRL2_AVDD_MASK,
			      DIV_ROUND_CLOSEST(desc->avdd_mv - 6200, 200)) |
		   FIELD_PREP(DSI_CMD2_BK1_PWRCTRL2_AVCL_MASK,
			      DIV_ROUND_CLOSEST(-4400 + desc->avcl_mv, 200)));

	/* T2D = 0.2us * T2D[3:0] */
	ST7701_DSI(st7701, DSI_CMD2_BK1_SPD1,
		   DSI_CMD2_BK1_SPD1_ONES_MASK |
		   FIELD_PREP(DSI_CMD2_BK1_SPD1_T2D_MASK,
			      DIV_ROUND_CLOSEST(desc->t2d_ns, 200)));

	/* T3D = 4us + (0.8us * T3D[3:0]) */
	ST7701_DSI(st7701, DSI_CMD2_BK1_SPD2,
		   DSI_CMD2_BK1_SPD2_ONES_MASK |
		   FIELD_PREP(DSI_CMD2_BK1_SPD2_T3D_MASK,
			      DIV_ROUND_CLOSEST(desc->t3d_ns - 4000, 800)));

	ST7701_DSI(st7701, DSI_CMD2_BK1_MIPISET1,
		   DSI_CMD2_BK1_MIPISET1_ONES |
		   (desc->eot_en ? DSI_CMD2_BK1_MIPISET1_EOT_EN : 0));
}
/* GIP (gate-in-panel) sequence for the TS8550B panel. */
static void ts8550b_gip_sequence(struct st7701 *st7701)
{
	/*
	 * ST7701_SPEC_V1.2 is unable to provide enough information about
	 * this specific command sequence, so grab the same from the vendor
	 * BSP driver.
	 */
	ST7701_DSI(st7701, 0xE0, 0x00, 0x00, 0x02);
	ST7701_DSI(st7701, 0xE1, 0x0B, 0x00, 0x0D, 0x00, 0x0C, 0x00, 0x0E,
		   0x00, 0x00, 0x44, 0x44);
	ST7701_DSI(st7701, 0xE2, 0x33, 0x33, 0x44, 0x44, 0x64, 0x00, 0x66,
		   0x00, 0x65, 0x00, 0x67, 0x00, 0x00);
	ST7701_DSI(st7701, 0xE3, 0x00, 0x00, 0x33, 0x33);
	ST7701_DSI(st7701, 0xE4, 0x44, 0x44);
	ST7701_DSI(st7701, 0xE5, 0x0C, 0x78, 0x3C, 0xA0, 0x0E, 0x78, 0x3C,
		   0xA0, 0x10, 0x78, 0x3C, 0xA0, 0x12, 0x78, 0x3C, 0xA0);
	ST7701_DSI(st7701, 0xE6, 0x00, 0x00, 0x33, 0x33);
	ST7701_DSI(st7701, 0xE7, 0x44, 0x44);
	ST7701_DSI(st7701, 0xE8, 0x0D, 0x78, 0x3C, 0xA0, 0x0F, 0x78, 0x3C,
		   0xA0, 0x11, 0x78, 0x3C, 0xA0, 0x13, 0x78, 0x3C, 0xA0);
	ST7701_DSI(st7701, 0xEB, 0x02, 0x02, 0x39, 0x39, 0xEE, 0x44, 0x00);
	ST7701_DSI(st7701, 0xEC, 0x00, 0x00);
	ST7701_DSI(st7701, 0xED, 0xFF, 0xF1, 0x04, 0x56, 0x72, 0x3F, 0xFF,
		   0xFF, 0xFF, 0xFF, 0xF3, 0x27, 0x65, 0x40, 0x1F, 0xFF);
}
/*
 * GIP sequence for the DMT028VGHMCMI-1A panel, taken from the vendor
 * init code (undocumented registers).
 *
 * NOTE(review): the sequence issues sleep-out (0x11) plus a 120 ms wait
 * twice, separated by BK3 register pokes - this mirrors the vendor
 * sequence verbatim, but looks redundant; confirm against the vendor BSP
 * before simplifying.
 */
static void dmt028vghmcmi_1a_gip_sequence(struct st7701 *st7701)
{
	ST7701_DSI(st7701, 0xEE, 0x42);
	ST7701_DSI(st7701, 0xE0, 0x00, 0x00, 0x02);

	ST7701_DSI(st7701, 0xE1,
		   0x04, 0xA0, 0x06, 0xA0,
		   0x05, 0xA0, 0x07, 0xA0,
		   0x00, 0x44, 0x44);
	ST7701_DSI(st7701, 0xE2,
		   0x00, 0x00, 0x00, 0x00,
		   0x00, 0x00, 0x00, 0x00,
		   0x00, 0x00, 0x00, 0x00);
	ST7701_DSI(st7701, 0xE3,
		   0x00, 0x00, 0x22, 0x22);
	ST7701_DSI(st7701, 0xE4, 0x44, 0x44);
	ST7701_DSI(st7701, 0xE5,
		   0x0C, 0x90, 0xA0, 0xA0,
		   0x0E, 0x92, 0xA0, 0xA0,
		   0x08, 0x8C, 0xA0, 0xA0,
		   0x0A, 0x8E, 0xA0, 0xA0);
	ST7701_DSI(st7701, 0xE6,
		   0x00, 0x00, 0x22, 0x22);
	ST7701_DSI(st7701, 0xE7, 0x44, 0x44);
	ST7701_DSI(st7701, 0xE8,
		   0x0D, 0x91, 0xA0, 0xA0,
		   0x0F, 0x93, 0xA0, 0xA0,
		   0x09, 0x8D, 0xA0, 0xA0,
		   0x0B, 0x8F, 0xA0, 0xA0);
	ST7701_DSI(st7701, 0xEB,
		   0x00, 0x00, 0xE4, 0xE4,
		   0x44, 0x00, 0x00);
	ST7701_DSI(st7701, 0xED,
		   0xFF, 0xF5, 0x47, 0x6F,
		   0x0B, 0xA1, 0xAB, 0xFF,
		   0xFF, 0xBA, 0x1A, 0xB0,
		   0xF6, 0x74, 0x5F, 0xFF);
	ST7701_DSI(st7701, 0xEF,
		   0x08, 0x08, 0x08, 0x40,
		   0x3F, 0x64);

	st7701_switch_cmd_bkx(st7701, false, 0);

	st7701_switch_cmd_bkx(st7701, true, 3);
	ST7701_DSI(st7701, 0xE6, 0x7C);
	ST7701_DSI(st7701, 0xE8, 0x00, 0x0E);

	st7701_switch_cmd_bkx(st7701, false, 0);
	ST7701_DSI(st7701, 0x11);
	msleep(120);

	st7701_switch_cmd_bkx(st7701, true, 3);
	ST7701_DSI(st7701, 0xE8, 0x00, 0x0C);
	msleep(10);
	ST7701_DSI(st7701, 0xE8, 0x00, 0x00);

	st7701_switch_cmd_bkx(st7701, false, 0);
	ST7701_DSI(st7701, 0x11);
	msleep(120);
	ST7701_DSI(st7701, 0xE8, 0x00, 0x00);

	st7701_switch_cmd_bkx(st7701, false, 0);

	/* Pixel format RGB666 (0x3A = set pixel format). */
	ST7701_DSI(st7701, 0x3A, 0x70);
}
/* GIP sequence for the KD50T048A panel. */
static void kd50t048a_gip_sequence(struct st7701 *st7701)
{
	/*
	 * ST7701_SPEC_V1.2 is unable to provide enough information about
	 * this specific command sequence, so grab the same from the vendor
	 * BSP driver.
	 */
	ST7701_DSI(st7701, 0xE0, 0x00, 0x00, 0x02);
	ST7701_DSI(st7701, 0xE1, 0x08, 0x00, 0x0A, 0x00, 0x07, 0x00, 0x09,
		   0x00, 0x00, 0x33, 0x33);
	ST7701_DSI(st7701, 0xE2, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		   0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
	ST7701_DSI(st7701, 0xE3, 0x00, 0x00, 0x33, 0x33);
	ST7701_DSI(st7701, 0xE4, 0x44, 0x44);
	ST7701_DSI(st7701, 0xE5, 0x0E, 0x60, 0xA0, 0xA0, 0x10, 0x60, 0xA0,
		   0xA0, 0x0A, 0x60, 0xA0, 0xA0, 0x0C, 0x60, 0xA0, 0xA0);
	ST7701_DSI(st7701, 0xE6, 0x00, 0x00, 0x33, 0x33);
	ST7701_DSI(st7701, 0xE7, 0x44, 0x44);
	ST7701_DSI(st7701, 0xE8, 0x0D, 0x60, 0xA0, 0xA0, 0x0F, 0x60, 0xA0,
		   0xA0, 0x09, 0x60, 0xA0, 0xA0, 0x0B, 0x60, 0xA0, 0xA0);
	ST7701_DSI(st7701, 0xEB, 0x02, 0x01, 0xE4, 0xE4, 0x44, 0x00, 0x40);
	ST7701_DSI(st7701, 0xEC, 0x02, 0x01);
	ST7701_DSI(st7701, 0xED, 0xAB, 0x89, 0x76, 0x54, 0x01, 0xFF, 0xFF,
		   0xFF, 0xFF, 0xFF, 0xFF, 0x10, 0x45, 0x67, 0x98, 0xBA);
}
/*
 * drm_panel .prepare hook: power up the panel, release reset, then send
 * the common init sequence plus any panel-specific GIP sequence.  The
 * reset-low / power / reset-high timings mirror the vendor power-on
 * sequence; presumably the delays come from the panel datasheet - TODO
 * confirm.
 */
static int st7701_prepare(struct drm_panel *panel)
{
	struct st7701 *st7701 = panel_to_st7701(panel);
	int ret;

	gpiod_set_value(st7701->reset, 0);

	ret = regulator_bulk_enable(ARRAY_SIZE(st7701->supplies),
				    st7701->supplies);
	if (ret < 0)
		return ret;
	msleep(20);

	gpiod_set_value(st7701->reset, 1);
	msleep(150);

	st7701_init_sequence(st7701);

	if (st7701->desc->gip_sequence)
		st7701->desc->gip_sequence(st7701);

	/* Disable Command2 */
	st7701_switch_cmd_bkx(st7701, false, 0);

	return 0;
}
/* drm_panel .enable hook: switch the display output on. */
static int st7701_enable(struct drm_panel *panel)
{
	ST7701_DSI(panel_to_st7701(panel), MIPI_DCS_SET_DISPLAY_ON, 0x00);

	return 0;
}
/* drm_panel .disable hook: switch the display output off. */
static int st7701_disable(struct drm_panel *panel)
{
	ST7701_DSI(panel_to_st7701(panel), MIPI_DCS_SET_DISPLAY_OFF, 0x00);

	return 0;
}
/*
 * drm_panel .unprepare hook: enter sleep, assert reset and cut power.
 */
static int st7701_unprepare(struct drm_panel *panel)
{
	struct st7701 *st7701 = panel_to_st7701(panel);

	ST7701_DSI(st7701, MIPI_DCS_ENTER_SLEEP_MODE, 0x00);

	msleep(st7701->sleep_delay);

	gpiod_set_value(st7701->reset, 0);

	/*
	 * During the resetting period the display is blanked (the blanking
	 * sequence takes at most 120 ms when reset starts in Sleep Out
	 * mode; in Sleep In mode the display simply remains blank) and
	 * then returns to the default condition for hardware reset.
	 *
	 * So we need to wait sleep_delay to make sure reset completed.
	 */
	msleep(st7701->sleep_delay);

	regulator_bulk_disable(ARRAY_SIZE(st7701->supplies), st7701->supplies);

	return 0;
}
/*
 * drm_panel .get_modes hook: publish the panel's single fixed mode and
 * its physical dimensions on the connector.
 *
 * Returns 1 (number of modes) on success, -ENOMEM if the mode could not
 * be duplicated.
 */
static int st7701_get_modes(struct drm_panel *panel,
			    struct drm_connector *connector)
{
	struct st7701 *st7701 = panel_to_st7701(panel);
	const struct drm_display_mode *desc_mode = st7701->desc->mode;
	struct drm_display_mode *mode;

	mode = drm_mode_duplicate(connector->dev, desc_mode);
	if (!mode) {
		dev_err(&st7701->dsi->dev, "failed to add mode %ux%u@%u\n",
			desc_mode->hdisplay, desc_mode->vdisplay,
			drm_mode_vrefresh(desc_mode));
		return -ENOMEM;
	}

	drm_mode_set_name(mode);
	drm_mode_probed_add(connector, mode);

	connector->display_info.width_mm = desc_mode->width_mm;
	connector->display_info.height_mm = desc_mode->height_mm;

	/*
	 * TODO: Remove once all drm drivers call
	 * drm_connector_set_orientation_from_panel()
	 */
	drm_connector_set_panel_orientation(connector, st7701->orientation);

	return 1;
}
static enum drm_panel_orientation st7701_get_orientation(struct drm_panel *panel)
{
struct st7701 *st7701 = panel_to_st7701(panel);
return st7701->orientation;
}
/* drm_panel operations shared by all ST7701 panel variants. */
static const struct drm_panel_funcs st7701_funcs = {
	.disable	= st7701_disable,
	.unprepare	= st7701_unprepare,
	.prepare	= st7701_prepare,
	.enable		= st7701_enable,
	.get_modes	= st7701_get_modes,
	.get_orientation = st7701_get_orientation,
};
/* TS8550B: 480x854 portrait panel, 69x139 mm. */
static const struct drm_display_mode ts8550b_mode = {
	.clock		= 27500,

	.hdisplay	= 480,
	.hsync_start	= 480 + 38,
	.hsync_end	= 480 + 38 + 12,
	.htotal		= 480 + 38 + 12 + 12,

	.vdisplay	= 854,
	.vsync_start	= 854 + 18,
	.vsync_end	= 854 + 18 + 8,
	.vtotal		= 854 + 18 + 8 + 4,

	.width_mm	= 69,
	.height_mm	= 139,

	.type		= DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
};
/*
 * TS8550B panel configuration.  The gamma tables are the BK0
 * PVGAMCTRL/NVGAMCTRL byte sequences, each byte assembled from the
 * datasheet bit fields via CFIELD_PREP().
 */
static const struct st7701_panel_desc ts8550b_desc = {
	.mode = &ts8550b_mode,
	.lanes = 2,
	.format = MIPI_DSI_FMT_RGB888,
	.panel_sleep_delay = 80, /* panel need extra 80ms for sleep out cmd */

	.pv_gamma = {
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC0_MASK, 0),
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC4_MASK, 0xe),
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC8_MASK, 0x15),
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC16_MASK, 0xf),
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC24_MASK, 0x11),
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC52_MASK, 0x8),
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC80_MASK, 0x8),
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC108_MASK, 0x8),
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC147_MASK, 0x8),
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC175_MASK, 0x23),
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC203_MASK, 0x4),
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC231_MASK, 0x13),
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC239_MASK, 0x12),
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC247_MASK, 0x2b),
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC251_MASK, 0x34),
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC255_MASK, 0x1f)
	},
	.nv_gamma = {
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC0_MASK, 0),
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC4_MASK, 0xe),
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0x2) |
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC8_MASK, 0x15),
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC16_MASK, 0xf),
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC24_MASK, 0x13),
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC52_MASK, 0x7),
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC80_MASK, 0x9),
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC108_MASK, 0x8),
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC147_MASK, 0x8),
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC175_MASK, 0x22),
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC203_MASK, 0x4),
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC231_MASK, 0x10),
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC239_MASK, 0xe),
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC247_MASK, 0x2c),
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC251_MASK, 0x34),
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC255_MASK, 0x1f)
	},
	.nlinv = 7,
	.vop_uv = 4400000,
	.vcom_uv = 337500,
	.vgh_mv = 15000,
	.vgl_mv = -9510,
	.avdd_mv = 6600,
	.avcl_mv = -4400,
	.gamma_op_bias = OP_BIAS_MAX,
	.input_op_bias = OP_BIAS_MIN,
	.output_op_bias = OP_BIAS_MIN,
	.t2d_ns = 1600,
	.t3d_ns = 10400,
	.eot_en = true,
	.gip_sequence = ts8550b_gip_sequence,
};
/* DMT028VGHMCMI-1A: 480x640 panel, 56x78 mm, negative syncs. */
static const struct drm_display_mode dmt028vghmcmi_1a_mode = {
	.clock		= 22325,

	.hdisplay	= 480,
	.hsync_start	= 480 + 40,
	.hsync_end	= 480 + 40 + 4,
	.htotal		= 480 + 40 + 4 + 20,

	.vdisplay	= 640,
	.vsync_start	= 640 + 2,
	.vsync_end	= 640 + 2 + 40,
	.vtotal		= 640 + 2 + 40 + 16,

	.width_mm	= 56,
	.height_mm	= 78,

	.flags		= DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
	.type		= DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
};
/*
 * DMT028VGHMCMI-1A panel configuration; gamma tables assembled from the
 * datasheet bit fields via CFIELD_PREP(), other values in physical units
 * converted to register codes by st7701_init_sequence().
 */
static const struct st7701_panel_desc dmt028vghmcmi_1a_desc = {
	.mode = &dmt028vghmcmi_1a_mode,
	.lanes = 2,
	.format = MIPI_DSI_FMT_RGB888,
	.panel_sleep_delay = 5, /* panel need extra 5ms for sleep out cmd */

	.pv_gamma = {
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC0_MASK, 0),
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC4_MASK, 0x10),
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC8_MASK, 0x17),
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC16_MASK, 0xd),
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC24_MASK, 0x11),
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC52_MASK, 0x6),
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC80_MASK, 0x5),
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC108_MASK, 0x8),
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC147_MASK, 0x7),
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC175_MASK, 0x1f),
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC203_MASK, 0x4),
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC231_MASK, 0x11),
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC239_MASK, 0xe),
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC247_MASK, 0x29),
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC251_MASK, 0x30),
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC255_MASK, 0x1f)
	},
	.nv_gamma = {
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC0_MASK, 0),
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC4_MASK, 0xd),
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC8_MASK, 0x14),
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC16_MASK, 0xe),
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC24_MASK, 0x11),
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC52_MASK, 0x6),
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC80_MASK, 0x4),
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC108_MASK, 0x8),
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC147_MASK, 0x8),
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC175_MASK, 0x20),
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC203_MASK, 0x5),
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC231_MASK, 0x13),
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC239_MASK, 0x13),
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC247_MASK, 0x26),
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC251_MASK, 0x30),
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC255_MASK, 0x1f)
	},
	.nlinv = 1,
	.vop_uv = 4800000,
	.vcom_uv = 1650000,
	.vgh_mv = 15000,
	.vgl_mv = -10170,
	.avdd_mv = 6600,
	.avcl_mv = -4400,
	.gamma_op_bias = OP_BIAS_MIDDLE,
	.input_op_bias = OP_BIAS_MIN,
	.output_op_bias = OP_BIAS_MIN,
	.t2d_ns = 1600,
	.t3d_ns = 10400,
	.eot_en = true,
	.gip_sequence = dmt028vghmcmi_1a_gip_sequence,
};
/* KD50T048A: 480x854 portrait panel, 69x139 mm. */
static const struct drm_display_mode kd50t048a_mode = {
	.clock		= 27500,

	.hdisplay	= 480,
	.hsync_start	= 480 + 2,
	.hsync_end	= 480 + 2 + 10,
	.htotal		= 480 + 2 + 10 + 2,

	.vdisplay	= 854,
	.vsync_start	= 854 + 2,
	.vsync_end	= 854 + 2 + 2,
	.vtotal		= 854 + 2 + 2 + 17,

	.width_mm	= 69,
	.height_mm	= 139,

	.type		= DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
};
/*
 * Panel description for the Elida KD50T048A (2-lane DSI, RGB888).
 * The pv_gamma/nv_gamma tables encode the positive/negative voltage
 * gamma correction registers (CMD2 BK0 GAMCTRL); values come from the
 * vendor configuration and are register-packed via CFIELD_PREP.
 */
static const struct st7701_panel_desc kd50t048a_desc = {
	.mode = &kd50t048a_mode,
	.lanes = 2,
	.format = MIPI_DSI_FMT_RGB888,
	.panel_sleep_delay = 0,
	/* Positive-voltage gamma control table */
	.pv_gamma = {
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC0_MASK, 0),
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC4_MASK, 0xd),
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC8_MASK, 0x14),
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC16_MASK, 0xd),
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC24_MASK, 0x10),
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC52_MASK, 0x5),
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC80_MASK, 0x2),
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC108_MASK, 0x8),
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC147_MASK, 0x8),
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC175_MASK, 0x1e),
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC203_MASK, 0x5),
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC231_MASK, 0x13),
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC239_MASK, 0x11),
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 2) |
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC247_MASK, 0x23),
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC251_MASK, 0x29),
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC255_MASK, 0x18)
	},
	/* Negative-voltage gamma control table */
	.nv_gamma = {
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC0_MASK, 0),
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC4_MASK, 0xc),
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC8_MASK, 0x14),
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC16_MASK, 0xc),
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC24_MASK, 0x10),
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC52_MASK, 0x5),
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC80_MASK, 0x3),
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC108_MASK, 0x8),
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC147_MASK, 0x7),
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC175_MASK, 0x20),
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC203_MASK, 0x5),
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC231_MASK, 0x13),
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC239_MASK, 0x11),
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 2) |
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC247_MASK, 0x24),
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC251_MASK, 0x29),
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
		CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC255_MASK, 0x18)
	},
	.nlinv = 1,
	.vop_uv = 4887500,	/* panel operating voltage, microvolts */
	.vcom_uv = 937500,
	.vgh_mv = 15000,
	.vgl_mv = -9510,
	.avdd_mv = 6600,
	.avcl_mv = -4400,
	.gamma_op_bias = OP_BIAS_MIDDLE,
	.input_op_bias = OP_BIAS_MIN,
	.output_op_bias = OP_BIAS_MIN,
	.t2d_ns = 1600,
	.t3d_ns = 10400,
	.eot_en = true,
	.gip_sequence = kd50t048a_gip_sequence,
};
static int st7701_dsi_probe(struct mipi_dsi_device *dsi)
{
const struct st7701_panel_desc *desc;
struct st7701 *st7701;
int ret;
st7701 = devm_kzalloc(&dsi->dev, sizeof(*st7701), GFP_KERNEL);
if (!st7701)
return -ENOMEM;
desc = of_device_get_match_data(&dsi->dev);
dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
MIPI_DSI_MODE_LPM | MIPI_DSI_CLOCK_NON_CONTINUOUS;
dsi->format = desc->format;
dsi->lanes = desc->lanes;
st7701->supplies[0].supply = "VCC";
st7701->supplies[1].supply = "IOVCC";
ret = devm_regulator_bulk_get(&dsi->dev, ARRAY_SIZE(st7701->supplies),
st7701->supplies);
if (ret < 0)
return ret;
st7701->reset = devm_gpiod_get(&dsi->dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(st7701->reset)) {
dev_err(&dsi->dev, "Couldn't get our reset GPIO\n");
return PTR_ERR(st7701->reset);
}
ret = of_drm_get_panel_orientation(dsi->dev.of_node, &st7701->orientation);
if (ret < 0)
return dev_err_probe(&dsi->dev, ret, "Failed to get orientation\n");
drm_panel_init(&st7701->panel, &dsi->dev, &st7701_funcs,
DRM_MODE_CONNECTOR_DSI);
/**
* Once sleep out has been issued, ST7701 IC required to wait 120ms
* before initiating new commands.
*
* On top of that some panels might need an extra delay to wait, so
* add panel specific delay for those cases. As now this panel specific
* delay information is referenced from those panel BSP driver, example
* ts8550b and there is no valid documentation for that.
*/
st7701->sleep_delay = 120 + desc->panel_sleep_delay;
ret = drm_panel_of_backlight(&st7701->panel);
if (ret)
return ret;
drm_panel_add(&st7701->panel);
mipi_dsi_set_drvdata(dsi, st7701);
st7701->dsi = dsi;
st7701->desc = desc;
ret = mipi_dsi_attach(dsi);
if (ret)
goto err_attach;
return 0;
err_attach:
drm_panel_remove(&st7701->panel);
return ret;
}
/* Tear down in reverse probe order: detach from the host first, then
 * unregister the panel. */
static void st7701_dsi_remove(struct mipi_dsi_device *dsi)
{
	struct st7701 *panel_ctx = mipi_dsi_get_drvdata(dsi);

	mipi_dsi_detach(dsi);
	drm_panel_remove(&panel_ctx->panel);
}
/* Supported panels; .data points at the per-panel st7701_panel_desc. */
static const struct of_device_id st7701_of_match[] = {
	{ .compatible = "densitron,dmt028vghmcmi-1a", .data = &dmt028vghmcmi_1a_desc },
	{ .compatible = "elida,kd50t048a", .data = &kd50t048a_desc },
	{ .compatible = "techstar,ts8550b", .data = &ts8550b_desc },
	{ }
};
MODULE_DEVICE_TABLE(of, st7701_of_match);
/* MIPI-DSI driver glue and module boilerplate. */
static struct mipi_dsi_driver st7701_dsi_driver = {
	.probe		= st7701_dsi_probe,
	.remove		= st7701_dsi_remove,
	.driver = {
		.name		= "st7701",
		.of_match_table	= st7701_of_match,
	},
};
module_mipi_dsi_driver(st7701_dsi_driver);
MODULE_AUTHOR("Jagan Teki <[email protected]>");
MODULE_DESCRIPTION("Sitronix ST7701 LCD Panel Driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/gpu/drm/panel/panel-sitronix-st7701.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2021, Linaro Limited
*
* Generated with linux-mdss-dsi-panel-driver-generator from vendor device tree:
* Copyright (c) 2013, The Linux Foundation. All rights reserved.
*/
#include <linux/backlight.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/regulator/consumer.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_modes.h>
#include <drm/drm_panel.h>
/* Driver state for the Sony Tulip Truly NT35521 DSI panel. */
struct truly_nt35521 {
	struct drm_panel panel;			/* embedded DRM panel */
	struct mipi_dsi_device *dsi;		/* owning DSI peripheral */
	struct regulator_bulk_data supplies[2];	/* positive5 / negative5 */
	struct gpio_desc *reset_gpio;		/* active-high reset line */
	struct gpio_desc *blen_gpio;		/* backlight-enable line */
	bool prepared;				/* power + init sequence done */
	bool enabled;				/* backlight switched on */
};
/* Map a generic drm_panel back to its enclosing driver state. */
static inline
struct truly_nt35521 *to_truly_nt35521(struct drm_panel *panel)
{
	return container_of(panel, struct truly_nt35521, panel);
}
/*
 * Hardware reset sequence, ending with the reset line released (0).
 *
 * NOTE(review): the reset GPIO is driven to 1 twice in a row with only a
 * delay in between; the second write looks redundant (or was possibly
 * meant to be the opposite level).  The sequence comes from the vendor
 * BSP — confirm against the panel datasheet before changing it.
 */
static void truly_nt35521_reset(struct truly_nt35521 *ctx)
{
	gpiod_set_value_cansleep(ctx->reset_gpio, 1);
	usleep_range(1000, 2000);
	gpiod_set_value_cansleep(ctx->reset_gpio, 1);
	usleep_range(10000, 11000);
	gpiod_set_value_cansleep(ctx->reset_gpio, 0);
	msleep(150);
}
/*
 * Send the vendor initialization sequence and switch the display on.
 *
 * The generic writes below were generated from the vendor device tree
 * (see file header); the register semantics are undocumented, so the
 * values and their exact order are reproduced verbatim.  The 0xf0
 * writes appear to select manufacturer command pages — TODO confirm
 * against the NT35521 datasheet.  Returns 0 on success or a negative
 * errno from the DCS sleep-out / display-on commands.
 */
static int truly_nt35521_on(struct truly_nt35521 *ctx)
{
	struct mipi_dsi_device *dsi = ctx->dsi;
	struct device *dev = &dsi->dev;
	int ret;

	/* Init commands are sent in low-power mode. */
	dsi->mode_flags |= MIPI_DSI_MODE_LPM;

	mipi_dsi_generic_write_seq(dsi, 0xf0, 0x55, 0xaa, 0x52, 0x08, 0x00);
	mipi_dsi_generic_write_seq(dsi, 0xff, 0xaa, 0x55, 0xa5, 0x80);
	mipi_dsi_generic_write_seq(dsi, 0x6f, 0x11, 0x00);
	mipi_dsi_generic_write_seq(dsi, 0xf7, 0x20, 0x00);
	mipi_dsi_generic_write_seq(dsi, 0x6f, 0x01);
	mipi_dsi_generic_write_seq(dsi, 0xb1, 0x21);
	mipi_dsi_generic_write_seq(dsi, 0xbd, 0x01, 0xa0, 0x10, 0x08, 0x01);
	mipi_dsi_generic_write_seq(dsi, 0xb8, 0x01, 0x02, 0x0c, 0x02);
	mipi_dsi_generic_write_seq(dsi, 0xbb, 0x11, 0x11);
	mipi_dsi_generic_write_seq(dsi, 0xbc, 0x00, 0x00);
	mipi_dsi_generic_write_seq(dsi, 0xb6, 0x02);
	mipi_dsi_generic_write_seq(dsi, 0xf0, 0x55, 0xaa, 0x52, 0x08, 0x01);
	mipi_dsi_generic_write_seq(dsi, 0xb0, 0x09, 0x09);
	mipi_dsi_generic_write_seq(dsi, 0xb1, 0x09, 0x09);
	mipi_dsi_generic_write_seq(dsi, 0xbc, 0x8c, 0x00);
	mipi_dsi_generic_write_seq(dsi, 0xbd, 0x8c, 0x00);
	mipi_dsi_generic_write_seq(dsi, 0xca, 0x00);
	mipi_dsi_generic_write_seq(dsi, 0xc0, 0x04);
	mipi_dsi_generic_write_seq(dsi, 0xbe, 0xb5);
	mipi_dsi_generic_write_seq(dsi, 0xb3, 0x35, 0x35);
	mipi_dsi_generic_write_seq(dsi, 0xb4, 0x25, 0x25);
	mipi_dsi_generic_write_seq(dsi, 0xb9, 0x43, 0x43);
	mipi_dsi_generic_write_seq(dsi, 0xba, 0x24, 0x24);
	mipi_dsi_generic_write_seq(dsi, 0xf0, 0x55, 0xaa, 0x52, 0x08, 0x02);
	mipi_dsi_generic_write_seq(dsi, 0xee, 0x03);
	mipi_dsi_generic_write_seq(dsi, 0xb0,
				   0x00, 0xb2, 0x00, 0xb3, 0x00, 0xb6, 0x00, 0xc3,
				   0x00, 0xce, 0x00, 0xe1, 0x00, 0xf3, 0x01, 0x11);
	mipi_dsi_generic_write_seq(dsi, 0xb1,
				   0x01, 0x2e, 0x01, 0x5c, 0x01, 0x82, 0x01, 0xc3,
				   0x01, 0xfe, 0x02, 0x00, 0x02, 0x37, 0x02, 0x77);
	mipi_dsi_generic_write_seq(dsi, 0xb2,
				   0x02, 0xa1, 0x02, 0xd7, 0x02, 0xfe, 0x03, 0x2c,
				   0x03, 0x4b, 0x03, 0x63, 0x03, 0x8f, 0x03, 0x90);
	mipi_dsi_generic_write_seq(dsi, 0xb3, 0x03, 0x96, 0x03, 0x98);
	mipi_dsi_generic_write_seq(dsi, 0xb4,
				   0x00, 0x81, 0x00, 0x8b, 0x00, 0x9c, 0x00, 0xa9,
				   0x00, 0xb5, 0x00, 0xcb, 0x00, 0xdf, 0x01, 0x02);
	mipi_dsi_generic_write_seq(dsi, 0xb5,
				   0x01, 0x1f, 0x01, 0x51, 0x01, 0x7a, 0x01, 0xbf,
				   0x01, 0xfa, 0x01, 0xfc, 0x02, 0x34, 0x02, 0x76);
	mipi_dsi_generic_write_seq(dsi, 0xb6,
				   0x02, 0x9f, 0x02, 0xd7, 0x02, 0xfc, 0x03, 0x2c,
				   0x03, 0x4a, 0x03, 0x63, 0x03, 0x8f, 0x03, 0xa2);
	mipi_dsi_generic_write_seq(dsi, 0xb7, 0x03, 0xb8, 0x03, 0xba);
	mipi_dsi_generic_write_seq(dsi, 0xb8,
				   0x00, 0x01, 0x00, 0x02, 0x00, 0x0e, 0x00, 0x2a,
				   0x00, 0x41, 0x00, 0x67, 0x00, 0x87, 0x00, 0xb9);
	mipi_dsi_generic_write_seq(dsi, 0xb9,
				   0x00, 0xe2, 0x01, 0x22, 0x01, 0x54, 0x01, 0xa3,
				   0x01, 0xe6, 0x01, 0xe7, 0x02, 0x24, 0x02, 0x67);
	mipi_dsi_generic_write_seq(dsi, 0xba,
				   0x02, 0x93, 0x02, 0xcd, 0x02, 0xf6, 0x03, 0x31,
				   0x03, 0x6c, 0x03, 0xe9, 0x03, 0xef, 0x03, 0xf4);
	mipi_dsi_generic_write_seq(dsi, 0xbb, 0x03, 0xf6, 0x03, 0xf7);
	mipi_dsi_generic_write_seq(dsi, 0xf0, 0x55, 0xaa, 0x52, 0x08, 0x03);
	mipi_dsi_generic_write_seq(dsi, 0xb0, 0x22, 0x00);
	mipi_dsi_generic_write_seq(dsi, 0xb1, 0x22, 0x00);
	mipi_dsi_generic_write_seq(dsi, 0xb2, 0x05, 0x00, 0x60, 0x00, 0x00);
	mipi_dsi_generic_write_seq(dsi, 0xb3, 0x05, 0x00, 0x60, 0x00, 0x00);
	mipi_dsi_generic_write_seq(dsi, 0xb4, 0x05, 0x00, 0x60, 0x00, 0x00);
	mipi_dsi_generic_write_seq(dsi, 0xb5, 0x05, 0x00, 0x60, 0x00, 0x00);
	mipi_dsi_generic_write_seq(dsi, 0xba, 0x53, 0x00, 0x60, 0x00, 0x00);
	mipi_dsi_generic_write_seq(dsi, 0xbb, 0x53, 0x00, 0x60, 0x00, 0x00);
	mipi_dsi_generic_write_seq(dsi, 0xbc, 0x53, 0x00, 0x60, 0x00, 0x00);
	mipi_dsi_generic_write_seq(dsi, 0xbd, 0x53, 0x00, 0x60, 0x00, 0x00);
	mipi_dsi_generic_write_seq(dsi, 0xc0, 0x00, 0x34, 0x00, 0x00);
	mipi_dsi_generic_write_seq(dsi, 0xc1, 0x00, 0x00, 0x34, 0x00);
	mipi_dsi_generic_write_seq(dsi, 0xc2, 0x00, 0x00, 0x34, 0x00);
	mipi_dsi_generic_write_seq(dsi, 0xc3, 0x00, 0x00, 0x34, 0x00);
	mipi_dsi_generic_write_seq(dsi, 0xc4, 0x60);
	mipi_dsi_generic_write_seq(dsi, 0xc5, 0xc0);
	mipi_dsi_generic_write_seq(dsi, 0xc6, 0x00);
	mipi_dsi_generic_write_seq(dsi, 0xc7, 0x00);
	mipi_dsi_generic_write_seq(dsi, 0xf0, 0x55, 0xaa, 0x52, 0x08, 0x05);
	mipi_dsi_generic_write_seq(dsi, 0xb0, 0x17, 0x06);
	mipi_dsi_generic_write_seq(dsi, 0xb1, 0x17, 0x06);
	mipi_dsi_generic_write_seq(dsi, 0xb2, 0x17, 0x06);
	mipi_dsi_generic_write_seq(dsi, 0xb3, 0x17, 0x06);
	mipi_dsi_generic_write_seq(dsi, 0xb4, 0x17, 0x06);
	mipi_dsi_generic_write_seq(dsi, 0xb5, 0x17, 0x06);
	mipi_dsi_generic_write_seq(dsi, 0xb6, 0x17, 0x06);
	mipi_dsi_generic_write_seq(dsi, 0xb7, 0x17, 0x06);
	mipi_dsi_generic_write_seq(dsi, 0xb8, 0x00);
	mipi_dsi_generic_write_seq(dsi, 0xb9, 0x00, 0x03);
	mipi_dsi_generic_write_seq(dsi, 0xba, 0x00, 0x00);
	mipi_dsi_generic_write_seq(dsi, 0xbb, 0x02, 0x03);
	mipi_dsi_generic_write_seq(dsi, 0xbc, 0x02, 0x03);
	mipi_dsi_generic_write_seq(dsi, 0xbd, 0x03, 0x03, 0x00, 0x03, 0x03);
	mipi_dsi_generic_write_seq(dsi, 0xc0, 0x0b);
	mipi_dsi_generic_write_seq(dsi, 0xc1, 0x09);
	mipi_dsi_generic_write_seq(dsi, 0xc2, 0xa6);
	mipi_dsi_generic_write_seq(dsi, 0xc3, 0x05);
	mipi_dsi_generic_write_seq(dsi, 0xc4, 0x00);
	mipi_dsi_generic_write_seq(dsi, 0xc5, 0x02);
	mipi_dsi_generic_write_seq(dsi, 0xc6, 0x22);
	mipi_dsi_generic_write_seq(dsi, 0xc7, 0x03);
	mipi_dsi_generic_write_seq(dsi, 0xc8, 0x07, 0x20);
	mipi_dsi_generic_write_seq(dsi, 0xc9, 0x03, 0x20);
	mipi_dsi_generic_write_seq(dsi, 0xca, 0x01, 0x60);
	mipi_dsi_generic_write_seq(dsi, 0xcb, 0x01, 0x60);
	mipi_dsi_generic_write_seq(dsi, 0xcc, 0x00, 0x00, 0x02);
	mipi_dsi_generic_write_seq(dsi, 0xcd, 0x00, 0x00, 0x02);
	mipi_dsi_generic_write_seq(dsi, 0xce, 0x00, 0x00, 0x02);
	mipi_dsi_generic_write_seq(dsi, 0xcf, 0x00, 0x00, 0x02);
	mipi_dsi_generic_write_seq(dsi, 0xd1, 0x00, 0x05, 0x01, 0x07, 0x10);
	mipi_dsi_generic_write_seq(dsi, 0xd2, 0x10, 0x05, 0x05, 0x03, 0x10);
	mipi_dsi_generic_write_seq(dsi, 0xd3, 0x20, 0x00, 0x43, 0x07, 0x10);
	mipi_dsi_generic_write_seq(dsi, 0xd4, 0x30, 0x00, 0x43, 0x07, 0x10);
	mipi_dsi_generic_write_seq(dsi, 0xd0,
				   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
	mipi_dsi_generic_write_seq(dsi, 0xd5,
				   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
				   0x00, 0x00, 0x00);
	mipi_dsi_generic_write_seq(dsi, 0xd6,
				   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
				   0x00, 0x00, 0x00);
	mipi_dsi_generic_write_seq(dsi, 0xd7,
				   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
				   0x00, 0x00, 0x00);
	mipi_dsi_generic_write_seq(dsi, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00);
	mipi_dsi_generic_write_seq(dsi, 0xe5, 0x06);
	mipi_dsi_generic_write_seq(dsi, 0xe6, 0x06);
	mipi_dsi_generic_write_seq(dsi, 0xe7, 0x00);
	mipi_dsi_generic_write_seq(dsi, 0xe8, 0x06);
	mipi_dsi_generic_write_seq(dsi, 0xe9, 0x06);
	mipi_dsi_generic_write_seq(dsi, 0xea, 0x06);
	mipi_dsi_generic_write_seq(dsi, 0xeb, 0x00);
	mipi_dsi_generic_write_seq(dsi, 0xec, 0x00);
	mipi_dsi_generic_write_seq(dsi, 0xed, 0x30);
	mipi_dsi_generic_write_seq(dsi, 0xf0, 0x55, 0xaa, 0x52, 0x08, 0x06);
	mipi_dsi_generic_write_seq(dsi, 0xb0, 0x31, 0x31);
	mipi_dsi_generic_write_seq(dsi, 0xb1, 0x31, 0x31);
	mipi_dsi_generic_write_seq(dsi, 0xb2, 0x2d, 0x2e);
	mipi_dsi_generic_write_seq(dsi, 0xb3, 0x31, 0x34);
	mipi_dsi_generic_write_seq(dsi, 0xb4, 0x29, 0x2a);
	mipi_dsi_generic_write_seq(dsi, 0xb5, 0x12, 0x10);
	mipi_dsi_generic_write_seq(dsi, 0xb6, 0x18, 0x16);
	mipi_dsi_generic_write_seq(dsi, 0xb7, 0x00, 0x02);
	mipi_dsi_generic_write_seq(dsi, 0xb8, 0x08, 0x31);
	mipi_dsi_generic_write_seq(dsi, 0xb9, 0x31, 0x31);
	mipi_dsi_generic_write_seq(dsi, 0xba, 0x31, 0x31);
	mipi_dsi_generic_write_seq(dsi, 0xbb, 0x31, 0x08);
	mipi_dsi_generic_write_seq(dsi, 0xbc, 0x03, 0x01);
	mipi_dsi_generic_write_seq(dsi, 0xbd, 0x17, 0x19);
	mipi_dsi_generic_write_seq(dsi, 0xbe, 0x11, 0x13);
	mipi_dsi_generic_write_seq(dsi, 0xbf, 0x2a, 0x29);
	mipi_dsi_generic_write_seq(dsi, 0xc0, 0x34, 0x31);
	mipi_dsi_generic_write_seq(dsi, 0xc1, 0x2e, 0x2d);
	mipi_dsi_generic_write_seq(dsi, 0xc2, 0x31, 0x31);
	mipi_dsi_generic_write_seq(dsi, 0xc3, 0x31, 0x31);
	mipi_dsi_generic_write_seq(dsi, 0xc4, 0x31, 0x31);
	mipi_dsi_generic_write_seq(dsi, 0xc5, 0x31, 0x31);
	mipi_dsi_generic_write_seq(dsi, 0xc6, 0x2e, 0x2d);
	mipi_dsi_generic_write_seq(dsi, 0xc7, 0x31, 0x34);
	mipi_dsi_generic_write_seq(dsi, 0xc8, 0x29, 0x2a);
	mipi_dsi_generic_write_seq(dsi, 0xc9, 0x17, 0x19);
	mipi_dsi_generic_write_seq(dsi, 0xca, 0x11, 0x13);
	mipi_dsi_generic_write_seq(dsi, 0xcb, 0x03, 0x01);
	mipi_dsi_generic_write_seq(dsi, 0xcc, 0x08, 0x31);
	mipi_dsi_generic_write_seq(dsi, 0xcd, 0x31, 0x31);
	mipi_dsi_generic_write_seq(dsi, 0xce, 0x31, 0x31);
	mipi_dsi_generic_write_seq(dsi, 0xcf, 0x31, 0x08);
	mipi_dsi_generic_write_seq(dsi, 0xd0, 0x00, 0x02);
	mipi_dsi_generic_write_seq(dsi, 0xd1, 0x12, 0x10);
	mipi_dsi_generic_write_seq(dsi, 0xd2, 0x18, 0x16);
	mipi_dsi_generic_write_seq(dsi, 0xd3, 0x2a, 0x29);
	mipi_dsi_generic_write_seq(dsi, 0xd4, 0x34, 0x31);
	mipi_dsi_generic_write_seq(dsi, 0xd5, 0x2d, 0x2e);
	mipi_dsi_generic_write_seq(dsi, 0xd6, 0x31, 0x31);
	mipi_dsi_generic_write_seq(dsi, 0xd7, 0x31, 0x31);
	mipi_dsi_generic_write_seq(dsi, 0xe5, 0x31, 0x31);
	mipi_dsi_generic_write_seq(dsi, 0xe6, 0x31, 0x31);
	mipi_dsi_generic_write_seq(dsi, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00);
	mipi_dsi_generic_write_seq(dsi, 0xd9, 0x00, 0x00, 0x00, 0x00, 0x00);
	mipi_dsi_generic_write_seq(dsi, 0xe7, 0x00);
	mipi_dsi_generic_write_seq(dsi, 0x6f, 0x02);
	mipi_dsi_generic_write_seq(dsi, 0xf7, 0x47);
	mipi_dsi_generic_write_seq(dsi, 0x6f, 0x0a);
	mipi_dsi_generic_write_seq(dsi, 0xf7, 0x02);
	mipi_dsi_generic_write_seq(dsi, 0x6f, 0x17);
	mipi_dsi_generic_write_seq(dsi, 0xf4, 0x60);
	mipi_dsi_generic_write_seq(dsi, 0x6f, 0x01);
	mipi_dsi_generic_write_seq(dsi, 0xf9, 0x46);
	mipi_dsi_generic_write_seq(dsi, 0x6f, 0x11);
	mipi_dsi_generic_write_seq(dsi, 0xf3, 0x01);
	mipi_dsi_generic_write_seq(dsi, 0x35, 0x00);
	mipi_dsi_generic_write_seq(dsi, 0xf0, 0x55, 0xaa, 0x52, 0x08, 0x00);
	mipi_dsi_generic_write_seq(dsi, 0xd9, 0x02, 0x03, 0x00);
	mipi_dsi_generic_write_seq(dsi, 0xf0, 0x55, 0xaa, 0x52, 0x00, 0x00);
	mipi_dsi_generic_write_seq(dsi, 0xf0, 0x55, 0xaa, 0x52, 0x08, 0x00);
	mipi_dsi_generic_write_seq(dsi, 0xb1, 0x6c, 0x21);
	mipi_dsi_generic_write_seq(dsi, 0xf0, 0x55, 0xaa, 0x52, 0x00, 0x00);
	mipi_dsi_generic_write_seq(dsi, 0x35, 0x00);

	ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
	if (ret < 0) {
		dev_err(dev, "Failed to exit sleep mode: %d\n", ret);
		return ret;
	}
	/* Sleep-out settle time before further commands. */
	msleep(120);

	ret = mipi_dsi_dcs_set_display_on(dsi);
	if (ret < 0) {
		dev_err(dev, "Failed to set display on: %d\n", ret);
		return ret;
	}
	usleep_range(1000, 2000);

	mipi_dsi_generic_write_seq(dsi, 0x53, 0x24);

	return 0;
}
/*
 * Switch the display off and put the controller to sleep.
 * Returns 0 on success or a negative errno from the DCS commands.
 */
static int truly_nt35521_off(struct truly_nt35521 *ctx)
{
	struct mipi_dsi_device *dsi = ctx->dsi;
	struct device *dev = &dsi->dev;
	int err;

	/* Power-down commands are sent in high-speed mode. */
	dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;

	err = mipi_dsi_dcs_set_display_off(dsi);
	if (err < 0) {
		dev_err(dev, "Failed to set display off: %d\n", err);
		return err;
	}
	msleep(50);

	err = mipi_dsi_dcs_enter_sleep_mode(dsi);
	if (err < 0) {
		dev_err(dev, "Failed to enter sleep mode: %d\n", err);
		return err;
	}
	msleep(150);

	return 0;
}
/*
 * Power the panel: enable supplies, run the hardware reset and send the
 * init sequence.  Returns 0 on success or a negative errno.
 */
static int truly_nt35521_prepare(struct drm_panel *panel)
{
	struct truly_nt35521 *ctx = to_truly_nt35521(panel);
	struct device *dev = &ctx->dsi->dev;
	int ret;

	if (ctx->prepared)
		return 0;

	ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
	if (ret < 0) {
		dev_err(dev, "Failed to enable regulators: %d\n", ret);
		return ret;
	}

	truly_nt35521_reset(ctx);

	ret = truly_nt35521_on(ctx);
	if (ret < 0) {
		dev_err(dev, "Failed to initialize panel: %d\n", ret);
		gpiod_set_value_cansleep(ctx->reset_gpio, 1);
		/*
		 * Fix: the original left the supplies enabled on this error
		 * path, leaking the regulator enable count.
		 */
		regulator_bulk_disable(ARRAY_SIZE(ctx->supplies),
				       ctx->supplies);
		return ret;
	}

	ctx->prepared = true;
	return 0;
}
/*
 * Power the panel down: send the off sequence (best-effort), assert
 * reset and disable the supplies.  Always returns 0.
 */
static int truly_nt35521_unprepare(struct drm_panel *panel)
{
	struct truly_nt35521 *ctx = to_truly_nt35521(panel);
	struct device *dev = &ctx->dsi->dev;
	int err;

	if (!ctx->prepared)
		return 0;

	err = truly_nt35521_off(ctx);
	if (err < 0)
		dev_err(dev, "Failed to un-initialize panel: %d\n", err);

	gpiod_set_value_cansleep(ctx->reset_gpio, 1);
	regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);

	ctx->prepared = false;
	return 0;
}
/* Turn the backlight enable line on (idempotent). */
static int truly_nt35521_enable(struct drm_panel *panel)
{
	struct truly_nt35521 *ctx = to_truly_nt35521(panel);

	if (!ctx->enabled) {
		gpiod_set_value_cansleep(ctx->blen_gpio, 1);
		ctx->enabled = true;
	}

	return 0;
}
/* Turn the backlight enable line off (idempotent). */
static int truly_nt35521_disable(struct drm_panel *panel)
{
	struct truly_nt35521 *ctx = to_truly_nt35521(panel);

	if (ctx->enabled) {
		gpiod_set_value_cansleep(ctx->blen_gpio, 0);
		ctx->enabled = false;
	}

	return 0;
}
/*
 * 720x1280@60 timing; the pixel clock is derived from the totals so it
 * stays consistent if the porches are ever tuned.
 */
static const struct drm_display_mode truly_nt35521_mode = {
	.clock = (720 + 232 + 20 + 112) * (1280 + 18 + 1 + 18) * 60 / 1000,
	.hdisplay = 720,
	.hsync_start = 720 + 232,
	.hsync_end = 720 + 232 + 20,
	.htotal = 720 + 232 + 20 + 112,
	.vdisplay = 1280,
	.vsync_start = 1280 + 18,
	.vsync_end = 1280 + 18 + 1,
	.vtotal = 1280 + 18 + 1 + 18,
	.width_mm = 65,
	.height_mm = 116,
};
static int truly_nt35521_get_modes(struct drm_panel *panel,
struct drm_connector *connector)
{
struct drm_display_mode *mode;
mode = drm_mode_duplicate(connector->dev, &truly_nt35521_mode);
if (!mode)
return -ENOMEM;
drm_mode_set_name(mode);
mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
connector->display_info.width_mm = mode->width_mm;
connector->display_info.height_mm = mode->height_mm;
drm_mode_probed_add(connector, mode);
return 1;
}
/* DRM panel operations table. */
static const struct drm_panel_funcs truly_nt35521_panel_funcs = {
	.prepare = truly_nt35521_prepare,
	.unprepare = truly_nt35521_unprepare,
	.enable = truly_nt35521_enable,
	.disable = truly_nt35521_disable,
	.get_modes = truly_nt35521_get_modes,
};
/* Push the requested backlight level to the panel via DCS. */
static int truly_nt35521_bl_update_status(struct backlight_device *bl)
{
	struct mipi_dsi_device *dsi = bl_get_data(bl);
	u16 level = backlight_get_brightness(bl);
	int err;

	err = mipi_dsi_dcs_set_display_brightness(dsi, level);
	if (err < 0)
		return err;

	return 0;
}
/* Read the current brightness back from the panel; low byte only. */
static int truly_nt35521_bl_get_brightness(struct backlight_device *bl)
{
	struct mipi_dsi_device *dsi = bl_get_data(bl);
	u16 level;
	int err;

	err = mipi_dsi_dcs_get_display_brightness(dsi, &level);
	if (err < 0)
		return err;

	return level & 0xff;
}
/* Backlight device operations (DCS-controlled brightness). */
static const struct backlight_ops truly_nt35521_bl_ops = {
	.update_status = truly_nt35521_bl_update_status,
	.get_brightness = truly_nt35521_bl_get_brightness,
};
/*
 * Register a devm-managed raw backlight device (0..255, full on by
 * default) driven through the DSI link.
 */
static struct backlight_device *
truly_nt35521_create_backlight(struct mipi_dsi_device *dsi)
{
	const struct backlight_properties props = {
		.type = BACKLIGHT_RAW,
		.brightness = 255,
		.max_brightness = 255,
	};
	struct device *dev = &dsi->dev;

	return devm_backlight_device_register(dev, dev_name(dev), dev, dsi,
					      &truly_nt35521_bl_ops, &props);
}
/*
 * Probe: acquire supplies and GPIOs, configure the DSI link, register
 * the panel and backlight, then attach to the DSI host.
 */
static int truly_nt35521_probe(struct mipi_dsi_device *dsi)
{
	struct device *dev = &dsi->dev;
	struct truly_nt35521 *nt;
	int err;

	nt = devm_kzalloc(dev, sizeof(*nt), GFP_KERNEL);
	if (!nt)
		return -ENOMEM;

	nt->supplies[0].supply = "positive5";
	nt->supplies[1].supply = "negative5";
	err = devm_regulator_bulk_get(dev, ARRAY_SIZE(nt->supplies),
				      nt->supplies);
	if (err < 0) {
		dev_err(dev, "Failed to get regulators: %d\n", err);
		return err;
	}

	/* Start with the panel held in reset and the backlight off. */
	nt->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
	if (IS_ERR(nt->reset_gpio))
		return dev_err_probe(dev, PTR_ERR(nt->reset_gpio),
				     "Failed to get reset-gpios\n");

	nt->blen_gpio = devm_gpiod_get(dev, "backlight", GPIOD_OUT_LOW);
	if (IS_ERR(nt->blen_gpio))
		return dev_err_probe(dev, PTR_ERR(nt->blen_gpio),
				     "Failed to get backlight-gpios\n");

	nt->dsi = dsi;
	mipi_dsi_set_drvdata(dsi, nt);

	dsi->lanes = 4;
	dsi->format = MIPI_DSI_FMT_RGB888;
	dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
			  MIPI_DSI_MODE_VIDEO_HSE | MIPI_DSI_MODE_NO_EOT_PACKET |
			  MIPI_DSI_CLOCK_NON_CONTINUOUS;

	drm_panel_init(&nt->panel, dev, &truly_nt35521_panel_funcs,
		       DRM_MODE_CONNECTOR_DSI);

	nt->panel.backlight = truly_nt35521_create_backlight(dsi);
	if (IS_ERR(nt->panel.backlight))
		return dev_err_probe(dev, PTR_ERR(nt->panel.backlight),
				     "Failed to create backlight\n");

	drm_panel_add(&nt->panel);

	err = mipi_dsi_attach(dsi);
	if (err < 0) {
		dev_err(dev, "Failed to attach to DSI host: %d\n", err);
		drm_panel_remove(&nt->panel);
		return err;
	}

	return 0;
}
/* Detach from the DSI host (logging failure) and drop the panel. */
static void truly_nt35521_remove(struct mipi_dsi_device *dsi)
{
	struct truly_nt35521 *nt = mipi_dsi_get_drvdata(dsi);
	int err;

	err = mipi_dsi_detach(dsi);
	if (err < 0)
		dev_err(&dsi->dev, "Failed to detach from DSI host: %d\n", err);

	drm_panel_remove(&nt->panel);
}
/* Device-tree match table. */
static const struct of_device_id truly_nt35521_of_match[] = {
	{ .compatible = "sony,tulip-truly-nt35521" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, truly_nt35521_of_match);
/* MIPI-DSI driver glue and module boilerplate. */
static struct mipi_dsi_driver truly_nt35521_driver = {
	.probe = truly_nt35521_probe,
	.remove = truly_nt35521_remove,
	.driver = {
		.name = "panel-truly-nt35521",
		.of_match_table = truly_nt35521_of_match,
	},
};
module_mipi_dsi_driver(truly_nt35521_driver);
MODULE_AUTHOR("Shawn Guo <[email protected]>");
MODULE_DESCRIPTION("DRM driver for Sony Tulip Truly NT35521 panel");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/gpu/drm/panel/panel-sony-tulip-truly-nt35521.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2019 Texas Instruments Incorporated - https://www.ti.com
* Author: Peter Ujfalusi <[email protected]>
*/
#include <linux/module.h>
#include <linux/of.h>
#include <linux/regulator/consumer.h>
#include <drm/drm_crtc.h>
#include <drm/drm_device.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_panel.h>
#include <video/mipi_display.h>
/* Driver state for the OSD101T2587-53TS DSI panel. */
struct osd101t2587_panel {
	struct drm_panel base;			/* embedded DRM panel */
	struct mipi_dsi_device *dsi;		/* owning DSI peripheral */
	struct regulator *supply;		/* "power" supply */
	bool prepared;				/* supply enabled */
	bool enabled;				/* peripheral turned on */
	const struct drm_display_mode *default_mode;	/* from OF match data */
};
/* Map a generic drm_panel back to its enclosing driver state. */
static inline struct osd101t2587_panel *ti_osd_panel(struct drm_panel *panel)
{
	return container_of(panel, struct osd101t2587_panel, base);
}
/*
 * Shut the DSI peripheral down.  The enabled flag is cleared even if
 * the shutdown command fails, matching the original behaviour.
 */
static int osd101t2587_panel_disable(struct drm_panel *panel)
{
	struct osd101t2587_panel *p = ti_osd_panel(panel);
	int err;

	if (!p->enabled)
		return 0;

	err = mipi_dsi_shutdown_peripheral(p->dsi);
	p->enabled = false;

	return err;
}
/* Cut panel power (idempotent).  Always returns 0. */
static int osd101t2587_panel_unprepare(struct drm_panel *panel)
{
	struct osd101t2587_panel *p = ti_osd_panel(panel);

	if (p->prepared) {
		regulator_disable(p->supply);
		p->prepared = false;
	}

	return 0;
}
/* Apply panel power (idempotent).  Returns the regulator result. */
static int osd101t2587_panel_prepare(struct drm_panel *panel)
{
	struct osd101t2587_panel *p = ti_osd_panel(panel);
	int err;

	if (p->prepared)
		return 0;

	err = regulator_enable(p->supply);
	if (err == 0)
		p->prepared = true;

	return err;
}
/* Turn the DSI peripheral on (idempotent). */
static int osd101t2587_panel_enable(struct drm_panel *panel)
{
	struct osd101t2587_panel *p = ti_osd_panel(panel);
	int err;

	if (p->enabled)
		return 0;

	err = mipi_dsi_turn_on_peripheral(p->dsi);
	if (err)
		return err;

	p->enabled = true;
	return 0;
}
/*
 * 1920x1200 timing for the OSD101T2587-53TS, 164.4 MHz pixel clock,
 * negative h/v sync polarity.
 */
static const struct drm_display_mode default_mode_osd101t2587 = {
	.clock = 164400,
	.hdisplay = 1920,
	.hsync_start = 1920 + 152,
	.hsync_end = 1920 + 152 + 52,
	.htotal = 1920 + 152 + 52 + 20,
	.vdisplay = 1200,
	.vsync_start = 1200 + 24,
	.vsync_end = 1200 + 24 + 6,
	.vtotal = 1200 + 24 + 6 + 48,
	.flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
};
/*
 * Report the panel's single fixed mode to DRM.  Returns the number of
 * modes added (1) or -ENOMEM if the duplicate allocation fails.
 */
static int osd101t2587_panel_get_modes(struct drm_panel *panel,
				       struct drm_connector *connector)
{
	struct osd101t2587_panel *osd101t2587 = ti_osd_panel(panel);
	struct drm_display_mode *mode;

	mode = drm_mode_duplicate(connector->dev, osd101t2587->default_mode);
	if (!mode) {
		/* Fix: original format string had a stray 'x' ("%ux%ux@%u"). */
		dev_err(panel->dev, "failed to add mode %ux%u@%u\n",
			osd101t2587->default_mode->hdisplay,
			osd101t2587->default_mode->vdisplay,
			drm_mode_vrefresh(osd101t2587->default_mode));
		return -ENOMEM;
	}

	drm_mode_set_name(mode);
	drm_mode_probed_add(connector, mode);

	/* Physical size of the 10.1" active area. */
	connector->display_info.width_mm = 217;
	connector->display_info.height_mm = 136;

	return 1;
}
/* DRM panel operations table. */
static const struct drm_panel_funcs osd101t2587_panel_funcs = {
	.disable = osd101t2587_panel_disable,
	.unprepare = osd101t2587_panel_unprepare,
	.prepare = osd101t2587_panel_prepare,
	.enable = osd101t2587_panel_enable,
	.get_modes = osd101t2587_panel_get_modes,
};
/* Device-tree match table; .data carries the panel's display mode. */
static const struct of_device_id osd101t2587_of_match[] = {
	{
		.compatible = "osddisplays,osd101t2587-53ts",
		.data = &default_mode_osd101t2587,
	}, {
		/* sentinel */
	}
};
MODULE_DEVICE_TABLE(of, osd101t2587_of_match);
/*
 * Acquire the power supply, hook up the backlight and register the
 * DRM panel.  Returns 0 on success or a negative errno.
 */
static int osd101t2587_panel_add(struct osd101t2587_panel *osd101t2587)
{
	struct device *dev = &osd101t2587->dsi->dev;
	int err;

	osd101t2587->supply = devm_regulator_get(dev, "power");
	if (IS_ERR(osd101t2587->supply))
		return PTR_ERR(osd101t2587->supply);

	drm_panel_init(&osd101t2587->base, dev,
		       &osd101t2587_panel_funcs, DRM_MODE_CONNECTOR_DSI);

	err = drm_panel_of_backlight(&osd101t2587->base);
	if (err)
		return err;

	drm_panel_add(&osd101t2587->base);

	return 0;
}
/*
 * Probe: match against the OF table, configure the DSI link, register
 * the panel and attach to the DSI host.
 */
static int osd101t2587_panel_probe(struct mipi_dsi_device *dsi)
{
	const struct of_device_id *id;
	struct osd101t2587_panel *osd;
	int err;

	id = of_match_node(osd101t2587_of_match, dsi->dev.of_node);
	if (!id)
		return -ENODEV;

	dsi->lanes = 4;
	dsi->format = MIPI_DSI_FMT_RGB888;
	dsi->mode_flags = MIPI_DSI_MODE_VIDEO |
			  MIPI_DSI_MODE_VIDEO_BURST |
			  MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
			  MIPI_DSI_MODE_NO_EOT_PACKET;

	osd = devm_kzalloc(&dsi->dev, sizeof(*osd), GFP_KERNEL);
	if (!osd)
		return -ENOMEM;

	mipi_dsi_set_drvdata(dsi, osd);
	osd->dsi = dsi;
	osd->default_mode = id->data;

	err = osd101t2587_panel_add(osd);
	if (err < 0)
		return err;

	err = mipi_dsi_attach(dsi);
	if (err)
		drm_panel_remove(&osd->base);

	return err;
}
/* Disable/unprepare the panel, unregister it, then detach from DSI. */
static void osd101t2587_panel_remove(struct mipi_dsi_device *dsi)
{
	struct osd101t2587_panel *osd = mipi_dsi_get_drvdata(dsi);
	int err;

	err = drm_panel_disable(&osd->base);
	if (err < 0)
		dev_warn(&dsi->dev, "failed to disable panel: %d\n", err);

	drm_panel_unprepare(&osd->base);
	drm_panel_remove(&osd->base);

	err = mipi_dsi_detach(dsi);
	if (err < 0)
		dev_err(&dsi->dev, "failed to detach from DSI host: %d\n", err);
}
/* Best-effort power-down at system shutdown. */
static void osd101t2587_panel_shutdown(struct mipi_dsi_device *dsi)
{
	struct osd101t2587_panel *osd = mipi_dsi_get_drvdata(dsi);

	drm_panel_disable(&osd->base);
	drm_panel_unprepare(&osd->base);
}
/* MIPI-DSI driver glue and module boilerplate. */
static struct mipi_dsi_driver osd101t2587_panel_driver = {
	.driver = {
		.name = "panel-osd-osd101t2587-53ts",
		.of_match_table = osd101t2587_of_match,
	},
	.probe = osd101t2587_panel_probe,
	.remove = osd101t2587_panel_remove,
	.shutdown = osd101t2587_panel_shutdown,
};
module_mipi_dsi_driver(osd101t2587_panel_driver);
MODULE_AUTHOR("Peter Ujfalusi <[email protected]>");
MODULE_DESCRIPTION("OSD101T2587-53TS DSI panel");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/gpu/drm/panel/panel-osd-osd101t2587-53ts.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2018 MediaTek Inc.
* Author: Jitao Shi <[email protected]>
*/
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/regulator/consumer.h>
#include <drm/drm_connector.h>
#include <drm/drm_crtc.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_panel.h>
#include <video/mipi_display.h>
/* Static description of one supported panel variant. */
struct panel_desc {
	const struct drm_display_mode *modes;	/* fixed display timing */
	unsigned int bpc;			/* bits per color component */
	/**
	 * @width_mm: width of the panel's active display area
	 * @height_mm: height of the panel's active display area
	 */
	struct {
		unsigned int width_mm;
		unsigned int height_mm;
	} size;
	unsigned long mode_flags;		/* MIPI_DSI_MODE_* link flags */
	enum mipi_dsi_pixel_format format;	/* DSI pixel format */
	const struct panel_init_cmd *init_cmds;	/* vendor init sequence */
	unsigned int lanes;			/* number of DSI data lanes */
	bool discharge_on_disable;
	bool lp11_before_reset;			/* need LP-11 before reset */
};
/* Per-device driver state for a BOE panel instance. */
struct boe_panel {
	struct drm_panel base;			/* embedded DRM panel */
	struct mipi_dsi_device *dsi;		/* owning DSI peripheral */
	const struct panel_desc *desc;		/* matched variant description */
	enum drm_panel_orientation orientation;
	struct regulator *pp3300;		/* 3.3V supply */
	struct regulator *pp1800;		/* 1.8V supply */
	struct regulator *avee;			/* negative analog supply */
	struct regulator *avdd;			/* positive analog supply */
	struct gpio_desc *enable_gpio;
	bool prepared;				/* powered and initialized */
};
/* Kinds of entries in a panel_init_cmd sequence. */
enum dsi_cmd_type {
	INIT_DCS_CMD,	/* send the payload as a DCS write */
	DELAY_CMD,	/* sleep instead of sending data */
};
/* One step of a vendor init sequence: a DCS write or a delay. */
struct panel_init_cmd {
	enum dsi_cmd_type type;
	size_t len;		/* number of bytes in @data */
	const char *data;	/* command byte followed by parameters */
};
#define _INIT_DCS_CMD(...) { \
.type = INIT_DCS_CMD, \
.len = sizeof((char[]){__VA_ARGS__}), \
.data = (char[]){__VA_ARGS__} }
#define _INIT_DELAY_CMD(...) { \
.type = DELAY_CMD,\
.len = sizeof((char[]){__VA_ARGS__}), \
.data = (char[]){__VA_ARGS__} }
/*
 * Vendor-supplied init sequence for the BOE TV110C9M panel.
 *
 * The 0xFF writes appear to switch between vendor register pages (not
 * publicly documented — do not edit values without vendor input).  Only
 * the trailing 0x11/0x29 commands are standard MIPI DCS.  The table is
 * terminated by an empty entry.
 */
static const struct panel_init_cmd boe_tv110c9m_init_cmd[] = {
	_INIT_DCS_CMD(0xFF, 0x20),
	_INIT_DCS_CMD(0xFB, 0x01),
	_INIT_DCS_CMD(0x05, 0xD9),
	_INIT_DCS_CMD(0x07, 0x78),
	_INIT_DCS_CMD(0x08, 0x5A),
	_INIT_DCS_CMD(0x0D, 0x63),
	_INIT_DCS_CMD(0x0E, 0x91),
	_INIT_DCS_CMD(0x0F, 0x73),
	_INIT_DCS_CMD(0x95, 0xE6),
	_INIT_DCS_CMD(0x96, 0xF0),
	_INIT_DCS_CMD(0x30, 0x00),
	_INIT_DCS_CMD(0x6D, 0x66),
	_INIT_DCS_CMD(0x75, 0xA2),
	_INIT_DCS_CMD(0x77, 0x3B),
	_INIT_DCS_CMD(0xB0, 0x00, 0x08, 0x00, 0x23, 0x00, 0x4D, 0x00, 0x6D, 0x00, 0x89, 0x00, 0xA1, 0x00, 0xB6, 0x00, 0xC9),
	_INIT_DCS_CMD(0xB1, 0x00, 0xDA, 0x01, 0x13, 0x01, 0x3C, 0x01, 0x7E, 0x01, 0xAB, 0x01, 0xF7, 0x02, 0x2F, 0x02, 0x31),
	_INIT_DCS_CMD(0xB2, 0x02, 0x67, 0x02, 0xA6, 0x02, 0xD1, 0x03, 0x08, 0x03, 0x2E, 0x03, 0x5B, 0x03, 0x6B, 0x03, 0x7B),
	_INIT_DCS_CMD(0xB3, 0x03, 0x8E, 0x03, 0xA2, 0x03, 0xB7, 0x03, 0xE7, 0x03, 0xFD, 0x03, 0xFF),
	_INIT_DCS_CMD(0xB4, 0x00, 0x08, 0x00, 0x23, 0x00, 0x4D, 0x00, 0x6D, 0x00, 0x89, 0x00, 0xA1, 0x00, 0xB6, 0x00, 0xC9),
	_INIT_DCS_CMD(0xB5, 0x00, 0xDA, 0x01, 0x13, 0x01, 0x3C, 0x01, 0x7E, 0x01, 0xAB, 0x01, 0xF7, 0x02, 0x2F, 0x02, 0x31),
	_INIT_DCS_CMD(0xB6, 0x02, 0x67, 0x02, 0xA6, 0x02, 0xD1, 0x03, 0x08, 0x03, 0x2E, 0x03, 0x5B, 0x03, 0x6B, 0x03, 0x7B),
	_INIT_DCS_CMD(0xB7, 0x03, 0x8E, 0x03, 0xA2, 0x03, 0xB7, 0x03, 0xE7, 0x03, 0xFD, 0x03, 0xFF),
	_INIT_DCS_CMD(0xB8, 0x00, 0x08, 0x00, 0x23, 0x00, 0x4D, 0x00, 0x6D, 0x00, 0x89, 0x00, 0xA1, 0x00, 0xB6, 0x00, 0xC9),
	_INIT_DCS_CMD(0xB9, 0x00, 0xDA, 0x01, 0x13, 0x01, 0x3C, 0x01, 0x7E, 0x01, 0xAB, 0x01, 0xF7, 0x02, 0x2F, 0x02, 0x31),
	_INIT_DCS_CMD(0xBA, 0x02, 0x67, 0x02, 0xA6, 0x02, 0xD1, 0x03, 0x08, 0x03, 0x2E, 0x03, 0x5B, 0x03, 0x6B, 0x03, 0x7B),
	_INIT_DCS_CMD(0xBB, 0x03, 0x8E, 0x03, 0xA2, 0x03, 0xB7, 0x03, 0xE7, 0x03, 0xFD, 0x03, 0xFF),
	_INIT_DCS_CMD(0xFF, 0x21),
	_INIT_DCS_CMD(0xFB, 0x01),
	_INIT_DCS_CMD(0xB0, 0x00, 0x00, 0x00, 0x1B, 0x00, 0x45, 0x00, 0x65, 0x00, 0x81, 0x00, 0x99, 0x00, 0xAE, 0x00, 0xC1),
	_INIT_DCS_CMD(0xB1, 0x00, 0xD2, 0x01, 0x0B, 0x01, 0x34, 0x01, 0x76, 0x01, 0xA3, 0x01, 0xEF, 0x02, 0x27, 0x02, 0x29),
	_INIT_DCS_CMD(0xB2, 0x02, 0x5F, 0x02, 0x9E, 0x02, 0xC9, 0x03, 0x00, 0x03, 0x26, 0x03, 0x53, 0x03, 0x63, 0x03, 0x73),
	_INIT_DCS_CMD(0xB3, 0x03, 0x86, 0x03, 0x9A, 0x03, 0xAF, 0x03, 0xDF, 0x03, 0xF5, 0x03, 0xE0),
	_INIT_DCS_CMD(0xB4, 0x00, 0x00, 0x00, 0x1B, 0x00, 0x45, 0x00, 0x65, 0x00, 0x81, 0x00, 0x99, 0x00, 0xAE, 0x00, 0xC1),
	_INIT_DCS_CMD(0xB5, 0x00, 0xD2, 0x01, 0x0B, 0x01, 0x34, 0x01, 0x76, 0x01, 0xA3, 0x01, 0xEF, 0x02, 0x27, 0x02, 0x29),
	_INIT_DCS_CMD(0xB6, 0x02, 0x5F, 0x02, 0x9E, 0x02, 0xC9, 0x03, 0x00, 0x03, 0x26, 0x03, 0x53, 0x03, 0x63, 0x03, 0x73),
	_INIT_DCS_CMD(0xB7, 0x03, 0x86, 0x03, 0x9A, 0x03, 0xAF, 0x03, 0xDF, 0x03, 0xF5, 0x03, 0xE0),
	_INIT_DCS_CMD(0xB8, 0x00, 0x00, 0x00, 0x1B, 0x00, 0x45, 0x00, 0x65, 0x00, 0x81, 0x00, 0x99, 0x00, 0xAE, 0x00, 0xC1),
	_INIT_DCS_CMD(0xB9, 0x00, 0xD2, 0x01, 0x0B, 0x01, 0x34, 0x01, 0x76, 0x01, 0xA3, 0x01, 0xEF, 0x02, 0x27, 0x02, 0x29),
	_INIT_DCS_CMD(0xBA, 0x02, 0x5F, 0x02, 0x9E, 0x02, 0xC9, 0x03, 0x00, 0x03, 0x26, 0x03, 0x53, 0x03, 0x63, 0x03, 0x73),
	_INIT_DCS_CMD(0xBB, 0x03, 0x86, 0x03, 0x9A, 0x03, 0xAF, 0x03, 0xDF, 0x03, 0xF5, 0x03, 0xE0),
	_INIT_DCS_CMD(0xFF, 0x24),
	_INIT_DCS_CMD(0xFB, 0x01),
	_INIT_DCS_CMD(0x00, 0x00),
	_INIT_DCS_CMD(0x01, 0x00),
	_INIT_DCS_CMD(0x02, 0x1C),
	_INIT_DCS_CMD(0x03, 0x1C),
	_INIT_DCS_CMD(0x04, 0x1D),
	_INIT_DCS_CMD(0x05, 0x1D),
	_INIT_DCS_CMD(0x06, 0x04),
	_INIT_DCS_CMD(0x07, 0x04),
	_INIT_DCS_CMD(0x08, 0x0F),
	_INIT_DCS_CMD(0x09, 0x0F),
	_INIT_DCS_CMD(0x0A, 0x0E),
	_INIT_DCS_CMD(0x0B, 0x0E),
	_INIT_DCS_CMD(0x0C, 0x0D),
	_INIT_DCS_CMD(0x0D, 0x0D),
	_INIT_DCS_CMD(0x0E, 0x0C),
	_INIT_DCS_CMD(0x0F, 0x0C),
	_INIT_DCS_CMD(0x10, 0x08),
	_INIT_DCS_CMD(0x11, 0x08),
	_INIT_DCS_CMD(0x12, 0x00),
	_INIT_DCS_CMD(0x13, 0x00),
	_INIT_DCS_CMD(0x14, 0x00),
	_INIT_DCS_CMD(0x15, 0x00),
	_INIT_DCS_CMD(0x16, 0x00),
	_INIT_DCS_CMD(0x17, 0x00),
	_INIT_DCS_CMD(0x18, 0x1C),
	_INIT_DCS_CMD(0x19, 0x1C),
	_INIT_DCS_CMD(0x1A, 0x1D),
	_INIT_DCS_CMD(0x1B, 0x1D),
	_INIT_DCS_CMD(0x1C, 0x04),
	_INIT_DCS_CMD(0x1D, 0x04),
	_INIT_DCS_CMD(0x1E, 0x0F),
	_INIT_DCS_CMD(0x1F, 0x0F),
	_INIT_DCS_CMD(0x20, 0x0E),
	_INIT_DCS_CMD(0x21, 0x0E),
	_INIT_DCS_CMD(0x22, 0x0D),
	_INIT_DCS_CMD(0x23, 0x0D),
	_INIT_DCS_CMD(0x24, 0x0C),
	_INIT_DCS_CMD(0x25, 0x0C),
	_INIT_DCS_CMD(0x26, 0x08),
	_INIT_DCS_CMD(0x27, 0x08),
	_INIT_DCS_CMD(0x28, 0x00),
	_INIT_DCS_CMD(0x29, 0x00),
	_INIT_DCS_CMD(0x2A, 0x00),
	_INIT_DCS_CMD(0x2B, 0x00),
	_INIT_DCS_CMD(0x2D, 0x20),
	_INIT_DCS_CMD(0x2F, 0x0A),
	_INIT_DCS_CMD(0x30, 0x44),
	_INIT_DCS_CMD(0x33, 0x0C),
	_INIT_DCS_CMD(0x34, 0x32),
	_INIT_DCS_CMD(0x37, 0x44),
	_INIT_DCS_CMD(0x38, 0x40),
	_INIT_DCS_CMD(0x39, 0x00),
	_INIT_DCS_CMD(0x3A, 0x5D),
	_INIT_DCS_CMD(0x3B, 0x60),
	_INIT_DCS_CMD(0x3D, 0x42),
	_INIT_DCS_CMD(0x3F, 0x06),
	_INIT_DCS_CMD(0x43, 0x06),
	_INIT_DCS_CMD(0x47, 0x66),
	_INIT_DCS_CMD(0x4A, 0x5D),
	_INIT_DCS_CMD(0x4B, 0x60),
	_INIT_DCS_CMD(0x4C, 0x91),
	_INIT_DCS_CMD(0x4D, 0x21),
	_INIT_DCS_CMD(0x4E, 0x43),
	_INIT_DCS_CMD(0x51, 0x12),
	_INIT_DCS_CMD(0x52, 0x34),
	_INIT_DCS_CMD(0x55, 0x82, 0x02),
	_INIT_DCS_CMD(0x56, 0x04),
	_INIT_DCS_CMD(0x58, 0x21),
	_INIT_DCS_CMD(0x59, 0x30),
	_INIT_DCS_CMD(0x5A, 0x60),
	_INIT_DCS_CMD(0x5B, 0x50),
	_INIT_DCS_CMD(0x5E, 0x00, 0x06),
	_INIT_DCS_CMD(0x5F, 0x00),
	_INIT_DCS_CMD(0x65, 0x82),
	_INIT_DCS_CMD(0x7E, 0x20),
	_INIT_DCS_CMD(0x7F, 0x3C),
	_INIT_DCS_CMD(0x82, 0x04),
	_INIT_DCS_CMD(0x97, 0xC0),
	_INIT_DCS_CMD(0xB6, 0x05, 0x00, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x05, 0x00, 0x00),
	_INIT_DCS_CMD(0x91, 0x44),
	_INIT_DCS_CMD(0x92, 0xA9),
	_INIT_DCS_CMD(0x93, 0x1A),
	_INIT_DCS_CMD(0x94, 0x96),
	_INIT_DCS_CMD(0xD7, 0x55),
	_INIT_DCS_CMD(0xDA, 0x0A),
	_INIT_DCS_CMD(0xDE, 0x08),
	_INIT_DCS_CMD(0xDB, 0x05),
	_INIT_DCS_CMD(0xDC, 0xA9),
	_INIT_DCS_CMD(0xDD, 0x22),
	_INIT_DCS_CMD(0xDF, 0x05),
	_INIT_DCS_CMD(0xE0, 0xA9),
	_INIT_DCS_CMD(0xE1, 0x05),
	_INIT_DCS_CMD(0xE2, 0xA9),
	_INIT_DCS_CMD(0xE3, 0x05),
	_INIT_DCS_CMD(0xE4, 0xA9),
	_INIT_DCS_CMD(0xE5, 0x05),
	_INIT_DCS_CMD(0xE6, 0xA9),
	_INIT_DCS_CMD(0x5C, 0x00),
	_INIT_DCS_CMD(0x5D, 0x00),
	_INIT_DCS_CMD(0x8D, 0x00),
	_INIT_DCS_CMD(0x8E, 0x00),
	_INIT_DCS_CMD(0xB5, 0x90),
	_INIT_DCS_CMD(0xFF, 0x25),
	_INIT_DCS_CMD(0xFB, 0x01),
	_INIT_DCS_CMD(0x05, 0x00),
	_INIT_DCS_CMD(0x19, 0x07),
	_INIT_DCS_CMD(0x1F, 0x60),
	_INIT_DCS_CMD(0x20, 0x50),
	_INIT_DCS_CMD(0x26, 0x60),
	_INIT_DCS_CMD(0x27, 0x50),
	_INIT_DCS_CMD(0x33, 0x60),
	_INIT_DCS_CMD(0x34, 0x50),
	_INIT_DCS_CMD(0x3F, 0xE0),
	_INIT_DCS_CMD(0x40, 0x00),
	_INIT_DCS_CMD(0x44, 0x00),
	_INIT_DCS_CMD(0x45, 0x40),
	_INIT_DCS_CMD(0x48, 0x60),
	_INIT_DCS_CMD(0x49, 0x50),
	_INIT_DCS_CMD(0x5B, 0x00),
	_INIT_DCS_CMD(0x5C, 0x00),
	_INIT_DCS_CMD(0x5D, 0x00),
	_INIT_DCS_CMD(0x5E, 0xD0),
	_INIT_DCS_CMD(0x61, 0x60),
	_INIT_DCS_CMD(0x62, 0x50),
	_INIT_DCS_CMD(0xF1, 0x10),
	_INIT_DCS_CMD(0xFF, 0x2A),
	_INIT_DCS_CMD(0xFB, 0x01),
	_INIT_DCS_CMD(0x64, 0x16),
	_INIT_DCS_CMD(0x67, 0x16),
	_INIT_DCS_CMD(0x6A, 0x16),
	_INIT_DCS_CMD(0x70, 0x30),
	_INIT_DCS_CMD(0xA2, 0xF3),
	_INIT_DCS_CMD(0xA3, 0xFF),
	_INIT_DCS_CMD(0xA4, 0xFF),
	_INIT_DCS_CMD(0xA5, 0xFF),
	_INIT_DCS_CMD(0xD6, 0x08),
	_INIT_DCS_CMD(0xFF, 0x26),
	_INIT_DCS_CMD(0xFB, 0x01),
	_INIT_DCS_CMD(0x00, 0xA1),
	_INIT_DCS_CMD(0x02, 0x31),
	_INIT_DCS_CMD(0x04, 0x28),
	_INIT_DCS_CMD(0x06, 0x30),
	_INIT_DCS_CMD(0x0C, 0x16),
	_INIT_DCS_CMD(0x0D, 0x0D),
	_INIT_DCS_CMD(0x0F, 0x00),
	_INIT_DCS_CMD(0x11, 0x00),
	_INIT_DCS_CMD(0x12, 0x50),
	_INIT_DCS_CMD(0x13, 0x56),
	_INIT_DCS_CMD(0x14, 0x57),
	_INIT_DCS_CMD(0x15, 0x00),
	_INIT_DCS_CMD(0x16, 0x10),
	_INIT_DCS_CMD(0x17, 0xA0),
	_INIT_DCS_CMD(0x18, 0x86),
	_INIT_DCS_CMD(0x19, 0x0D),
	_INIT_DCS_CMD(0x1A, 0x7F),
	_INIT_DCS_CMD(0x1B, 0x0C),
	_INIT_DCS_CMD(0x1C, 0xBF),
	_INIT_DCS_CMD(0x22, 0x00),
	_INIT_DCS_CMD(0x23, 0x00),
	_INIT_DCS_CMD(0x2A, 0x0D),
	_INIT_DCS_CMD(0x2B, 0x7F),
	_INIT_DCS_CMD(0x1D, 0x00),
	_INIT_DCS_CMD(0x1E, 0x65),
	_INIT_DCS_CMD(0x1F, 0x65),
	_INIT_DCS_CMD(0x24, 0x00),
	_INIT_DCS_CMD(0x25, 0x65),
	_INIT_DCS_CMD(0x2F, 0x05),
	_INIT_DCS_CMD(0x30, 0x65),
	_INIT_DCS_CMD(0x31, 0x05),
	_INIT_DCS_CMD(0x32, 0x7D),
	_INIT_DCS_CMD(0x39, 0x00),
	_INIT_DCS_CMD(0x3A, 0x65),
	_INIT_DCS_CMD(0x20, 0x01),
	_INIT_DCS_CMD(0x33, 0x11),
	_INIT_DCS_CMD(0x34, 0x78),
	_INIT_DCS_CMD(0x35, 0x16),
	_INIT_DCS_CMD(0xC8, 0x04),
	_INIT_DCS_CMD(0xC9, 0x9E),
	_INIT_DCS_CMD(0xCA, 0x4E),
	_INIT_DCS_CMD(0xCB, 0x00),
	_INIT_DCS_CMD(0xA9, 0x49),
	_INIT_DCS_CMD(0xAA, 0x4B),
	_INIT_DCS_CMD(0xAB, 0x48),
	_INIT_DCS_CMD(0xAC, 0x43),
	_INIT_DCS_CMD(0xAD, 0x40),
	_INIT_DCS_CMD(0xAE, 0x50),
	_INIT_DCS_CMD(0xAF, 0x44),
	_INIT_DCS_CMD(0xB0, 0x54),
	_INIT_DCS_CMD(0xB1, 0x4E),
	_INIT_DCS_CMD(0xB2, 0x4D),
	_INIT_DCS_CMD(0xB3, 0x4C),
	_INIT_DCS_CMD(0xB4, 0x41),
	_INIT_DCS_CMD(0xB5, 0x47),
	_INIT_DCS_CMD(0xB6, 0x53),
	_INIT_DCS_CMD(0xB7, 0x3E),
	_INIT_DCS_CMD(0xB8, 0x51),
	_INIT_DCS_CMD(0xB9, 0x3C),
	_INIT_DCS_CMD(0xBA, 0x3B),
	_INIT_DCS_CMD(0xBB, 0x46),
	_INIT_DCS_CMD(0xBC, 0x45),
	_INIT_DCS_CMD(0xBD, 0x55),
	_INIT_DCS_CMD(0xBE, 0x3D),
	_INIT_DCS_CMD(0xBF, 0x3F),
	_INIT_DCS_CMD(0xC0, 0x52),
	_INIT_DCS_CMD(0xC1, 0x4A),
	_INIT_DCS_CMD(0xC2, 0x39),
	_INIT_DCS_CMD(0xC3, 0x4F),
	_INIT_DCS_CMD(0xC4, 0x3A),
	_INIT_DCS_CMD(0xC5, 0x42),
	_INIT_DCS_CMD(0xFF, 0x27),
	_INIT_DCS_CMD(0xFB, 0x01),
	_INIT_DCS_CMD(0x56, 0x06),
	_INIT_DCS_CMD(0x58, 0x80),
	_INIT_DCS_CMD(0x59, 0x75),
	_INIT_DCS_CMD(0x5A, 0x00),
	_INIT_DCS_CMD(0x5B, 0x02),
	_INIT_DCS_CMD(0x5C, 0x00),
	_INIT_DCS_CMD(0x5D, 0x00),
	_INIT_DCS_CMD(0x5E, 0x20),
	_INIT_DCS_CMD(0x5F, 0x10),
	_INIT_DCS_CMD(0x60, 0x00),
	_INIT_DCS_CMD(0x61, 0x2E),
	_INIT_DCS_CMD(0x62, 0x00),
	_INIT_DCS_CMD(0x63, 0x01),
	_INIT_DCS_CMD(0x64, 0x43),
	_INIT_DCS_CMD(0x65, 0x2D),
	_INIT_DCS_CMD(0x66, 0x00),
	_INIT_DCS_CMD(0x67, 0x01),
	_INIT_DCS_CMD(0x68, 0x44),
	_INIT_DCS_CMD(0x00, 0x00),
	_INIT_DCS_CMD(0x78, 0x00),
	_INIT_DCS_CMD(0xC3, 0x00),
	_INIT_DCS_CMD(0xFF, 0x2A),
	_INIT_DCS_CMD(0xFB, 0x01),
	_INIT_DCS_CMD(0x22, 0x2F),
	_INIT_DCS_CMD(0x23, 0x08),
	_INIT_DCS_CMD(0x24, 0x00),
	_INIT_DCS_CMD(0x25, 0x65),
	_INIT_DCS_CMD(0x26, 0xF8),
	_INIT_DCS_CMD(0x27, 0x00),
	_INIT_DCS_CMD(0x28, 0x1A),
	_INIT_DCS_CMD(0x29, 0x00),
	_INIT_DCS_CMD(0x2A, 0x1A),
	_INIT_DCS_CMD(0x2B, 0x00),
	_INIT_DCS_CMD(0x2D, 0x1A),
	_INIT_DCS_CMD(0xFF, 0x23),
	_INIT_DCS_CMD(0xFB, 0x01),
	_INIT_DCS_CMD(0x00, 0x80),
	_INIT_DCS_CMD(0x07, 0x00),
	_INIT_DCS_CMD(0xFF, 0xE0),
	_INIT_DCS_CMD(0xFB, 0x01),
	_INIT_DCS_CMD(0x14, 0x60),
	_INIT_DCS_CMD(0x16, 0xC0),
	_INIT_DCS_CMD(0xFF, 0xF0),
	_INIT_DCS_CMD(0xFB, 0x01),
	_INIT_DCS_CMD(0x3A, 0x08),
	_INIT_DCS_CMD(0xFF, 0x10),
	_INIT_DCS_CMD(0xFB, 0x01),
	_INIT_DCS_CMD(0xB9, 0x01),
	_INIT_DCS_CMD(0xFF, 0x20),
	_INIT_DCS_CMD(0xFB, 0x01),
	_INIT_DCS_CMD(0x18, 0x40),
	_INIT_DCS_CMD(0xFF, 0x10),
	_INIT_DCS_CMD(0xFB, 0x01),
	_INIT_DCS_CMD(0xB9, 0x02),
	_INIT_DCS_CMD(0x35, 0x00),
	_INIT_DCS_CMD(0x51, 0x00, 0xFF),
	_INIT_DCS_CMD(0x53, 0x24),
	_INIT_DCS_CMD(0x55, 0x00),
	_INIT_DCS_CMD(0xBB, 0x13),
	_INIT_DCS_CMD(0x3B, 0x03, 0x96, 0x1A, 0x04, 0x04),
	_INIT_DELAY_CMD(100),
	/* 0x11 = MIPI_DCS_EXIT_SLEEP_MODE */
	_INIT_DCS_CMD(0x11),
	_INIT_DELAY_CMD(200),
	/* 0x29 = MIPI_DCS_SET_DISPLAY_ON */
	_INIT_DCS_CMD(0x29),
	_INIT_DELAY_CMD(100),
	{},
};
/*
 * Vendor-supplied init sequence for the Innolux HJ110IZ panel.
 *
 * Same structure as boe_tv110c9m_init_cmd: 0xFF writes appear to select
 * vendor register pages (undocumented — do not hand-edit), and only the
 * trailing 0x11/0x29 commands are standard MIPI DCS.
 */
static const struct panel_init_cmd inx_hj110iz_init_cmd[] = {
	_INIT_DCS_CMD(0xFF, 0x20),
	_INIT_DCS_CMD(0xFB, 0x01),
	_INIT_DCS_CMD(0x05, 0xD1),
	_INIT_DCS_CMD(0x06, 0xC0),
	_INIT_DCS_CMD(0x07, 0x87),
	_INIT_DCS_CMD(0x08, 0x4B),
	_INIT_DCS_CMD(0x0D, 0x63),
	_INIT_DCS_CMD(0x0E, 0x91),
	_INIT_DCS_CMD(0x0F, 0x69),
	_INIT_DCS_CMD(0x94, 0x00),
	_INIT_DCS_CMD(0x95, 0xF5),
	_INIT_DCS_CMD(0x96, 0xF5),
	_INIT_DCS_CMD(0x9D, 0x00),
	_INIT_DCS_CMD(0x9E, 0x00),
	_INIT_DCS_CMD(0x69, 0x98),
	_INIT_DCS_CMD(0x75, 0xA2),
	_INIT_DCS_CMD(0x77, 0xB3),
	_INIT_DCS_CMD(0x58, 0x43),
	_INIT_DCS_CMD(0xFF, 0x24),
	_INIT_DCS_CMD(0xFB, 0x01),
	_INIT_DCS_CMD(0x91, 0x44),
	_INIT_DCS_CMD(0x92, 0x4C),
	_INIT_DCS_CMD(0x94, 0x86),
	_INIT_DCS_CMD(0x60, 0x96),
	_INIT_DCS_CMD(0x61, 0xD0),
	_INIT_DCS_CMD(0x63, 0x70),
	_INIT_DCS_CMD(0xC2, 0xCA),
	_INIT_DCS_CMD(0x00, 0x03),
	_INIT_DCS_CMD(0x01, 0x03),
	_INIT_DCS_CMD(0x02, 0x03),
	_INIT_DCS_CMD(0x03, 0x29),
	_INIT_DCS_CMD(0x04, 0x22),
	_INIT_DCS_CMD(0x05, 0x22),
	_INIT_DCS_CMD(0x06, 0x0B),
	_INIT_DCS_CMD(0x07, 0x1D),
	_INIT_DCS_CMD(0x08, 0x1C),
	_INIT_DCS_CMD(0x09, 0x05),
	_INIT_DCS_CMD(0x0A, 0x08),
	_INIT_DCS_CMD(0x0B, 0x09),
	_INIT_DCS_CMD(0x0C, 0x0A),
	_INIT_DCS_CMD(0x0D, 0x0C),
	_INIT_DCS_CMD(0x0E, 0x0D),
	_INIT_DCS_CMD(0x0F, 0x0E),
	_INIT_DCS_CMD(0x10, 0x0F),
	_INIT_DCS_CMD(0x11, 0x10),
	_INIT_DCS_CMD(0x12, 0x11),
	_INIT_DCS_CMD(0x13, 0x04),
	_INIT_DCS_CMD(0x14, 0x00),
	_INIT_DCS_CMD(0x15, 0x03),
	_INIT_DCS_CMD(0x16, 0x03),
	_INIT_DCS_CMD(0x17, 0x03),
	_INIT_DCS_CMD(0x18, 0x03),
	_INIT_DCS_CMD(0x19, 0x29),
	_INIT_DCS_CMD(0x1A, 0x22),
	_INIT_DCS_CMD(0x1B, 0x22),
	_INIT_DCS_CMD(0x1C, 0x0B),
	_INIT_DCS_CMD(0x1D, 0x1D),
	_INIT_DCS_CMD(0x1E, 0x1C),
	_INIT_DCS_CMD(0x1F, 0x05),
	_INIT_DCS_CMD(0x20, 0x08),
	_INIT_DCS_CMD(0x21, 0x09),
	_INIT_DCS_CMD(0x22, 0x0A),
	_INIT_DCS_CMD(0x23, 0x0C),
	_INIT_DCS_CMD(0x24, 0x0D),
	_INIT_DCS_CMD(0x25, 0x0E),
	_INIT_DCS_CMD(0x26, 0x0F),
	_INIT_DCS_CMD(0x27, 0x10),
	_INIT_DCS_CMD(0x28, 0x11),
	_INIT_DCS_CMD(0x29, 0x04),
	_INIT_DCS_CMD(0x2A, 0x00),
	_INIT_DCS_CMD(0x2B, 0x03),
	_INIT_DCS_CMD(0x2F, 0x0A),
	_INIT_DCS_CMD(0x30, 0x35),
	_INIT_DCS_CMD(0x37, 0xA7),
	_INIT_DCS_CMD(0x39, 0x00),
	_INIT_DCS_CMD(0x3A, 0x46),
	_INIT_DCS_CMD(0x3B, 0x32),
	_INIT_DCS_CMD(0x3D, 0x12),
	_INIT_DCS_CMD(0x3F, 0x33),
	_INIT_DCS_CMD(0x40, 0x31),
	_INIT_DCS_CMD(0x41, 0x40),
	_INIT_DCS_CMD(0x42, 0x42),
	_INIT_DCS_CMD(0x47, 0x77),
	_INIT_DCS_CMD(0x48, 0x77),
	_INIT_DCS_CMD(0x4A, 0x45),
	_INIT_DCS_CMD(0x4B, 0x45),
	_INIT_DCS_CMD(0x4C, 0x14),
	_INIT_DCS_CMD(0x4D, 0x21),
	_INIT_DCS_CMD(0x4E, 0x43),
	_INIT_DCS_CMD(0x4F, 0x65),
	_INIT_DCS_CMD(0x55, 0x06),
	_INIT_DCS_CMD(0x56, 0x06),
	_INIT_DCS_CMD(0x58, 0x21),
	_INIT_DCS_CMD(0x59, 0x70),
	_INIT_DCS_CMD(0x5A, 0x46),
	_INIT_DCS_CMD(0x5B, 0x32),
	_INIT_DCS_CMD(0x5C, 0x88),
	_INIT_DCS_CMD(0x5E, 0x00, 0x00),
	_INIT_DCS_CMD(0x5F, 0x00),
	_INIT_DCS_CMD(0x7A, 0xFF),
	_INIT_DCS_CMD(0x7B, 0xFF),
	_INIT_DCS_CMD(0x7C, 0x00),
	_INIT_DCS_CMD(0x7D, 0x00),
	_INIT_DCS_CMD(0x7E, 0x20),
	_INIT_DCS_CMD(0x7F, 0x3C),
	_INIT_DCS_CMD(0x80, 0x00),
	_INIT_DCS_CMD(0x81, 0x00),
	_INIT_DCS_CMD(0x82, 0x08),
	_INIT_DCS_CMD(0x97, 0x02),
	_INIT_DCS_CMD(0xC5, 0x10),
	_INIT_DCS_CMD(0xD7, 0x55),
	_INIT_DCS_CMD(0xD8, 0x55),
	_INIT_DCS_CMD(0xD9, 0x23),
	_INIT_DCS_CMD(0xDA, 0x05),
	_INIT_DCS_CMD(0xDB, 0x01),
	_INIT_DCS_CMD(0xDC, 0x65),
	_INIT_DCS_CMD(0xDD, 0x55),
	_INIT_DCS_CMD(0xDE, 0x27),
	_INIT_DCS_CMD(0xDF, 0x01),
	_INIT_DCS_CMD(0xE0, 0x65),
	_INIT_DCS_CMD(0xE1, 0x01),
	_INIT_DCS_CMD(0xE2, 0x65),
	_INIT_DCS_CMD(0xE3, 0x01),
	_INIT_DCS_CMD(0xE4, 0x65),
	_INIT_DCS_CMD(0xE5, 0x01),
	_INIT_DCS_CMD(0xE6, 0x65),
	_INIT_DCS_CMD(0xE7, 0x00),
	_INIT_DCS_CMD(0xE8, 0x00),
	_INIT_DCS_CMD(0xE9, 0x01),
	_INIT_DCS_CMD(0xEA, 0x65),
	_INIT_DCS_CMD(0xEB, 0x01),
	_INIT_DCS_CMD(0xEE, 0x65),
	_INIT_DCS_CMD(0xEF, 0x01),
	_INIT_DCS_CMD(0xF0, 0x65),
	_INIT_DCS_CMD(0xB6, 0x05, 0x00, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x05, 0x00, 0x00),
	_INIT_DCS_CMD(0xFF, 0x25),
	_INIT_DCS_CMD(0xFB, 0x01),
	_INIT_DCS_CMD(0x05, 0x00),
	_INIT_DCS_CMD(0xF1, 0x10),
	_INIT_DCS_CMD(0x1E, 0x00),
	_INIT_DCS_CMD(0x1F, 0x46),
	_INIT_DCS_CMD(0x20, 0x32),
	_INIT_DCS_CMD(0x25, 0x00),
	_INIT_DCS_CMD(0x26, 0x46),
	_INIT_DCS_CMD(0x27, 0x32),
	_INIT_DCS_CMD(0x3F, 0x80),
	_INIT_DCS_CMD(0x40, 0x00),
	_INIT_DCS_CMD(0x43, 0x00),
	_INIT_DCS_CMD(0x44, 0x46),
	_INIT_DCS_CMD(0x45, 0x46),
	_INIT_DCS_CMD(0x48, 0x46),
	_INIT_DCS_CMD(0x49, 0x32),
	_INIT_DCS_CMD(0x5B, 0x80),
	_INIT_DCS_CMD(0x5C, 0x00),
	_INIT_DCS_CMD(0x5D, 0x46),
	_INIT_DCS_CMD(0x5E, 0x32),
	_INIT_DCS_CMD(0x5F, 0x46),
	_INIT_DCS_CMD(0x60, 0x32),
	_INIT_DCS_CMD(0x61, 0x46),
	_INIT_DCS_CMD(0x62, 0x32),
	_INIT_DCS_CMD(0x68, 0x0C),
	_INIT_DCS_CMD(0x6C, 0x0D),
	_INIT_DCS_CMD(0x6E, 0x0D),
	_INIT_DCS_CMD(0x78, 0x00),
	_INIT_DCS_CMD(0x79, 0xC5),
	_INIT_DCS_CMD(0x7A, 0x0C),
	_INIT_DCS_CMD(0x7B, 0xB0),
	_INIT_DCS_CMD(0xFF, 0x26),
	_INIT_DCS_CMD(0xFB, 0x01),
	_INIT_DCS_CMD(0x00, 0xA1),
	_INIT_DCS_CMD(0x02, 0x31),
	_INIT_DCS_CMD(0x0A, 0xF4),
	_INIT_DCS_CMD(0x04, 0x50),
	_INIT_DCS_CMD(0x06, 0x30),
	_INIT_DCS_CMD(0x0C, 0x16),
	_INIT_DCS_CMD(0x0D, 0x0D),
	_INIT_DCS_CMD(0x0F, 0x00),
	_INIT_DCS_CMD(0x11, 0x00),
	_INIT_DCS_CMD(0x12, 0x50),
	_INIT_DCS_CMD(0x13, 0x40),
	_INIT_DCS_CMD(0x14, 0x58),
	_INIT_DCS_CMD(0x15, 0x00),
	_INIT_DCS_CMD(0x16, 0x10),
	_INIT_DCS_CMD(0x17, 0xA0),
	_INIT_DCS_CMD(0x18, 0x86),
	_INIT_DCS_CMD(0x22, 0x00),
	_INIT_DCS_CMD(0x23, 0x00),
	_INIT_DCS_CMD(0x19, 0x0E),
	_INIT_DCS_CMD(0x1A, 0x31),
	_INIT_DCS_CMD(0x1B, 0x0D),
	_INIT_DCS_CMD(0x1C, 0x29),
	_INIT_DCS_CMD(0x2A, 0x0E),
	_INIT_DCS_CMD(0x2B, 0x31),
	_INIT_DCS_CMD(0x1D, 0x00),
	_INIT_DCS_CMD(0x1E, 0x62),
	_INIT_DCS_CMD(0x1F, 0x62),
	_INIT_DCS_CMD(0x2F, 0x06),
	_INIT_DCS_CMD(0x30, 0x62),
	_INIT_DCS_CMD(0x31, 0x06),
	_INIT_DCS_CMD(0x32, 0x7F),
	_INIT_DCS_CMD(0x33, 0x11),
	_INIT_DCS_CMD(0x34, 0x89),
	_INIT_DCS_CMD(0x35, 0x67),
	_INIT_DCS_CMD(0x39, 0x0B),
	_INIT_DCS_CMD(0x3A, 0x62),
	_INIT_DCS_CMD(0x3B, 0x06),
	_INIT_DCS_CMD(0xC8, 0x04),
	_INIT_DCS_CMD(0xC9, 0x89),
	_INIT_DCS_CMD(0xCA, 0x4E),
	_INIT_DCS_CMD(0xCB, 0x00),
	_INIT_DCS_CMD(0xA9, 0x3F),
	_INIT_DCS_CMD(0xAA, 0x3E),
	_INIT_DCS_CMD(0xAB, 0x3D),
	_INIT_DCS_CMD(0xAC, 0x3C),
	_INIT_DCS_CMD(0xAD, 0x3B),
	_INIT_DCS_CMD(0xAE, 0x3A),
	_INIT_DCS_CMD(0xAF, 0x39),
	_INIT_DCS_CMD(0xB0, 0x38),
	_INIT_DCS_CMD(0xFF, 0x27),
	_INIT_DCS_CMD(0xFB, 0x01),
	_INIT_DCS_CMD(0xD0, 0x11),
	_INIT_DCS_CMD(0xD1, 0x54),
	_INIT_DCS_CMD(0xDE, 0x43),
	_INIT_DCS_CMD(0xDF, 0x02),
	_INIT_DCS_CMD(0xC0, 0x18),
	_INIT_DCS_CMD(0xC1, 0x00),
	_INIT_DCS_CMD(0xC2, 0x00),
	_INIT_DCS_CMD(0x00, 0x00),
	_INIT_DCS_CMD(0xC3, 0x00),
	_INIT_DCS_CMD(0x56, 0x06),
	_INIT_DCS_CMD(0x58, 0x80),
	_INIT_DCS_CMD(0x59, 0x78),
	_INIT_DCS_CMD(0x5A, 0x00),
	_INIT_DCS_CMD(0x5B, 0x18),
	_INIT_DCS_CMD(0x5C, 0x00),
	_INIT_DCS_CMD(0x5D, 0x01),
	_INIT_DCS_CMD(0x5E, 0x20),
	_INIT_DCS_CMD(0x5F, 0x10),
	_INIT_DCS_CMD(0x60, 0x00),
	_INIT_DCS_CMD(0x61, 0x1C),
	_INIT_DCS_CMD(0x62, 0x00),
	_INIT_DCS_CMD(0x63, 0x01),
	_INIT_DCS_CMD(0x64, 0x44),
	_INIT_DCS_CMD(0x65, 0x1B),
	_INIT_DCS_CMD(0x66, 0x00),
	_INIT_DCS_CMD(0x67, 0x01),
	_INIT_DCS_CMD(0x68, 0x44),
	_INIT_DCS_CMD(0x98, 0x01),
	_INIT_DCS_CMD(0xB4, 0x03),
	_INIT_DCS_CMD(0x9B, 0xBE),
	_INIT_DCS_CMD(0xAB, 0x14),
	_INIT_DCS_CMD(0xBC, 0x08),
	_INIT_DCS_CMD(0xBD, 0x28),
	_INIT_DCS_CMD(0xFF, 0x2A),
	_INIT_DCS_CMD(0xFB, 0x01),
	_INIT_DCS_CMD(0x22, 0x2F),
	_INIT_DCS_CMD(0x23, 0x08),
	_INIT_DCS_CMD(0x24, 0x00),
	_INIT_DCS_CMD(0x25, 0x62),
	_INIT_DCS_CMD(0x26, 0xF8),
	_INIT_DCS_CMD(0x27, 0x00),
	_INIT_DCS_CMD(0x28, 0x1A),
	_INIT_DCS_CMD(0x29, 0x00),
	_INIT_DCS_CMD(0x2A, 0x1A),
	_INIT_DCS_CMD(0x2B, 0x00),
	_INIT_DCS_CMD(0x2D, 0x1A),
	_INIT_DCS_CMD(0x64, 0x96),
	_INIT_DCS_CMD(0x65, 0x10),
	_INIT_DCS_CMD(0x66, 0x00),
	_INIT_DCS_CMD(0x67, 0x96),
	_INIT_DCS_CMD(0x68, 0x10),
	_INIT_DCS_CMD(0x69, 0x00),
	_INIT_DCS_CMD(0x6A, 0x96),
	_INIT_DCS_CMD(0x6B, 0x10),
	_INIT_DCS_CMD(0x6C, 0x00),
	_INIT_DCS_CMD(0x70, 0x92),
	_INIT_DCS_CMD(0x71, 0x10),
	_INIT_DCS_CMD(0x72, 0x00),
	_INIT_DCS_CMD(0x79, 0x96),
	_INIT_DCS_CMD(0x7A, 0x10),
	_INIT_DCS_CMD(0x88, 0x96),
	_INIT_DCS_CMD(0x89, 0x10),
	_INIT_DCS_CMD(0xA2, 0x3F),
	_INIT_DCS_CMD(0xA3, 0x30),
	_INIT_DCS_CMD(0xA4, 0xC0),
	_INIT_DCS_CMD(0xA5, 0x03),
	_INIT_DCS_CMD(0xE8, 0x00),
	_INIT_DCS_CMD(0x97, 0x3C),
	_INIT_DCS_CMD(0x98, 0x02),
	_INIT_DCS_CMD(0x99, 0x95),
	_INIT_DCS_CMD(0x9A, 0x06),
	_INIT_DCS_CMD(0x9B, 0x00),
	_INIT_DCS_CMD(0x9C, 0x0B),
	_INIT_DCS_CMD(0x9D, 0x0A),
	_INIT_DCS_CMD(0x9E, 0x90),
	_INIT_DCS_CMD(0xFF, 0x25),
	_INIT_DCS_CMD(0x13, 0x02),
	_INIT_DCS_CMD(0x14, 0xD7),
	_INIT_DCS_CMD(0xDB, 0x02),
	_INIT_DCS_CMD(0xDC, 0xD7),
	_INIT_DCS_CMD(0x17, 0xCF),
	_INIT_DCS_CMD(0x19, 0x0F),
	_INIT_DCS_CMD(0x1B, 0x5B),
	_INIT_DCS_CMD(0xFF, 0x20),
	_INIT_DCS_CMD(0xB0, 0x00, 0x00, 0x00, 0x0C, 0x00, 0x24, 0x00, 0x38, 0x00, 0x4C, 0x00, 0x5E, 0x00, 0x6F, 0x00, 0x7E),
	_INIT_DCS_CMD(0xB1, 0x00, 0x8C, 0x00, 0xBE, 0x00, 0xE5, 0x01, 0x27, 0x01, 0x58, 0x01, 0xA8, 0x01, 0xE8, 0x01, 0xEA),
	_INIT_DCS_CMD(0xB2, 0x02, 0x28, 0x02, 0x71, 0x02, 0x9E, 0x02, 0xDA, 0x03, 0x00, 0x03, 0x31, 0x03, 0x40, 0x03, 0x51),
	_INIT_DCS_CMD(0xB3, 0x03, 0x62, 0x03, 0x75, 0x03, 0x89, 0x03, 0x9C, 0x03, 0xAA, 0x03, 0xB2),
	_INIT_DCS_CMD(0xB4, 0x00, 0x00, 0x00, 0x0D, 0x00, 0x27, 0x00, 0x3D, 0x00, 0x52, 0x00, 0x64, 0x00, 0x75, 0x00, 0x84),
	_INIT_DCS_CMD(0xB5, 0x00, 0x93, 0x00, 0xC5, 0x00, 0xEC, 0x01, 0x2C, 0x01, 0x5D, 0x01, 0xAC, 0x01, 0xEC, 0x01, 0xEE),
	_INIT_DCS_CMD(0xB6, 0x02, 0x2B, 0x02, 0x73, 0x02, 0xA0, 0x02, 0xDB, 0x03, 0x01, 0x03, 0x31, 0x03, 0x41, 0x03, 0x51),
	_INIT_DCS_CMD(0xB7, 0x03, 0x63, 0x03, 0x75, 0x03, 0x89, 0x03, 0x9C, 0x03, 0xAA, 0x03, 0xB2),
	_INIT_DCS_CMD(0xB8, 0x00, 0x00, 0x00, 0x0E, 0x00, 0x2A, 0x00, 0x40, 0x00, 0x56, 0x00, 0x68, 0x00, 0x7A, 0x00, 0x89),
	_INIT_DCS_CMD(0xB9, 0x00, 0x98, 0x00, 0xC9, 0x00, 0xF1, 0x01, 0x30, 0x01, 0x61, 0x01, 0xB0, 0x01, 0xEF, 0x01, 0xF1),
	_INIT_DCS_CMD(0xBA, 0x02, 0x2E, 0x02, 0x76, 0x02, 0xA3, 0x02, 0xDD, 0x03, 0x02, 0x03, 0x32, 0x03, 0x42, 0x03, 0x53),
	_INIT_DCS_CMD(0xBB, 0x03, 0x66, 0x03, 0x75, 0x03, 0x89, 0x03, 0x9C, 0x03, 0xAA, 0x03, 0xB2),
	_INIT_DCS_CMD(0xFF, 0x21),
	_INIT_DCS_CMD(0xB0, 0x00, 0x00, 0x00, 0x0C, 0x00, 0x24, 0x00, 0x38, 0x00, 0x4C, 0x00, 0x5E, 0x00, 0x6F, 0x00, 0x7E),
	_INIT_DCS_CMD(0xB1, 0x00, 0x8C, 0x00, 0xBE, 0x00, 0xE5, 0x01, 0x27, 0x01, 0x58, 0x01, 0xA8, 0x01, 0xE8, 0x01, 0xEA),
	_INIT_DCS_CMD(0xB2, 0x02, 0x28, 0x02, 0x71, 0x02, 0x9E, 0x02, 0xDA, 0x03, 0x00, 0x03, 0x31, 0x03, 0x40, 0x03, 0x51),
	_INIT_DCS_CMD(0xB3, 0x03, 0x62, 0x03, 0x77, 0x03, 0x90, 0x03, 0xAC, 0x03, 0xCA, 0x03, 0xDA),
	_INIT_DCS_CMD(0xB4, 0x00, 0x00, 0x00, 0x0D, 0x00, 0x27, 0x00, 0x3D, 0x00, 0x52, 0x00, 0x64, 0x00, 0x75, 0x00, 0x84),
	_INIT_DCS_CMD(0xB5, 0x00, 0x93, 0x00, 0xC5, 0x00, 0xEC, 0x01, 0x2C, 0x01, 0x5D, 0x01, 0xAC, 0x01, 0xEC, 0x01, 0xEE),
	_INIT_DCS_CMD(0xB6, 0x02, 0x2B, 0x02, 0x73, 0x02, 0xA0, 0x02, 0xDB, 0x03, 0x01, 0x03, 0x31, 0x03, 0x41, 0x03, 0x51),
	_INIT_DCS_CMD(0xB7, 0x03, 0x63, 0x03, 0x77, 0x03, 0x90, 0x03, 0xAC, 0x03, 0xCA, 0x03, 0xDA),
	_INIT_DCS_CMD(0xB8, 0x00, 0x00, 0x00, 0x0E, 0x00, 0x2A, 0x00, 0x40, 0x00, 0x56, 0x00, 0x68, 0x00, 0x7A, 0x00, 0x89),
	_INIT_DCS_CMD(0xB9, 0x00, 0x98, 0x00, 0xC9, 0x00, 0xF1, 0x01, 0x30, 0x01, 0x61, 0x01, 0xB0, 0x01, 0xEF, 0x01, 0xF1),
	_INIT_DCS_CMD(0xBA, 0x02, 0x2E, 0x02, 0x76, 0x02, 0xA3, 0x02, 0xDD, 0x03, 0x02, 0x03, 0x32, 0x03, 0x42, 0x03, 0x53),
	_INIT_DCS_CMD(0xBB, 0x03, 0x66, 0x03, 0x77, 0x03, 0x90, 0x03, 0xAC, 0x03, 0xCA, 0x03, 0xDA),
	_INIT_DCS_CMD(0xFF, 0xF0),
	_INIT_DCS_CMD(0xFB, 0x01),
	_INIT_DCS_CMD(0x3A, 0x08),
	_INIT_DCS_CMD(0xFF, 0x10),
	_INIT_DCS_CMD(0xB9, 0x01),
	_INIT_DCS_CMD(0xFF, 0x20),
	_INIT_DCS_CMD(0x18, 0x40),
	_INIT_DCS_CMD(0xFF, 0x10),
	_INIT_DCS_CMD(0xB9, 0x02),
	_INIT_DCS_CMD(0xFF, 0x10),
	_INIT_DCS_CMD(0xFB, 0x01),
	_INIT_DCS_CMD(0xB0, 0x01),
	_INIT_DCS_CMD(0x35, 0x00),
	_INIT_DCS_CMD(0x3B, 0x03, 0xAE, 0x1A, 0x04, 0x04),
	_INIT_DELAY_CMD(100),
	/* 0x11 = MIPI_DCS_EXIT_SLEEP_MODE */
	_INIT_DCS_CMD(0x11),
	_INIT_DELAY_CMD(200),
	/* 0x29 = MIPI_DCS_SET_DISPLAY_ON */
	_INIT_DCS_CMD(0x29),
	_INIT_DELAY_CMD(100),
	{},
};
/*
 * Vendor-supplied init sequence for the BOE TV101WUM family.
 *
 * Here the 0xB0 writes appear to select vendor register pages; the
 * register values are opaque vendor tuning data (gamma/timing — not
 * publicly documented), so do not hand-edit them.  Note this table ends
 * with only a delay: the sleep-out/display-on handshake is presumably
 * driven elsewhere — confirm against the enable path.
 */
static const struct panel_init_cmd boe_init_cmd[] = {
	_INIT_DCS_CMD(0xB0, 0x05),
	_INIT_DCS_CMD(0xB1, 0xE5),
	_INIT_DCS_CMD(0xB3, 0x52),
	_INIT_DCS_CMD(0xB0, 0x00),
	_INIT_DCS_CMD(0xB3, 0x88),
	_INIT_DCS_CMD(0xB0, 0x04),
	_INIT_DCS_CMD(0xB8, 0x00),
	_INIT_DCS_CMD(0xB0, 0x00),
	_INIT_DCS_CMD(0xB6, 0x03),
	_INIT_DCS_CMD(0xBA, 0x8B),
	_INIT_DCS_CMD(0xBF, 0x1A),
	_INIT_DCS_CMD(0xC0, 0x0F),
	_INIT_DCS_CMD(0xC2, 0x0C),
	_INIT_DCS_CMD(0xC3, 0x02),
	_INIT_DCS_CMD(0xC4, 0x0C),
	_INIT_DCS_CMD(0xC5, 0x02),
	_INIT_DCS_CMD(0xB0, 0x01),
	_INIT_DCS_CMD(0xE0, 0x26),
	_INIT_DCS_CMD(0xE1, 0x26),
	_INIT_DCS_CMD(0xDC, 0x00),
	_INIT_DCS_CMD(0xDD, 0x00),
	_INIT_DCS_CMD(0xCC, 0x26),
	_INIT_DCS_CMD(0xCD, 0x26),
	_INIT_DCS_CMD(0xC8, 0x00),
	_INIT_DCS_CMD(0xC9, 0x00),
	_INIT_DCS_CMD(0xD2, 0x03),
	_INIT_DCS_CMD(0xD3, 0x03),
	_INIT_DCS_CMD(0xE6, 0x04),
	_INIT_DCS_CMD(0xE7, 0x04),
	_INIT_DCS_CMD(0xC4, 0x09),
	_INIT_DCS_CMD(0xC5, 0x09),
	_INIT_DCS_CMD(0xD8, 0x0A),
	_INIT_DCS_CMD(0xD9, 0x0A),
	_INIT_DCS_CMD(0xC2, 0x0B),
	_INIT_DCS_CMD(0xC3, 0x0B),
	_INIT_DCS_CMD(0xD6, 0x0C),
	_INIT_DCS_CMD(0xD7, 0x0C),
	_INIT_DCS_CMD(0xC0, 0x05),
	_INIT_DCS_CMD(0xC1, 0x05),
	_INIT_DCS_CMD(0xD4, 0x06),
	_INIT_DCS_CMD(0xD5, 0x06),
	_INIT_DCS_CMD(0xCA, 0x07),
	_INIT_DCS_CMD(0xCB, 0x07),
	_INIT_DCS_CMD(0xDE, 0x08),
	_INIT_DCS_CMD(0xDF, 0x08),
	_INIT_DCS_CMD(0xB0, 0x02),
	_INIT_DCS_CMD(0xC0, 0x00),
	_INIT_DCS_CMD(0xC1, 0x0D),
	_INIT_DCS_CMD(0xC2, 0x17),
	_INIT_DCS_CMD(0xC3, 0x26),
	_INIT_DCS_CMD(0xC4, 0x31),
	_INIT_DCS_CMD(0xC5, 0x1C),
	_INIT_DCS_CMD(0xC6, 0x2C),
	_INIT_DCS_CMD(0xC7, 0x33),
	_INIT_DCS_CMD(0xC8, 0x31),
	_INIT_DCS_CMD(0xC9, 0x37),
	_INIT_DCS_CMD(0xCA, 0x37),
	_INIT_DCS_CMD(0xCB, 0x37),
	_INIT_DCS_CMD(0xCC, 0x39),
	_INIT_DCS_CMD(0xCD, 0x2E),
	_INIT_DCS_CMD(0xCE, 0x2F),
	_INIT_DCS_CMD(0xCF, 0x2F),
	_INIT_DCS_CMD(0xD0, 0x07),
	_INIT_DCS_CMD(0xD2, 0x00),
	_INIT_DCS_CMD(0xD3, 0x0D),
	_INIT_DCS_CMD(0xD4, 0x17),
	_INIT_DCS_CMD(0xD5, 0x26),
	_INIT_DCS_CMD(0xD6, 0x31),
	_INIT_DCS_CMD(0xD7, 0x3F),
	_INIT_DCS_CMD(0xD8, 0x3F),
	_INIT_DCS_CMD(0xD9, 0x3F),
	_INIT_DCS_CMD(0xDA, 0x3F),
	_INIT_DCS_CMD(0xDB, 0x37),
	_INIT_DCS_CMD(0xDC, 0x37),
	_INIT_DCS_CMD(0xDD, 0x37),
	_INIT_DCS_CMD(0xDE, 0x39),
	_INIT_DCS_CMD(0xDF, 0x2E),
	_INIT_DCS_CMD(0xE0, 0x2F),
	_INIT_DCS_CMD(0xE1, 0x2F),
	_INIT_DCS_CMD(0xE2, 0x07),
	_INIT_DCS_CMD(0xB0, 0x03),
	_INIT_DCS_CMD(0xC8, 0x0B),
	_INIT_DCS_CMD(0xC9, 0x07),
	_INIT_DCS_CMD(0xC3, 0x00),
	_INIT_DCS_CMD(0xE7, 0x00),
	_INIT_DCS_CMD(0xC5, 0x2A),
	_INIT_DCS_CMD(0xDE, 0x2A),
	_INIT_DCS_CMD(0xCA, 0x43),
	_INIT_DCS_CMD(0xC9, 0x07),
	_INIT_DCS_CMD(0xE4, 0xC0),
	_INIT_DCS_CMD(0xE5, 0x0D),
	_INIT_DCS_CMD(0xCB, 0x00),
	_INIT_DCS_CMD(0xB0, 0x06),
	_INIT_DCS_CMD(0xB8, 0xA5),
	_INIT_DCS_CMD(0xC0, 0xA5),
	_INIT_DCS_CMD(0xC7, 0x0F),
	_INIT_DCS_CMD(0xD5, 0x32),
	_INIT_DCS_CMD(0xB8, 0x00),
	_INIT_DCS_CMD(0xC0, 0x00),
	_INIT_DCS_CMD(0xBC, 0x00),
	_INIT_DCS_CMD(0xB0, 0x07),
	_INIT_DCS_CMD(0xB1, 0x00),
	_INIT_DCS_CMD(0xB2, 0x02),
	_INIT_DCS_CMD(0xB3, 0x0F),
	_INIT_DCS_CMD(0xB4, 0x25),
	_INIT_DCS_CMD(0xB5, 0x39),
	_INIT_DCS_CMD(0xB6, 0x4E),
	_INIT_DCS_CMD(0xB7, 0x72),
	_INIT_DCS_CMD(0xB8, 0x97),
	_INIT_DCS_CMD(0xB9, 0xDC),
	_INIT_DCS_CMD(0xBA, 0x22),
	_INIT_DCS_CMD(0xBB, 0xA4),
	_INIT_DCS_CMD(0xBC, 0x2B),
	_INIT_DCS_CMD(0xBD, 0x2F),
	_INIT_DCS_CMD(0xBE, 0xA9),
	_INIT_DCS_CMD(0xBF, 0x25),
	_INIT_DCS_CMD(0xC0, 0x61),
	_INIT_DCS_CMD(0xC1, 0x97),
	_INIT_DCS_CMD(0xC2, 0xB2),
	_INIT_DCS_CMD(0xC3, 0xCD),
	_INIT_DCS_CMD(0xC4, 0xD9),
	_INIT_DCS_CMD(0xC5, 0xE7),
	_INIT_DCS_CMD(0xC6, 0xF4),
	_INIT_DCS_CMD(0xC7, 0xFA),
	_INIT_DCS_CMD(0xC8, 0xFC),
	_INIT_DCS_CMD(0xC9, 0x00),
	_INIT_DCS_CMD(0xCA, 0x00),
	_INIT_DCS_CMD(0xCB, 0x16),
	_INIT_DCS_CMD(0xCC, 0xAF),
	_INIT_DCS_CMD(0xCD, 0xFF),
	_INIT_DCS_CMD(0xCE, 0xFF),
	_INIT_DCS_CMD(0xB0, 0x08),
	_INIT_DCS_CMD(0xB1, 0x04),
	_INIT_DCS_CMD(0xB2, 0x05),
	_INIT_DCS_CMD(0xB3, 0x11),
	_INIT_DCS_CMD(0xB4, 0x24),
	_INIT_DCS_CMD(0xB5, 0x39),
	_INIT_DCS_CMD(0xB6, 0x4F),
	_INIT_DCS_CMD(0xB7, 0x72),
	_INIT_DCS_CMD(0xB8, 0x98),
	_INIT_DCS_CMD(0xB9, 0xDC),
	_INIT_DCS_CMD(0xBA, 0x23),
	_INIT_DCS_CMD(0xBB, 0xA6),
	_INIT_DCS_CMD(0xBC, 0x2C),
	_INIT_DCS_CMD(0xBD, 0x30),
	_INIT_DCS_CMD(0xBE, 0xAA),
	_INIT_DCS_CMD(0xBF, 0x26),
	_INIT_DCS_CMD(0xC0, 0x62),
	_INIT_DCS_CMD(0xC1, 0x9B),
	_INIT_DCS_CMD(0xC2, 0xB5),
	_INIT_DCS_CMD(0xC3, 0xCF),
	_INIT_DCS_CMD(0xC4, 0xDB),
	_INIT_DCS_CMD(0xC5, 0xE8),
	_INIT_DCS_CMD(0xC6, 0xF5),
	_INIT_DCS_CMD(0xC7, 0xFA),
	_INIT_DCS_CMD(0xC8, 0xFC),
	_INIT_DCS_CMD(0xC9, 0x00),
	_INIT_DCS_CMD(0xCA, 0x00),
	_INIT_DCS_CMD(0xCB, 0x16),
	_INIT_DCS_CMD(0xCC, 0xAF),
	_INIT_DCS_CMD(0xCD, 0xFF),
	_INIT_DCS_CMD(0xCE, 0xFF),
	_INIT_DCS_CMD(0xB0, 0x09),
	_INIT_DCS_CMD(0xB1, 0x04),
	_INIT_DCS_CMD(0xB2, 0x02),
	_INIT_DCS_CMD(0xB3, 0x16),
	_INIT_DCS_CMD(0xB4, 0x24),
	_INIT_DCS_CMD(0xB5, 0x3B),
	_INIT_DCS_CMD(0xB6, 0x4F),
	_INIT_DCS_CMD(0xB7, 0x73),
	_INIT_DCS_CMD(0xB8, 0x99),
	_INIT_DCS_CMD(0xB9, 0xE0),
	_INIT_DCS_CMD(0xBA, 0x26),
	_INIT_DCS_CMD(0xBB, 0xAD),
	_INIT_DCS_CMD(0xBC, 0x36),
	_INIT_DCS_CMD(0xBD, 0x3A),
	_INIT_DCS_CMD(0xBE, 0xAE),
	_INIT_DCS_CMD(0xBF, 0x2A),
	_INIT_DCS_CMD(0xC0, 0x66),
	_INIT_DCS_CMD(0xC1, 0x9E),
	_INIT_DCS_CMD(0xC2, 0xB8),
	_INIT_DCS_CMD(0xC3, 0xD1),
	_INIT_DCS_CMD(0xC4, 0xDD),
	_INIT_DCS_CMD(0xC5, 0xE9),
	_INIT_DCS_CMD(0xC6, 0xF6),
	_INIT_DCS_CMD(0xC7, 0xFA),
	_INIT_DCS_CMD(0xC8, 0xFC),
	_INIT_DCS_CMD(0xC9, 0x00),
	_INIT_DCS_CMD(0xCA, 0x00),
	_INIT_DCS_CMD(0xCB, 0x16),
	_INIT_DCS_CMD(0xCC, 0xAF),
	_INIT_DCS_CMD(0xCD, 0xFF),
	_INIT_DCS_CMD(0xCE, 0xFF),
	_INIT_DCS_CMD(0xB0, 0x0A),
	_INIT_DCS_CMD(0xB1, 0x00),
	_INIT_DCS_CMD(0xB2, 0x02),
	_INIT_DCS_CMD(0xB3, 0x0F),
	_INIT_DCS_CMD(0xB4, 0x25),
	_INIT_DCS_CMD(0xB5, 0x39),
	_INIT_DCS_CMD(0xB6, 0x4E),
	_INIT_DCS_CMD(0xB7, 0x72),
	_INIT_DCS_CMD(0xB8, 0x97),
	_INIT_DCS_CMD(0xB9, 0xDC),
	_INIT_DCS_CMD(0xBA, 0x22),
	_INIT_DCS_CMD(0xBB, 0xA4),
	_INIT_DCS_CMD(0xBC, 0x2B),
	_INIT_DCS_CMD(0xBD, 0x2F),
	_INIT_DCS_CMD(0xBE, 0xA9),
	_INIT_DCS_CMD(0xBF, 0x25),
	_INIT_DCS_CMD(0xC0, 0x61),
	_INIT_DCS_CMD(0xC1, 0x97),
	_INIT_DCS_CMD(0xC2, 0xB2),
	_INIT_DCS_CMD(0xC3, 0xCD),
	_INIT_DCS_CMD(0xC4, 0xD9),
	_INIT_DCS_CMD(0xC5, 0xE7),
	_INIT_DCS_CMD(0xC6, 0xF4),
	_INIT_DCS_CMD(0xC7, 0xFA),
	_INIT_DCS_CMD(0xC8, 0xFC),
	_INIT_DCS_CMD(0xC9, 0x00),
	_INIT_DCS_CMD(0xCA, 0x00),
	_INIT_DCS_CMD(0xCB, 0x16),
	_INIT_DCS_CMD(0xCC, 0xAF),
	_INIT_DCS_CMD(0xCD, 0xFF),
	_INIT_DCS_CMD(0xCE, 0xFF),
	_INIT_DCS_CMD(0xB0, 0x0B),
	_INIT_DCS_CMD(0xB1, 0x04),
	_INIT_DCS_CMD(0xB2, 0x05),
	_INIT_DCS_CMD(0xB3, 0x11),
	_INIT_DCS_CMD(0xB4, 0x24),
	_INIT_DCS_CMD(0xB5, 0x39),
	_INIT_DCS_CMD(0xB6, 0x4F),
	_INIT_DCS_CMD(0xB7, 0x72),
	_INIT_DCS_CMD(0xB8, 0x98),
	_INIT_DCS_CMD(0xB9, 0xDC),
	_INIT_DCS_CMD(0xBA, 0x23),
	_INIT_DCS_CMD(0xBB, 0xA6),
	_INIT_DCS_CMD(0xBC, 0x2C),
	_INIT_DCS_CMD(0xBD, 0x30),
	_INIT_DCS_CMD(0xBE, 0xAA),
	_INIT_DCS_CMD(0xBF, 0x26),
	_INIT_DCS_CMD(0xC0, 0x62),
	_INIT_DCS_CMD(0xC1, 0x9B),
	_INIT_DCS_CMD(0xC2, 0xB5),
	_INIT_DCS_CMD(0xC3, 0xCF),
	_INIT_DCS_CMD(0xC4, 0xDB),
	_INIT_DCS_CMD(0xC5, 0xE8),
	_INIT_DCS_CMD(0xC6, 0xF5),
	_INIT_DCS_CMD(0xC7, 0xFA),
	_INIT_DCS_CMD(0xC8, 0xFC),
	_INIT_DCS_CMD(0xC9, 0x00),
	_INIT_DCS_CMD(0xCA, 0x00),
	_INIT_DCS_CMD(0xCB, 0x16),
	_INIT_DCS_CMD(0xCC, 0xAF),
	_INIT_DCS_CMD(0xCD, 0xFF),
	_INIT_DCS_CMD(0xCE, 0xFF),
	_INIT_DCS_CMD(0xB0, 0x0C),
	_INIT_DCS_CMD(0xB1, 0x04),
	_INIT_DCS_CMD(0xB2, 0x02),
	_INIT_DCS_CMD(0xB3, 0x16),
	_INIT_DCS_CMD(0xB4, 0x24),
	_INIT_DCS_CMD(0xB5, 0x3B),
	_INIT_DCS_CMD(0xB6, 0x4F),
	_INIT_DCS_CMD(0xB7, 0x73),
	_INIT_DCS_CMD(0xB8, 0x99),
	_INIT_DCS_CMD(0xB9, 0xE0),
	_INIT_DCS_CMD(0xBA, 0x26),
	_INIT_DCS_CMD(0xBB, 0xAD),
	_INIT_DCS_CMD(0xBC, 0x36),
	_INIT_DCS_CMD(0xBD, 0x3A),
	_INIT_DCS_CMD(0xBE, 0xAE),
	_INIT_DCS_CMD(0xBF, 0x2A),
	_INIT_DCS_CMD(0xC0, 0x66),
	_INIT_DCS_CMD(0xC1, 0x9E),
	_INIT_DCS_CMD(0xC2, 0xB8),
	_INIT_DCS_CMD(0xC3, 0xD1),
	_INIT_DCS_CMD(0xC4, 0xDD),
	_INIT_DCS_CMD(0xC5, 0xE9),
	_INIT_DCS_CMD(0xC6, 0xF6),
	_INIT_DCS_CMD(0xC7, 0xFA),
	_INIT_DCS_CMD(0xC8, 0xFC),
	_INIT_DCS_CMD(0xC9, 0x00),
	_INIT_DCS_CMD(0xCA, 0x00),
	_INIT_DCS_CMD(0xCB, 0x16),
	_INIT_DCS_CMD(0xCC, 0xAF),
	_INIT_DCS_CMD(0xCD, 0xFF),
	_INIT_DCS_CMD(0xCE, 0xFF),
	_INIT_DCS_CMD(0xB0, 0x00),
	_INIT_DCS_CMD(0xB3, 0x08),
	_INIT_DCS_CMD(0xB0, 0x04),
	_INIT_DCS_CMD(0xB8, 0x68),
	_INIT_DELAY_CMD(150),
	{},
};
/*
 * Minimal init for the AUO KD101N80-45NA: just the standard MIPI DCS
 * sleep-out (0x11) and display-on (0x29) handshake with the delays the
 * panel requires.  No vendor-specific register programming is needed.
 */
static const struct panel_init_cmd auo_kd101n80_45na_init_cmd[] = {
	_INIT_DELAY_CMD(24),
	/* 0x11 = MIPI_DCS_EXIT_SLEEP_MODE */
	_INIT_DCS_CMD(0x11),
	_INIT_DELAY_CMD(120),
	/* 0x29 = MIPI_DCS_SET_DISPLAY_ON */
	_INIT_DCS_CMD(0x29),
	_INIT_DELAY_CMD(120),
	{},
};
/*
 * Vendor-supplied init sequence for the AUO B101UAN08.3 panel.
 * 0xB0 appears to select a vendor register page; the 0xC0..0xE7 values
 * are opaque vendor tuning data — do not hand-edit.  Unlike the other
 * tables, this one ends with only a delay (sleep-out/display-on is
 * presumably handled elsewhere — confirm against the enable path).
 */
static const struct panel_init_cmd auo_b101uan08_3_init_cmd[] = {
	_INIT_DELAY_CMD(24),
	_INIT_DCS_CMD(0xB0, 0x01),
	_INIT_DCS_CMD(0xC0, 0x48),
	_INIT_DCS_CMD(0xC1, 0x48),
	_INIT_DCS_CMD(0xC2, 0x47),
	_INIT_DCS_CMD(0xC3, 0x47),
	_INIT_DCS_CMD(0xC4, 0x46),
	_INIT_DCS_CMD(0xC5, 0x46),
	_INIT_DCS_CMD(0xC6, 0x45),
	_INIT_DCS_CMD(0xC7, 0x45),
	_INIT_DCS_CMD(0xC8, 0x64),
	_INIT_DCS_CMD(0xC9, 0x64),
	_INIT_DCS_CMD(0xCA, 0x4F),
	_INIT_DCS_CMD(0xCB, 0x4F),
	_INIT_DCS_CMD(0xCC, 0x40),
	_INIT_DCS_CMD(0xCD, 0x40),
	_INIT_DCS_CMD(0xCE, 0x66),
	_INIT_DCS_CMD(0xCF, 0x66),
	_INIT_DCS_CMD(0xD0, 0x4F),
	_INIT_DCS_CMD(0xD1, 0x4F),
	_INIT_DCS_CMD(0xD2, 0x41),
	_INIT_DCS_CMD(0xD3, 0x41),
	_INIT_DCS_CMD(0xD4, 0x48),
	_INIT_DCS_CMD(0xD5, 0x48),
	_INIT_DCS_CMD(0xD6, 0x47),
	_INIT_DCS_CMD(0xD7, 0x47),
	_INIT_DCS_CMD(0xD8, 0x46),
	_INIT_DCS_CMD(0xD9, 0x46),
	_INIT_DCS_CMD(0xDA, 0x45),
	_INIT_DCS_CMD(0xDB, 0x45),
	_INIT_DCS_CMD(0xDC, 0x64),
	_INIT_DCS_CMD(0xDD, 0x64),
	_INIT_DCS_CMD(0xDE, 0x4F),
	_INIT_DCS_CMD(0xDF, 0x4F),
	_INIT_DCS_CMD(0xE0, 0x40),
	_INIT_DCS_CMD(0xE1, 0x40),
	_INIT_DCS_CMD(0xE2, 0x66),
	_INIT_DCS_CMD(0xE3, 0x66),
	_INIT_DCS_CMD(0xE4, 0x4F),
	_INIT_DCS_CMD(0xE5, 0x4F),
	_INIT_DCS_CMD(0xE6, 0x41),
	_INIT_DCS_CMD(0xE7, 0x41),
	_INIT_DELAY_CMD(150),
	{},
};
/*
 * Starry 2081101QFH032011-53G vendor-provided init sequence, ending with
 * DCS exit-sleep (0x11) and display-on (0x29). Register values are opaque
 * vendor tuning data; the 0xB0 writes appear to select register pages —
 * NOTE(review): page semantics not documented here, confirm with vendor
 * datasheet before editing.
 */
static const struct panel_init_cmd starry_qfh032011_53g_init_cmd[] = {
_INIT_DCS_CMD(0xB0, 0x01),
_INIT_DCS_CMD(0xC3, 0x4F),
_INIT_DCS_CMD(0xC4, 0x40),
_INIT_DCS_CMD(0xC5, 0x40),
_INIT_DCS_CMD(0xC6, 0x40),
_INIT_DCS_CMD(0xC7, 0x40),
_INIT_DCS_CMD(0xC8, 0x4D),
_INIT_DCS_CMD(0xC9, 0x52),
_INIT_DCS_CMD(0xCA, 0x51),
_INIT_DCS_CMD(0xCD, 0x5D),
_INIT_DCS_CMD(0xCE, 0x5B),
_INIT_DCS_CMD(0xCF, 0x4B),
_INIT_DCS_CMD(0xD0, 0x49),
_INIT_DCS_CMD(0xD1, 0x47),
_INIT_DCS_CMD(0xD2, 0x45),
_INIT_DCS_CMD(0xD3, 0x41),
_INIT_DCS_CMD(0xD7, 0x50),
_INIT_DCS_CMD(0xD8, 0x40),
_INIT_DCS_CMD(0xD9, 0x40),
_INIT_DCS_CMD(0xDA, 0x40),
_INIT_DCS_CMD(0xDB, 0x40),
_INIT_DCS_CMD(0xDC, 0x4E),
_INIT_DCS_CMD(0xDD, 0x52),
_INIT_DCS_CMD(0xDE, 0x51),
_INIT_DCS_CMD(0xE1, 0x5E),
_INIT_DCS_CMD(0xE2, 0x5C),
_INIT_DCS_CMD(0xE3, 0x4C),
_INIT_DCS_CMD(0xE4, 0x4A),
_INIT_DCS_CMD(0xE5, 0x48),
_INIT_DCS_CMD(0xE6, 0x46),
_INIT_DCS_CMD(0xE7, 0x42),
_INIT_DCS_CMD(0xB0, 0x03),
_INIT_DCS_CMD(0xBE, 0x03),
_INIT_DCS_CMD(0xCC, 0x44),
_INIT_DCS_CMD(0xC8, 0x07),
_INIT_DCS_CMD(0xC9, 0x05),
_INIT_DCS_CMD(0xCA, 0x42),
_INIT_DCS_CMD(0xCD, 0x3E),
_INIT_DCS_CMD(0xCF, 0x60),
_INIT_DCS_CMD(0xD2, 0x04),
_INIT_DCS_CMD(0xD3, 0x04),
_INIT_DCS_CMD(0xD4, 0x01),
_INIT_DCS_CMD(0xD5, 0x00),
_INIT_DCS_CMD(0xD6, 0x03),
_INIT_DCS_CMD(0xD7, 0x04),
_INIT_DCS_CMD(0xD9, 0x01),
_INIT_DCS_CMD(0xDB, 0x01),
_INIT_DCS_CMD(0xE4, 0xF0),
_INIT_DCS_CMD(0xE5, 0x0A),
_INIT_DCS_CMD(0xB0, 0x00),
_INIT_DCS_CMD(0xCC, 0x08),
_INIT_DCS_CMD(0xC2, 0x08),
_INIT_DCS_CMD(0xC4, 0x10),
_INIT_DCS_CMD(0xB0, 0x02),
_INIT_DCS_CMD(0xC0, 0x00),
_INIT_DCS_CMD(0xC1, 0x0A),
_INIT_DCS_CMD(0xC2, 0x20),
_INIT_DCS_CMD(0xC3, 0x24),
_INIT_DCS_CMD(0xC4, 0x23),
_INIT_DCS_CMD(0xC5, 0x29),
_INIT_DCS_CMD(0xC6, 0x23),
_INIT_DCS_CMD(0xC7, 0x1C),
_INIT_DCS_CMD(0xC8, 0x19),
_INIT_DCS_CMD(0xC9, 0x17),
_INIT_DCS_CMD(0xCA, 0x17),
_INIT_DCS_CMD(0xCB, 0x18),
_INIT_DCS_CMD(0xCC, 0x1A),
_INIT_DCS_CMD(0xCD, 0x1E),
_INIT_DCS_CMD(0xCE, 0x20),
_INIT_DCS_CMD(0xCF, 0x23),
_INIT_DCS_CMD(0xD0, 0x07),
_INIT_DCS_CMD(0xD1, 0x00),
_INIT_DCS_CMD(0xD2, 0x00),
_INIT_DCS_CMD(0xD3, 0x0A),
_INIT_DCS_CMD(0xD4, 0x13),
_INIT_DCS_CMD(0xD5, 0x1C),
_INIT_DCS_CMD(0xD6, 0x1A),
_INIT_DCS_CMD(0xD7, 0x13),
_INIT_DCS_CMD(0xD8, 0x17),
_INIT_DCS_CMD(0xD9, 0x1C),
_INIT_DCS_CMD(0xDA, 0x19),
_INIT_DCS_CMD(0xDB, 0x17),
_INIT_DCS_CMD(0xDC, 0x17),
_INIT_DCS_CMD(0xDD, 0x18),
_INIT_DCS_CMD(0xDE, 0x1A),
_INIT_DCS_CMD(0xDF, 0x1E),
_INIT_DCS_CMD(0xE0, 0x20),
_INIT_DCS_CMD(0xE1, 0x23),
_INIT_DCS_CMD(0xE2, 0x07),
_INIT_DCS_CMD(0X11),
_INIT_DELAY_CMD(120),
_INIT_DCS_CMD(0X29),
_INIT_DELAY_CMD(80),
{},
};
/*
 * Starry HX83102-J02 (Himax controller) vendor-provided init sequence,
 * ending with DCS exit-sleep (0x11) and display-on (0x29). The payloads
 * are opaque vendor tuning data; 0xB9 is the Himax "set extension command"
 * unlock and 0xBD writes appear to select register banks — NOTE(review):
 * bank semantics not documented here, confirm against the HX83102
 * datasheet before editing.
 */
static const struct panel_init_cmd starry_himax83102_j02_init_cmd[] = {
_INIT_DCS_CMD(0xB9, 0x83, 0x10, 0x21, 0x55, 0x00),
_INIT_DCS_CMD(0xB1, 0x2C, 0xB5, 0xB5, 0x31, 0xF1, 0x31, 0xD7, 0x2F, 0x36, 0x36, 0x36, 0x36, 0x1A, 0x8B, 0x11,
0x65, 0x00, 0x88, 0xFA, 0xFF, 0xFF, 0x8F, 0xFF, 0x08, 0x74, 0x33),
_INIT_DCS_CMD(0xB2, 0x00, 0x47, 0xB0, 0x80, 0x00, 0x12, 0x72, 0x3C, 0xA3, 0x03, 0x03, 0x00, 0x00, 0x88, 0xF5),
_INIT_DCS_CMD(0xB4, 0x76, 0x76, 0x76, 0x76, 0x76, 0x76, 0x63, 0x5C, 0x63, 0x5C, 0x01, 0x9E),
_INIT_DCS_CMD(0xE9, 0xCD),
_INIT_DCS_CMD(0xBA, 0x84),
_INIT_DCS_CMD(0xE9, 0x3F),
_INIT_DCS_CMD(0xBC, 0x1B, 0x04),
_INIT_DCS_CMD(0xBE, 0x20),
_INIT_DCS_CMD(0xBF, 0xFC, 0xC4),
_INIT_DCS_CMD(0xC0, 0x36, 0x36, 0x22, 0x11, 0x22, 0xA0, 0x61, 0x08, 0xF5, 0x03),
_INIT_DCS_CMD(0xE9, 0xCC),
_INIT_DCS_CMD(0xC7, 0x80),
_INIT_DCS_CMD(0xE9, 0x3F),
_INIT_DCS_CMD(0xE9, 0xC6),
_INIT_DCS_CMD(0xC8, 0x97),
_INIT_DCS_CMD(0xE9, 0x3F),
_INIT_DCS_CMD(0xC9, 0x00, 0x1E, 0x13, 0x88, 0x01),
_INIT_DCS_CMD(0xCB, 0x08, 0x13, 0x07, 0x00, 0x0F, 0x33),
_INIT_DCS_CMD(0xCC, 0x02),
_INIT_DCS_CMD(0xE9, 0xC4),
_INIT_DCS_CMD(0xD0, 0x03),
_INIT_DCS_CMD(0xE9, 0x3F),
_INIT_DCS_CMD(0xD1, 0x37, 0x06, 0x00, 0x02, 0x04, 0x0C, 0xFF),
_INIT_DCS_CMD(0xD2, 0x1F, 0x11, 0x1F),
_INIT_DCS_CMD(0xD3, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x08, 0x37, 0x47, 0x34, 0x3B, 0x12, 0x12, 0x03,
0x03, 0x32, 0x10, 0x10, 0x00, 0x10, 0x32, 0x10, 0x08, 0x00, 0x08, 0x32, 0x17, 0x94, 0x07, 0x94, 0x00, 0x00),
_INIT_DCS_CMD(0xD5, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x19, 0x19, 0x40, 0x40, 0x1A, 0x1A,
0x1B, 0x1B, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x20, 0x21, 0x28, 0x29, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18),
_INIT_DCS_CMD(0xD6, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x40, 0x40, 0x19, 0x19, 0x1A, 0x1A,
0x1B, 0x1B, 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01, 0x00, 0x29, 0x28, 0x21, 0x20, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18),
_INIT_DCS_CMD(0xD8, 0xAA, 0xBA, 0xEA, 0xAA, 0xAA, 0xA0, 0xAA, 0xBA, 0xEA, 0xAA, 0xAA, 0xA0, 0xAA, 0xBA, 0xEA, 0xAA,
0xAA, 0xA0, 0xAA, 0xBA, 0xEA, 0xAA, 0xAA, 0xA0, 0xAA, 0xBA, 0xEA, 0xAA, 0xAA, 0xA0, 0xAA, 0xBA, 0xEA, 0xAA, 0xAA, 0xA0),
_INIT_DCS_CMD(0xE0, 0x00, 0x09, 0x14, 0x1E, 0x26, 0x48, 0x61, 0x67, 0x6C, 0x67, 0x7D, 0x7F, 0x80, 0x8B, 0x87, 0x8F, 0x98, 0xAB,
0xAB, 0x55, 0x5C, 0x68, 0x73, 0x00, 0x09, 0x14, 0x1E, 0x26, 0x48, 0x61, 0x67, 0x6C, 0x67, 0x7D, 0x7F, 0x80, 0x8B, 0x87, 0x8F, 0x98, 0xAB, 0xAB, 0x55, 0x5C, 0x68, 0x73),
_INIT_DCS_CMD(0xE7, 0x0E, 0x10, 0x10, 0x21, 0x2B, 0x9A, 0x02, 0x54, 0x9A, 0x14, 0x14, 0x00, 0x00, 0x00, 0x00, 0x12, 0x05, 0x02, 0x02, 0x10),
_INIT_DCS_CMD(0xBD, 0x01),
_INIT_DCS_CMD(0xB1, 0x01, 0xBF, 0x11),
_INIT_DCS_CMD(0xCB, 0x86),
_INIT_DCS_CMD(0xD2, 0x3C, 0xFA),
_INIT_DCS_CMD(0xE9, 0xC5),
_INIT_DCS_CMD(0xD3, 0x00, 0x00, 0x00, 0x00, 0x80, 0x0C, 0x01),
_INIT_DCS_CMD(0xE9, 0x3F),
_INIT_DCS_CMD(0xE7, 0x02, 0x00, 0x28, 0x01, 0x7E, 0x0F, 0x7E, 0x10, 0xA0, 0x00, 0x00, 0x20, 0x40, 0x50, 0x40),
_INIT_DCS_CMD(0xBD, 0x02),
_INIT_DCS_CMD(0xD8, 0xFF, 0xFF, 0xBF, 0xFE, 0xAA, 0xA0, 0xFF, 0xFF, 0xBF, 0xFE, 0xAA, 0xA0),
_INIT_DCS_CMD(0xE7, 0xFE, 0x04, 0xFE, 0x04, 0xFE, 0x04, 0x03, 0x03, 0x03, 0x26, 0x00, 0x26, 0x81, 0x02, 0x40, 0x00, 0x20, 0x9E, 0x04, 0x03, 0x02, 0x01, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00),
_INIT_DCS_CMD(0xBD, 0x03),
_INIT_DCS_CMD(0xE9, 0xC6),
_INIT_DCS_CMD(0xB4, 0x03, 0xFF, 0xF8),
_INIT_DCS_CMD(0xE9, 0x3F),
_INIT_DCS_CMD(0xD8, 0x00, 0x2A, 0xAA, 0xA8, 0x00, 0x00, 0x00, 0x2A, 0xAA, 0xA8, 0x00, 0x00, 0x00, 0x3F, 0xFF, 0xFC, 0x00, 0x00, 0x00, 0x3F, 0xFF, 0xFC, 0x00, 0x00, 0x00, 0x2A, 0xAA, 0xA8,
0x00, 0x00, 0x00, 0x2A, 0xAA, 0xA8, 0x00, 0x00),
_INIT_DCS_CMD(0xBD, 0x00),
_INIT_DCS_CMD(0xE9, 0xC4),
_INIT_DCS_CMD(0xBA, 0x96),
_INIT_DCS_CMD(0xE9, 0x3F),
_INIT_DCS_CMD(0xBD, 0x01),
_INIT_DCS_CMD(0xE9, 0xC5),
_INIT_DCS_CMD(0xBA, 0x4F),
_INIT_DCS_CMD(0xE9, 0x3F),
_INIT_DCS_CMD(0xBD, 0x00),
_INIT_DCS_CMD(0x11),
_INIT_DELAY_CMD(120),
_INIT_DCS_CMD(0x29),
{},
};
/*
 * Starry ILI9882T (Ilitek controller) vendor-provided init sequence,
 * ending with DCS exit-sleep (0x11) and display-on (0x29). The repeated
 * _INIT_DCS_CMD(0xFF, 0x98, 0x82, <n>) writes appear to select Ilitek
 * register pages, with the remaining writes addressing registers inside
 * the selected page — NOTE(review): page/register semantics are not
 * documented here; confirm against the ILI9882T datasheet before editing.
 * All values are opaque vendor tuning data.
 */
static const struct panel_init_cmd starry_ili9882t_init_cmd[] = {
_INIT_DELAY_CMD(5),
_INIT_DCS_CMD(0xFF, 0x98, 0x82, 0x01),
_INIT_DCS_CMD(0x00, 0x42),
_INIT_DCS_CMD(0x01, 0x11),
_INIT_DCS_CMD(0x02, 0x00),
_INIT_DCS_CMD(0x03, 0x00),
_INIT_DCS_CMD(0x04, 0x01),
_INIT_DCS_CMD(0x05, 0x11),
_INIT_DCS_CMD(0x06, 0x00),
_INIT_DCS_CMD(0x07, 0x00),
_INIT_DCS_CMD(0x08, 0x80),
_INIT_DCS_CMD(0x09, 0x81),
_INIT_DCS_CMD(0x0A, 0x71),
_INIT_DCS_CMD(0x0B, 0x00),
_INIT_DCS_CMD(0x0C, 0x00),
_INIT_DCS_CMD(0x0E, 0x1A),
_INIT_DCS_CMD(0x24, 0x00),
_INIT_DCS_CMD(0x25, 0x00),
_INIT_DCS_CMD(0x26, 0x00),
_INIT_DCS_CMD(0x27, 0x00),
_INIT_DCS_CMD(0x2C, 0xD4),
_INIT_DCS_CMD(0xB9, 0x40),
_INIT_DCS_CMD(0xB0, 0x11),
_INIT_DCS_CMD(0xE6, 0x32),
_INIT_DCS_CMD(0xD1, 0x30),
_INIT_DCS_CMD(0xD6, 0x55),
_INIT_DCS_CMD(0xD0, 0x01),
_INIT_DCS_CMD(0xE3, 0x93),
_INIT_DCS_CMD(0xE4, 0x00),
_INIT_DCS_CMD(0xE5, 0x80),
_INIT_DCS_CMD(0x31, 0x07),
_INIT_DCS_CMD(0x32, 0x07),
_INIT_DCS_CMD(0x33, 0x07),
_INIT_DCS_CMD(0x34, 0x07),
_INIT_DCS_CMD(0x35, 0x07),
_INIT_DCS_CMD(0x36, 0x01),
_INIT_DCS_CMD(0x37, 0x00),
_INIT_DCS_CMD(0x38, 0x28),
_INIT_DCS_CMD(0x39, 0x29),
_INIT_DCS_CMD(0x3A, 0x11),
_INIT_DCS_CMD(0x3B, 0x13),
_INIT_DCS_CMD(0x3C, 0x15),
_INIT_DCS_CMD(0x3D, 0x17),
_INIT_DCS_CMD(0x3E, 0x09),
_INIT_DCS_CMD(0x3F, 0x0D),
_INIT_DCS_CMD(0x40, 0x02),
_INIT_DCS_CMD(0x41, 0x02),
_INIT_DCS_CMD(0x42, 0x02),
_INIT_DCS_CMD(0x43, 0x02),
_INIT_DCS_CMD(0x44, 0x02),
_INIT_DCS_CMD(0x45, 0x02),
_INIT_DCS_CMD(0x46, 0x02),
_INIT_DCS_CMD(0x47, 0x07),
_INIT_DCS_CMD(0x48, 0x07),
_INIT_DCS_CMD(0x49, 0x07),
_INIT_DCS_CMD(0x4A, 0x07),
_INIT_DCS_CMD(0x4B, 0x07),
_INIT_DCS_CMD(0x4C, 0x01),
_INIT_DCS_CMD(0x4D, 0x00),
_INIT_DCS_CMD(0x4E, 0x28),
_INIT_DCS_CMD(0x4F, 0x29),
_INIT_DCS_CMD(0x50, 0x10),
_INIT_DCS_CMD(0x51, 0x12),
_INIT_DCS_CMD(0x52, 0x14),
_INIT_DCS_CMD(0x53, 0x16),
_INIT_DCS_CMD(0x54, 0x08),
_INIT_DCS_CMD(0x55, 0x0C),
_INIT_DCS_CMD(0x56, 0x02),
_INIT_DCS_CMD(0x57, 0x02),
_INIT_DCS_CMD(0x58, 0x02),
_INIT_DCS_CMD(0x59, 0x02),
_INIT_DCS_CMD(0x5A, 0x02),
_INIT_DCS_CMD(0x5B, 0x02),
_INIT_DCS_CMD(0x5C, 0x02),
_INIT_DCS_CMD(0x61, 0x07),
_INIT_DCS_CMD(0x62, 0x07),
_INIT_DCS_CMD(0x63, 0x07),
_INIT_DCS_CMD(0x64, 0x07),
_INIT_DCS_CMD(0x65, 0x07),
_INIT_DCS_CMD(0x66, 0x01),
_INIT_DCS_CMD(0x67, 0x00),
_INIT_DCS_CMD(0x68, 0x28),
_INIT_DCS_CMD(0x69, 0x29),
_INIT_DCS_CMD(0x6A, 0x16),
_INIT_DCS_CMD(0x6B, 0x14),
_INIT_DCS_CMD(0x6C, 0x12),
_INIT_DCS_CMD(0x6D, 0x10),
_INIT_DCS_CMD(0x6E, 0x0C),
_INIT_DCS_CMD(0x6F, 0x08),
_INIT_DCS_CMD(0x70, 0x02),
_INIT_DCS_CMD(0x71, 0x02),
_INIT_DCS_CMD(0x72, 0x02),
_INIT_DCS_CMD(0x73, 0x02),
_INIT_DCS_CMD(0x74, 0x02),
_INIT_DCS_CMD(0x75, 0x02),
_INIT_DCS_CMD(0x76, 0x02),
_INIT_DCS_CMD(0x77, 0x07),
_INIT_DCS_CMD(0x78, 0x07),
_INIT_DCS_CMD(0x79, 0x07),
_INIT_DCS_CMD(0x7A, 0x07),
_INIT_DCS_CMD(0x7B, 0x07),
_INIT_DCS_CMD(0x7C, 0x01),
_INIT_DCS_CMD(0x7D, 0x00),
_INIT_DCS_CMD(0x7E, 0x28),
_INIT_DCS_CMD(0x7F, 0x29),
_INIT_DCS_CMD(0x80, 0x17),
_INIT_DCS_CMD(0x81, 0x15),
_INIT_DCS_CMD(0x82, 0x13),
_INIT_DCS_CMD(0x83, 0x11),
_INIT_DCS_CMD(0x84, 0x0D),
_INIT_DCS_CMD(0x85, 0x09),
_INIT_DCS_CMD(0x86, 0x02),
_INIT_DCS_CMD(0x87, 0x07),
_INIT_DCS_CMD(0x88, 0x07),
_INIT_DCS_CMD(0x89, 0x07),
_INIT_DCS_CMD(0x8A, 0x07),
_INIT_DCS_CMD(0x8B, 0x07),
_INIT_DCS_CMD(0x8C, 0x07),
_INIT_DCS_CMD(0xFF, 0x98, 0x82, 0x02),
_INIT_DCS_CMD(0x29, 0x3A),
_INIT_DCS_CMD(0x2A, 0x3B),
_INIT_DCS_CMD(0x06, 0x01),
_INIT_DCS_CMD(0x07, 0x01),
_INIT_DCS_CMD(0x08, 0x0C),
_INIT_DCS_CMD(0x09, 0x44),
_INIT_DCS_CMD(0x3C, 0x0A),
_INIT_DCS_CMD(0x39, 0x11),
_INIT_DCS_CMD(0x3D, 0x00),
_INIT_DCS_CMD(0x3A, 0x0C),
_INIT_DCS_CMD(0x3B, 0x44),
_INIT_DCS_CMD(0x53, 0x1F),
_INIT_DCS_CMD(0x5E, 0x40),
_INIT_DCS_CMD(0x84, 0x00),
_INIT_DCS_CMD(0xFF, 0x98, 0x82, 0x03),
_INIT_DCS_CMD(0x20, 0x01),
_INIT_DCS_CMD(0x21, 0x3C),
_INIT_DCS_CMD(0x22, 0xFA),
_INIT_DCS_CMD(0xFF, 0x98, 0x82, 0x0A),
_INIT_DCS_CMD(0xE0, 0x01),
_INIT_DCS_CMD(0xE2, 0x01),
_INIT_DCS_CMD(0xE5, 0x91),
_INIT_DCS_CMD(0xE6, 0x3C),
_INIT_DCS_CMD(0xE7, 0x00),
_INIT_DCS_CMD(0xE8, 0xFA),
_INIT_DCS_CMD(0xFF, 0x98, 0x82, 0x12),
_INIT_DCS_CMD(0x87, 0x2C),
_INIT_DCS_CMD(0xFF, 0x98, 0x82, 0x05),
_INIT_DCS_CMD(0x73, 0xE5),
_INIT_DCS_CMD(0x7F, 0x6B),
_INIT_DCS_CMD(0x6D, 0xA4),
_INIT_DCS_CMD(0x79, 0x54),
_INIT_DCS_CMD(0x69, 0x97),
_INIT_DCS_CMD(0x6A, 0x97),
_INIT_DCS_CMD(0xA5, 0x3F),
_INIT_DCS_CMD(0x61, 0xDA),
_INIT_DCS_CMD(0xA7, 0xF1),
_INIT_DCS_CMD(0x5F, 0x01),
_INIT_DCS_CMD(0x62, 0x3F),
_INIT_DCS_CMD(0x1D, 0x90),
_INIT_DCS_CMD(0x86, 0x87),
_INIT_DCS_CMD(0xFF, 0x98, 0x82, 0x06),
_INIT_DCS_CMD(0xC0, 0x80),
_INIT_DCS_CMD(0xC1, 0x07),
_INIT_DCS_CMD(0xCA, 0x58),
_INIT_DCS_CMD(0xCB, 0x02),
_INIT_DCS_CMD(0xCE, 0x58),
_INIT_DCS_CMD(0xCF, 0x02),
_INIT_DCS_CMD(0x67, 0x60),
_INIT_DCS_CMD(0x10, 0x00),
_INIT_DCS_CMD(0x92, 0x22),
_INIT_DCS_CMD(0xD3, 0x08),
_INIT_DCS_CMD(0xD6, 0x55),
_INIT_DCS_CMD(0xDC, 0x38),
_INIT_DCS_CMD(0xFF, 0x98, 0x82, 0x08),
_INIT_DCS_CMD(0xE0, 0x00, 0x10, 0x2A, 0x4D, 0x61, 0x56, 0x6A, 0x6E, 0x79, 0x76, 0x8F, 0x95, 0x98, 0xAE, 0xAA, 0xB2, 0xBB, 0xCE, 0xC6, 0xBD, 0xD5, 0xE2, 0xE8),
_INIT_DCS_CMD(0xE1, 0x00, 0x10, 0x2A, 0x4D, 0x61, 0x56, 0x6A, 0x6E, 0x79, 0x76, 0x8F, 0x95, 0x98, 0xAE, 0xAA, 0xB2, 0xBB, 0xCE, 0xC6, 0xBD, 0xD5, 0xE2, 0xE8),
_INIT_DCS_CMD(0xFF, 0x98, 0x82, 0x04),
_INIT_DCS_CMD(0xBA, 0x81),
_INIT_DCS_CMD(0xFF, 0x98, 0x82, 0x0C),
_INIT_DCS_CMD(0x00, 0x02),
_INIT_DCS_CMD(0x01, 0x00),
_INIT_DCS_CMD(0x02, 0x03),
_INIT_DCS_CMD(0x03, 0x01),
_INIT_DCS_CMD(0x04, 0x03),
_INIT_DCS_CMD(0x05, 0x02),
_INIT_DCS_CMD(0x06, 0x04),
_INIT_DCS_CMD(0x07, 0x03),
_INIT_DCS_CMD(0x08, 0x03),
_INIT_DCS_CMD(0x09, 0x04),
_INIT_DCS_CMD(0x0A, 0x04),
_INIT_DCS_CMD(0x0B, 0x05),
_INIT_DCS_CMD(0x0C, 0x04),
_INIT_DCS_CMD(0x0D, 0x06),
_INIT_DCS_CMD(0x0E, 0x05),
_INIT_DCS_CMD(0x0F, 0x07),
_INIT_DCS_CMD(0x10, 0x04),
_INIT_DCS_CMD(0x11, 0x08),
_INIT_DCS_CMD(0x12, 0x05),
_INIT_DCS_CMD(0x13, 0x09),
_INIT_DCS_CMD(0x14, 0x05),
_INIT_DCS_CMD(0x15, 0x0A),
_INIT_DCS_CMD(0x16, 0x06),
_INIT_DCS_CMD(0x17, 0x0B),
_INIT_DCS_CMD(0x18, 0x05),
_INIT_DCS_CMD(0x19, 0x0C),
_INIT_DCS_CMD(0x1A, 0x06),
_INIT_DCS_CMD(0x1B, 0x0D),
_INIT_DCS_CMD(0x1C, 0x06),
_INIT_DCS_CMD(0x1D, 0x0E),
_INIT_DCS_CMD(0x1E, 0x07),
_INIT_DCS_CMD(0x1F, 0x0F),
_INIT_DCS_CMD(0x20, 0x06),
_INIT_DCS_CMD(0x21, 0x10),
_INIT_DCS_CMD(0x22, 0x07),
_INIT_DCS_CMD(0x23, 0x11),
_INIT_DCS_CMD(0x24, 0x07),
_INIT_DCS_CMD(0x25, 0x12),
_INIT_DCS_CMD(0x26, 0x08),
_INIT_DCS_CMD(0x27, 0x13),
_INIT_DCS_CMD(0x28, 0x07),
_INIT_DCS_CMD(0x29, 0x14),
_INIT_DCS_CMD(0x2A, 0x08),
_INIT_DCS_CMD(0x2B, 0x15),
_INIT_DCS_CMD(0x2C, 0x08),
_INIT_DCS_CMD(0x2D, 0x16),
_INIT_DCS_CMD(0x2E, 0x09),
_INIT_DCS_CMD(0x2F, 0x17),
_INIT_DCS_CMD(0x30, 0x08),
_INIT_DCS_CMD(0x31, 0x18),
_INIT_DCS_CMD(0x32, 0x09),
_INIT_DCS_CMD(0x33, 0x19),
_INIT_DCS_CMD(0x34, 0x09),
_INIT_DCS_CMD(0x35, 0x1A),
_INIT_DCS_CMD(0x36, 0x0A),
_INIT_DCS_CMD(0x37, 0x1B),
_INIT_DCS_CMD(0x38, 0x0A),
_INIT_DCS_CMD(0x39, 0x1C),
_INIT_DCS_CMD(0x3A, 0x0A),
_INIT_DCS_CMD(0x3B, 0x1D),
_INIT_DCS_CMD(0x3C, 0x0A),
_INIT_DCS_CMD(0x3D, 0x1E),
_INIT_DCS_CMD(0x3E, 0x0A),
_INIT_DCS_CMD(0x3F, 0x1F),
_INIT_DCS_CMD(0xFF, 0x98, 0x82, 0x04),
_INIT_DCS_CMD(0xBA, 0x01),
_INIT_DCS_CMD(0xFF, 0x98, 0x82, 0x0E),
_INIT_DCS_CMD(0x02, 0x0C),
_INIT_DCS_CMD(0x20, 0x10),
_INIT_DCS_CMD(0x25, 0x16),
_INIT_DCS_CMD(0x26, 0xE0),
_INIT_DCS_CMD(0x27, 0x00),
_INIT_DCS_CMD(0x29, 0x71),
_INIT_DCS_CMD(0x2A, 0x46),
_INIT_DCS_CMD(0x2B, 0x1F),
_INIT_DCS_CMD(0x2D, 0xC7),
_INIT_DCS_CMD(0x31, 0x02),
_INIT_DCS_CMD(0x32, 0xDF),
_INIT_DCS_CMD(0x33, 0x5A),
_INIT_DCS_CMD(0x34, 0xC0),
_INIT_DCS_CMD(0x35, 0x5A),
_INIT_DCS_CMD(0x36, 0xC0),
_INIT_DCS_CMD(0x38, 0x65),
_INIT_DCS_CMD(0x80, 0x3E),
_INIT_DCS_CMD(0x81, 0xA0),
_INIT_DCS_CMD(0xB0, 0x01),
_INIT_DCS_CMD(0xB1, 0xCC),
_INIT_DCS_CMD(0xC0, 0x12),
_INIT_DCS_CMD(0xC2, 0xCC),
_INIT_DCS_CMD(0xC3, 0xCC),
_INIT_DCS_CMD(0xC4, 0xCC),
_INIT_DCS_CMD(0xC5, 0xCC),
_INIT_DCS_CMD(0xC6, 0xCC),
_INIT_DCS_CMD(0xC7, 0xCC),
_INIT_DCS_CMD(0xC8, 0xCC),
_INIT_DCS_CMD(0xC9, 0xCC),
_INIT_DCS_CMD(0x30, 0x00),
_INIT_DCS_CMD(0x00, 0x81),
_INIT_DCS_CMD(0x08, 0x02),
_INIT_DCS_CMD(0x09, 0x00),
_INIT_DCS_CMD(0x07, 0x21),
_INIT_DCS_CMD(0x04, 0x10),
_INIT_DCS_CMD(0xFF, 0x98, 0x82, 0x1E),
_INIT_DCS_CMD(0x60, 0x00),
_INIT_DCS_CMD(0x64, 0x00),
_INIT_DCS_CMD(0x6D, 0x00),
_INIT_DCS_CMD(0xFF, 0x98, 0x82, 0x0B),
_INIT_DCS_CMD(0xA6, 0x44),
_INIT_DCS_CMD(0xA7, 0xB6),
_INIT_DCS_CMD(0xA8, 0x03),
_INIT_DCS_CMD(0xA9, 0x03),
_INIT_DCS_CMD(0xAA, 0x51),
_INIT_DCS_CMD(0xAB, 0x51),
_INIT_DCS_CMD(0xAC, 0x04),
_INIT_DCS_CMD(0xBD, 0x92),
_INIT_DCS_CMD(0xBE, 0xA1),
_INIT_DCS_CMD(0xFF, 0x98, 0x82, 0x05),
_INIT_DCS_CMD(0x86, 0x87),
_INIT_DCS_CMD(0xFF, 0x98, 0x82, 0x06),
_INIT_DCS_CMD(0x92, 0x22),
_INIT_DCS_CMD(0xFF, 0x98, 0x82, 0x00),
_INIT_DCS_CMD(0x11),
_INIT_DELAY_CMD(120),
_INIT_DCS_CMD(0x29),
_INIT_DELAY_CMD(20),
{},
};
/* Convert the embedded drm_panel back to its containing boe_panel. */
static inline struct boe_panel *to_boe_panel(struct drm_panel *panel)
{
return container_of(panel, struct boe_panel, base);
}
/*
 * Replay the panel's init table over DSI.
 *
 * DELAY_CMD entries turn into msleep() calls, INIT_DCS_CMD entries into
 * raw DCS writes (first byte is the command, the rest the payload).
 * Returns 0 on success or the first negative error from the DSI write.
 */
static int boe_panel_init_dcs_cmd(struct boe_panel *boe)
{
	struct mipi_dsi_device *dsi = boe->dsi;
	struct drm_panel *panel = &boe->base;
	const struct panel_init_cmd *cmds = boe->desc->init_cmds;
	int idx;

	if (!cmds)
		return 0;

	/* The table is terminated by an empty (len == 0) sentinel entry. */
	for (idx = 0; cmds[idx].len != 0; idx++) {
		const struct panel_init_cmd *cmd = &cmds[idx];
		int ret;

		switch (cmd->type) {
		case DELAY_CMD:
			msleep(cmd->data[0]);
			ret = 0;
			break;
		case INIT_DCS_CMD:
			/* data[0] is the DCS command byte, data[1..] the payload. */
			ret = mipi_dsi_dcs_write(dsi, cmd->data[0],
						 cmd->len <= 1 ? NULL :
						 &cmd->data[1],
						 cmd->len - 1);
			break;
		default:
			ret = -EINVAL;
		}

		if (ret < 0) {
			dev_err(panel->dev,
				"failed to write command %u\n", idx);
			return ret;
		}
	}

	return 0;
}
/*
 * Put the panel into sleep: display off followed by enter-sleep-mode.
 * Clears MIPI_DSI_MODE_LPM first so these DCS commands are not sent in
 * low-power mode.
 */
static int boe_panel_enter_sleep_mode(struct boe_panel *boe)
{
	struct mipi_dsi_device *dsi = boe->dsi;
	int err;

	dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;

	err = mipi_dsi_dcs_set_display_off(dsi);
	if (err < 0)
		return err;

	err = mipi_dsi_dcs_enter_sleep_mode(dsi);
	if (err < 0)
		return err;

	return 0;
}
/*
 * drm_panel .disable hook: enter sleep mode and allow the mandated
 * 150 ms settle time before power is removed.
 */
static int boe_panel_disable(struct drm_panel *panel)
{
	struct boe_panel *boe = to_boe_panel(panel);
	int err = boe_panel_enter_sleep_mode(boe);

	if (err < 0) {
		dev_err(panel->dev, "failed to set panel off: %d\n", err);
		return err;
	}

	msleep(150);

	return 0;
}
/*
 * drm_panel .unprepare hook: power the panel down.
 *
 * Panels with desc->discharge_on_disable drop the analog rails before
 * de-asserting the enable GPIO; the others de-assert the GPIO first.
 * Both variants then release pp1800 and pp3300, so that common tail is
 * shared below. No-op if the panel was never prepared.
 */
static int boe_panel_unprepare(struct drm_panel *panel)
{
	struct boe_panel *boe = to_boe_panel(panel);

	if (!boe->prepared)
		return 0;

	if (boe->desc->discharge_on_disable) {
		regulator_disable(boe->avee);
		regulator_disable(boe->avdd);
		usleep_range(5000, 7000);
		gpiod_set_value(boe->enable_gpio, 0);
		usleep_range(5000, 7000);
	} else {
		gpiod_set_value(boe->enable_gpio, 0);
		usleep_range(1000, 2000);
		regulator_disable(boe->avee);
		regulator_disable(boe->avdd);
		usleep_range(5000, 7000);
	}

	regulator_disable(boe->pp1800);
	regulator_disable(boe->pp3300);

	boe->prepared = false;

	return 0;
}
/*
 * drm_panel .prepare hook: power-rail bring-up, reset pulse and init
 * sequence.
 *
 * Fix: the original error paths leaked the pp3300 regulator enable —
 * a pp1800 enable failure returned immediately, and the poweroff1v8
 * label never disabled pp3300, leaving the regulator refcount
 * unbalanced. A poweroff3v3 label now closes both holes.
 */
static int boe_panel_prepare(struct drm_panel *panel)
{
	struct boe_panel *boe = to_boe_panel(panel);
	int ret;

	if (boe->prepared)
		return 0;

	gpiod_set_value(boe->enable_gpio, 0);
	usleep_range(1000, 1500);

	ret = regulator_enable(boe->pp3300);
	if (ret < 0)
		return ret;

	ret = regulator_enable(boe->pp1800);
	if (ret < 0)
		goto poweroff3v3;

	usleep_range(3000, 5000);

	ret = regulator_enable(boe->avdd);
	if (ret < 0)
		goto poweroff1v8;

	ret = regulator_enable(boe->avee);
	if (ret < 0)
		goto poweroffavdd;

	usleep_range(10000, 11000);

	if (boe->desc->lp11_before_reset) {
		/* Panels that need LP-11 before reset get a NOP to settle the link. */
		mipi_dsi_dcs_nop(boe->dsi);
		usleep_range(1000, 2000);
	}

	/* Toggle the enable/reset line: high, low, then high again. */
	gpiod_set_value(boe->enable_gpio, 1);
	usleep_range(1000, 2000);
	gpiod_set_value(boe->enable_gpio, 0);
	usleep_range(1000, 2000);
	gpiod_set_value(boe->enable_gpio, 1);
	usleep_range(6000, 10000);

	ret = boe_panel_init_dcs_cmd(boe);
	if (ret < 0) {
		dev_err(panel->dev, "failed to init panel: %d\n", ret);
		goto poweroff;
	}

	boe->prepared = true;

	return 0;

poweroff:
	regulator_disable(boe->avee);
poweroffavdd:
	regulator_disable(boe->avdd);
poweroff1v8:
	usleep_range(5000, 7000);
	regulator_disable(boe->pp1800);
poweroff3v3:
	regulator_disable(boe->pp3300);
	gpiod_set_value(boe->enable_gpio, 0);

	return ret;
}
/*
 * drm_panel .enable hook: nothing to program, just wait 130 ms after the
 * init sequence before declaring the panel enabled.
 */
static int boe_panel_enable(struct drm_panel *panel)
{
msleep(130);
return 0;
}
/*
 * BOE TV110C9M-LL3: 1200x2000 timing (pixel clock in kHz; h/v values are
 * active + front-porch + sync + back-porch) and panel description.
 */
static const struct drm_display_mode boe_tv110c9m_default_mode = {
.clock = 166594,
.hdisplay = 1200,
.hsync_start = 1200 + 40,
.hsync_end = 1200 + 40 + 8,
.htotal = 1200 + 40 + 8 + 28,
.vdisplay = 2000,
.vsync_start = 2000 + 26,
.vsync_end = 2000 + 26 + 2,
.vtotal = 2000 + 26 + 2 + 148,
.type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
};
static const struct panel_desc boe_tv110c9m_desc = {
.modes = &boe_tv110c9m_default_mode,
.bpc = 8,
.size = {
.width_mm = 143,
.height_mm = 238,
},
.lanes = 4,
.format = MIPI_DSI_FMT_RGB888,
.mode_flags = MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_VIDEO
| MIPI_DSI_MODE_VIDEO_HSE
| MIPI_DSI_CLOCK_NON_CONTINUOUS
| MIPI_DSI_MODE_VIDEO_BURST,
.init_cmds = boe_tv110c9m_init_cmd,
};
/* Innolux HJ110IZ-01A: 1200x2000 timing and panel description. */
static const struct drm_display_mode inx_hj110iz_default_mode = {
.clock = 168432,
.hdisplay = 1200,
.hsync_start = 1200 + 40,
.hsync_end = 1200 + 40 + 8,
.htotal = 1200 + 40 + 8 + 28,
.vdisplay = 2000,
.vsync_start = 2000 + 26,
.vsync_end = 2000 + 26 + 2,
.vtotal = 2000 + 26 + 2 + 172,
.type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
};
static const struct panel_desc inx_hj110iz_desc = {
.modes = &inx_hj110iz_default_mode,
.bpc = 8,
.size = {
.width_mm = 143,
.height_mm = 238,
},
.lanes = 4,
.format = MIPI_DSI_FMT_RGB888,
.mode_flags = MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_VIDEO
| MIPI_DSI_MODE_VIDEO_HSE
| MIPI_DSI_CLOCK_NON_CONTINUOUS
| MIPI_DSI_MODE_VIDEO_BURST,
.init_cmds = inx_hj110iz_init_cmd,
};
/*
 * BOE TV101WUM-NL6: 1200x1920 timing and panel description. The mode's
 * .type is left unset here; boe_panel_get_modes() forces
 * DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED on the duplicate anyway.
 */
static const struct drm_display_mode boe_tv101wum_nl6_default_mode = {
.clock = 159425,
.hdisplay = 1200,
.hsync_start = 1200 + 100,
.hsync_end = 1200 + 100 + 40,
.htotal = 1200 + 100 + 40 + 24,
.vdisplay = 1920,
.vsync_start = 1920 + 10,
.vsync_end = 1920 + 10 + 14,
.vtotal = 1920 + 10 + 14 + 4,
};
static const struct panel_desc boe_tv101wum_nl6_desc = {
.modes = &boe_tv101wum_nl6_default_mode,
.bpc = 8,
.size = {
.width_mm = 135,
.height_mm = 216,
},
.lanes = 4,
.format = MIPI_DSI_FMT_RGB888,
.mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
MIPI_DSI_MODE_LPM,
.init_cmds = boe_init_cmd,
.discharge_on_disable = false,
};
/*
 * AUO KD101N80-45NA: 1200x1920 timing and panel description. This panel
 * discharges its rails before the enable GPIO is dropped (see
 * boe_panel_unprepare()).
 */
static const struct drm_display_mode auo_kd101n80_45na_default_mode = {
.clock = 157000,
.hdisplay = 1200,
.hsync_start = 1200 + 60,
.hsync_end = 1200 + 60 + 24,
.htotal = 1200 + 60 + 24 + 56,
.vdisplay = 1920,
.vsync_start = 1920 + 16,
.vsync_end = 1920 + 16 + 4,
.vtotal = 1920 + 16 + 4 + 16,
};
static const struct panel_desc auo_kd101n80_45na_desc = {
.modes = &auo_kd101n80_45na_default_mode,
.bpc = 8,
.size = {
.width_mm = 135,
.height_mm = 216,
},
.lanes = 4,
.format = MIPI_DSI_FMT_RGB888,
.mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
MIPI_DSI_MODE_LPM,
.init_cmds = auo_kd101n80_45na_init_cmd,
.discharge_on_disable = true,
};
/*
 * BOE TV101WUM-N53: 1200x1920 timing and panel description (shares the
 * boe_init_cmd sequence with the NL6 variant).
 */
static const struct drm_display_mode boe_tv101wum_n53_default_mode = {
.clock = 159916,
.hdisplay = 1200,
.hsync_start = 1200 + 80,
.hsync_end = 1200 + 80 + 24,
.htotal = 1200 + 80 + 24 + 60,
.vdisplay = 1920,
.vsync_start = 1920 + 20,
.vsync_end = 1920 + 20 + 4,
.vtotal = 1920 + 20 + 4 + 10,
.type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
};
static const struct panel_desc boe_tv101wum_n53_desc = {
.modes = &boe_tv101wum_n53_default_mode,
.bpc = 8,
.size = {
.width_mm = 135,
.height_mm = 216,
},
.lanes = 4,
.format = MIPI_DSI_FMT_RGB888,
.mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
MIPI_DSI_MODE_LPM,
.init_cmds = boe_init_cmd,
};
/* AUO B101UAN08.3: 1200x1920 timing and panel description. */
static const struct drm_display_mode auo_b101uan08_3_default_mode = {
.clock = 159667,
.hdisplay = 1200,
.hsync_start = 1200 + 60,
.hsync_end = 1200 + 60 + 4,
.htotal = 1200 + 60 + 4 + 80,
.vdisplay = 1920,
.vsync_start = 1920 + 34,
.vsync_end = 1920 + 34 + 2,
.vtotal = 1920 + 34 + 2 + 24,
.type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
};
static const struct panel_desc auo_b101uan08_3_desc = {
.modes = &auo_b101uan08_3_default_mode,
.bpc = 8,
.size = {
.width_mm = 135,
.height_mm = 216,
},
.lanes = 4,
.format = MIPI_DSI_FMT_RGB888,
.mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
MIPI_DSI_MODE_LPM,
.init_cmds = auo_b101uan08_3_init_cmd,
};
/*
 * BOE TV105WUM-NW0: 1200x1920 timing and panel description. Requires the
 * DSI link in LP-11 before the reset pulse (lp11_before_reset, handled in
 * boe_panel_prepare()).
 */
static const struct drm_display_mode boe_tv105wum_nw0_default_mode = {
.clock = 159916,
.hdisplay = 1200,
.hsync_start = 1200 + 80,
.hsync_end = 1200 + 80 + 24,
.htotal = 1200 + 80 + 24 + 60,
.vdisplay = 1920,
.vsync_start = 1920 + 20,
.vsync_end = 1920 + 20 + 4,
.vtotal = 1920 + 20 + 4 + 10,
.type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
};
static const struct panel_desc boe_tv105wum_nw0_desc = {
.modes = &boe_tv105wum_nw0_default_mode,
.bpc = 8,
.size = {
.width_mm = 141,
.height_mm = 226,
},
.lanes = 4,
.format = MIPI_DSI_FMT_RGB888,
.mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
MIPI_DSI_MODE_LPM,
.init_cmds = boe_init_cmd,
.lp11_before_reset = true,
};
/* Starry 2081101QFH032011-53G: 1200x1920 timing and panel description. */
static const struct drm_display_mode starry_qfh032011_53g_default_mode = {
.clock = 165731,
.hdisplay = 1200,
.hsync_start = 1200 + 100,
.hsync_end = 1200 + 100 + 10,
.htotal = 1200 + 100 + 10 + 100,
.vdisplay = 1920,
.vsync_start = 1920 + 14,
.vsync_end = 1920 + 14 + 10,
.vtotal = 1920 + 14 + 10 + 15,
};
static const struct panel_desc starry_qfh032011_53g_desc = {
.modes = &starry_qfh032011_53g_default_mode,
.bpc = 8,
.size = {
.width_mm = 135,
.height_mm = 216,
},
.lanes = 4,
.format = MIPI_DSI_FMT_RGB888,
.mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
MIPI_DSI_MODE_LPM,
.init_cmds = starry_qfh032011_53g_init_cmd,
};
/*
 * Starry HX83102-J02: 1200x1920 timing and panel description. Requires
 * LP-11 before the reset pulse.
 */
static const struct drm_display_mode starry_himax83102_j02_default_mode = {
.clock = 161600,
.hdisplay = 1200,
.hsync_start = 1200 + 40,
.hsync_end = 1200 + 40 + 20,
.htotal = 1200 + 40 + 20 + 40,
.vdisplay = 1920,
.vsync_start = 1920 + 116,
.vsync_end = 1920 + 116 + 8,
.vtotal = 1920 + 116 + 8 + 12,
.type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
};
static const struct panel_desc starry_himax83102_j02_desc = {
.modes = &starry_himax83102_j02_default_mode,
.bpc = 8,
.size = {
.width_mm = 141,
.height_mm = 226,
},
.lanes = 4,
.format = MIPI_DSI_FMT_RGB888,
.mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
MIPI_DSI_MODE_LPM,
.init_cmds = starry_himax83102_j02_init_cmd,
.lp11_before_reset = true,
};
/*
 * Starry ILI9882T: 1200x1920 timing and panel description. Requires
 * LP-11 before the reset pulse.
 */
static const struct drm_display_mode starry_ili9882t_default_mode = {
.clock = 165280,
.hdisplay = 1200,
.hsync_start = 1200 + 72,
.hsync_end = 1200 + 72 + 30,
.htotal = 1200 + 72 + 30 + 72,
.vdisplay = 1920,
.vsync_start = 1920 + 68,
.vsync_end = 1920 + 68 + 2,
.vtotal = 1920 + 68 + 2 + 10,
.type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
};
static const struct panel_desc starry_ili9882t_desc = {
.modes = &starry_ili9882t_default_mode,
.bpc = 8,
.size = {
.width_mm = 141,
.height_mm = 226,
},
.lanes = 4,
.format = MIPI_DSI_FMT_RGB888,
.mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
MIPI_DSI_MODE_LPM,
.init_cmds = starry_ili9882t_init_cmd,
.lp11_before_reset = true,
};
/*
 * drm_panel .get_modes hook: duplicate the panel's single fixed mode onto
 * the connector, fill in physical size/bpc, and report one mode.
 */
static int boe_panel_get_modes(struct drm_panel *panel,
struct drm_connector *connector)
{
struct boe_panel *boe = to_boe_panel(panel);
const struct drm_display_mode *m = boe->desc->modes;
struct drm_display_mode *mode;
mode = drm_mode_duplicate(connector->dev, m);
if (!mode) {
dev_err(panel->dev, "failed to add mode %ux%u@%u\n",
m->hdisplay, m->vdisplay, drm_mode_vrefresh(m));
return -ENOMEM;
}
/* Force the type: some descs leave .type unset on the template mode. */
mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
drm_mode_set_name(mode);
drm_mode_probed_add(connector, mode);
connector->display_info.width_mm = boe->desc->size.width_mm;
connector->display_info.height_mm = boe->desc->size.height_mm;
connector->display_info.bpc = boe->desc->bpc;
/*
* TODO: Remove once all drm drivers call
* drm_connector_set_orientation_from_panel()
*/
drm_connector_set_panel_orientation(connector, boe->orientation);
return 1;
}
/* drm_panel .get_orientation hook: report the DT-provided orientation. */
static enum drm_panel_orientation boe_panel_get_orientation(struct drm_panel *panel)
{
struct boe_panel *boe = to_boe_panel(panel);
return boe->orientation;
}
/* drm_panel operations shared by all panels handled by this driver. */
static const struct drm_panel_funcs boe_panel_funcs = {
.disable = boe_panel_disable,
.unprepare = boe_panel_unprepare,
.prepare = boe_panel_prepare,
.enable = boe_panel_enable,
.get_modes = boe_panel_get_modes,
.get_orientation = boe_panel_get_orientation,
};
/*
 * Acquire regulators, the enable GPIO and backlight, then register the
 * drm_panel. All resources are devm-managed; returns 0 or a negative
 * errno.
 *
 * Fixes: the GPIO-lookup error message named "reset-gpios" while the
 * lookup is for "enable"; also dropped the redundant re-assignment of
 * boe->base.funcs/boe->base.dev, which drm_panel_init() already sets to
 * the same values.
 */
static int boe_panel_add(struct boe_panel *boe)
{
	struct device *dev = &boe->dsi->dev;
	int err;

	boe->avdd = devm_regulator_get(dev, "avdd");
	if (IS_ERR(boe->avdd))
		return PTR_ERR(boe->avdd);

	boe->avee = devm_regulator_get(dev, "avee");
	if (IS_ERR(boe->avee))
		return PTR_ERR(boe->avee);

	boe->pp3300 = devm_regulator_get(dev, "pp3300");
	if (IS_ERR(boe->pp3300))
		return PTR_ERR(boe->pp3300);

	boe->pp1800 = devm_regulator_get(dev, "pp1800");
	if (IS_ERR(boe->pp1800))
		return PTR_ERR(boe->pp1800);

	boe->enable_gpio = devm_gpiod_get(dev, "enable", GPIOD_OUT_LOW);
	if (IS_ERR(boe->enable_gpio)) {
		dev_err(dev, "cannot get enable-gpios %ld\n",
			PTR_ERR(boe->enable_gpio));
		return PTR_ERR(boe->enable_gpio);
	}

	gpiod_set_value(boe->enable_gpio, 0);

	drm_panel_init(&boe->base, dev, &boe_panel_funcs,
		       DRM_MODE_CONNECTOR_DSI);
	err = of_drm_get_panel_orientation(dev->of_node, &boe->orientation);
	if (err < 0) {
		dev_err(dev, "%pOF: failed to get orientation %d\n", dev->of_node, err);
		return err;
	}

	err = drm_panel_of_backlight(&boe->base);
	if (err)
		return err;

	drm_panel_add(&boe->base);

	return 0;
}
/*
 * DSI probe: allocate driver state, configure the DSI host from the
 * matched panel description, register the panel and attach to the bus.
 * On attach failure the just-registered panel is removed again.
 */
static int boe_panel_probe(struct mipi_dsi_device *dsi)
{
	const struct panel_desc *desc;
	struct boe_panel *boe;
	int err;

	boe = devm_kzalloc(&dsi->dev, sizeof(*boe), GFP_KERNEL);
	if (!boe)
		return -ENOMEM;

	/* Per-panel description selected via the of_match table. */
	desc = of_device_get_match_data(&dsi->dev);
	dsi->lanes = desc->lanes;
	dsi->format = desc->format;
	dsi->mode_flags = desc->mode_flags;
	boe->desc = desc;
	boe->dsi = dsi;

	err = boe_panel_add(boe);
	if (err < 0)
		return err;

	mipi_dsi_set_drvdata(dsi, boe);

	err = mipi_dsi_attach(dsi);
	if (err)
		drm_panel_remove(&boe->base);

	return err;
}
/* Power the panel down cleanly on system shutdown. */
static void boe_panel_shutdown(struct mipi_dsi_device *dsi)
{
struct boe_panel *boe = mipi_dsi_get_drvdata(dsi);
drm_panel_disable(&boe->base);
drm_panel_unprepare(&boe->base);
}
/*
 * Driver remove: power down, detach from the DSI host and unregister the
 * panel (only if it was registered, indicated by base.dev being set).
 */
static void boe_panel_remove(struct mipi_dsi_device *dsi)
{
struct boe_panel *boe = mipi_dsi_get_drvdata(dsi);
int ret;
boe_panel_shutdown(dsi);
ret = mipi_dsi_detach(dsi);
if (ret < 0)
dev_err(&dsi->dev, "failed to detach from DSI host: %d\n", ret);
if (boe->base.dev)
drm_panel_remove(&boe->base);
}
/* DT compatibles mapped to their per-panel descriptions. */
static const struct of_device_id boe_of_match[] = {
{ .compatible = "boe,tv101wum-nl6",
.data = &boe_tv101wum_nl6_desc
},
{ .compatible = "auo,kd101n80-45na",
.data = &auo_kd101n80_45na_desc
},
{ .compatible = "boe,tv101wum-n53",
.data = &boe_tv101wum_n53_desc
},
{ .compatible = "auo,b101uan08.3",
.data = &auo_b101uan08_3_desc
},
{ .compatible = "boe,tv105wum-nw0",
.data = &boe_tv105wum_nw0_desc
},
{ .compatible = "boe,tv110c9m-ll3",
.data = &boe_tv110c9m_desc
},
{ .compatible = "innolux,hj110iz-01a",
.data = &inx_hj110iz_desc
},
{ .compatible = "starry,2081101qfh032011-53g",
.data = &starry_qfh032011_53g_desc
},
{ .compatible = "starry,himax83102-j02",
.data = &starry_himax83102_j02_desc
},
{ .compatible = "starry,ili9882t",
.data = &starry_ili9882t_desc
},
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, boe_of_match);
/* MIPI-DSI driver registration. */
static struct mipi_dsi_driver boe_panel_driver = {
.driver = {
.name = "panel-boe-tv101wum-nl6",
.of_match_table = boe_of_match,
},
.probe = boe_panel_probe,
.remove = boe_panel_remove,
.shutdown = boe_panel_shutdown,
};
module_mipi_dsi_driver(boe_panel_driver);
MODULE_AUTHOR("Jitao Shi <[email protected]>");
MODULE_DESCRIPTION("BOE tv101wum-nl6 1200x1920 video mode panel driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* MIPI-DSI based s6e8aa0 AMOLED LCD 5.3 inch panel driver.
*
* Copyright (c) 2013 Samsung Electronics Co., Ltd
*
* Inki Dae, <[email protected]>
* Donghwa Lee, <[email protected]>
* Joongmock Shin <[email protected]>
* Eunchul Kim <[email protected]>
* Tomasz Figa <[email protected]>
* Andrzej Hajda <[email protected]>
*/
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/regulator/consumer.h>
#include <video/mipi_display.h>
#include <video/of_videomode.h>
#include <video/videomode.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_modes.h>
#include <drm/drm_panel.h>
#define LDI_MTP_LENGTH 24
#define GAMMA_LEVEL_NUM 25
#define GAMMA_TABLE_LEN 26
#define PANELCTL_SS_MASK (1 << 5)
#define PANELCTL_SS_1_800 (0 << 5)
#define PANELCTL_SS_800_1 (1 << 5)
#define PANELCTL_GTCON_MASK (7 << 2)
#define PANELCTL_GTCON_110 (6 << 2)
#define PANELCTL_GTCON_111 (7 << 2)
#define PANELCTL_CLK1_CON_MASK (7 << 3)
#define PANELCTL_CLK1_000 (0 << 3)
#define PANELCTL_CLK1_001 (1 << 3)
#define PANELCTL_CLK2_CON_MASK (7 << 0)
#define PANELCTL_CLK2_000 (0 << 0)
#define PANELCTL_CLK2_001 (1 << 0)
#define PANELCTL_INT1_CON_MASK (7 << 3)
#define PANELCTL_INT1_000 (0 << 3)
#define PANELCTL_INT1_001 (1 << 3)
#define PANELCTL_INT2_CON_MASK (7 << 0)
#define PANELCTL_INT2_000 (0 << 0)
#define PANELCTL_INT2_001 (1 << 0)
#define PANELCTL_BICTL_CON_MASK (7 << 3)
#define PANELCTL_BICTL_000 (0 << 3)
#define PANELCTL_BICTL_001 (1 << 3)
#define PANELCTL_BICTLB_CON_MASK (7 << 0)
#define PANELCTL_BICTLB_000 (0 << 0)
#define PANELCTL_BICTLB_001 (1 << 0)
#define PANELCTL_EM_CLK1_CON_MASK (7 << 3)
#define PANELCTL_EM_CLK1_110 (6 << 3)
#define PANELCTL_EM_CLK1_111 (7 << 3)
#define PANELCTL_EM_CLK1B_CON_MASK (7 << 0)
#define PANELCTL_EM_CLK1B_110 (6 << 0)
#define PANELCTL_EM_CLK1B_111 (7 << 0)
#define PANELCTL_EM_CLK2_CON_MASK (7 << 3)
#define PANELCTL_EM_CLK2_110 (6 << 3)
#define PANELCTL_EM_CLK2_111 (7 << 3)
#define PANELCTL_EM_CLK2B_CON_MASK (7 << 0)
#define PANELCTL_EM_CLK2B_110 (6 << 0)
#define PANELCTL_EM_CLK2B_111 (7 << 0)
#define PANELCTL_EM_INT1_CON_MASK (7 << 3)
#define PANELCTL_EM_INT1_000 (0 << 3)
#define PANELCTL_EM_INT1_001 (1 << 3)
#define PANELCTL_EM_INT2_CON_MASK (7 << 0)
#define PANELCTL_EM_INT2_000 (0 << 0)
#define PANELCTL_EM_INT2_001 (1 << 0)
#define AID_DISABLE (0x4)
#define AID_1 (0x5)
#define AID_2 (0x6)
#define AID_3 (0x7)
/* One complete gamma sequence: DCS command byte plus payload. */
typedef u8 s6e8aa0_gamma_table[GAMMA_TABLE_LEN];
/* Per-hardware-revision data, matched at runtime against the panel ID. */
struct s6e8aa0_variant {
	u8 version;		/* value of MTP ID byte 1 this entry matches */
	const s6e8aa0_gamma_table *gamma_tables; /* one table per brightness level */
};
/* Driver context for one panel instance. */
struct s6e8aa0 {
	struct device *dev;
	struct drm_panel panel;
	struct regulator_bulk_data supplies[2];	/* "vdd3" and "vci" */
	struct gpio_desc *reset_gpio;
	u32 power_on_delay;	/* ms, DT "power-on-delay" */
	u32 reset_delay;	/* ms, DT "reset-delay" */
	u32 init_delay;		/* ms, DT "init-delay" */
	bool flip_horizontal;	/* DT "flip-horizontal" */
	bool flip_vertical;	/* DT "flip-vertical" */
	struct videomode vm;	/* fixed mode parsed from DT */
	u32 width_mm;
	u32 height_mm;
	u8 version;		/* panel revision read back over DSI (MTP ID[1]) */
	u8 id;			/* MTP ID[2], feeds the init sequences */
	const struct s6e8aa0_variant *variant;	/* resolved from version */
	int brightness;		/* index into variant->gamma_tables */
	/* This field is tested by functions directly accessing DSI bus before
	 * transfer, transfer is skipped if it is set. In case of transfer
	 * failure or unexpected response the field is set to error value.
	 * Such construct allows to eliminate many checks in higher level
	 * functions.
	 */
	int error;
};
/* Map the generic drm_panel back to its enclosing driver context. */
static inline struct s6e8aa0 *panel_to_s6e8aa0(struct drm_panel *panel)
{
	return container_of(panel, struct s6e8aa0, panel);
}
static int s6e8aa0_clear_error(struct s6e8aa0 *ctx)
{
int ret = ctx->error;
ctx->error = 0;
return ret;
}
/*
 * Send a raw DCS write over the DSI link.  The call is skipped entirely
 * if a previous transfer already failed (ctx->error latched negative);
 * on failure the error is latched so subsequent calls become no-ops.
 */
static void s6e8aa0_dcs_write(struct s6e8aa0 *ctx, const void *data, size_t len)
{
	struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
	ssize_t ret;
	if (ctx->error < 0)
		return;
	ret = mipi_dsi_dcs_write_buffer(dsi, data, len);
	if (ret < 0) {
		/* %*ph dumps the whole sequence as hex for debugging */
		dev_err(ctx->dev, "error %zd writing dcs seq: %*ph\n", ret,
			(int)len, data);
		ctx->error = ret;
	}
}
/*
 * Read @len bytes of DCS register @cmd into @data.  Skipped (returning
 * the latched error) if a previous transfer failed; latches new errors.
 * Returns the mipi_dsi_dcs_read() result otherwise.
 */
static int s6e8aa0_dcs_read(struct s6e8aa0 *ctx, u8 cmd, void *data, size_t len)
{
	struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
	int ret;
	if (ctx->error < 0)
		return ctx->error;
	ret = mipi_dsi_dcs_read(dsi, cmd, data, len);
	if (ret < 0) {
		dev_err(ctx->dev, "error %d reading dcs seq(%#x)\n", ret, cmd);
		ctx->error = ret;
	}
	return ret;
}
/*
 * Send a short DCS sequence given as literal bytes.  The array lives on
 * the stack, hence the 64-byte compile-time guard.
 */
#define s6e8aa0_dcs_write_seq(ctx, seq...) \
({\
	const u8 d[] = { seq };\
	BUILD_BUG_ON_MSG(ARRAY_SIZE(d) > 64, "DCS sequence too big for stack");\
	s6e8aa0_dcs_write(ctx, d, ARRAY_SIZE(d));\
})
/* Same, but for constant sequences: the data lives in .rodata, no limit. */
#define s6e8aa0_dcs_write_seq_static(ctx, seq...) \
({\
	static const u8 d[] = { seq };\
	s6e8aa0_dcs_write(ctx, d, ARRAY_SIZE(d));\
})
/* Unlock the level-1 (manufacturer) command set (0xf0 0x5a 0x5a). */
static void s6e8aa0_apply_level_1_key(struct s6e8aa0 *ctx)
{
	s6e8aa0_dcs_write_seq_static(ctx, 0xf0, 0x5a, 0x5a);
}
/*
 * Program the panel condition set (0xf8) for v142+ panels.  Several
 * fields depend on the panel id and on whether the image is flipped;
 * when flipping, a number of clock/interrupt/EM control fields must be
 * re-programmed together (values per vendor init sequence).
 */
static void s6e8aa0_panel_cond_set_v142(struct s6e8aa0 *ctx)
{
	static const u8 aids[] = {
		0x04, 0x04, 0x04, 0x04, 0x04, 0x60, 0x80, 0xA0
	};
	/* top 3 bits of the panel id select the AID value */
	u8 aid = aids[ctx->id >> 5];
	u8 cfg = 0x3d;
	u8 clk_con = 0xc8;
	u8 int_con = 0x08;
	u8 bictl_con = 0x48;
	u8 em_clk1_con = 0xff;
	u8 em_clk2_con = 0xff;
	u8 em_int_con = 0xc8;
	if (ctx->flip_vertical) {
		/* GTCON */
		cfg &= ~(PANELCTL_GTCON_MASK);
		cfg |= (PANELCTL_GTCON_110);
	}
	if (ctx->flip_horizontal) {
		/* SS */
		cfg &= ~(PANELCTL_SS_MASK);
		cfg |= (PANELCTL_SS_1_800);
	}
	if (ctx->flip_horizontal || ctx->flip_vertical) {
		/* CLK1,2_CON */
		clk_con &= ~(PANELCTL_CLK1_CON_MASK |
			PANELCTL_CLK2_CON_MASK);
		clk_con |= (PANELCTL_CLK1_000 | PANELCTL_CLK2_001);
		/* INT1,2_CON */
		int_con &= ~(PANELCTL_INT1_CON_MASK |
			PANELCTL_INT2_CON_MASK);
		int_con |= (PANELCTL_INT1_000 | PANELCTL_INT2_001);
		/* BICTL,B_CON */
		bictl_con &= ~(PANELCTL_BICTL_CON_MASK |
			PANELCTL_BICTLB_CON_MASK);
		bictl_con |= (PANELCTL_BICTL_000 |
			PANELCTL_BICTLB_001);
		/* EM_CLK1,1B_CON */
		em_clk1_con &= ~(PANELCTL_EM_CLK1_CON_MASK |
			PANELCTL_EM_CLK1B_CON_MASK);
		em_clk1_con |= (PANELCTL_EM_CLK1_110 |
			PANELCTL_EM_CLK1B_110);
		/* EM_CLK2,2B_CON */
		em_clk2_con &= ~(PANELCTL_EM_CLK2_CON_MASK |
			PANELCTL_EM_CLK2B_CON_MASK);
		em_clk2_con |= (PANELCTL_EM_CLK2_110 |
			PANELCTL_EM_CLK2B_110);
		/* EM_INT1,2_CON */
		em_int_con &= ~(PANELCTL_EM_INT1_CON_MASK |
			PANELCTL_EM_INT2_CON_MASK);
		em_int_con |= (PANELCTL_EM_INT1_000 |
			PANELCTL_EM_INT2_001);
	}
	s6e8aa0_dcs_write_seq(ctx,
		0xf8, cfg, 0x35, 0x00, 0x00, 0x00, 0x93, 0x00,
		0x3c, 0x78, 0x08, 0x27, 0x7d, 0x3f, 0x00, 0x00,
		0x00, 0x20, aid, 0x08, 0x6e, 0x00, 0x00, 0x00,
		0x02, 0x07, 0x07, 0x23, 0x23, 0xc0, clk_con, int_con,
		bictl_con, 0xc1, 0x00, 0xc1, em_clk1_con, em_clk2_con,
		em_int_con);
}
/*
 * Send the panel condition set (0xf8).  Revisions before v142 take a
 * fixed sequence; v142 and later need values derived from the panel id
 * and flip configuration.
 */
static void s6e8aa0_panel_cond_set(struct s6e8aa0 *ctx)
{
	if (ctx->version >= 142) {
		s6e8aa0_panel_cond_set_v142(ctx);
		return;
	}

	s6e8aa0_dcs_write_seq_static(ctx,
		0xf8, 0x19, 0x35, 0x00, 0x00, 0x00, 0x94, 0x00,
		0x3c, 0x78, 0x10, 0x27, 0x08, 0x6e, 0x00, 0x00,
		0x00, 0x00, 0x04, 0x08, 0x6e, 0x00, 0x00, 0x00,
		0x00, 0x07, 0x07, 0x23, 0x6e, 0xc0, 0xc1, 0x01,
		0x81, 0xc1, 0x00, 0xc3, 0xf6, 0xf6, 0xc1
	);
}
/* Program the display condition set (0xf2), fixed for all revisions. */
static void s6e8aa0_display_condition_set(struct s6e8aa0 *ctx)
{
	s6e8aa0_dcs_write_seq_static(ctx, 0xf2, 0x80, 0x03, 0x0d);
}
/* ETC: source control register (0xf6), fixed for all revisions. */
static void s6e8aa0_etc_source_control(struct s6e8aa0 *ctx)
{
	s6e8aa0_dcs_write_seq_static(ctx, 0xf6, 0x00, 0x02, 0x00);
}
/*
 * ETC: pentile control (0xb6).  The two revision variants differ only
 * in byte 5 (0xc0 pre-v142, 0xff from v142 on).
 */
static void s6e8aa0_etc_pentile_control(struct s6e8aa0 *ctx)
{
	static const u8 pent32[] = {
		0xb6, 0x0c, 0x02, 0x03, 0x32, 0xc0, 0x44, 0x44, 0xc0, 0x00
	};
	static const u8 pent142[] = {
		0xb6, 0x0c, 0x02, 0x03, 0x32, 0xff, 0x44, 0x44, 0xc0, 0x00
	};
	const u8 *seq;
	size_t len;

	if (ctx->version < 142) {
		seq = pent32;
		len = sizeof(pent32);
	} else {
		seq = pent142;
		len = sizeof(pent142);
	}

	s6e8aa0_dcs_write(ctx, seq, len);
}
/* ETC: power control (0xf4), one fixed sequence per revision family. */
static void s6e8aa0_etc_power_control(struct s6e8aa0 *ctx)
{
	static const u8 pwr142[] = {
		0xf4, 0xcf, 0x0a, 0x12, 0x10, 0x1e, 0x33, 0x02
	};
	static const u8 pwr32[] = {
		0xf4, 0xcf, 0x0a, 0x15, 0x10, 0x19, 0x33, 0x02
	};
	const u8 *seq;
	size_t len;

	if (ctx->version >= 142) {
		seq = pwr142;
		len = sizeof(pwr142);
	} else {
		seq = pwr32;
		len = sizeof(pwr32);
	}

	s6e8aa0_dcs_write(ctx, seq, len);
}
/* ETC: ELVSS control (0xb1); panels reporting id 0 get value 0x95. */
static void s6e8aa0_etc_elvss_control(struct s6e8aa0 *ctx)
{
	u8 elvss_id = 0;

	if (!ctx->id)
		elvss_id = 0x95;

	s6e8aa0_dcs_write_seq(ctx, 0xb1, 0x04, elvss_id);
}
/*
 * Program the ELVSS NVM block (0xd9) for v142+ panels.  One byte of the
 * sequence depends on the current brightness band; out-of-range levels
 * fall back to the 180-210cd value, matching the original switch default.
 */
static void s6e8aa0_elvss_nvm_set_v142(struct s6e8aa0 *ctx)
{
	int level = ctx->brightness;
	u8 br;

	if (level >= 0 && level <= 6)		/* 30cd ~ 100cd */
		br = 0xdf;
	else if (level >= 7 && level <= 11)	/* 120cd ~ 150cd */
		br = 0xdd;
	else if (level >= 16 && level <= 24)	/* 240cd ~ 300cd */
		br = 0xd0;
	else					/* 180cd ~ 210cd + default */
		br = 0xd9;

	s6e8aa0_dcs_write_seq(ctx, 0xd9, 0x14, 0x40, 0x0c, 0xcb, 0xce, 0x6e,
		0xc4, 0x0f, 0x40, 0x41, br, 0x00, 0x60, 0x19);
}
/*
 * Program the ELVSS NVM block (0xd9).  Pre-v142 panels take a fixed
 * sequence; v142+ derive one byte from the current brightness.
 *
 * Fix: dropped the stray ';' after the function body, which formed an
 * empty file-scope declaration (diagnosed under -Wpedantic).
 */
static void s6e8aa0_elvss_nvm_set(struct s6e8aa0 *ctx)
{
	if (ctx->version < 142)
		s6e8aa0_dcs_write_seq_static(ctx,
			0xd9, 0x14, 0x40, 0x0c, 0xcb, 0xce, 0x6e, 0xc4, 0x07,
			0x40, 0x41, 0xc1, 0x00, 0x60, 0x19);
	else
		s6e8aa0_elvss_nvm_set_v142(ctx);
}
/* Unlock the level-2 command set (0xfc 0x5a 0x5a). */
static void s6e8aa0_apply_level_2_key(struct s6e8aa0 *ctx)
{
	s6e8aa0_dcs_write_seq_static(ctx, 0xfc, 0x5a, 0x5a);
}
static const s6e8aa0_gamma_table s6e8aa0_gamma_tables_v142[GAMMA_LEVEL_NUM] = {
{
0xfa, 0x01, 0x71, 0x31, 0x7b, 0x62, 0x55, 0x55,
0xaf, 0xb1, 0xb1, 0xbd, 0xce, 0xb7, 0x9a, 0xb1,
0x90, 0xb2, 0xc4, 0xae, 0x00, 0x60, 0x00, 0x40,
0x00, 0x70,
}, {
0xfa, 0x01, 0x71, 0x31, 0x7b, 0x74, 0x68, 0x69,
0xb8, 0xc1, 0xb7, 0xbd, 0xcd, 0xb8, 0x93, 0xab,
0x88, 0xb4, 0xc4, 0xb1, 0x00, 0x6b, 0x00, 0x4d,
0x00, 0x7d,
}, {
0xfa, 0x01, 0x71, 0x31, 0x7b, 0x95, 0x8a, 0x89,
0xb4, 0xc6, 0xb2, 0xc5, 0xd2, 0xbf, 0x90, 0xa8,
0x85, 0xb5, 0xc4, 0xb3, 0x00, 0x7b, 0x00, 0x5d,
0x00, 0x8f,
}, {
0xfa, 0x01, 0x71, 0x31, 0x7b, 0x9f, 0x98, 0x92,
0xb3, 0xc4, 0xb0, 0xbc, 0xcc, 0xb4, 0x91, 0xa6,
0x87, 0xb5, 0xc5, 0xb4, 0x00, 0x87, 0x00, 0x6a,
0x00, 0x9e,
}, {
0xfa, 0x01, 0x71, 0x31, 0x7b, 0x99, 0x93, 0x8b,
0xb2, 0xc2, 0xb0, 0xbd, 0xce, 0xb4, 0x90, 0xa6,
0x87, 0xb3, 0xc3, 0xb2, 0x00, 0x8d, 0x00, 0x70,
0x00, 0xa4,
}, {
0xfa, 0x01, 0x71, 0x31, 0x7b, 0xa7, 0xa5, 0x99,
0xb2, 0xc2, 0xb0, 0xbb, 0xcd, 0xb1, 0x93, 0xa7,
0x8a, 0xb2, 0xc1, 0xb0, 0x00, 0x92, 0x00, 0x75,
0x00, 0xaa,
}, {
0xfa, 0x01, 0x71, 0x31, 0x7b, 0xa0, 0xa0, 0x93,
0xb6, 0xc4, 0xb4, 0xb5, 0xc8, 0xaa, 0x94, 0xa9,
0x8c, 0xb2, 0xc0, 0xb0, 0x00, 0x97, 0x00, 0x7a,
0x00, 0xaf,
}, {
0xfa, 0x01, 0x71, 0x31, 0x7b, 0xa3, 0xa7, 0x96,
0xb3, 0xc2, 0xb0, 0xba, 0xcb, 0xb0, 0x94, 0xa8,
0x8c, 0xb0, 0xbf, 0xaf, 0x00, 0x9f, 0x00, 0x83,
0x00, 0xb9,
}, {
0xfa, 0x01, 0x71, 0x31, 0x7b, 0x9d, 0xa2, 0x90,
0xb6, 0xc5, 0xb3, 0xb8, 0xc9, 0xae, 0x94, 0xa8,
0x8d, 0xaf, 0xbd, 0xad, 0x00, 0xa4, 0x00, 0x88,
0x00, 0xbf,
}, {
0xfa, 0x01, 0x71, 0x31, 0x7b, 0xa6, 0xac, 0x97,
0xb4, 0xc4, 0xb1, 0xbb, 0xcb, 0xb2, 0x93, 0xa7,
0x8d, 0xae, 0xbc, 0xad, 0x00, 0xa7, 0x00, 0x8c,
0x00, 0xc3,
}, {
0xfa, 0x01, 0x71, 0x31, 0x7b, 0xa2, 0xa9, 0x93,
0xb6, 0xc5, 0xb2, 0xba, 0xc9, 0xb0, 0x93, 0xa7,
0x8d, 0xae, 0xbb, 0xac, 0x00, 0xab, 0x00, 0x90,
0x00, 0xc8,
}, {
0xfa, 0x01, 0x71, 0x31, 0x7b, 0x9e, 0xa6, 0x8f,
0xb7, 0xc6, 0xb3, 0xb8, 0xc8, 0xb0, 0x93, 0xa6,
0x8c, 0xae, 0xbb, 0xad, 0x00, 0xae, 0x00, 0x93,
0x00, 0xcc,
}, {
0xfa, 0x01, 0x71, 0x31, 0x7b, 0xab, 0xb4, 0x9c,
0xb3, 0xc3, 0xaf, 0xb7, 0xc7, 0xaf, 0x93, 0xa6,
0x8c, 0xaf, 0xbc, 0xad, 0x00, 0xb1, 0x00, 0x97,
0x00, 0xcf,
}, {
0xfa, 0x01, 0x71, 0x31, 0x7b, 0xa6, 0xb1, 0x98,
0xb1, 0xc2, 0xab, 0xba, 0xc9, 0xb2, 0x93, 0xa6,
0x8d, 0xae, 0xba, 0xab, 0x00, 0xb5, 0x00, 0x9b,
0x00, 0xd4,
}, {
0xfa, 0x01, 0x71, 0x31, 0x7b, 0xa3, 0xae, 0x94,
0xb2, 0xc3, 0xac, 0xbb, 0xca, 0xb4, 0x91, 0xa4,
0x8a, 0xae, 0xba, 0xac, 0x00, 0xb8, 0x00, 0x9e,
0x00, 0xd8,
}, {
0xfa, 0x01, 0x71, 0x31, 0x7b, 0xab, 0xb7, 0x9c,
0xae, 0xc0, 0xa9, 0xba, 0xc9, 0xb3, 0x92, 0xa5,
0x8b, 0xad, 0xb9, 0xab, 0x00, 0xbb, 0x00, 0xa1,
0x00, 0xdc,
}, {
0xfa, 0x01, 0x71, 0x31, 0x7b, 0xa7, 0xb4, 0x97,
0xb0, 0xc1, 0xaa, 0xb9, 0xc8, 0xb2, 0x92, 0xa5,
0x8c, 0xae, 0xb9, 0xab, 0x00, 0xbe, 0x00, 0xa4,
0x00, 0xdf,
}, {
0xfa, 0x01, 0x71, 0x31, 0x7b, 0xa3, 0xb0, 0x94,
0xb0, 0xc2, 0xab, 0xbb, 0xc9, 0xb3, 0x91, 0xa4,
0x8b, 0xad, 0xb8, 0xaa, 0x00, 0xc1, 0x00, 0xa8,
0x00, 0xe2,
}, {
0xfa, 0x01, 0x71, 0x31, 0x7b, 0xa3, 0xb0, 0x94,
0xae, 0xbf, 0xa8, 0xb9, 0xc8, 0xb3, 0x92, 0xa4,
0x8b, 0xad, 0xb7, 0xa9, 0x00, 0xc4, 0x00, 0xab,
0x00, 0xe6,
}, {
0xfa, 0x01, 0x71, 0x31, 0x7b, 0xa7, 0xb6, 0x98,
0xaf, 0xc0, 0xa8, 0xb8, 0xc7, 0xb2, 0x93, 0xa5,
0x8d, 0xad, 0xb7, 0xa9, 0x00, 0xc7, 0x00, 0xae,
0x00, 0xe9,
}, {
0xfa, 0x01, 0x71, 0x31, 0x7b, 0xa4, 0xb3, 0x95,
0xaf, 0xc1, 0xa9, 0xb9, 0xc8, 0xb3, 0x92, 0xa4,
0x8b, 0xad, 0xb7, 0xaa, 0x00, 0xc9, 0x00, 0xb0,
0x00, 0xec,
}, {
0xfa, 0x01, 0x71, 0x31, 0x7b, 0xa4, 0xb3, 0x95,
0xac, 0xbe, 0xa6, 0xbb, 0xc9, 0xb4, 0x90, 0xa3,
0x8a, 0xad, 0xb7, 0xa9, 0x00, 0xcc, 0x00, 0xb4,
0x00, 0xf0,
}, {
0xfa, 0x01, 0x71, 0x31, 0x7b, 0xa0, 0xb0, 0x91,
0xae, 0xc0, 0xa6, 0xba, 0xc8, 0xb4, 0x91, 0xa4,
0x8b, 0xad, 0xb7, 0xa9, 0x00, 0xcf, 0x00, 0xb7,
0x00, 0xf3,
}, {
0xfa, 0x01, 0x71, 0x31, 0x7b, 0xa7, 0xb8, 0x98,
0xab, 0xbd, 0xa4, 0xbb, 0xc9, 0xb5, 0x91, 0xa3,
0x8b, 0xac, 0xb6, 0xa8, 0x00, 0xd1, 0x00, 0xb9,
0x00, 0xf6,
}, {
0xfa, 0x01, 0x71, 0x31, 0x7b, 0xa4, 0xb5, 0x95,
0xa9, 0xbc, 0xa1, 0xbb, 0xc9, 0xb5, 0x91, 0xa3,
0x8a, 0xad, 0xb6, 0xa8, 0x00, 0xd6, 0x00, 0xbf,
0x00, 0xfc,
},
};
static const s6e8aa0_gamma_table s6e8aa0_gamma_tables_v96[GAMMA_LEVEL_NUM] = {
{
0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xff, 0x00, 0xff,
0xdf, 0x1f, 0xd7, 0xdc, 0xb7, 0xe1, 0xc0, 0xaf,
0xc4, 0xd2, 0xd0, 0xcf, 0x00, 0x4d, 0x00, 0x40,
0x00, 0x5f,
}, {
0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xff, 0x00, 0xff,
0xd5, 0x35, 0xcf, 0xdc, 0xc1, 0xe1, 0xbf, 0xb3,
0xc1, 0xd2, 0xd1, 0xce, 0x00, 0x53, 0x00, 0x46,
0x00, 0x67,
}, {
0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xff, 0x00, 0xff,
0xd2, 0x64, 0xcf, 0xdb, 0xc6, 0xe1, 0xbd, 0xb3,
0xbd, 0xd2, 0xd2, 0xce, 0x00, 0x59, 0x00, 0x4b,
0x00, 0x6e,
}, {
0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xff, 0x00, 0xff,
0xd0, 0x7c, 0xcf, 0xdb, 0xc9, 0xe0, 0xbc, 0xb4,
0xbb, 0xcf, 0xd1, 0xcc, 0x00, 0x5f, 0x00, 0x50,
0x00, 0x75,
}, {
0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xff, 0x00, 0xff,
0xd0, 0x8e, 0xd1, 0xdb, 0xcc, 0xdf, 0xbb, 0xb6,
0xb9, 0xd0, 0xd1, 0xcd, 0x00, 0x63, 0x00, 0x54,
0x00, 0x7a,
}, {
0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xff, 0x00, 0xff,
0xd1, 0x9e, 0xd5, 0xda, 0xcd, 0xdd, 0xbb, 0xb7,
0xb9, 0xce, 0xce, 0xc9, 0x00, 0x68, 0x00, 0x59,
0x00, 0x81,
}, {
0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xff, 0x00, 0xff,
0xd0, 0xa5, 0xd6, 0xda, 0xcf, 0xdd, 0xbb, 0xb7,
0xb8, 0xcc, 0xcd, 0xc7, 0x00, 0x6c, 0x00, 0x5c,
0x00, 0x86,
}, {
0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xff, 0x1f, 0xfe,
0xd0, 0xae, 0xd7, 0xd9, 0xd0, 0xdb, 0xb9, 0xb6,
0xb5, 0xca, 0xcc, 0xc5, 0x00, 0x74, 0x00, 0x63,
0x00, 0x90,
}, {
0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xff, 0x1f, 0xf9,
0xcf, 0xb0, 0xd6, 0xd9, 0xd1, 0xdb, 0xb9, 0xb6,
0xb4, 0xca, 0xcb, 0xc5, 0x00, 0x77, 0x00, 0x66,
0x00, 0x94,
}, {
0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xff, 0x1f, 0xf7,
0xcf, 0xb3, 0xd7, 0xd8, 0xd1, 0xd9, 0xb7, 0xb6,
0xb3, 0xc9, 0xca, 0xc3, 0x00, 0x7b, 0x00, 0x69,
0x00, 0x99,
}, {
0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xfd, 0x2f, 0xf7,
0xdf, 0xb5, 0xd6, 0xd8, 0xd1, 0xd8, 0xb6, 0xb5,
0xb2, 0xca, 0xcb, 0xc4, 0x00, 0x7e, 0x00, 0x6c,
0x00, 0x9d,
}, {
0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xfa, 0x2f, 0xf5,
0xce, 0xb6, 0xd5, 0xd7, 0xd2, 0xd8, 0xb6, 0xb4,
0xb0, 0xc7, 0xc9, 0xc1, 0x00, 0x84, 0x00, 0x71,
0x00, 0xa5,
}, {
0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xf7, 0x2f, 0xf2,
0xce, 0xb9, 0xd5, 0xd8, 0xd2, 0xd8, 0xb4, 0xb4,
0xaf, 0xc7, 0xc9, 0xc1, 0x00, 0x87, 0x00, 0x73,
0x00, 0xa8,
}, {
0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xf5, 0x2f, 0xf0,
0xdf, 0xba, 0xd5, 0xd7, 0xd2, 0xd7, 0xb4, 0xb4,
0xaf, 0xc5, 0xc7, 0xbf, 0x00, 0x8a, 0x00, 0x76,
0x00, 0xac,
}, {
0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xf2, 0x2f, 0xed,
0xcE, 0xbb, 0xd4, 0xd6, 0xd2, 0xd6, 0xb5, 0xb4,
0xaF, 0xc5, 0xc7, 0xbf, 0x00, 0x8c, 0x00, 0x78,
0x00, 0xaf,
}, {
0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xef, 0x2f, 0xeb,
0xcd, 0xbb, 0xd2, 0xd7, 0xd3, 0xd6, 0xb3, 0xb4,
0xae, 0xc5, 0xc6, 0xbe, 0x00, 0x91, 0x00, 0x7d,
0x00, 0xb6,
}, {
0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xee, 0x2f, 0xea,
0xce, 0xbd, 0xd4, 0xd6, 0xd2, 0xd5, 0xb2, 0xb3,
0xad, 0xc3, 0xc4, 0xbb, 0x00, 0x94, 0x00, 0x7f,
0x00, 0xba,
}, {
0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xec, 0x2f, 0xe8,
0xce, 0xbe, 0xd3, 0xd6, 0xd3, 0xd5, 0xb2, 0xb2,
0xac, 0xc3, 0xc5, 0xbc, 0x00, 0x96, 0x00, 0x81,
0x00, 0xbd,
}, {
0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xeb, 0x2f, 0xe7,
0xce, 0xbf, 0xd3, 0xd6, 0xd2, 0xd5, 0xb1, 0xb2,
0xab, 0xc2, 0xc4, 0xbb, 0x00, 0x99, 0x00, 0x83,
0x00, 0xc0,
}, {
0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xef, 0x5f, 0xe9,
0xca, 0xbf, 0xd3, 0xd5, 0xd2, 0xd4, 0xb2, 0xb2,
0xab, 0xc1, 0xc4, 0xba, 0x00, 0x9b, 0x00, 0x85,
0x00, 0xc3,
}, {
0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xea, 0x5f, 0xe8,
0xee, 0xbf, 0xd2, 0xd5, 0xd2, 0xd4, 0xb1, 0xb2,
0xab, 0xc1, 0xc2, 0xb9, 0x00, 0x9D, 0x00, 0x87,
0x00, 0xc6,
}, {
0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xe9, 0x5f, 0xe7,
0xcd, 0xbf, 0xd2, 0xd6, 0xd2, 0xd4, 0xb1, 0xb2,
0xab, 0xbe, 0xc0, 0xb7, 0x00, 0xa1, 0x00, 0x8a,
0x00, 0xca,
}, {
0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xe8, 0x61, 0xe6,
0xcd, 0xbf, 0xd1, 0xd6, 0xd3, 0xd4, 0xaf, 0xb0,
0xa9, 0xbe, 0xc1, 0xb7, 0x00, 0xa3, 0x00, 0x8b,
0x00, 0xce,
}, {
0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xe8, 0x62, 0xe5,
0xcc, 0xc0, 0xd0, 0xd6, 0xd2, 0xd4, 0xaf, 0xb1,
0xa9, 0xbd, 0xc0, 0xb6, 0x00, 0xa5, 0x00, 0x8d,
0x00, 0xd0,
}, {
0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xe7, 0x7f, 0xe3,
0xcc, 0xc1, 0xd0, 0xd5, 0xd3, 0xd3, 0xae, 0xaf,
0xa8, 0xbe, 0xc0, 0xb7, 0x00, 0xa8, 0x00, 0x90,
0x00, 0xd3,
}
};
static const s6e8aa0_gamma_table s6e8aa0_gamma_tables_v32[GAMMA_LEVEL_NUM] = {
{
0xfa, 0x01, 0x43, 0x14, 0x45, 0x72, 0x5e, 0x6b,
0xa1, 0xa7, 0x9a, 0xb4, 0xcb, 0xb8, 0x92, 0xac,
0x97, 0xb4, 0xc3, 0xb5, 0x00, 0x4e, 0x00, 0x37,
0x00, 0x58,
}, {
0xfa, 0x01, 0x43, 0x14, 0x45, 0x85, 0x71, 0x7d,
0xa6, 0xb6, 0xa1, 0xb5, 0xca, 0xba, 0x93, 0xac,
0x98, 0xb2, 0xc0, 0xaf, 0x00, 0x59, 0x00, 0x43,
0x00, 0x64,
}, {
0xfa, 0x01, 0x43, 0x14, 0x45, 0xa4, 0x94, 0x9e,
0xa0, 0xbb, 0x9c, 0xc3, 0xd2, 0xc6, 0x93, 0xaa,
0x95, 0xb7, 0xc2, 0xb4, 0x00, 0x65, 0x00, 0x50,
0x00, 0x74,
}, {
0xfa, 0x01, 0x43, 0x14, 0x45, 0xad, 0xa1, 0xa6,
0xa0, 0xb9, 0x9b, 0xc3, 0xd1, 0xc8, 0x90, 0xa6,
0x90, 0xbb, 0xc3, 0xb7, 0x00, 0x6f, 0x00, 0x5b,
0x00, 0x80,
}, {
0xfa, 0x01, 0x43, 0x14, 0x45, 0xa6, 0x9d, 0x9f,
0x9f, 0xb8, 0x9a, 0xc7, 0xd5, 0xcc, 0x90, 0xa5,
0x8f, 0xb8, 0xc1, 0xb6, 0x00, 0x74, 0x00, 0x60,
0x00, 0x85,
}, {
0xfa, 0x01, 0x43, 0x14, 0x45, 0xb3, 0xae, 0xae,
0x9e, 0xb7, 0x9a, 0xc8, 0xd6, 0xce, 0x91, 0xa6,
0x90, 0xb6, 0xc0, 0xb3, 0x00, 0x78, 0x00, 0x65,
0x00, 0x8a,
}, {
0xfa, 0x01, 0x43, 0x14, 0x45, 0xad, 0xa9, 0xa8,
0xa3, 0xb9, 0x9e, 0xc4, 0xd3, 0xcb, 0x94, 0xa6,
0x90, 0xb6, 0xbf, 0xb3, 0x00, 0x7c, 0x00, 0x69,
0x00, 0x8e,
}, {
0xfa, 0x01, 0x43, 0x14, 0x45, 0xaf, 0xaf, 0xa9,
0xa5, 0xbc, 0xa2, 0xc7, 0xd5, 0xcd, 0x93, 0xa5,
0x8f, 0xb4, 0xbd, 0xb1, 0x00, 0x83, 0x00, 0x70,
0x00, 0x96,
}, {
0xfa, 0x01, 0x43, 0x14, 0x45, 0xa9, 0xab, 0xa3,
0xaa, 0xbf, 0xa7, 0xc5, 0xd3, 0xcb, 0x93, 0xa5,
0x8f, 0xb2, 0xbb, 0xb0, 0x00, 0x86, 0x00, 0x74,
0x00, 0x9b,
}, {
0xfa, 0x01, 0x43, 0x14, 0x45, 0xb1, 0xb5, 0xab,
0xab, 0xc0, 0xa9, 0xc7, 0xd4, 0xcc, 0x94, 0xa4,
0x8f, 0xb1, 0xbb, 0xaf, 0x00, 0x8a, 0x00, 0x77,
0x00, 0x9e,
}, {
0xfa, 0x01, 0x43, 0x14, 0x45, 0xad, 0xb2, 0xa7,
0xae, 0xc2, 0xab, 0xc5, 0xd3, 0xca, 0x93, 0xa4,
0x8f, 0xb1, 0xba, 0xae, 0x00, 0x8d, 0x00, 0x7b,
0x00, 0xa2,
}, {
0xfa, 0x01, 0x43, 0x14, 0x45, 0xa9, 0xaf, 0xa3,
0xb0, 0xc3, 0xae, 0xc4, 0xd1, 0xc8, 0x93, 0xa4,
0x8f, 0xb1, 0xba, 0xaf, 0x00, 0x8f, 0x00, 0x7d,
0x00, 0xa5,
}, {
0xfa, 0x01, 0x43, 0x14, 0x45, 0xb4, 0xbd, 0xaf,
0xae, 0xc1, 0xab, 0xc2, 0xd0, 0xc6, 0x94, 0xa4,
0x8f, 0xb1, 0xba, 0xaf, 0x00, 0x92, 0x00, 0x80,
0x00, 0xa8,
}, {
0xfa, 0x01, 0x43, 0x14, 0x45, 0xb0, 0xb9, 0xac,
0xad, 0xc1, 0xab, 0xc4, 0xd1, 0xc7, 0x95, 0xa4,
0x90, 0xb0, 0xb9, 0xad, 0x00, 0x95, 0x00, 0x84,
0x00, 0xac,
}, {
0xfa, 0x01, 0x43, 0x14, 0x45, 0xad, 0xb6, 0xa7,
0xaf, 0xc2, 0xae, 0xc5, 0xd1, 0xc7, 0x93, 0xa3,
0x8e, 0xb0, 0xb9, 0xad, 0x00, 0x98, 0x00, 0x86,
0x00, 0xaf,
}, {
0xfa, 0x01, 0x43, 0x14, 0x45, 0xb4, 0xbf, 0xaf,
0xad, 0xc1, 0xab, 0xc3, 0xd0, 0xc6, 0x94, 0xa3,
0x8f, 0xaf, 0xb8, 0xac, 0x00, 0x9a, 0x00, 0x89,
0x00, 0xb2,
}, {
0xfa, 0x01, 0x43, 0x14, 0x45, 0xb0, 0xbc, 0xac,
0xaf, 0xc2, 0xad, 0xc2, 0xcf, 0xc4, 0x94, 0xa3,
0x90, 0xaf, 0xb8, 0xad, 0x00, 0x9c, 0x00, 0x8b,
0x00, 0xb5,
}, {
0xfa, 0x01, 0x43, 0x14, 0x45, 0xad, 0xb9, 0xa7,
0xb1, 0xc4, 0xaf, 0xc3, 0xcf, 0xc5, 0x94, 0xa3,
0x8f, 0xae, 0xb7, 0xac, 0x00, 0x9f, 0x00, 0x8e,
0x00, 0xb8,
}, {
0xfa, 0x01, 0x43, 0x14, 0x45, 0xad, 0xb9, 0xa7,
0xaf, 0xc2, 0xad, 0xc1, 0xce, 0xc3, 0x95, 0xa3,
0x90, 0xad, 0xb6, 0xab, 0x00, 0xa2, 0x00, 0x91,
0x00, 0xbb,
}, {
0xfa, 0x01, 0x43, 0x14, 0x45, 0xb1, 0xbe, 0xac,
0xb1, 0xc4, 0xaf, 0xc1, 0xcd, 0xc1, 0x95, 0xa4,
0x91, 0xad, 0xb6, 0xab, 0x00, 0xa4, 0x00, 0x93,
0x00, 0xbd,
}, {
0xfa, 0x01, 0x43, 0x14, 0x45, 0xad, 0xbb, 0xa8,
0xb3, 0xc5, 0xb2, 0xc1, 0xcd, 0xc2, 0x95, 0xa3,
0x90, 0xad, 0xb6, 0xab, 0x00, 0xa6, 0x00, 0x95,
0x00, 0xc0,
}, {
0xfa, 0x01, 0x43, 0x14, 0x45, 0xad, 0xbb, 0xa8,
0xb0, 0xc3, 0xaf, 0xc2, 0xce, 0xc2, 0x94, 0xa2,
0x90, 0xac, 0xb6, 0xab, 0x00, 0xa8, 0x00, 0x98,
0x00, 0xc3,
}, {
0xfa, 0x01, 0x43, 0x14, 0x45, 0xa9, 0xb8, 0xa5,
0xb3, 0xc5, 0xb2, 0xc1, 0xcc, 0xc0, 0x95, 0xa2,
0x90, 0xad, 0xb6, 0xab, 0x00, 0xaa, 0x00, 0x9a,
0x00, 0xc5,
}, {
0xfa, 0x01, 0x43, 0x14, 0x45, 0xb0, 0xc0, 0xac,
0xb0, 0xc3, 0xaf, 0xc1, 0xcd, 0xc1, 0x95, 0xa2,
0x90, 0xac, 0xb5, 0xa9, 0x00, 0xac, 0x00, 0x9c,
0x00, 0xc8,
}, {
0xfa, 0x01, 0x43, 0x14, 0x45, 0xad, 0xbd, 0xa8,
0xaf, 0xc2, 0xaf, 0xc1, 0xcc, 0xc0, 0x95, 0xa2,
0x90, 0xac, 0xb5, 0xaa, 0x00, 0xb1, 0x00, 0xa1,
0x00, 0xcc,
},
};
/*
 * Known panel revisions, matched against MTP ID byte 1.  v210 reuses
 * the v142 gamma data.
 */
static const struct s6e8aa0_variant s6e8aa0_variants[] = {
	{
		.version = 32,
		.gamma_tables = s6e8aa0_gamma_tables_v32,
	}, {
		.version = 96,
		.gamma_tables = s6e8aa0_gamma_tables_v96,
	}, {
		.version = 142,
		.gamma_tables = s6e8aa0_gamma_tables_v142,
	}, {
		.version = 210,
		.gamma_tables = s6e8aa0_gamma_tables_v142,
	}
};
/*
 * Push the gamma table for the current brightness level and latch it
 * with the gamma-update command (0xf7 0x03).  v142+ panels also get a
 * brightness-dependent ELVSS sequence first.
 */
static void s6e8aa0_brightness_set(struct s6e8aa0 *ctx)
{
	const u8 *gamma;
	if (ctx->error)
		return;
	/* assumes brightness is within [0, GAMMA_LEVEL_NUM) — set in probe */
	gamma = ctx->variant->gamma_tables[ctx->brightness];
	if (ctx->version >= 142)
		s6e8aa0_elvss_nvm_set(ctx);
	s6e8aa0_dcs_write(ctx, gamma, GAMMA_TABLE_LEN);
	/* update gamma table. */
	s6e8aa0_dcs_write_seq_static(ctx, 0xf7, 0x03);
}
/*
 * Run the full vendor init sequence: unlock command sets, exit sleep,
 * then program panel/display condition, gamma and ETC registers.
 * Errors latch in ctx->error; later writes become no-ops.
 */
static void s6e8aa0_panel_init(struct s6e8aa0 *ctx)
{
	s6e8aa0_apply_level_1_key(ctx);
	s6e8aa0_apply_level_2_key(ctx);
	msleep(20);
	s6e8aa0_dcs_write_seq_static(ctx, MIPI_DCS_EXIT_SLEEP_MODE);
	msleep(40);
	s6e8aa0_panel_cond_set(ctx);
	s6e8aa0_display_condition_set(ctx);
	s6e8aa0_brightness_set(ctx);
	s6e8aa0_etc_source_control(ctx);
	s6e8aa0_etc_pentile_control(ctx);
	s6e8aa0_elvss_nvm_set(ctx);
	s6e8aa0_etc_power_control(ctx);
	s6e8aa0_etc_elvss_control(ctx);
	/* panel-specific settle time from DT "init-delay" */
	msleep(ctx->init_delay);
}
/*
 * Configure how many bytes the panel may return in a single read
 * response.  Skipped if an error is latched; latches new errors.
 */
static void s6e8aa0_set_maximum_return_packet_size(struct s6e8aa0 *ctx,
						   u16 size)
{
	struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
	int ret;
	if (ctx->error < 0)
		return;
	ret = mipi_dsi_set_maximum_return_packet_size(dsi, size);
	if (ret < 0) {
		dev_err(ctx->dev,
			"error %d setting maximum return packet size to %d\n",
			ret, size);
		ctx->error = ret;
	}
}
/*
 * Read the 3-byte MTP ID register (0xd1) and resolve the matching
 * variant.  id[1] is the hardware revision, id[2] an extra panel id
 * used by the init sequences.  Latches -EIO / -EINVAL on failure.
 *
 * Fix: log format "0x%2x" space-padded single-digit bytes (e.g. "0x 5");
 * "0x%02x" zero-pads them as intended.
 */
static void s6e8aa0_read_mtp_id(struct s6e8aa0 *ctx)
{
	u8 id[3];
	int ret, i;

	ret = s6e8aa0_dcs_read(ctx, 0xd1, id, ARRAY_SIZE(id));
	/* a short read or an all-zero first byte means no usable panel */
	if (ret < 0 || ret < ARRAY_SIZE(id) || id[0] == 0x00) {
		dev_err(ctx->dev, "read id failed\n");
		ctx->error = -EIO;
		return;
	}

	dev_info(ctx->dev, "ID: 0x%02x, 0x%02x, 0x%02x\n", id[0], id[1], id[2]);

	for (i = 0; i < ARRAY_SIZE(s6e8aa0_variants); ++i) {
		if (id[1] == s6e8aa0_variants[i].version)
			break;
	}
	if (i >= ARRAY_SIZE(s6e8aa0_variants)) {
		dev_err(ctx->dev, "unsupported display version %d\n", id[1]);
		ctx->error = -EINVAL;
		return;
	}

	ctx->variant = &s6e8aa0_variants[i];
	ctx->version = id[1];
	ctx->id = id[2];
}
/* Full bring-up: identify the panel, run init, then display on. */
static void s6e8aa0_set_sequence(struct s6e8aa0 *ctx)
{
	s6e8aa0_set_maximum_return_packet_size(ctx, 3);
	s6e8aa0_read_mtp_id(ctx);
	s6e8aa0_panel_init(ctx);
	s6e8aa0_dcs_write_seq_static(ctx, MIPI_DCS_SET_DISPLAY_ON);
}
/*
 * Enable both supplies, wait the DT power-on delay, then pulse the
 * reset GPIO (logical 0 then 1; assumes "reset-gpios" is flagged
 * active-low in DT so this is the physical low pulse — TODO confirm).
 */
static int s6e8aa0_power_on(struct s6e8aa0 *ctx)
{
	int ret;
	ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
	if (ret < 0)
		return ret;
	msleep(ctx->power_on_delay);
	gpiod_set_value(ctx->reset_gpio, 0);
	usleep_range(10000, 11000);
	gpiod_set_value(ctx->reset_gpio, 1);
	msleep(ctx->reset_delay);
	return 0;
}
/* Drop both panel supplies; the reset line is left untouched. */
static int s6e8aa0_power_off(struct s6e8aa0 *ctx)
{
	return regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
}
/* No-op: everything is handled in unprepare for this panel. */
static int s6e8aa0_disable(struct drm_panel *panel)
{
	return 0;
}
/*
 * Put the panel to sleep and power it down.  Any latched DSI error is
 * cleared first so the power-off still proceeds.
 */
static int s6e8aa0_unprepare(struct drm_panel *panel)
{
	struct s6e8aa0 *ctx = panel_to_s6e8aa0(panel);
	s6e8aa0_dcs_write_seq_static(ctx, MIPI_DCS_ENTER_SLEEP_MODE);
	s6e8aa0_dcs_write_seq_static(ctx, MIPI_DCS_SET_DISPLAY_OFF);
	msleep(40);
	s6e8aa0_clear_error(ctx);
	return s6e8aa0_power_off(ctx);
}
/*
 * Power the panel up and run the init sequence.  If any DSI transfer
 * in the sequence latched an error, roll back via unprepare.
 */
static int s6e8aa0_prepare(struct drm_panel *panel)
{
	struct s6e8aa0 *ctx = panel_to_s6e8aa0(panel);
	int ret;
	ret = s6e8aa0_power_on(ctx);
	if (ret < 0)
		return ret;
	s6e8aa0_set_sequence(ctx);
	ret = ctx->error;
	if (ret < 0)
		s6e8aa0_unprepare(panel);
	return ret;
}
/* No-op: display-on is already sent at the end of prepare. */
static int s6e8aa0_enable(struct drm_panel *panel)
{
	return 0;
}
/*
 * Report the single fixed mode built from the DT videomode, marked
 * preferred.  Returns the number of modes added (1), or 0 on OOM.
 */
static int s6e8aa0_get_modes(struct drm_panel *panel,
			     struct drm_connector *connector)
{
	struct s6e8aa0 *ctx = panel_to_s6e8aa0(panel);
	struct drm_display_mode *mode;
	mode = drm_mode_create(connector->dev);
	if (!mode) {
		dev_err(panel->dev, "failed to create a new display mode\n");
		return 0;
	}
	drm_display_mode_from_videomode(&ctx->vm, mode);
	mode->width_mm = ctx->width_mm;
	mode->height_mm = ctx->height_mm;
	connector->display_info.width_mm = mode->width_mm;
	connector->display_info.height_mm = mode->height_mm;
	mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
	drm_mode_probed_add(connector, mode);
	return 1;
}
/* DRM panel operations table registered in probe. */
static const struct drm_panel_funcs s6e8aa0_drm_funcs = {
	.disable = s6e8aa0_disable,
	.unprepare = s6e8aa0_unprepare,
	.prepare = s6e8aa0_prepare,
	.enable = s6e8aa0_enable,
	.get_modes = s6e8aa0_get_modes,
};
/*
 * Parse device-tree properties.  The videomode is required; delays,
 * physical size and flip flags are optional (left zero/false if absent).
 */
static int s6e8aa0_parse_dt(struct s6e8aa0 *ctx)
{
	struct device *dev = ctx->dev;
	struct device_node *np = dev->of_node;
	int ret;
	ret = of_get_videomode(np, &ctx->vm, 0);
	if (ret < 0)
		return ret;
	of_property_read_u32(np, "power-on-delay", &ctx->power_on_delay);
	of_property_read_u32(np, "reset-delay", &ctx->reset_delay);
	of_property_read_u32(np, "init-delay", &ctx->init_delay);
	of_property_read_u32(np, "panel-width-mm", &ctx->width_mm);
	of_property_read_u32(np, "panel-height-mm", &ctx->height_mm);
	ctx->flip_horizontal = of_property_read_bool(np, "flip-horizontal");
	ctx->flip_vertical = of_property_read_bool(np, "flip-vertical");
	return 0;
}
/*
 * Bind to the DSI device: parse DT, acquire regulators and the reset
 * GPIO, register the DRM panel, then attach to the DSI host.  The
 * panel is unregistered again if the attach fails.
 */
static int s6e8aa0_probe(struct mipi_dsi_device *dsi)
{
	struct device *dev = &dsi->dev;
	struct s6e8aa0 *ctx;
	int ret;
	ctx = devm_kzalloc(dev, sizeof(struct s6e8aa0), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;
	mipi_dsi_set_drvdata(dsi, ctx);
	ctx->dev = dev;
	/* 4-lane RGB888 burst-mode video */
	dsi->lanes = 4;
	dsi->format = MIPI_DSI_FMT_RGB888;
	dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST
		| MIPI_DSI_MODE_VSYNC_FLUSH | MIPI_DSI_MODE_VIDEO_AUTO_VERT;
	ret = s6e8aa0_parse_dt(ctx);
	if (ret < 0)
		return ret;
	ctx->supplies[0].supply = "vdd3";
	ctx->supplies[1].supply = "vci";
	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(ctx->supplies),
				      ctx->supplies);
	if (ret < 0) {
		dev_err(dev, "failed to get regulators: %d\n", ret);
		return ret;
	}
	ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
	if (IS_ERR(ctx->reset_gpio)) {
		dev_err(dev, "cannot get reset-gpios %ld\n",
			PTR_ERR(ctx->reset_gpio));
		return PTR_ERR(ctx->reset_gpio);
	}
	/* default to maximum brightness until userspace says otherwise */
	ctx->brightness = GAMMA_LEVEL_NUM - 1;
	drm_panel_init(&ctx->panel, dev, &s6e8aa0_drm_funcs,
		       DRM_MODE_CONNECTOR_DSI);
	ctx->panel.prepare_prev_first = true;
	drm_panel_add(&ctx->panel);
	ret = mipi_dsi_attach(dsi);
	if (ret < 0)
		drm_panel_remove(&ctx->panel);
	return ret;
}
/* Unbind: detach from the DSI host, then unregister the panel. */
static void s6e8aa0_remove(struct mipi_dsi_device *dsi)
{
	struct s6e8aa0 *ctx = mipi_dsi_get_drvdata(dsi);
	mipi_dsi_detach(dsi);
	drm_panel_remove(&ctx->panel);
}
/* Matches the DT binding "samsung,s6e8aa0". */
static const struct of_device_id s6e8aa0_of_match[] = {
	{ .compatible = "samsung,s6e8aa0" },
	{ }
};
MODULE_DEVICE_TABLE(of, s6e8aa0_of_match);
static struct mipi_dsi_driver s6e8aa0_driver = {
	.probe = s6e8aa0_probe,
	.remove = s6e8aa0_remove,
	.driver = {
		.name = "panel-samsung-s6e8aa0",
		.of_match_table = s6e8aa0_of_match,
	},
};
module_mipi_dsi_driver(s6e8aa0_driver);
MODULE_AUTHOR("Donghwa Lee <[email protected]>");
MODULE_AUTHOR("Inki Dae <[email protected]>");
MODULE_AUTHOR("Joongmock Shin <[email protected]>");
MODULE_AUTHOR("Eunchul Kim <[email protected]>");
MODULE_AUTHOR("Tomasz Figa <[email protected]>");
MODULE_AUTHOR("Andrzej Hajda <[email protected]>");
MODULE_DESCRIPTION("MIPI-DSI based s6e8aa0 AMOLED LCD Panel Driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/device.h>
#include <linux/module.h>
#include <linux/spi/spi.h>
#include <linux/delay.h>
#include <drm/drm_mipi_dbi.h>
#include <drm/drm_print.h>
#include "panel-samsung-s6e63m0.h"
/* MCS registers the DBI layer is allowed to read; zero-terminated. */
static const u8 s6e63m0_dbi_read_commands[] = {
	MCS_READ_ID1,
	MCS_READ_ID2,
	MCS_READ_ID3,
	0, /* sentinel */
};
/*
 * DCS read callback for the common s6e63m0 core: fetch one register
 * byte via the MIPI DBI SPI transport.  @trsp is the mipi_dbi handle.
 */
static int s6e63m0_spi_dcs_read(struct device *dev, void *trsp,
				const u8 cmd, u8 *data)
{
	struct mipi_dbi *dbi = trsp;
	int err;

	err = mipi_dbi_command_read(dbi, cmd, data);
	if (err)
		dev_err(dev, "error on DBI read command %02x\n", cmd);

	return err;
}
/*
 * DCS write callback: data[0] is the command, the rest are parameters.
 * The short sleep runs even when the write failed — presumably pacing
 * for the controller; NOTE(review): confirm it is intentional on error.
 */
static int s6e63m0_spi_dcs_write(struct device *dev, void *trsp,
				 const u8 *data, size_t len)
{
	struct mipi_dbi *dbi = trsp;
	int ret;
	ret = mipi_dbi_command_stackbuf(dbi, data[0], (data + 1), (len - 1));
	usleep_range(300, 310);
	return ret;
}
/*
 * SPI glue probe: set up a MIPI DBI transport, register the custom MCS
 * read commands, then hand off to the shared s6e63m0 core (last arg
 * false = not DSI).
 */
static int s6e63m0_spi_probe(struct spi_device *spi)
{
	struct device *dev = &spi->dev;
	struct mipi_dbi *dbi;
	int ret;
	dbi = devm_kzalloc(dev, sizeof(*dbi), GFP_KERNEL);
	if (!dbi)
		return -ENOMEM;
	ret = mipi_dbi_spi_init(spi, dbi, NULL);
	if (ret)
		return dev_err_probe(dev, ret, "MIPI DBI init failed\n");
	/* Register our custom MCS read commands */
	dbi->read_commands = s6e63m0_dbi_read_commands;
	return s6e63m0_probe(dev, dbi, s6e63m0_spi_dcs_read,
			     s6e63m0_spi_dcs_write, false);
}
/* Unbind: delegate teardown to the shared s6e63m0 core. */
static void s6e63m0_spi_remove(struct spi_device *spi)
{
	s6e63m0_remove(&spi->dev);
}
/* Matches the DT binding "samsung,s6e63m0". */
static const struct of_device_id s6e63m0_spi_of_match[] = {
	{ .compatible = "samsung,s6e63m0" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, s6e63m0_spi_of_match);
static struct spi_driver s6e63m0_spi_driver = {
	.probe			= s6e63m0_spi_probe,
	.remove			= s6e63m0_spi_remove,
	.driver			= {
		.name		= "panel-samsung-s6e63m0",
		.of_match_table = s6e63m0_spi_of_match,
	},
};
module_spi_driver(s6e63m0_spi_driver);
MODULE_AUTHOR("Paweł Chmiel <[email protected]>");
MODULE_DESCRIPTION("s6e63m0 LCD SPI Driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/gpu/drm/panel/panel-samsung-s6e63m0-spi.c |
/*
* Copyright (C) 2013, NVIDIA Corporation. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sub license,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/media-bus-format.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
#include <video/display_timing.h>
#include <video/of_display_timing.h>
#include <video/videomode.h>
#include <drm/drm_crtc.h>
#include <drm/drm_device.h>
#include <drm/drm_edid.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_panel.h>
/**
 * struct panel_desc - Describes a simple panel.
 *
 * Static, per-compatible description; runtime state lives in
 * struct panel_simple.
 */
struct panel_desc {
	/**
	 * @modes: Pointer to array of fixed modes appropriate for this panel.
	 *
	 * If only one mode then this can just be the address of the mode.
	 * NOTE: cannot be used with "timings" and also if this is specified
	 * then you cannot override the mode in the device tree.
	 */
	const struct drm_display_mode *modes;
	/** @num_modes: Number of elements in modes array. */
	unsigned int num_modes;
	/**
	 * @timings: Pointer to array of display timings
	 *
	 * NOTE: cannot be used with "modes" and also these will be used to
	 * validate a device tree override if one is present.
	 */
	const struct display_timing *timings;
	/** @num_timings: Number of elements in timings array. */
	unsigned int num_timings;
	/** @bpc: Bits per color. */
	unsigned int bpc;
	/** @size: Structure containing the physical size of this panel. */
	struct {
		/**
		 * @size.width: Width (in mm) of the active display area.
		 */
		unsigned int width;
		/**
		 * @size.height: Height (in mm) of the active display area.
		 */
		unsigned int height;
	} size;
	/** @delay: Structure containing various delay values for this panel. */
	struct {
		/**
		 * @delay.prepare: Time for the panel to become ready.
		 *
		 * The time (in milliseconds) that it takes for the panel to
		 * become ready and start receiving video data
		 */
		unsigned int prepare;
		/**
		 * @delay.enable: Time for the panel to display a valid frame.
		 *
		 * The time (in milliseconds) that it takes for the panel to
		 * display the first valid frame after starting to receive
		 * video data.
		 */
		unsigned int enable;
		/**
		 * @delay.disable: Time for the panel to turn the display off.
		 *
		 * The time (in milliseconds) that it takes for the panel to
		 * turn the display off (no content is visible).
		 */
		unsigned int disable;
		/**
		 * @delay.unprepare: Time to power down completely.
		 *
		 * The time (in milliseconds) that it takes for the panel
		 * to power itself down completely.
		 *
		 * This time is used to prevent a future "prepare" from
		 * starting until at least this many milliseconds has passed.
		 * If at prepare time less time has passed since unprepare
		 * finished, the driver waits for the remaining time.
		 */
		unsigned int unprepare;
	} delay;
	/** @bus_format: See MEDIA_BUS_FMT_... defines. */
	u32 bus_format;
	/** @bus_flags: See DRM_BUS_FLAG_... defines. */
	u32 bus_flags;
	/** @connector_type: LVDS, eDP, DSI, DPI, etc. */
	int connector_type;
};
/* Per-device driver state; embeds the drm_panel handed to the DRM core. */
struct panel_simple {
	struct drm_panel base;			/* generic panel, recovered via to_panel_simple() */
	bool enabled;				/* true between enable() and disable() */
	bool prepared;				/* true between prepare() and unprepare() */
	ktime_t unprepared_time;		/* boottime stamp of last power-down (suspend) */
	const struct panel_desc *desc;		/* static description of this panel model */
	struct regulator *supply;		/* "power" supply */
	struct i2c_adapter *ddc;		/* optional DDC bus for EDID probing, may be NULL */
	struct gpio_desc *enable_gpio;		/* optional "enable" GPIO, may be NULL */
	struct edid *edid;			/* EDID cached while powered, freed on suspend */
	struct drm_display_mode override_mode;	/* validated DT "panel-timing" override, .type == 0 if unset */
	enum drm_panel_orientation orientation;	/* panel orientation from DT */
};
/* Map the embedded drm_panel back to its containing panel_simple. */
static inline struct panel_simple *to_panel_simple(struct drm_panel *panel)
{
	return container_of(panel, struct panel_simple, base);
}
static unsigned int panel_simple_get_timings_modes(struct panel_simple *panel,
struct drm_connector *connector)
{
struct drm_display_mode *mode;
unsigned int i, num = 0;
for (i = 0; i < panel->desc->num_timings; i++) {
const struct display_timing *dt = &panel->desc->timings[i];
struct videomode vm;
videomode_from_timing(dt, &vm);
mode = drm_mode_create(connector->dev);
if (!mode) {
dev_err(panel->base.dev, "failed to add mode %ux%u\n",
dt->hactive.typ, dt->vactive.typ);
continue;
}
drm_display_mode_from_videomode(&vm, mode);
mode->type |= DRM_MODE_TYPE_DRIVER;
if (panel->desc->num_timings == 1)
mode->type |= DRM_MODE_TYPE_PREFERRED;
drm_mode_probed_add(connector, mode);
num++;
}
return num;
}
static unsigned int panel_simple_get_display_modes(struct panel_simple *panel,
struct drm_connector *connector)
{
struct drm_display_mode *mode;
unsigned int i, num = 0;
for (i = 0; i < panel->desc->num_modes; i++) {
const struct drm_display_mode *m = &panel->desc->modes[i];
mode = drm_mode_duplicate(connector->dev, m);
if (!mode) {
dev_err(panel->base.dev, "failed to add mode %ux%u@%u\n",
m->hdisplay, m->vdisplay,
drm_mode_vrefresh(m));
continue;
}
mode->type |= DRM_MODE_TYPE_DRIVER;
if (panel->desc->num_modes == 1)
mode->type |= DRM_MODE_TYPE_PREFERRED;
drm_mode_set_name(mode);
drm_mode_probed_add(connector, mode);
num++;
}
return num;
}
/*
 * Add the panel's hard-coded (non-EDID) modes to @connector and fill in
 * basic display_info (bpc, physical size, bus format/flags) from the
 * descriptor.
 *
 * Mode source priority: a validated device-tree override mode first, then
 * the descriptor's display timings, then its fixed modes.  Returns the
 * number of modes added.
 */
static int panel_simple_get_non_edid_modes(struct panel_simple *panel,
					   struct drm_connector *connector)
{
	struct drm_display_mode *mode;
	/* override_mode.type is only non-zero once a DT override validated */
	bool has_override = panel->override_mode.type;
	unsigned int num = 0;

	if (!panel->desc)
		return 0;

	if (has_override) {
		mode = drm_mode_duplicate(connector->dev,
					  &panel->override_mode);
		if (mode) {
			drm_mode_probed_add(connector, mode);
			num = 1;
		} else {
			dev_err(panel->base.dev, "failed to add override mode\n");
		}
	}

	/* Only add timings if override was not there or failed to validate */
	if (num == 0 && panel->desc->num_timings)
		num = panel_simple_get_timings_modes(panel, connector);

	/*
	 * Only add fixed modes if timings/override added no mode.
	 *
	 * We should only ever have either the display timings specified
	 * or a fixed mode. Anything else is rather bogus.
	 */
	WARN_ON(panel->desc->num_timings && panel->desc->num_modes);
	if (num == 0)
		num = panel_simple_get_display_modes(panel, connector);

	connector->display_info.bpc = panel->desc->bpc;
	connector->display_info.width_mm = panel->desc->size.width;
	connector->display_info.height_mm = panel->desc->size.height;
	if (panel->desc->bus_format)
		drm_display_info_set_bus_formats(&connector->display_info,
						 &panel->desc->bus_format, 1);
	connector->display_info.bus_flags = panel->desc->bus_flags;

	return num;
}
/*
 * Sleep until at least @min_ms milliseconds have elapsed since
 * @start_ktime; return immediately if that interval has already passed
 * (or @min_ms is zero).
 */
static void panel_simple_wait(ktime_t start_ktime, unsigned int min_ms)
{
	ktime_t deadline, now;

	if (!min_ms)
		return;

	deadline = ktime_add(start_ktime, ms_to_ktime(min_ms));
	now = ktime_get_boottime();
	if (ktime_before(now, deadline))
		msleep(ktime_to_ms(ktime_sub(deadline, now)) + 1);
}
/*
 * drm_panel .disable hook: observe the descriptor's disable delay so the
 * display has gone dark before the video stream is stopped.  A no-op when
 * the panel is not enabled.
 */
static int panel_simple_disable(struct drm_panel *panel)
{
	struct panel_simple *ps = to_panel_simple(panel);
	unsigned int delay_ms;

	if (!ps->enabled)
		return 0;

	delay_ms = ps->desc->delay.disable;
	if (delay_ms)
		msleep(delay_ms);

	ps->enabled = false;
	return 0;
}
/*
 * Runtime-PM suspend: power the panel down.
 *
 * Records the power-off timestamp so panel_simple_resume() can honour the
 * descriptor's minimum unprepare delay, and frees the cached EDID so it
 * is re-read the next time modes are queried.
 */
static int panel_simple_suspend(struct device *dev)
{
	struct panel_simple *p = dev_get_drvdata(dev);

	gpiod_set_value_cansleep(p->enable_gpio, 0);
	regulator_disable(p->supply);
	p->unprepared_time = ktime_get_boottime();

	kfree(p->edid);
	p->edid = NULL;

	return 0;
}
/*
 * drm_panel .unprepare hook: drop the runtime-PM reference taken by
 * panel_simple_prepare(), letting autosuspend eventually power the panel
 * off via panel_simple_suspend().  A no-op when already unprepared.
 */
static int panel_simple_unprepare(struct drm_panel *panel)
{
	struct panel_simple *p = to_panel_simple(panel);
	int ret;

	/* Unpreparing when already unprepared is a no-op */
	if (!p->prepared)
		return 0;

	pm_runtime_mark_last_busy(panel->dev);
	ret = pm_runtime_put_autosuspend(panel->dev);
	if (ret < 0)
		return ret;
	p->prepared = false;

	return 0;
}
/*
 * Runtime-PM resume: power the panel back up.
 *
 * First waits out the descriptor's minimum unprepare delay (measured from
 * the last suspend), then enables the supply and the enable GPIO, and
 * finally sleeps for the prepare delay before the panel is considered
 * ready for video data.
 */
static int panel_simple_resume(struct device *dev)
{
	struct panel_simple *p = dev_get_drvdata(dev);
	int err;

	panel_simple_wait(p->unprepared_time, p->desc->delay.unprepare);

	err = regulator_enable(p->supply);
	if (err < 0) {
		dev_err(dev, "failed to enable supply: %d\n", err);
		return err;
	}

	gpiod_set_value_cansleep(p->enable_gpio, 1);

	if (p->desc->delay.prepare)
		msleep(p->desc->delay.prepare);

	return 0;
}
/*
 * drm_panel .prepare hook: take a runtime-PM reference so the panel gets
 * powered up through panel_simple_resume().  A no-op when already
 * prepared.
 */
static int panel_simple_prepare(struct drm_panel *panel)
{
	struct panel_simple *p = to_panel_simple(panel);
	int ret;

	/* Preparing when already prepared is a no-op */
	if (p->prepared)
		return 0;

	ret = pm_runtime_get_sync(panel->dev);
	if (ret < 0) {
		/* get_sync keeps its reference even on failure; drop it */
		pm_runtime_put_autosuspend(panel->dev);
		return ret;
	}

	p->prepared = true;

	return 0;
}
/*
 * drm_panel .enable hook: observe the descriptor's enable delay so the
 * first valid frame has been displayed before we report success.  A no-op
 * when already enabled.
 */
static int panel_simple_enable(struct drm_panel *panel)
{
	struct panel_simple *ps = to_panel_simple(panel);
	unsigned int delay_ms;

	if (ps->enabled)
		return 0;

	delay_ms = ps->desc->delay.enable;
	if (delay_ms)
		msleep(delay_ms);

	ps->enabled = true;
	return 0;
}
/*
 * drm_panel .get_modes hook: report EDID modes (when a DDC bus exists)
 * plus the descriptor's hard-coded modes, and propagate the panel
 * orientation to the connector.  Returns the total number of modes added.
 */
static int panel_simple_get_modes(struct drm_panel *panel,
				  struct drm_connector *connector)
{
	struct panel_simple *p = to_panel_simple(panel);
	int num = 0;

	/* probe EDID if a DDC bus is available */
	if (p->ddc) {
		/* power the panel up just long enough to read the EDID */
		pm_runtime_get_sync(panel->dev);

		if (!p->edid)
			p->edid = drm_get_edid(connector, p->ddc);

		if (p->edid)
			num += drm_add_edid_modes(connector, p->edid);

		pm_runtime_mark_last_busy(panel->dev);
		pm_runtime_put_autosuspend(panel->dev);
	}

	/* add hard-coded panel modes */
	num += panel_simple_get_non_edid_modes(p, connector);

	/*
	 * TODO: Remove once all drm drivers call
	 * drm_connector_set_orientation_from_panel()
	 */
	drm_connector_set_panel_orientation(connector, p->orientation);

	return num;
}
/*
 * drm_panel .get_timings hook: copy up to @num_timings of the panel's
 * display timings into @timings (which may be NULL to just query the
 * count) and return how many timings the panel has in total.
 */
static int panel_simple_get_timings(struct drm_panel *panel,
				    unsigned int num_timings,
				    struct display_timing *timings)
{
	struct panel_simple *ps = to_panel_simple(panel);
	unsigned int n = num_timings;
	unsigned int i;

	if (n > ps->desc->num_timings)
		n = ps->desc->num_timings;

	if (timings) {
		for (i = 0; i < n; i++)
			timings[i] = ps->desc->timings[i];
	}

	return ps->desc->num_timings;
}
/* drm_panel .get_orientation hook: report the DT-provided orientation. */
static enum drm_panel_orientation panel_simple_get_orientation(struct drm_panel *panel)
{
	struct panel_simple *p = to_panel_simple(panel);

	return p->orientation;
}
/* drm_panel operations implemented by this driver. */
static const struct drm_panel_funcs panel_simple_funcs = {
	.disable = panel_simple_disable,
	.unprepare = panel_simple_unprepare,
	.prepare = panel_simple_prepare,
	.enable = panel_simple_enable,
	.get_modes = panel_simple_get_modes,
	.get_orientation = panel_simple_get_orientation,
	.get_timings = panel_simple_get_timings,
};
static struct panel_desc panel_dpi;
/*
 * Build a panel_desc at runtime for the generic "panel-dpi" DT binding.
 *
 * Reads the mandatory panel-timing node and the optional width-mm /
 * height-mm properties, derives bus_flags from the timing flags, assumes
 * a DPI connector, and installs the result as panel->desc.  Returns 0 on
 * success or a negative errno.
 */
static int panel_dpi_probe(struct device *dev,
			   struct panel_simple *panel)
{
	struct display_timing *timing;
	const struct device_node *np;
	struct panel_desc *desc;
	unsigned int bus_flags;
	struct videomode vm;
	int ret;

	np = dev->of_node;
	desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	timing = devm_kzalloc(dev, sizeof(*timing), GFP_KERNEL);
	if (!timing)
		return -ENOMEM;

	ret = of_get_display_timing(np, "panel-timing", timing);
	if (ret < 0) {
		dev_err(dev, "%pOF: no panel-timing node found for \"panel-dpi\" binding\n",
			np);
		return ret;
	}

	desc->timings = timing;
	desc->num_timings = 1;

	of_property_read_u32(np, "width-mm", &desc->size.width);
	of_property_read_u32(np, "height-mm", &desc->size.height);

	/* Extract bus_flags from display_timing */
	bus_flags = 0;
	vm.flags = timing->flags;
	drm_bus_flags_from_videomode(&vm, &bus_flags);
	desc->bus_flags = bus_flags;

	/* We do not know the connector for the DT node, so guess it */
	desc->connector_type = DRM_MODE_CONNECTOR_DPI;

	panel->desc = desc;

	return 0;
}
/*
 * True when @to_check's typical value for @field lies within @bounds'
 * [min, max] range for that field.  Used to validate a DT timing
 * override against the panel's published timing ranges.
 */
#define PANEL_SIMPLE_BOUNDS_CHECK(to_check, bounds, field) \
	(to_check->field.typ >= bounds->field.min && \
	 to_check->field.typ <= bounds->field.max)
/*
 * Validate a device-tree "panel-timing" override @ot against the panel's
 * published timing ranges and, when it matches one of them, store it as
 * panel->override_mode (flagged DRIVER | PREFERRED).
 *
 * Overrides are rejected (with a WARN) for panels that specify fixed
 * modes, that have no timings at all, or whose timings do not contain
 * the requested values.
 */
static void panel_simple_parse_panel_timing_node(struct device *dev,
						 struct panel_simple *panel,
						 const struct display_timing *ot)
{
	const struct panel_desc *desc = panel->desc;
	struct videomode vm;
	unsigned int i;

	if (WARN_ON(desc->num_modes)) {
		dev_err(dev, "Reject override mode: panel has a fixed mode\n");
		return;
	}
	if (WARN_ON(!desc->num_timings)) {
		dev_err(dev, "Reject override mode: no timings specified\n");
		return;
	}

	for (i = 0; i < panel->desc->num_timings; i++) {
		const struct display_timing *dt = &panel->desc->timings[i];

		/* every field of the override must fall in this timing's range */
		if (!PANEL_SIMPLE_BOUNDS_CHECK(ot, dt, hactive) ||
		    !PANEL_SIMPLE_BOUNDS_CHECK(ot, dt, hfront_porch) ||
		    !PANEL_SIMPLE_BOUNDS_CHECK(ot, dt, hback_porch) ||
		    !PANEL_SIMPLE_BOUNDS_CHECK(ot, dt, hsync_len) ||
		    !PANEL_SIMPLE_BOUNDS_CHECK(ot, dt, vactive) ||
		    !PANEL_SIMPLE_BOUNDS_CHECK(ot, dt, vfront_porch) ||
		    !PANEL_SIMPLE_BOUNDS_CHECK(ot, dt, vback_porch) ||
		    !PANEL_SIMPLE_BOUNDS_CHECK(ot, dt, vsync_len))
			continue;

		if (ot->flags != dt->flags)
			continue;

		videomode_from_timing(ot, &vm);
		drm_display_mode_from_videomode(&vm, &panel->override_mode);
		panel->override_mode.type |= DRM_MODE_TYPE_DRIVER |
					     DRM_MODE_TYPE_PREFERRED;
		break;
	}

	if (WARN_ON(!panel->override_mode.type))
		dev_err(dev, "Reject override mode: No display_timing found\n");
}
/*
 * Common probe logic for simple panels.
 *
 * Looks up the "power" supply, optional "enable" GPIO, orientation and
 * optional DDC bus from the device tree; resolves the panel description
 * (either the generic panel-dpi binding or a static descriptor with an
 * optional DT timing override); sanity-checks the descriptor against its
 * connector type; sets up runtime PM with autosuspend; and registers the
 * drm_panel.  Returns 0 on success or a negative errno.
 */
static int panel_simple_probe(struct device *dev, const struct panel_desc *desc)
{
	struct panel_simple *panel;
	struct display_timing dt;
	struct device_node *ddc;
	int connector_type;
	u32 bus_flags;
	int err;

	panel = devm_kzalloc(dev, sizeof(*panel), GFP_KERNEL);
	if (!panel)
		return -ENOMEM;

	panel->enabled = false;
	panel->desc = desc;

	panel->supply = devm_regulator_get(dev, "power");
	if (IS_ERR(panel->supply))
		return PTR_ERR(panel->supply);

	panel->enable_gpio = devm_gpiod_get_optional(dev, "enable",
						     GPIOD_OUT_LOW);
	if (IS_ERR(panel->enable_gpio))
		return dev_err_probe(dev, PTR_ERR(panel->enable_gpio),
				     "failed to request GPIO\n");

	err = of_drm_get_panel_orientation(dev->of_node, &panel->orientation);
	if (err) {
		dev_err(dev, "%pOF: failed to get orientation %d\n", dev->of_node, err);
		return err;
	}

	/* Optional DDC bus used to read the panel's EDID */
	ddc = of_parse_phandle(dev->of_node, "ddc-i2c-bus", 0);
	if (ddc) {
		panel->ddc = of_find_i2c_adapter_by_node(ddc);
		of_node_put(ddc);

		if (!panel->ddc)
			return -EPROBE_DEFER;
	}

	if (desc == &panel_dpi) {
		/* Handle the generic panel-dpi binding */
		err = panel_dpi_probe(dev, panel);
		if (err)
			goto free_ddc;
		desc = panel->desc;
	} else {
		if (!of_get_display_timing(dev->of_node, "panel-timing", &dt))
			panel_simple_parse_panel_timing_node(dev, panel, &dt);
	}

	connector_type = desc->connector_type;
	/* Catch common mistakes for panels. */
	switch (connector_type) {
	case 0:
		dev_warn(dev, "Specify missing connector_type\n");
		connector_type = DRM_MODE_CONNECTOR_DPI;
		break;
	case DRM_MODE_CONNECTOR_LVDS:
		/* LVDS panels must use one of the SPWG/JEIDA formats with a matching bpc */
		WARN_ON(desc->bus_flags &
			~(DRM_BUS_FLAG_DE_LOW |
			  DRM_BUS_FLAG_DE_HIGH |
			  DRM_BUS_FLAG_DATA_MSB_TO_LSB |
			  DRM_BUS_FLAG_DATA_LSB_TO_MSB));
		WARN_ON(desc->bus_format != MEDIA_BUS_FMT_RGB666_1X7X3_SPWG &&
			desc->bus_format != MEDIA_BUS_FMT_RGB888_1X7X4_SPWG &&
			desc->bus_format != MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA);
		WARN_ON(desc->bus_format == MEDIA_BUS_FMT_RGB666_1X7X3_SPWG &&
			desc->bpc != 6);
		WARN_ON((desc->bus_format == MEDIA_BUS_FMT_RGB888_1X7X4_SPWG ||
			 desc->bus_format == MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA) &&
			desc->bpc != 8);
		break;
	case DRM_MODE_CONNECTOR_eDP:
		dev_warn(dev, "eDP panels moved to panel-edp\n");
		err = -EINVAL;
		goto free_ddc;
	case DRM_MODE_CONNECTOR_DSI:
		if (desc->bpc != 6 && desc->bpc != 8)
			dev_warn(dev, "Expected bpc in {6,8} but got: %u\n", desc->bpc);
		break;
	case DRM_MODE_CONNECTOR_DPI:
		bus_flags = DRM_BUS_FLAG_DE_LOW |
			    DRM_BUS_FLAG_DE_HIGH |
			    DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE |
			    DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE |
			    DRM_BUS_FLAG_DATA_MSB_TO_LSB |
			    DRM_BUS_FLAG_DATA_LSB_TO_MSB |
			    DRM_BUS_FLAG_SYNC_SAMPLE_POSEDGE |
			    DRM_BUS_FLAG_SYNC_SAMPLE_NEGEDGE;
		if (desc->bus_flags & ~bus_flags)
			dev_warn(dev, "Unexpected bus_flags(%d)\n", desc->bus_flags & ~bus_flags);
		if (!(desc->bus_flags & bus_flags))
			dev_warn(dev, "Specify missing bus_flags\n");
		if (desc->bus_format == 0)
			dev_warn(dev, "Specify missing bus_format\n");
		if (desc->bpc != 6 && desc->bpc != 8)
			dev_warn(dev, "Expected bpc in {6,8} but got: %u\n", desc->bpc);
		break;
	default:
		dev_warn(dev, "Specify a valid connector_type: %d\n", desc->connector_type);
		connector_type = DRM_MODE_CONNECTOR_DPI;
		break;
	}

	dev_set_drvdata(dev, panel);

	/*
	 * We use runtime PM for prepare / unprepare since those power the panel
	 * on and off and those can be very slow operations. This is important
	 * to optimize powering the panel on briefly to read the EDID before
	 * fully enabling the panel.
	 */
	pm_runtime_enable(dev);
	pm_runtime_set_autosuspend_delay(dev, 1000);
	pm_runtime_use_autosuspend(dev);

	drm_panel_init(&panel->base, dev, &panel_simple_funcs, connector_type);

	err = drm_panel_of_backlight(&panel->base);
	if (err) {
		dev_err_probe(dev, err, "Could not find backlight\n");
		goto disable_pm_runtime;
	}

	drm_panel_add(&panel->base);

	return 0;

disable_pm_runtime:
	pm_runtime_dont_use_autosuspend(dev);
	pm_runtime_disable(dev);
free_ddc:
	if (panel->ddc)
		put_device(&panel->ddc->dev);

	return err;
}
/*
 * Common remove logic: unregister the panel, make sure it is disabled
 * and unprepared, tear down runtime PM, and release the DDC adapter
 * reference taken at probe.
 */
static void panel_simple_remove(struct device *dev)
{
	struct panel_simple *panel = dev_get_drvdata(dev);

	drm_panel_remove(&panel->base);
	drm_panel_disable(&panel->base);
	drm_panel_unprepare(&panel->base);

	pm_runtime_dont_use_autosuspend(dev);
	pm_runtime_disable(dev);
	if (panel->ddc)
		put_device(&panel->ddc->dev);
}
/* Power the panel down cleanly on system shutdown/reboot. */
static void panel_simple_shutdown(struct device *dev)
{
	struct panel_simple *panel = dev_get_drvdata(dev);

	drm_panel_disable(&panel->base);
	drm_panel_unprepare(&panel->base);
}
/* Ampire AM-1280800N3TZQW-T00H: 1280x800 */
static const struct drm_display_mode ampire_am_1280800n3tzqw_t00h_mode = {
	.clock = 71100,
	.hdisplay = 1280,
	.hsync_start = 1280 + 40,
	.hsync_end = 1280 + 40 + 80,
	.htotal = 1280 + 40 + 80 + 40,
	.vdisplay = 800,
	.vsync_start = 800 + 3,
	.vsync_end = 800 + 3 + 10,
	.vtotal = 800 + 3 + 10 + 10,
	.flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
};
static const struct panel_desc ampire_am_1280800n3tzqw_t00h = {
.modes = &ire_am_1280800n3tzqw_t00h_mode,
.num_modes = 1,
.bpc = 8,
.size = {
.width = 217,
.height = 136,
},
.bus_flags = DRM_BUS_FLAG_DE_HIGH,
.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
/* Ampire AM-480272H3TMQW-T01H: 480x272 */
static const struct drm_display_mode ampire_am_480272h3tmqw_t01h_mode = {
	.clock = 9000,
	.hdisplay = 480,
	.hsync_start = 480 + 2,
	.hsync_end = 480 + 2 + 41,
	.htotal = 480 + 2 + 41 + 2,
	.vdisplay = 272,
	.vsync_start = 272 + 2,
	.vsync_end = 272 + 2 + 10,
	.vtotal = 272 + 2 + 10 + 2,
	.flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
};
static const struct panel_desc ampire_am_480272h3tmqw_t01h = {
.modes = &ire_am_480272h3tmqw_t01h_mode,
.num_modes = 1,
.bpc = 8,
.size = {
.width = 99,
.height = 58,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
};
/* Ampire AM800480R3TMQW-A1H: 800x480 */
static const struct drm_display_mode ampire_am800480r3tmqwa1h_mode = {
	.clock = 33333,
	.hdisplay = 800,
	.hsync_start = 800 + 0,
	.hsync_end = 800 + 0 + 255,
	.htotal = 800 + 0 + 255 + 0,
	.vdisplay = 480,
	.vsync_start = 480 + 2,
	.vsync_end = 480 + 2 + 45,
	.vtotal = 480 + 2 + 45 + 0,
	.flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
};

/* Ampire AM-800480L1TMQW-T00H: 800x480 timing ranges */
static const struct display_timing ampire_am_800480l1tmqw_t00h_timing = {
	.pixelclock = { 29930000, 33260000, 36590000 },
	.hactive = { 800, 800, 800 },
	.hfront_porch = { 1, 40, 168 },
	.hback_porch = { 88, 88, 88 },
	.hsync_len = { 1, 128, 128 },
	.vactive = { 480, 480, 480 },
	.vfront_porch = { 1, 35, 37 },
	.vback_porch = { 8, 8, 8 },
	.vsync_len = { 1, 2, 2 },
	.flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW |
		 DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_PIXDATA_POSEDGE |
		 DISPLAY_FLAGS_SYNC_POSEDGE,
};
static const struct panel_desc ampire_am_800480l1tmqw_t00h = {
.timings = &ire_am_800480l1tmqw_t00h_timing,
.num_timings = 1,
.bpc = 8,
.size = {
.width = 111,
.height = 67,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
.bus_flags = DRM_BUS_FLAG_DE_HIGH |
DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE |
DRM_BUS_FLAG_SYNC_SAMPLE_NEGEDGE,
.connector_type = DRM_MODE_CONNECTOR_DPI,
};
static const struct panel_desc ampire_am800480r3tmqwa1h = {
.modes = &ire_am800480r3tmqwa1h_mode,
.num_modes = 1,
.bpc = 6,
.size = {
.width = 152,
.height = 91,
},
.bus_format = MEDIA_BUS_FMT_RGB666_1X18,
};
/* Ampire AM800600P5TMQW-TB8H: 800x600 timing ranges */
static const struct display_timing ampire_am800600p5tmqw_tb8h_timing = {
	.pixelclock = { 34500000, 39600000, 50400000 },
	.hactive = { 800, 800, 800 },
	.hfront_porch = { 12, 112, 312 },
	.hback_porch = { 87, 87, 48 },
	.hsync_len = { 1, 1, 40 },
	.vactive = { 600, 600, 600 },
	.vfront_porch = { 1, 21, 61 },
	.vback_porch = { 38, 38, 19 },
	.vsync_len = { 1, 1, 20 },
	.flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW |
		 DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_PIXDATA_POSEDGE |
		 DISPLAY_FLAGS_SYNC_POSEDGE,
};
static const struct panel_desc ampire_am800600p5tmqwtb8h = {
.timings = &ire_am800600p5tmqw_tb8h_timing,
.num_timings = 1,
.bpc = 6,
.size = {
.width = 162,
.height = 122,
},
.bus_format = MEDIA_BUS_FMT_RGB666_1X18,
.bus_flags = DRM_BUS_FLAG_DE_HIGH |
DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE |
DRM_BUS_FLAG_SYNC_SAMPLE_NEGEDGE,
.connector_type = DRM_MODE_CONNECTOR_DPI,
};
/* Santek ST0700I5Y-RBSLW-F: 800x480 timing ranges (shared by the Armadeus adapter below) */
static const struct display_timing santek_st0700i5y_rbslw_f_timing = {
	.pixelclock = { 26400000, 33300000, 46800000 },
	.hactive = { 800, 800, 800 },
	.hfront_porch = { 16, 210, 354 },
	.hback_porch = { 45, 36, 6 },
	.hsync_len = { 1, 10, 40 },
	.vactive = { 480, 480, 480 },
	.vfront_porch = { 7, 22, 147 },
	.vback_porch = { 22, 13, 3 },
	.vsync_len = { 1, 10, 20 },
	.flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW |
		 DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_PIXDATA_POSEDGE
};

/* Armadeus ST0700 adapter: 800x480, 154x86 mm, reuses the Santek timing above */
static const struct panel_desc armadeus_st0700_adapt = {
	.timings = &santek_st0700i5y_rbslw_f_timing,
	.num_timings = 1,
	.bpc = 6,
	.size = {
		.width = 154,
		.height = 86,
	},
	.bus_format = MEDIA_BUS_FMT_RGB666_1X18,
	.bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE,
};

/* AUO B101AW03: 1024x600 */
static const struct drm_display_mode auo_b101aw03_mode = {
	.clock = 51450,
	.hdisplay = 1024,
	.hsync_start = 1024 + 156,
	.hsync_end = 1024 + 156 + 8,
	.htotal = 1024 + 156 + 8 + 156,
	.vdisplay = 600,
	.vsync_start = 600 + 16,
	.vsync_end = 600 + 16 + 6,
	.vtotal = 600 + 16 + 6 + 16,
};

/* AUO B101AW03: 1024x600 LVDS, 223x125 mm */
static const struct panel_desc auo_b101aw03 = {
	.modes = &auo_b101aw03_mode,
	.num_modes = 1,
	.bpc = 6,
	.size = {
		.width = 223,
		.height = 125,
	},
	.bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,
	.bus_flags = DRM_BUS_FLAG_DE_HIGH,
	.connector_type = DRM_MODE_CONNECTOR_LVDS,
};

/* AUO B101XTN01: 1366x768 */
static const struct drm_display_mode auo_b101xtn01_mode = {
	.clock = 72000,
	.hdisplay = 1366,
	.hsync_start = 1366 + 20,
	.hsync_end = 1366 + 20 + 70,
	.htotal = 1366 + 20 + 70,
	.vdisplay = 768,
	.vsync_start = 768 + 14,
	.vsync_end = 768 + 14 + 42,
	.vtotal = 768 + 14 + 42,
	.flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
};

/* AUO B101XTN01: 1366x768, 223x125 mm */
static const struct panel_desc auo_b101xtn01 = {
	.modes = &auo_b101xtn01_mode,
	.num_modes = 1,
	.bpc = 6,
	.size = {
		.width = 223,
		.height = 125,
	},
};
/* AUO G070VVN01: 800x480 timing ranges */
static const struct display_timing auo_g070vvn01_timings = {
	.pixelclock = { 33300000, 34209000, 45000000 },
	.hactive = { 800, 800, 800 },
	.hfront_porch = { 20, 40, 200 },
	.hback_porch = { 87, 40, 1 },
	.hsync_len = { 1, 48, 87 },
	.vactive = { 480, 480, 480 },
	.vfront_porch = { 5, 13, 200 },
	.vback_porch = { 31, 31, 29 },
	.vsync_len = { 1, 1, 3 },
};

/* AUO G070VVN01: 800x480, 152x91 mm */
static const struct panel_desc auo_g070vvn01 = {
	.timings = &auo_g070vvn01_timings,
	.num_timings = 1,
	.bpc = 8,
	.size = {
		.width = 152,
		.height = 91,
	},
	.delay = {
		.prepare = 200,
		.enable = 50,
		.disable = 50,
		.unprepare = 1000,
	},
};

/* AUO G101EVN010: 1280x800 */
static const struct drm_display_mode auo_g101evn010_mode = {
	.clock = 68930,
	.hdisplay = 1280,
	.hsync_start = 1280 + 82,
	.hsync_end = 1280 + 82 + 2,
	.htotal = 1280 + 82 + 2 + 84,
	.vdisplay = 800,
	.vsync_start = 800 + 8,
	.vsync_end = 800 + 8 + 2,
	.vtotal = 800 + 8 + 2 + 6,
};

/* AUO G101EVN010: 1280x800 LVDS, 216x135 mm */
static const struct panel_desc auo_g101evn010 = {
	.modes = &auo_g101evn010_mode,
	.num_modes = 1,
	.bpc = 6,
	.size = {
		.width = 216,
		.height = 135,
	},
	.bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,
	.connector_type = DRM_MODE_CONNECTOR_LVDS,
};

/* AUO G104SN02: 800x600 */
static const struct drm_display_mode auo_g104sn02_mode = {
	.clock = 40000,
	.hdisplay = 800,
	.hsync_start = 800 + 40,
	.hsync_end = 800 + 40 + 216,
	.htotal = 800 + 40 + 216 + 128,
	.vdisplay = 600,
	.vsync_start = 600 + 10,
	.vsync_end = 600 + 10 + 35,
	.vtotal = 600 + 10 + 35 + 2,
};

/* AUO G104SN02: 800x600 LVDS, 211x158 mm */
static const struct panel_desc auo_g104sn02 = {
	.modes = &auo_g104sn02_mode,
	.num_modes = 1,
	.bpc = 8,
	.size = {
		.width = 211,
		.height = 158,
	},
	.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
	.connector_type = DRM_MODE_CONNECTOR_LVDS,
};

/* AUO G121EAN01: 1280x800 timing ranges */
static const struct display_timing auo_g121ean01_timing = {
	.pixelclock = { 60000000, 74400000, 90000000 },
	.hactive = { 1280, 1280, 1280 },
	.hfront_porch = { 20, 50, 100 },
	.hback_porch = { 20, 50, 100 },
	.hsync_len = { 30, 100, 200 },
	.vactive = { 800, 800, 800 },
	.vfront_porch = { 2, 10, 25 },
	.vback_porch = { 2, 10, 25 },
	.vsync_len = { 4, 18, 50 },
};

/* AUO G121EAN01: 1280x800 LVDS, 261x163 mm */
static const struct panel_desc auo_g121ean01 = {
	.timings = &auo_g121ean01_timing,
	.num_timings = 1,
	.bpc = 8,
	.size = {
		.width = 261,
		.height = 163,
	},
	.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
	.connector_type = DRM_MODE_CONNECTOR_LVDS,
};

/* AUO G133HAN01: 1920x1080 timing ranges */
static const struct display_timing auo_g133han01_timings = {
	.pixelclock = { 134000000, 141200000, 149000000 },
	.hactive = { 1920, 1920, 1920 },
	.hfront_porch = { 39, 58, 77 },
	.hback_porch = { 59, 88, 117 },
	.hsync_len = { 28, 42, 56 },
	.vactive = { 1080, 1080, 1080 },
	.vfront_porch = { 3, 8, 11 },
	.vback_porch = { 5, 14, 19 },
	.vsync_len = { 4, 14, 19 },
};

/* AUO G133HAN01: 1920x1080 LVDS (JEIDA), 293x165 mm */
static const struct panel_desc auo_g133han01 = {
	.timings = &auo_g133han01_timings,
	.num_timings = 1,
	.bpc = 8,
	.size = {
		.width = 293,
		.height = 165,
	},
	.delay = {
		.prepare = 200,
		.enable = 50,
		.disable = 50,
		.unprepare = 1000,
	},
	.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA,
	.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
/* AUO G156XTN01: 1366x768 */
static const struct drm_display_mode auo_g156xtn01_mode = {
	.clock = 76000,
	.hdisplay = 1366,
	.hsync_start = 1366 + 33,
	.hsync_end = 1366 + 33 + 67,
	.htotal = 1560,
	.vdisplay = 768,
	.vsync_start = 768 + 4,
	.vsync_end = 768 + 4 + 4,
	.vtotal = 806,
};

/* AUO G156XTN01: 1366x768 LVDS, 344x194 mm */
static const struct panel_desc auo_g156xtn01 = {
	.modes = &auo_g156xtn01_mode,
	.num_modes = 1,
	.bpc = 8,
	.size = {
		.width = 344,
		.height = 194,
	},
	.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
	.connector_type = DRM_MODE_CONNECTOR_LVDS,
};

/* AUO G185HAN01: 1920x1080 timing ranges */
static const struct display_timing auo_g185han01_timings = {
	.pixelclock = { 120000000, 144000000, 175000000 },
	.hactive = { 1920, 1920, 1920 },
	.hfront_porch = { 36, 120, 148 },
	.hback_porch = { 24, 88, 108 },
	.hsync_len = { 20, 48, 64 },
	.vactive = { 1080, 1080, 1080 },
	.vfront_porch = { 6, 10, 40 },
	.vback_porch = { 2, 5, 20 },
	.vsync_len = { 2, 5, 20 },
};

/* AUO G185HAN01: 1920x1080 LVDS, 409x230 mm */
static const struct panel_desc auo_g185han01 = {
	.timings = &auo_g185han01_timings,
	.num_timings = 1,
	.bpc = 8,
	.size = {
		.width = 409,
		.height = 230,
	},
	.delay = {
		.prepare = 50,
		.enable = 200,
		.disable = 110,
		.unprepare = 1000,
	},
	.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
	.connector_type = DRM_MODE_CONNECTOR_LVDS,
};

/* AUO G190EAN01: 1280x1024 timing ranges */
static const struct display_timing auo_g190ean01_timings = {
	.pixelclock = { 90000000, 108000000, 135000000 },
	.hactive = { 1280, 1280, 1280 },
	.hfront_porch = { 126, 184, 1266 },
	.hback_porch = { 84, 122, 844 },
	.hsync_len = { 70, 102, 704 },
	.vactive = { 1024, 1024, 1024 },
	.vfront_porch = { 4, 26, 76 },
	.vback_porch = { 2, 8, 25 },
	.vsync_len = { 2, 8, 25 },
};

/* AUO G190EAN01: 1280x1024 LVDS, 376x301 mm */
static const struct panel_desc auo_g190ean01 = {
	.timings = &auo_g190ean01_timings,
	.num_timings = 1,
	.bpc = 8,
	.size = {
		.width = 376,
		.height = 301,
	},
	.delay = {
		.prepare = 50,
		.enable = 200,
		.disable = 110,
		.unprepare = 1000,
	},
	.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
	.connector_type = DRM_MODE_CONNECTOR_LVDS,
};

/* AUO P320HVN03: 1920x1080 timing ranges */
static const struct display_timing auo_p320hvn03_timings = {
	.pixelclock = { 106000000, 148500000, 164000000 },
	.hactive = { 1920, 1920, 1920 },
	.hfront_porch = { 25, 50, 130 },
	.hback_porch = { 25, 50, 130 },
	.hsync_len = { 20, 40, 105 },
	.vactive = { 1080, 1080, 1080 },
	.vfront_porch = { 8, 17, 150 },
	.vback_porch = { 8, 17, 150 },
	.vsync_len = { 4, 11, 100 },
};

/* AUO P320HVN03: 1920x1080 LVDS, 698x393 mm */
static const struct panel_desc auo_p320hvn03 = {
	.timings = &auo_p320hvn03_timings,
	.num_timings = 1,
	.bpc = 8,
	.size = {
		.width = 698,
		.height = 393,
	},
	.delay = {
		.prepare = 1,
		.enable = 450,
		.unprepare = 500,
	},
	.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
	.connector_type = DRM_MODE_CONNECTOR_LVDS,
};

/* AUO T215HVN01: 1920x1080 */
static const struct drm_display_mode auo_t215hvn01_mode = {
	.clock = 148800,
	.hdisplay = 1920,
	.hsync_start = 1920 + 88,
	.hsync_end = 1920 + 88 + 44,
	.htotal = 1920 + 88 + 44 + 148,
	.vdisplay = 1080,
	.vsync_start = 1080 + 4,
	.vsync_end = 1080 + 4 + 5,
	.vtotal = 1080 + 4 + 5 + 36,
};

/* AUO T215HVN01: 1920x1080 LVDS, 430x270 mm */
static const struct panel_desc auo_t215hvn01 = {
	.modes = &auo_t215hvn01_mode,
	.num_modes = 1,
	.bpc = 8,
	.size = {
		.width = 430,
		.height = 270,
	},
	.delay = {
		.disable = 5,
		.unprepare = 1000,
	},
	.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
	.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
/* AVIC TM070DDH03: 1024x600 */
static const struct drm_display_mode avic_tm070ddh03_mode = {
	.clock = 51200,
	.hdisplay = 1024,
	.hsync_start = 1024 + 160,
	.hsync_end = 1024 + 160 + 4,
	.htotal = 1024 + 160 + 4 + 156,
	.vdisplay = 600,
	.vsync_start = 600 + 17,
	.vsync_end = 600 + 17 + 1,
	.vtotal = 600 + 17 + 1 + 17,
};

/* AVIC TM070DDH03: 1024x600, 154x90 mm */
static const struct panel_desc avic_tm070ddh03 = {
	.modes = &avic_tm070ddh03_mode,
	.num_modes = 1,
	.bpc = 8,
	.size = {
		.width = 154,
		.height = 90,
	},
	.delay = {
		.prepare = 20,
		.enable = 200,
		.disable = 200,
	},
};

/* Banana Pi S070WV20-CT16: 800x480 */
static const struct drm_display_mode bananapi_s070wv20_ct16_mode = {
	.clock = 30000,
	.hdisplay = 800,
	.hsync_start = 800 + 40,
	.hsync_end = 800 + 40 + 48,
	.htotal = 800 + 40 + 48 + 40,
	.vdisplay = 480,
	.vsync_start = 480 + 13,
	.vsync_end = 480 + 13 + 3,
	.vtotal = 480 + 13 + 3 + 29,
};

/* Banana Pi S070WV20-CT16: 800x480, 154x86 mm */
static const struct panel_desc bananapi_s070wv20_ct16 = {
	.modes = &bananapi_s070wv20_ct16_mode,
	.num_modes = 1,
	.bpc = 6,
	.size = {
		.width = 154,
		.height = 86,
	},
};

/* BOE EV121WXM-N10-1850: 1280x800 timing ranges */
static const struct display_timing boe_ev121wxm_n10_1850_timing = {
	.pixelclock = { 69922000, 71000000, 72293000 },
	.hactive = { 1280, 1280, 1280 },
	.hfront_porch = { 48, 48, 48 },
	.hback_porch = { 80, 80, 80 },
	.hsync_len = { 32, 32, 32 },
	.vactive = { 800, 800, 800 },
	.vfront_porch = { 3, 3, 3 },
	.vback_porch = { 14, 14, 14 },
	.vsync_len = { 6, 6, 6 },
};

/* BOE EV121WXM-N10-1850: 1280x800 LVDS, 261x163 mm */
static const struct panel_desc boe_ev121wxm_n10_1850 = {
	.timings = &boe_ev121wxm_n10_1850_timing,
	.num_timings = 1,
	.bpc = 8,
	.size = {
		.width = 261,
		.height = 163,
	},
	.delay = {
		.prepare = 9,
		.enable = 300,
		.unprepare = 300,
		.disable = 560,
	},
	.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
	.bus_flags = DRM_BUS_FLAG_DE_HIGH,
	.connector_type = DRM_MODE_CONNECTOR_LVDS,
};

/* BOE HV070WSA: 1024x600 */
static const struct drm_display_mode boe_hv070wsa_mode = {
	.clock = 42105,
	.hdisplay = 1024,
	.hsync_start = 1024 + 30,
	.hsync_end = 1024 + 30 + 30,
	.htotal = 1024 + 30 + 30 + 30,
	.vdisplay = 600,
	.vsync_start = 600 + 10,
	.vsync_end = 600 + 10 + 10,
	.vtotal = 600 + 10 + 10 + 10,
};

/* BOE HV070WSA: 1024x600 LVDS, 154x90 mm */
static const struct panel_desc boe_hv070wsa = {
	.modes = &boe_hv070wsa_mode,
	.num_modes = 1,
	.bpc = 8,
	.size = {
		.width = 154,
		.height = 90,
	},
	.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
	.bus_flags = DRM_BUS_FLAG_DE_HIGH,
	.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
/* CDTech S043WQ26H-CT7: 480x272 */
static const struct drm_display_mode cdtech_s043wq26h_ct7_mode = {
	.clock = 9000,
	.hdisplay = 480,
	.hsync_start = 480 + 5,
	.hsync_end = 480 + 5 + 5,
	.htotal = 480 + 5 + 5 + 40,
	.vdisplay = 272,
	.vsync_start = 272 + 8,
	.vsync_end = 272 + 8 + 8,
	.vtotal = 272 + 8 + 8 + 8,
	.flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
};

/* CDTech S043WQ26H-CT7: 480x272, 95x54 mm */
static const struct panel_desc cdtech_s043wq26h_ct7 = {
	.modes = &cdtech_s043wq26h_ct7_mode,
	.num_modes = 1,
	.bpc = 8,
	.size = {
		.width = 95,
		.height = 54,
	},
	.bus_flags = DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE,
};

/* S070PWS19HP-FC21 2017/04/22 */
static const struct drm_display_mode cdtech_s070pws19hp_fc21_mode = {
	.clock = 51200,
	.hdisplay = 1024,
	.hsync_start = 1024 + 160,
	.hsync_end = 1024 + 160 + 20,
	.htotal = 1024 + 160 + 20 + 140,
	.vdisplay = 600,
	.vsync_start = 600 + 12,
	.vsync_end = 600 + 12 + 3,
	.vtotal = 600 + 12 + 3 + 20,
	.flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
};

/* CDTech S070PWS19HP-FC21: 1024x600 DPI, 154x86 mm */
static const struct panel_desc cdtech_s070pws19hp_fc21 = {
	.modes = &cdtech_s070pws19hp_fc21_mode,
	.num_modes = 1,
	.bpc = 6,
	.size = {
		.width = 154,
		.height = 86,
	},
	.bus_format = MEDIA_BUS_FMT_RGB666_1X18,
	.bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE,
	.connector_type = DRM_MODE_CONNECTOR_DPI,
};

/* S070SWV29HG-DC44 2017/09/21 */
static const struct drm_display_mode cdtech_s070swv29hg_dc44_mode = {
	.clock = 33300,
	.hdisplay = 800,
	.hsync_start = 800 + 210,
	.hsync_end = 800 + 210 + 2,
	.htotal = 800 + 210 + 2 + 44,
	.vdisplay = 480,
	.vsync_start = 480 + 22,
	.vsync_end = 480 + 22 + 2,
	.vtotal = 480 + 22 + 2 + 21,
	.flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
};

/* CDTech S070SWV29HG-DC44: 800x480 DPI, 154x86 mm */
static const struct panel_desc cdtech_s070swv29hg_dc44 = {
	.modes = &cdtech_s070swv29hg_dc44_mode,
	.num_modes = 1,
	.bpc = 6,
	.size = {
		.width = 154,
		.height = 86,
	},
	.bus_format = MEDIA_BUS_FMT_RGB666_1X18,
	.bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE,
	.connector_type = DRM_MODE_CONNECTOR_DPI,
};

/* CDTech S070WV95-CT16: 800x480 */
static const struct drm_display_mode cdtech_s070wv95_ct16_mode = {
	.clock = 35000,
	.hdisplay = 800,
	.hsync_start = 800 + 40,
	.hsync_end = 800 + 40 + 40,
	.htotal = 800 + 40 + 40 + 48,
	.vdisplay = 480,
	.vsync_start = 480 + 29,
	.vsync_end = 480 + 29 + 13,
	.vtotal = 480 + 29 + 13 + 3,
	.flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
};

/* CDTech S070WV95-CT16: 800x480, 154x85 mm */
static const struct panel_desc cdtech_s070wv95_ct16 = {
	.modes = &cdtech_s070wv95_ct16_mode,
	.num_modes = 1,
	.bpc = 8,
	.size = {
		.width = 154,
		.height = 85,
	},
};
/* Chefree CH101OLHLWH-002: 1280x800 timing ranges */
static const struct display_timing chefree_ch101olhlwh_002_timing = {
	.pixelclock = { 68900000, 71100000, 73400000 },
	.hactive = { 1280, 1280, 1280 },
	.hfront_porch = { 65, 80, 95 },
	.hback_porch = { 64, 79, 94 },
	.hsync_len = { 1, 1, 1 },
	.vactive = { 800, 800, 800 },
	.vfront_porch = { 7, 11, 14 },
	.vback_porch = { 7, 11, 14 },
	.vsync_len = { 1, 1, 1 },
	.flags = DISPLAY_FLAGS_DE_HIGH,
};

/* Chefree CH101OLHLWH-002: 1280x800 LVDS, 217x135 mm */
static const struct panel_desc chefree_ch101olhlwh_002 = {
	.timings = &chefree_ch101olhlwh_002_timing,
	.num_timings = 1,
	.bpc = 8,
	.size = {
		.width = 217,
		.height = 135,
	},
	.delay = {
		.enable = 200,
		.disable = 200,
	},
	.bus_flags = DRM_BUS_FLAG_DE_HIGH,
	.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
	.connector_type = DRM_MODE_CONNECTOR_LVDS,
};

/* Chunghwa CLAA070WP03XG: 800x1280 (portrait) */
static const struct drm_display_mode chunghwa_claa070wp03xg_mode = {
	.clock = 66770,
	.hdisplay = 800,
	.hsync_start = 800 + 49,
	.hsync_end = 800 + 49 + 33,
	.htotal = 800 + 49 + 33 + 17,
	.vdisplay = 1280,
	.vsync_start = 1280 + 1,
	.vsync_end = 1280 + 1 + 7,
	.vtotal = 1280 + 1 + 7 + 15,
	.flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
};

/* Chunghwa CLAA070WP03XG: 800x1280 LVDS, 94x150 mm */
static const struct panel_desc chunghwa_claa070wp03xg = {
	.modes = &chunghwa_claa070wp03xg_mode,
	.num_modes = 1,
	.bpc = 6,
	.size = {
		.width = 94,
		.height = 150,
	},
	.bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,
	.bus_flags = DRM_BUS_FLAG_DE_HIGH,
	.connector_type = DRM_MODE_CONNECTOR_LVDS,
};

/* Chunghwa CLAA101WA01A: 1366x768 */
static const struct drm_display_mode chunghwa_claa101wa01a_mode = {
	.clock = 72070,
	.hdisplay = 1366,
	.hsync_start = 1366 + 58,
	.hsync_end = 1366 + 58 + 58,
	.htotal = 1366 + 58 + 58 + 58,
	.vdisplay = 768,
	.vsync_start = 768 + 4,
	.vsync_end = 768 + 4 + 4,
	.vtotal = 768 + 4 + 4 + 4,
};

/* Chunghwa CLAA101WA01A: 1366x768 LVDS, 220x120 mm */
static const struct panel_desc chunghwa_claa101wa01a = {
	.modes = &chunghwa_claa101wa01a_mode,
	.num_modes = 1,
	.bpc = 6,
	.size = {
		.width = 220,
		.height = 120,
	},
	.bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,
	.bus_flags = DRM_BUS_FLAG_DE_HIGH,
	.connector_type = DRM_MODE_CONNECTOR_LVDS,
};

/* Chunghwa CLAA101WB01: 1366x768 */
static const struct drm_display_mode chunghwa_claa101wb01_mode = {
	.clock = 69300,
	.hdisplay = 1366,
	.hsync_start = 1366 + 48,
	.hsync_end = 1366 + 48 + 32,
	.htotal = 1366 + 48 + 32 + 20,
	.vdisplay = 768,
	.vsync_start = 768 + 16,
	.vsync_end = 768 + 16 + 8,
	.vtotal = 768 + 16 + 8 + 16,
};

/* Chunghwa CLAA101WB01: 1366x768 LVDS, 223x125 mm */
static const struct panel_desc chunghwa_claa101wb01 = {
	.modes = &chunghwa_claa101wb01_mode,
	.num_modes = 1,
	.bpc = 6,
	.size = {
		.width = 223,
		.height = 125,
	},
	.bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,
	.bus_flags = DRM_BUS_FLAG_DE_HIGH,
	.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
/* DataImage FG040346DSSWBG04: 480x272 parallel-RGB (DPI) panel, 8 bpc. */
static const struct display_timing dataimage_fg040346dsswbg04_timing = {
	.pixelclock = { 5000000, 9000000, 12000000 },
	.hactive = { 480, 480, 480 },
	.hfront_porch = { 12, 12, 12 },
	.hback_porch = { 12, 12, 12 },
	.hsync_len = { 21, 21, 21 },
	.vactive = { 272, 272, 272 },
	.vfront_porch = { 4, 4, 4 },
	.vback_porch = { 4, 4, 4 },
	.vsync_len = { 8, 8, 8 },
};
static const struct panel_desc dataimage_fg040346dsswbg04 = {
	.timings = &dataimage_fg040346dsswbg04_timing,
	.num_timings = 1,
	.bpc = 8,
	.size = {
		.width = 95,
		.height = 54,
	},
	.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
	.bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE,
	.connector_type = DRM_MODE_CONNECTOR_DPI,
};
/*
 * DataImage FG1001L0DSSWMG01: 1280x800 panel, 8 bpc.
 * NOTE(review): the descriptor sets neither bus_format nor connector_type,
 * unlike its siblings — confirm against the datasheet before relying on it.
 */
static const struct display_timing dataimage_fg1001l0dsswmg01_timing = {
	.pixelclock = { 68900000, 71110000, 73400000 },
	.hactive = { 1280, 1280, 1280 },
	.vactive = { 800, 800, 800 },
	.hback_porch = { 100, 100, 100 },
	.hfront_porch = { 100, 100, 100 },
	.vback_porch = { 5, 5, 5 },
	.vfront_porch = { 5, 5, 5 },
	.hsync_len = { 24, 24, 24 },
	.vsync_len = { 3, 3, 3 },
	.flags = DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_PIXDATA_POSEDGE |
		 DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW,
};
static const struct panel_desc dataimage_fg1001l0dsswmg01 = {
	.timings = &dataimage_fg1001l0dsswmg01_timing,
	.num_timings = 1,
	.bpc = 8,
	.size = {
		.width = 217,
		.height = 136,
	},
};
/* DataImage SCF0700C48GGU18: 800x480 parallel-RGB panel, 8 bpc. */
static const struct drm_display_mode dataimage_scf0700c48ggu18_mode = {
	.clock = 33260,
	.hdisplay = 800,
	.hsync_start = 800 + 40,
	.hsync_end = 800 + 40 + 128,
	.htotal = 800 + 40 + 128 + 88,
	.vdisplay = 480,
	.vsync_start = 480 + 10,
	.vsync_end = 480 + 10 + 2,
	.vtotal = 480 + 10 + 2 + 33,
	.flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
};
static const struct panel_desc dataimage_scf0700c48ggu18 = {
	.modes = &dataimage_scf0700c48ggu18_mode,
	.num_modes = 1,
	.bpc = 8,
	.size = {
		.width = 152,
		.height = 91,
	},
	.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
	.bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE,
};
/* DLC DLC0700YZG-1: 1024x600 LVDS panel, 6 bpc, RGB666 SPWG. */
static const struct display_timing dlc_dlc0700yzg_1_timing = {
	.pixelclock = { 45000000, 51200000, 57000000 },
	.hactive = { 1024, 1024, 1024 },
	.hfront_porch = { 100, 106, 113 },
	.hback_porch = { 100, 106, 113 },
	.hsync_len = { 100, 108, 114 },
	.vactive = { 600, 600, 600 },
	.vfront_porch = { 8, 11, 15 },
	.vback_porch = { 8, 11, 15 },
	.vsync_len = { 9, 13, 15 },
	.flags = DISPLAY_FLAGS_DE_HIGH,
};
static const struct panel_desc dlc_dlc0700yzg_1 = {
	.timings = &dlc_dlc0700yzg_1_timing,
	.num_timings = 1,
	.bpc = 6,
	.size = {
		.width = 154,
		.height = 86,
	},
	.delay = {
		.prepare = 30,
		.enable = 200,
		.disable = 200,
	},
	.bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,
	.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
/* DLC DLC1010GIG: 1280x800 LVDS panel, 8 bpc, RGB888 SPWG. */
static const struct display_timing dlc_dlc1010gig_timing = {
	.pixelclock = { 68900000, 71100000, 73400000 },
	.hactive = { 1280, 1280, 1280 },
	.hfront_porch = { 43, 53, 63 },
	.hback_porch = { 43, 53, 63 },
	.hsync_len = { 44, 54, 64 },
	.vactive = { 800, 800, 800 },
	.vfront_porch = { 5, 8, 11 },
	.vback_porch = { 5, 8, 11 },
	.vsync_len = { 5, 7, 11 },
	.flags = DISPLAY_FLAGS_DE_HIGH,
};
static const struct panel_desc dlc_dlc1010gig = {
	.timings = &dlc_dlc1010gig_timing,
	.num_timings = 1,
	.bpc = 8,
	.size = {
		.width = 216,
		.height = 135,
	},
	.delay = {
		.prepare = 60,
		.enable = 150,
		.disable = 100,
		.unprepare = 60,
	},
	.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
	.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
/*
 * EDT ET035012DM6: 320x240 parallel-RGB panel, 8 bpc, DE active low,
 * data sampled on the rising pixel-clock edge.
 * Note: htotal is written as 320 + 20 + 68, i.e. the 30-pixel sync plus a
 * 38-pixel back porch folded together (408 total).
 */
static const struct drm_display_mode edt_et035012dm6_mode = {
	.clock = 6500,
	.hdisplay = 320,
	.hsync_start = 320 + 20,
	.hsync_end = 320 + 20 + 30,
	.htotal = 320 + 20 + 68,
	.vdisplay = 240,
	.vsync_start = 240 + 4,
	.vsync_end = 240 + 4 + 4,
	.vtotal = 240 + 4 + 4 + 14,
	.flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
};
static const struct panel_desc edt_et035012dm6 = {
	.modes = &edt_et035012dm6_mode,
	.num_modes = 1,
	.bpc = 8,
	.size = {
		.width = 70,
		.height = 52,
	},
	.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
	.bus_flags = DRM_BUS_FLAG_DE_LOW | DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE,
};
/*
 * EDT ETM0350G0DH6: 320x240 DPI panel, 6 bpc.
 * NOTE(review): hsync_end == htotal and vsync_end == vtotal, i.e. the back
 * porches are zero — looks intentional for this module, but worth confirming
 * against the datasheet.
 */
static const struct drm_display_mode edt_etm0350g0dh6_mode = {
	.clock = 6520,
	.hdisplay = 320,
	.hsync_start = 320 + 20,
	.hsync_end = 320 + 20 + 68,
	.htotal = 320 + 20 + 68,
	.vdisplay = 240,
	.vsync_start = 240 + 4,
	.vsync_end = 240 + 4 + 18,
	.vtotal = 240 + 4 + 18,
	.flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
};
static const struct panel_desc edt_etm0350g0dh6 = {
	.modes = &edt_etm0350g0dh6_mode,
	.num_modes = 1,
	.bpc = 6,
	.size = {
		.width = 70,
		.height = 53,
	},
	.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
	.bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE,
	.connector_type = DRM_MODE_CONNECTOR_DPI,
};
/* EDT ETM043080DH6GP: 480x288 DPI panel, 8 bpc, RGB666 1x18 bus. */
static const struct drm_display_mode edt_etm043080dh6gp_mode = {
	.clock = 10870,
	.hdisplay = 480,
	.hsync_start = 480 + 8,
	.hsync_end = 480 + 8 + 4,
	.htotal = 480 + 8 + 4 + 41,
	/*
	 * IWG22M: Y resolution changed for "dc_linuxfb" module crashing while
	 * fb_align
	 */
	.vdisplay = 288,
	.vsync_start = 288 + 2,
	.vsync_end = 288 + 2 + 4,
	.vtotal = 288 + 2 + 4 + 10,
};
static const struct panel_desc edt_etm043080dh6gp = {
	.modes = &edt_etm043080dh6gp_mode,
	.num_modes = 1,
	.bpc = 8,
	.size = {
		.width = 100,
		.height = 65,
	},
	.bus_format = MEDIA_BUS_FMT_RGB666_1X18,
	.connector_type = DRM_MODE_CONNECTOR_DPI,
};
/* EDT ETM0430G0DH6: 480x272 DPI panel, 6 bpc, RGB666 1x18 bus. */
static const struct drm_display_mode edt_etm0430g0dh6_mode = {
	.clock = 9000,
	.hdisplay = 480,
	.hsync_start = 480 + 2,
	.hsync_end = 480 + 2 + 41,
	.htotal = 480 + 2 + 41 + 2,
	.vdisplay = 272,
	.vsync_start = 272 + 2,
	.vsync_end = 272 + 2 + 10,
	.vtotal = 272 + 2 + 10 + 2,
	.flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
};
static const struct panel_desc edt_etm0430g0dh6 = {
	.modes = &edt_etm0430g0dh6_mode,
	.num_modes = 1,
	.bpc = 6,
	.size = {
		.width = 95,
		.height = 54,
	},
	.bus_format = MEDIA_BUS_FMT_RGB666_1X18,
	.bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE,
	.connector_type = DRM_MODE_CONNECTOR_DPI,
};
/* EDT ET057090DHU: 640x480 DPI panel, 6 bpc, RGB666 1x18 bus. */
static const struct drm_display_mode edt_et057090dhu_mode = {
	.clock = 25175,
	.hdisplay = 640,
	.hsync_start = 640 + 16,
	.hsync_end = 640 + 16 + 30,
	.htotal = 640 + 16 + 30 + 114,
	.vdisplay = 480,
	.vsync_start = 480 + 10,
	.vsync_end = 480 + 10 + 3,
	.vtotal = 480 + 10 + 3 + 32,
	.flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
};
static const struct panel_desc edt_et057090dhu = {
	.modes = &edt_et057090dhu_mode,
	.num_modes = 1,
	.bpc = 6,
	.size = {
		.width = 115,
		.height = 86,
	},
	.bus_format = MEDIA_BUS_FMT_RGB666_1X18,
	.bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE,
	.connector_type = DRM_MODE_CONNECTOR_DPI,
};
/*
 * EDT ETM0700G0DH6: 800x480 DPI panel, 6 bpc.  The B-variant below shares
 * the same mode but latches pixel data on the opposite clock edge
 * (DRIVE_POSEDGE instead of DRIVE_NEGEDGE).
 */
static const struct drm_display_mode edt_etm0700g0dh6_mode = {
	.clock = 33260,
	.hdisplay = 800,
	.hsync_start = 800 + 40,
	.hsync_end = 800 + 40 + 128,
	.htotal = 800 + 40 + 128 + 88,
	.vdisplay = 480,
	.vsync_start = 480 + 10,
	.vsync_end = 480 + 10 + 2,
	.vtotal = 480 + 10 + 2 + 33,
	.flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
};
static const struct panel_desc edt_etm0700g0dh6 = {
	.modes = &edt_etm0700g0dh6_mode,
	.num_modes = 1,
	.bpc = 6,
	.size = {
		.width = 152,
		.height = 91,
	},
	.bus_format = MEDIA_BUS_FMT_RGB666_1X18,
	.bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE,
	.connector_type = DRM_MODE_CONNECTOR_DPI,
};
/* EDT ETM0700G0BDH6: same timing as ETM0700G0DH6, opposite pixel-clock edge. */
static const struct panel_desc edt_etm0700g0bdh6 = {
	.modes = &edt_etm0700g0dh6_mode,
	.num_modes = 1,
	.bpc = 6,
	.size = {
		.width = 152,
		.height = 91,
	},
	.bus_format = MEDIA_BUS_FMT_RGB666_1X18,
	.bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE,
	.connector_type = DRM_MODE_CONNECTOR_DPI,
};
/* EDT ETML0700Y5DHA: 1024x600 LVDS panel, 8 bpc, RGB888 SPWG. */
static const struct display_timing edt_etml0700y5dha_timing = {
	.pixelclock = { 40800000, 51200000, 67200000 },
	.hactive = { 1024, 1024, 1024 },
	.hfront_porch = { 30, 106, 125 },
	.hback_porch = { 30, 106, 125 },
	.hsync_len = { 30, 108, 126 },
	.vactive = { 600, 600, 600 },
	.vfront_porch = { 3, 12, 67},
	.vback_porch = { 3, 12, 67 },
	.vsync_len = { 4, 11, 66 },
	.flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW |
		 DISPLAY_FLAGS_DE_HIGH,
};
static const struct panel_desc edt_etml0700y5dha = {
	.timings = &edt_etml0700y5dha_timing,
	.num_timings = 1,
	.bpc = 8,
	.size = {
		.width = 155,
		.height = 86,
	},
	.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
	.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
/*
 * EDT ETMV570G2DHU: 640x480 DPI panel, 6 bpc.
 * NOTE(review): hsync_start == hdisplay (zero horizontal front porch) and
 * the hsync pulse is only 16 clocks while htotal implies a 114-clock back
 * porch — presumably per the datasheet, but worth double-checking.
 */
static const struct drm_display_mode edt_etmv570g2dhu_mode = {
	.clock = 25175,
	.hdisplay = 640,
	.hsync_start = 640,
	.hsync_end = 640 + 16,
	.htotal = 640 + 16 + 30 + 114,
	.vdisplay = 480,
	.vsync_start = 480 + 10,
	.vsync_end = 480 + 10 + 3,
	.vtotal = 480 + 10 + 3 + 35,
	.flags = DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_PHSYNC,
};
static const struct panel_desc edt_etmv570g2dhu = {
	.modes = &edt_etmv570g2dhu_mode,
	.num_modes = 1,
	.bpc = 6,
	.size = {
		.width = 115,
		.height = 86,
	},
	.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
	.bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE,
	.connector_type = DRM_MODE_CONNECTOR_DPI,
};
/*
 * E Ink VB3300-KCA: e-paper panel driven over DPI; 334x1405 with minimal
 * (1-clock) porches on all sides.
 */
static const struct display_timing eink_vb3300_kca_timing = {
	.pixelclock = { 40000000, 40000000, 40000000 },
	.hactive = { 334, 334, 334 },
	.hfront_porch = { 1, 1, 1 },
	.hback_porch = { 1, 1, 1 },
	.hsync_len = { 1, 1, 1 },
	.vactive = { 1405, 1405, 1405 },
	.vfront_porch = { 1, 1, 1 },
	.vback_porch = { 1, 1, 1 },
	.vsync_len = { 1, 1, 1 },
	.flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW |
		 DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_PIXDATA_POSEDGE,
};
static const struct panel_desc eink_vb3300_kca = {
	.timings = &eink_vb3300_kca_timing,
	.num_timings = 1,
	.bpc = 6,
	.size = {
		.width = 157,
		.height = 209,
	},
	.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
	.bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE,
	.connector_type = DRM_MODE_CONNECTOR_DPI,
};
/* Evervision VGG804821: 800x480 parallel-RGB panel, 8 bpc. */
static const struct display_timing evervision_vgg804821_timing = {
	.pixelclock = { 27600000, 33300000, 50000000 },
	.hactive = { 800, 800, 800 },
	.hfront_porch = { 40, 66, 70 },
	.hback_porch = { 40, 67, 70 },
	.hsync_len = { 40, 67, 70 },
	.vactive = { 480, 480, 480 },
	.vfront_porch = { 6, 10, 10 },
	.vback_porch = { 7, 11, 11 },
	.vsync_len = { 7, 11, 11 },
	.flags = DISPLAY_FLAGS_HSYNC_HIGH | DISPLAY_FLAGS_VSYNC_HIGH |
		 DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_PIXDATA_NEGEDGE |
		 DISPLAY_FLAGS_SYNC_NEGEDGE,
};
static const struct panel_desc evervision_vgg804821 = {
	.timings = &evervision_vgg804821_timing,
	.num_timings = 1,
	.bpc = 8,
	.size = {
		.width = 108,
		.height = 64,
	},
	.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
	.bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE,
};
/* Foxlink FL500WVR00-A0T: 800x480 parallel-RGB panel, 8 bpc. */
static const struct drm_display_mode foxlink_fl500wvr00_a0t_mode = {
	.clock = 32260,
	.hdisplay = 800,
	.hsync_start = 800 + 168,
	.hsync_end = 800 + 168 + 64,
	.htotal = 800 + 168 + 64 + 88,
	.vdisplay = 480,
	.vsync_start = 480 + 37,
	.vsync_end = 480 + 37 + 2,
	.vtotal = 480 + 37 + 2 + 8,
};
static const struct panel_desc foxlink_fl500wvr00_a0t = {
	.modes = &foxlink_fl500wvr00_a0t_mode,
	.num_modes = 1,
	.bpc = 8,
	.size = {
		.width = 108,
		.height = 65,
	},
	.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
};
/* Frida FRD350H54004: 320x240 DPI panel with two modes (60 Hz and 50 Hz). */
static const struct drm_display_mode frida_frd350h54004_modes[] = {
	{ /* 60 Hz */
		.clock = 6000,
		.hdisplay = 320,
		.hsync_start = 320 + 44,
		.hsync_end = 320 + 44 + 16,
		.htotal = 320 + 44 + 16 + 20,
		.vdisplay = 240,
		.vsync_start = 240 + 2,
		.vsync_end = 240 + 2 + 6,
		.vtotal = 240 + 2 + 6 + 2,
		.flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
	},
	{ /* 50 Hz */
		.clock = 5400,
		.hdisplay = 320,
		.hsync_start = 320 + 56,
		.hsync_end = 320 + 56 + 16,
		.htotal = 320 + 56 + 16 + 40,
		.vdisplay = 240,
		.vsync_start = 240 + 2,
		.vsync_end = 240 + 2 + 6,
		.vtotal = 240 + 2 + 6 + 2,
		.flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
	},
};
static const struct panel_desc frida_frd350h54004 = {
	.modes = frida_frd350h54004_modes,
	.num_modes = ARRAY_SIZE(frida_frd350h54004_modes),
	.bpc = 8,
	.size = {
		.width = 77,
		.height = 64,
	},
	.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
	.bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE,
	.connector_type = DRM_MODE_CONNECTOR_DPI,
};
/* FriendlyELEC HD702E: 800x1280 portrait panel; descriptor carries only the mode and size. */
static const struct drm_display_mode friendlyarm_hd702e_mode = {
	.clock		= 67185,
	.hdisplay	= 800,
	.hsync_start	= 800 + 20,
	.hsync_end	= 800 + 20 + 24,
	.htotal		= 800 + 20 + 24 + 20,
	.vdisplay	= 1280,
	.vsync_start	= 1280 + 4,
	.vsync_end	= 1280 + 4 + 8,
	.vtotal		= 1280 + 4 + 8 + 4,
	.flags		= DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
};
static const struct panel_desc friendlyarm_hd702e = {
	.modes = &friendlyarm_hd702e_mode,
	.num_modes = 1,
	.size = {
		.width	= 94,
		.height	= 151,
	},
};
/* GiantPlus GPG482739QS5: 480x272 parallel-RGB panel, 8 bpc. */
static const struct drm_display_mode giantplus_gpg482739qs5_mode = {
	.clock = 9000,
	.hdisplay = 480,
	.hsync_start = 480 + 5,
	.hsync_end = 480 + 5 + 1,
	.htotal = 480 + 5 + 1 + 40,
	.vdisplay = 272,
	.vsync_start = 272 + 8,
	.vsync_end = 272 + 8 + 1,
	.vtotal = 272 + 8 + 1 + 8,
};
static const struct panel_desc giantplus_gpg482739qs5 = {
	.modes = &giantplus_gpg482739qs5_mode,
	.num_modes = 1,
	.bpc = 8,
	.size = {
		.width = 95,
		.height = 54,
	},
	.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
};
/* GiantPlus GPM940B0: 320x240 panel on a serial 3x8 RGB bus. */
static const struct display_timing giantplus_gpm940b0_timing = {
	.pixelclock = { 13500000, 27000000, 27500000 },
	.hactive = { 320, 320, 320 },
	.hfront_porch = { 14, 686, 718 },
	.hback_porch = { 50, 70, 255 },
	.hsync_len = { 1, 1, 1 },
	.vactive = { 240, 240, 240 },
	.vfront_porch = { 1, 1, 179 },
	.vback_porch = { 1, 21, 31 },
	.vsync_len = { 1, 1, 6 },
	.flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW,
};
static const struct panel_desc giantplus_gpm940b0 = {
	.timings = &giantplus_gpm940b0_timing,
	.num_timings = 1,
	.bpc = 8,
	.size = {
		.width = 60,
		.height = 45,
	},
	.bus_format = MEDIA_BUS_FMT_RGB888_3X8,
	.bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE,
};
/* HannStar HSD070PWW1: 1280x800 LVDS panel, 6 bpc, RGB666 SPWG. */
static const struct display_timing hannstar_hsd070pww1_timing = {
	.pixelclock = { 64300000, 71100000, 82000000 },
	.hactive = { 1280, 1280, 1280 },
	.hfront_porch = { 1, 1, 10 },
	.hback_porch = { 1, 1, 10 },
	/*
	 * According to the data sheet, the minimum horizontal blanking interval
	 * is 54 clocks (1 + 52 + 1), but tests with a Nitrogen6X have shown the
	 * minimum working horizontal blanking interval to be 60 clocks.
	 */
	.hsync_len = { 58, 158, 661 },
	.vactive = { 800, 800, 800 },
	.vfront_porch = { 1, 1, 10 },
	.vback_porch = { 1, 1, 10 },
	.vsync_len = { 1, 21, 203 },
	.flags = DISPLAY_FLAGS_DE_HIGH,
};
static const struct panel_desc hannstar_hsd070pww1 = {
	.timings = &hannstar_hsd070pww1_timing,
	.num_timings = 1,
	.bpc = 6,
	.size = {
		.width = 151,
		.height = 94,
	},
	.bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,
	.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
/* HannStar HSD100PXN1: 1024x768 LVDS panel, 6 bpc, RGB666 SPWG. */
static const struct display_timing hannstar_hsd100pxn1_timing = {
	.pixelclock = { 55000000, 65000000, 75000000 },
	.hactive = { 1024, 1024, 1024 },
	.hfront_porch = { 40, 40, 40 },
	.hback_porch = { 220, 220, 220 },
	.hsync_len = { 20, 60, 100 },
	.vactive = { 768, 768, 768 },
	.vfront_porch = { 7, 7, 7 },
	.vback_porch = { 21, 21, 21 },
	.vsync_len = { 10, 10, 10 },
	.flags = DISPLAY_FLAGS_DE_HIGH,
};
static const struct panel_desc hannstar_hsd100pxn1 = {
	.timings = &hannstar_hsd100pxn1_timing,
	.num_timings = 1,
	.bpc = 6,
	.size = {
		.width = 203,
		.height = 152,
	},
	.bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,
	.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
/*
 * HannStar HSD101PWW2: 1280x800 LVDS panel, 8 bpc, RGB888 SPWG.
 * Same timing envelope as the HSD070PWW1 above, larger glass.
 */
static const struct display_timing hannstar_hsd101pww2_timing = {
	.pixelclock = { 64300000, 71100000, 82000000 },
	.hactive = { 1280, 1280, 1280 },
	.hfront_porch = { 1, 1, 10 },
	.hback_porch = { 1, 1, 10 },
	.hsync_len = { 58, 158, 661 },
	.vactive = { 800, 800, 800 },
	.vfront_porch = { 1, 1, 10 },
	.vback_porch = { 1, 1, 10 },
	.vsync_len = { 1, 21, 203 },
	.flags = DISPLAY_FLAGS_DE_HIGH,
};
static const struct panel_desc hannstar_hsd101pww2 = {
	.timings = &hannstar_hsd101pww2_timing,
	.num_timings = 1,
	.bpc = 8,
	.size = {
		.width = 217,
		.height = 136,
	},
	.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
	.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
/* Hitachi TX23D38VM0CAA: 800x480 panel, 6 bpc, 160 ms enable/disable delays. */
static const struct drm_display_mode hitachi_tx23d38vm0caa_mode = {
	.clock = 33333,
	.hdisplay = 800,
	.hsync_start = 800 + 85,
	.hsync_end = 800 + 85 + 86,
	.htotal = 800 + 85 + 86 + 85,
	.vdisplay = 480,
	.vsync_start = 480 + 16,
	.vsync_end = 480 + 16 + 13,
	.vtotal = 480 + 16 + 13 + 16,
};
static const struct panel_desc hitachi_tx23d38vm0caa = {
	.modes = &hitachi_tx23d38vm0caa_mode,
	.num_modes = 1,
	.bpc = 6,
	.size = {
		.width = 195,
		.height = 117,
	},
	.delay = {
		.enable = 160,
		.disable = 160,
	},
};
/* Innolux AT043TN24: 480x272 DPI panel, 8 bpc. */
static const struct drm_display_mode innolux_at043tn24_mode = {
	.clock = 9000,
	.hdisplay = 480,
	.hsync_start = 480 + 2,
	.hsync_end = 480 + 2 + 41,
	.htotal = 480 + 2 + 41 + 2,
	.vdisplay = 272,
	.vsync_start = 272 + 2,
	.vsync_end = 272 + 2 + 10,
	.vtotal = 272 + 2 + 10 + 2,
	.flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
};
static const struct panel_desc innolux_at043tn24 = {
	.modes = &innolux_at043tn24_mode,
	.num_modes = 1,
	.bpc = 8,
	.size = {
		.width = 95,
		.height = 54,
	},
	.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
	.connector_type = DRM_MODE_CONNECTOR_DPI,
	.bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE,
};
/*
 * Innolux AT070TN92: 800x480 parallel-RGB panel.
 * NOTE(review): vtotal is written as 480 + 22 + 23 + 10 while vsync_end is
 * 480 + 22 + 10 — the sync-length terms (10 vs 23) do not match.  Kept as-is
 * since changing blanking could break working boards; verify vs datasheet.
 */
static const struct drm_display_mode innolux_at070tn92_mode = {
	.clock = 33333,
	.hdisplay = 800,
	.hsync_start = 800 + 210,
	.hsync_end = 800 + 210 + 20,
	.htotal = 800 + 210 + 20 + 46,
	.vdisplay = 480,
	.vsync_start = 480 + 22,
	.vsync_end = 480 + 22 + 10,
	.vtotal = 480 + 22 + 23 + 10,
};
static const struct panel_desc innolux_at070tn92 = {
	.modes = &innolux_at070tn92_mode,
	.num_modes = 1,
	.size = {
		.width = 154,
		.height = 86,
	},
	.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
};
/* Innolux G070ACE-L01: 800x480 LVDS panel, 8 bpc, long (500 ms) power-down delay. */
static const struct display_timing innolux_g070ace_l01_timing = {
	.pixelclock = { 25200000, 35000000, 35700000 },
	.hactive = { 800, 800, 800 },
	.hfront_porch = { 30, 32, 87 },
	.hback_porch = { 30, 32, 87 },
	.hsync_len = { 1, 1, 1 },
	.vactive = { 480, 480, 480 },
	.vfront_porch = { 3, 3, 3 },
	.vback_porch = { 13, 13, 13 },
	.vsync_len = { 1, 1, 4 },
	.flags = DISPLAY_FLAGS_DE_HIGH,
};
static const struct panel_desc innolux_g070ace_l01 = {
	.timings = &innolux_g070ace_l01_timing,
	.num_timings = 1,
	.bpc = 8,
	.size = {
		.width = 152,
		.height = 91,
	},
	.delay = {
		.prepare = 10,
		.enable = 50,
		.disable = 50,
		.unprepare = 500,
	},
	.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
	.bus_flags = DRM_BUS_FLAG_DE_HIGH,
	.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
/* Innolux G070Y2-L01: 800x480 LVDS panel, 8 bpc, 800 ms unprepare delay. */
static const struct display_timing innolux_g070y2_l01_timing = {
	.pixelclock = { 28000000, 29500000, 32000000 },
	.hactive = { 800, 800, 800 },
	.hfront_porch = { 61, 91, 141 },
	.hback_porch = { 60, 90, 140 },
	.hsync_len = { 12, 12, 12 },
	.vactive = { 480, 480, 480 },
	.vfront_porch = { 4, 9, 30 },
	.vback_porch = { 4, 8, 28 },
	.vsync_len = { 2, 2, 2 },
	.flags = DISPLAY_FLAGS_DE_HIGH,
};
static const struct panel_desc innolux_g070y2_l01 = {
	.timings = &innolux_g070y2_l01_timing,
	.num_timings = 1,
	.bpc = 8,
	.size = {
		.width = 152,
		.height = 91,
	},
	.delay = {
		.prepare = 10,
		.enable = 100,
		.disable = 100,
		.unprepare = 800,
	},
	.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
	.bus_flags = DRM_BUS_FLAG_DE_HIGH,
	.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
/*
 * Innolux G070Y2-T02: 800x480 DPI panel, 8 bpc.
 * NOTE(review): same vtotal quirk as innolux_at070tn92_mode above
 * (480 + 22 + 23 + 10 vs vsync_end = 480 + 22 + 10); kept as-is.
 */
static const struct drm_display_mode innolux_g070y2_t02_mode = {
	.clock = 33333,
	.hdisplay = 800,
	.hsync_start = 800 + 210,
	.hsync_end = 800 + 210 + 20,
	.htotal = 800 + 210 + 20 + 46,
	.vdisplay = 480,
	.vsync_start = 480 + 22,
	.vsync_end = 480 + 22 + 10,
	.vtotal = 480 + 22 + 23 + 10,
};
static const struct panel_desc innolux_g070y2_t02 = {
	.modes = &innolux_g070y2_t02_mode,
	.num_modes = 1,
	.bpc = 8,
	.size = {
		.width = 152,
		.height = 92,
	},
	.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
	.bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE,
	.connector_type = DRM_MODE_CONNECTOR_DPI,
};
/* Innolux G101ICE-L01: 1280x800 LVDS panel, 8 bpc, RGB888 SPWG. */
static const struct display_timing innolux_g101ice_l01_timing = {
	.pixelclock = { 60400000, 71100000, 74700000 },
	.hactive = { 1280, 1280, 1280 },
	.hfront_porch = { 41, 80, 100 },
	.hback_porch = { 40, 79, 99 },
	.hsync_len = { 1, 1, 1 },
	.vactive = { 800, 800, 800 },
	.vfront_porch = { 5, 11, 14 },
	.vback_porch = { 4, 11, 14 },
	.vsync_len = { 1, 1, 1 },
	.flags = DISPLAY_FLAGS_DE_HIGH,
};
static const struct panel_desc innolux_g101ice_l01 = {
	.timings = &innolux_g101ice_l01_timing,
	.num_timings = 1,
	.bpc = 8,
	.size = {
		.width = 217,
		.height = 135,
	},
	.delay = {
		.enable = 200,
		.disable = 200,
	},
	.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
	.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
/* Innolux G121I1-L01: 1280x800 LVDS panel, 6 bpc, RGB666 SPWG. */
static const struct display_timing innolux_g121i1_l01_timing = {
	.pixelclock = { 67450000, 71000000, 74550000 },
	.hactive = { 1280, 1280, 1280 },
	.hfront_porch = { 40, 80, 160 },
	.hback_porch = { 39, 79, 159 },
	.hsync_len = { 1, 1, 1 },
	.vactive = { 800, 800, 800 },
	.vfront_porch = { 5, 11, 100 },
	.vback_porch = { 4, 11, 99 },
	.vsync_len = { 1, 1, 1 },
};
static const struct panel_desc innolux_g121i1_l01 = {
	.timings = &innolux_g121i1_l01_timing,
	.num_timings = 1,
	.bpc = 6,
	.size = {
		.width = 261,
		.height = 163,
	},
	.delay = {
		.enable = 200,
		.disable = 20,
	},
	.bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,
	.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
/*
 * Innolux G121X1-L03: 1024x768 panel, 6 bpc.
 * NOTE(review): hsync is a single clock with all horizontal blanking in the
 * back porch, and vtotal == vsync_end (zero vertical back porch) — unusual
 * but kept verbatim; confirm against the datasheet before touching.
 */
static const struct drm_display_mode innolux_g121x1_l03_mode = {
	.clock = 65000,
	.hdisplay = 1024,
	.hsync_start = 1024 + 0,
	.hsync_end = 1024 + 1,
	.htotal = 1024 + 0 + 1 + 320,
	.vdisplay = 768,
	.vsync_start = 768 + 38,
	.vsync_end = 768 + 38 + 1,
	.vtotal = 768 + 38 + 1 + 0,
	.flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
};
static const struct panel_desc innolux_g121x1_l03 = {
	.modes = &innolux_g121x1_l03_mode,
	.num_modes = 1,
	.bpc = 6,
	.size = {
		.width = 246,
		.height = 185,
	},
	.delay = {
		.enable = 200,
		.unprepare = 200,
		.disable = 400,
	},
};
/* Innolux G156HCE-L01: 1920x1080 LVDS panel, 8 bpc; delays keyed to datasheet T-parameters. */
static const struct display_timing innolux_g156hce_l01_timings = {
	.pixelclock = { 120000000, 141860000, 150000000 },
	.hactive = { 1920, 1920, 1920 },
	.hfront_porch = { 80, 90, 100 },
	.hback_porch = { 80, 90, 100 },
	.hsync_len = { 20, 30, 30 },
	.vactive = { 1080, 1080, 1080 },
	.vfront_porch = { 3, 10, 20 },
	.vback_porch = { 3, 10, 20 },
	.vsync_len = { 4, 10, 10 },
};
static const struct panel_desc innolux_g156hce_l01 = {
	.timings = &innolux_g156hce_l01_timings,
	.num_timings = 1,
	.bpc = 8,
	.size = {
		.width = 344,
		.height = 194,
	},
	.delay = {
		.prepare = 1,		/* T1+T2 */
		.enable = 450,		/* T5 */
		.disable = 200,		/* T6 */
		.unprepare = 10,	/* T3+T7 */
	},
	.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
	.bus_flags = DRM_BUS_FLAG_DE_HIGH,
	.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
/* Innolux N156BGE-L21: 1366x768 LVDS panel, 6 bpc, RGB666 SPWG. */
static const struct drm_display_mode innolux_n156bge_l21_mode = {
	.clock = 69300,
	.hdisplay = 1366,
	.hsync_start = 1366 + 16,
	.hsync_end = 1366 + 16 + 34,
	.htotal = 1366 + 16 + 34 + 50,
	.vdisplay = 768,
	.vsync_start = 768 + 2,
	.vsync_end = 768 + 2 + 6,
	.vtotal = 768 + 2 + 6 + 12,
};
static const struct panel_desc innolux_n156bge_l21 = {
	.modes = &innolux_n156bge_l21_mode,
	.num_modes = 1,
	.bpc = 6,
	.size = {
		.width = 344,
		.height = 193,
	},
	.bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,
	.bus_flags = DRM_BUS_FLAG_DE_HIGH,
	.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
/* Innolux ZJ070NA-01P: 1024x600 panel, 6 bpc; no bus format/connector given. */
static const struct drm_display_mode innolux_zj070na_01p_mode = {
	.clock = 51501,
	.hdisplay = 1024,
	.hsync_start = 1024 + 128,
	.hsync_end = 1024 + 128 + 64,
	.htotal = 1024 + 128 + 64 + 128,
	.vdisplay = 600,
	.vsync_start = 600 + 16,
	.vsync_end = 600 + 16 + 4,
	.vtotal = 600 + 16 + 4 + 16,
};
static const struct panel_desc innolux_zj070na_01p = {
	.modes = &innolux_zj070na_01p_mode,
	.num_modes = 1,
	.bpc = 6,
	.size = {
		.width = 154,
		.height = 90,
	},
};
/* KOE TX14D24VM1BPA: 320x240 panel, 6 bpc, low-frequency (~5.85 MHz) pixel clock. */
static const struct display_timing koe_tx14d24vm1bpa_timing = {
	.pixelclock = { 5580000, 5850000, 6200000 },
	.hactive = { 320, 320, 320 },
	.hfront_porch = { 30, 30, 30 },
	.hback_porch = { 30, 30, 30 },
	.hsync_len = { 1, 5, 17 },
	.vactive = { 240, 240, 240 },
	.vfront_porch = { 6, 6, 6 },
	.vback_porch = { 5, 5, 5 },
	.vsync_len = { 1, 2, 11 },
	.flags = DISPLAY_FLAGS_DE_HIGH,
};
static const struct panel_desc koe_tx14d24vm1bpa = {
	.timings = &koe_tx14d24vm1bpa_timing,
	.num_timings = 1,
	.bpc = 6,
	.size = {
		.width = 115,
		.height = 86,
	},
};
/* KOE TX26D202VM0BWA: 1920x1200 LVDS panel, 8 bpc, 1 s power-sequencing delays. */
static const struct display_timing koe_tx26d202vm0bwa_timing = {
	.pixelclock = { 151820000, 156720000, 159780000 },
	.hactive = { 1920, 1920, 1920 },
	.hfront_porch = { 105, 130, 142 },
	.hback_porch = { 45, 70, 82 },
	.hsync_len = { 30, 30, 30 },
	.vactive = { 1200, 1200, 1200},
	.vfront_porch = { 3, 5, 10 },
	.vback_porch = { 2, 5, 10 },
	.vsync_len = { 5, 5, 5 },
};
static const struct panel_desc koe_tx26d202vm0bwa = {
	.timings = &koe_tx26d202vm0bwa_timing,
	.num_timings = 1,
	.bpc = 8,
	.size = {
		.width = 217,
		.height = 136,
	},
	.delay = {
		.prepare = 1000,
		.enable = 1000,
		.unprepare = 1000,
		.disable = 1000,
	},
	.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
	.bus_flags = DRM_BUS_FLAG_DE_HIGH,
	.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
/* KOE TX31D200VM0BAA: ultra-wide 1280x480 LVDS panel, 6 bpc, RGB666 SPWG. */
static const struct display_timing koe_tx31d200vm0baa_timing = {
	.pixelclock = { 39600000, 43200000, 48000000 },
	.hactive = { 1280, 1280, 1280 },
	.hfront_porch = { 16, 36, 56 },
	.hback_porch = { 16, 36, 56 },
	.hsync_len = { 8, 8, 8 },
	.vactive = { 480, 480, 480 },
	.vfront_porch = { 6, 21, 33 },
	.vback_porch = { 6, 21, 33 },
	.vsync_len = { 8, 8, 8 },
	.flags = DISPLAY_FLAGS_DE_HIGH,
};
static const struct panel_desc koe_tx31d200vm0baa = {
	.timings = &koe_tx31d200vm0baa_timing,
	.num_timings = 1,
	.bpc = 6,
	.size = {
		.width = 292,
		.height = 109,
	},
	.bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,
	.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
/* Kyocera TCG121XGLP: 1024x768 LVDS panel, 8 bpc, RGB888 SPWG. */
static const struct display_timing kyo_tcg121xglp_timing = {
	.pixelclock = { 52000000, 65000000, 71000000 },
	.hactive = { 1024, 1024, 1024 },
	.hfront_porch = { 2, 2, 2 },
	.hback_porch = { 2, 2, 2 },
	.hsync_len = { 86, 124, 244 },
	.vactive = { 768, 768, 768 },
	.vfront_porch = { 2, 2, 2 },
	.vback_porch = { 2, 2, 2 },
	.vsync_len = { 6, 34, 73 },
	.flags = DISPLAY_FLAGS_DE_HIGH,
};
static const struct panel_desc kyo_tcg121xglp = {
	.timings = &kyo_tcg121xglp_timing,
	.num_timings = 1,
	.bpc = 8,
	.size = {
		.width = 246,
		.height = 184,
	},
	.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
	.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
/* LeMaker BL035-RGB-002: 320x240 parallel-RGB panel, DE active low. */
static const struct drm_display_mode lemaker_bl035_rgb_002_mode = {
	.clock = 7000,
	.hdisplay = 320,
	.hsync_start = 320 + 20,
	.hsync_end = 320 + 20 + 30,
	.htotal = 320 + 20 + 30 + 38,
	.vdisplay = 240,
	.vsync_start = 240 + 4,
	.vsync_end = 240 + 4 + 3,
	.vtotal = 240 + 4 + 3 + 15,
};
static const struct panel_desc lemaker_bl035_rgb_002 = {
	.modes = &lemaker_bl035_rgb_002_mode,
	.num_modes = 1,
	.size = {
		.width = 70,
		.height = 52,
	},
	.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
	.bus_flags = DRM_BUS_FLAG_DE_LOW,
};
/* LG LB070WV8: 800x480 LVDS panel, 8 bpc, RGB888 SPWG. */
static const struct drm_display_mode lg_lb070wv8_mode = {
	.clock = 33246,
	.hdisplay = 800,
	.hsync_start = 800 + 88,
	.hsync_end = 800 + 88 + 80,
	.htotal = 800 + 88 + 80 + 88,
	.vdisplay = 480,
	.vsync_start = 480 + 10,
	.vsync_end = 480 + 10 + 25,
	.vtotal = 480 + 10 + 25 + 10,
};
static const struct panel_desc lg_lb070wv8 = {
	.modes = &lg_lb070wv8_mode,
	.num_modes = 1,
	.bpc = 8,
	.size = {
		.width = 151,
		.height = 91,
	},
	.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
	.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
/* Logic Technologies LT161010-2NH: 800x480 DPI panel, 6 bpc, RGB666 1x18 bus. */
static const struct display_timing logictechno_lt161010_2nh_timing = {
	.pixelclock = { 26400000, 33300000, 46800000 },
	.hactive = { 800, 800, 800 },
	.hfront_porch = { 16, 210, 354 },
	.hback_porch = { 46, 46, 46 },
	.hsync_len = { 1, 20, 40 },
	.vactive = { 480, 480, 480 },
	.vfront_porch = { 7, 22, 147 },
	.vback_porch = { 23, 23, 23 },
	.vsync_len = { 1, 10, 20 },
	.flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW |
		 DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_PIXDATA_POSEDGE |
		 DISPLAY_FLAGS_SYNC_POSEDGE,
};
static const struct panel_desc logictechno_lt161010_2nh = {
	.timings = &logictechno_lt161010_2nh_timing,
	.num_timings = 1,
	.bpc = 6,
	.size = {
		.width = 154,
		.height = 86,
	},
	.bus_format = MEDIA_BUS_FMT_RGB666_1X18,
	.bus_flags = DRM_BUS_FLAG_DE_HIGH |
		     DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE |
		     DRM_BUS_FLAG_SYNC_SAMPLE_NEGEDGE,
	.connector_type = DRM_MODE_CONNECTOR_DPI,
};
/* Logic Technologies LT170410-2WHC: 1280x800 LVDS panel, 8 bpc, RGB888 SPWG. */
static const struct display_timing logictechno_lt170410_2whc_timing = {
	.pixelclock = { 68900000, 71100000, 73400000 },
	.hactive = { 1280, 1280, 1280 },
	.hfront_porch = { 23, 60, 71 },
	.hback_porch = { 23, 60, 71 },
	.hsync_len = { 15, 40, 47 },
	.vactive = { 800, 800, 800 },
	.vfront_porch = { 5, 7, 10 },
	.vback_porch = { 5, 7, 10 },
	.vsync_len = { 6, 9, 12 },
	.flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW |
		 DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_PIXDATA_POSEDGE |
		 DISPLAY_FLAGS_SYNC_POSEDGE,
};
static const struct panel_desc logictechno_lt170410_2whc = {
	.timings = &logictechno_lt170410_2whc_timing,
	.num_timings = 1,
	.bpc = 8,
	.size = {
		.width = 217,
		.height = 136,
	},
	.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
	.bus_flags = DRM_BUS_FLAG_DE_HIGH,
	.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
/* Logic Technologies LTTD800480070-L2RT: 800x480 DPI panel, 8 bpc. */
static const struct drm_display_mode logictechno_lttd800480070_l2rt_mode = {
	.clock = 33000,
	.hdisplay = 800,
	.hsync_start = 800 + 112,
	.hsync_end = 800 + 112 + 3,
	.htotal = 800 + 112 + 3 + 85,
	.vdisplay = 480,
	.vsync_start = 480 + 38,
	.vsync_end = 480 + 38 + 3,
	.vtotal = 480 + 38 + 3 + 29,
	.flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
};
static const struct panel_desc logictechno_lttd800480070_l2rt = {
	.modes = &logictechno_lttd800480070_l2rt_mode,
	.num_modes = 1,
	.bpc = 8,
	.size = {
		.width = 154,
		.height = 86,
	},
	.delay = {
		.prepare = 45,
		.enable = 100,
		.disable = 100,
		.unprepare = 45
	},
	.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
	.bus_flags = DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE,
	.connector_type = DRM_MODE_CONNECTOR_DPI,
};
/* Logic Technologies LTTD800480070-L6WH-RT: 800x480 DPI panel, 8 bpc. */
static const struct drm_display_mode logictechno_lttd800480070_l6wh_rt_mode = {
	.clock = 33000,
	.hdisplay = 800,
	.hsync_start = 800 + 154,
	.hsync_end = 800 + 154 + 3,
	.htotal = 800 + 154 + 3 + 43,
	.vdisplay = 480,
	.vsync_start = 480 + 47,
	.vsync_end = 480 + 47 + 3,
	.vtotal = 480 + 47 + 3 + 20,
	.flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
};
static const struct panel_desc logictechno_lttd800480070_l6wh_rt = {
	.modes = &logictechno_lttd800480070_l6wh_rt_mode,
	.num_modes = 1,
	.bpc = 8,
	.size = {
		.width = 154,
		.height = 86,
	},
	.delay = {
		.prepare = 45,
		.enable = 100,
		.disable = 100,
		.unprepare = 45
	},
	.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
	.bus_flags = DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE,
	.connector_type = DRM_MODE_CONNECTOR_DPI,
};
/* Logic PD Type 28: 480x272 DPI panel, 8 bpc, 200 ms power-sequencing delays. */
static const struct drm_display_mode logicpd_type_28_mode = {
	.clock = 9107,
	.hdisplay = 480,
	.hsync_start = 480 + 3,
	.hsync_end = 480 + 3 + 42,
	.htotal = 480 + 3 + 42 + 2,
	.vdisplay = 272,
	.vsync_start = 272 + 2,
	.vsync_end = 272 + 2 + 11,
	.vtotal = 272 + 2 + 11 + 3,
	.flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
};
static const struct panel_desc logicpd_type_28 = {
	.modes = &logicpd_type_28_mode,
	.num_modes = 1,
	.bpc = 8,
	.size = {
		.width = 105,
		.height = 67,
	},
	.delay = {
		.prepare = 200,
		.enable = 200,
		.unprepare = 200,
		.disable = 200,
	},
	.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
	.bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE |
		     DRM_BUS_FLAG_SYNC_DRIVE_NEGEDGE,
	.connector_type = DRM_MODE_CONNECTOR_DPI,
};
/*
 * Mitsubishi AA070MC01: 800x480 LVDS panel, 8 bpc.
 * NOTE(review): vsync_start == vdisplay (zero vertical front porch) and
 * vtotal == vsync_end (zero vertical back porch), with a 49-line vsync —
 * unusual but kept verbatim; confirm against the datasheet.
 */
static const struct drm_display_mode mitsubishi_aa070mc01_mode = {
	.clock = 30400,
	.hdisplay = 800,
	.hsync_start = 800 + 0,
	.hsync_end = 800 + 1,
	.htotal = 800 + 0 + 1 + 160,
	.vdisplay = 480,
	.vsync_start = 480 + 0,
	.vsync_end = 480 + 48 + 1,
	.vtotal = 480 + 48 + 1 + 0,
	.flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
};
static const struct panel_desc mitsubishi_aa070mc01 = {
	.modes = &mitsubishi_aa070mc01_mode,
	.num_modes = 1,
	.bpc = 8,
	.size = {
		.width = 152,
		.height = 91,
	},
	.delay = {
		.enable = 200,
		.unprepare = 200,
		.disable = 400,
	},
	.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
	.connector_type = DRM_MODE_CONNECTOR_LVDS,
	.bus_flags = DRM_BUS_FLAG_DE_HIGH,
};
/* Multi-Inno MI0700S4T-6: 800x480 DPI panel, 8 bpc. */
static const struct display_timing multi_inno_mi0700s4t_6_timing = {
	.pixelclock = { 29000000, 33000000, 38000000 },
	.hactive = { 800, 800, 800 },
	.hfront_porch = { 180, 210, 240 },
	.hback_porch = { 16, 16, 16 },
	.hsync_len = { 30, 30, 30 },
	.vactive = { 480, 480, 480 },
	.vfront_porch = { 12, 22, 32 },
	.vback_porch = { 10, 10, 10 },
	.vsync_len = { 13, 13, 13 },
	.flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW |
		 DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_PIXDATA_POSEDGE |
		 DISPLAY_FLAGS_SYNC_POSEDGE,
};
static const struct panel_desc multi_inno_mi0700s4t_6 = {
	.timings = &multi_inno_mi0700s4t_6_timing,
	.num_timings = 1,
	.bpc = 8,
	.size = {
		.width = 154,
		.height = 86,
	},
	.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
	.bus_flags = DRM_BUS_FLAG_DE_HIGH |
		     DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE |
		     DRM_BUS_FLAG_SYNC_SAMPLE_NEGEDGE,
	.connector_type = DRM_MODE_CONNECTOR_DPI,
};
/* Multi-Inno MI0800FT-9: 800x600 timing range (min/typ/max). */
static const struct display_timing multi_inno_mi0800ft_9_timing = {
	.pixelclock = { 32000000, 40000000, 50000000 },
	.hactive = { 800, 800, 800 },
	.hfront_porch = { 16, 210, 354 },
	.hback_porch = { 6, 26, 45 },
	.hsync_len = { 1, 20, 40 },
	.vactive = { 600, 600, 600 },
	.vfront_porch = { 1, 12, 77 },
	.vback_porch = { 3, 13, 22 },
	.vsync_len = { 1, 10, 20 },
	.flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW |
		 DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_PIXDATA_POSEDGE |
		 DISPLAY_FLAGS_SYNC_POSEDGE,
};
/* Multi-Inno MI0800FT-9 8" panel: 162x122 mm, RGB888 parallel (DPI). */
static const struct panel_desc multi_inno_mi0800ft_9 = {
	.timings = &multi_inno_mi0800ft_9_timing,
	.num_timings = 1,
	.bpc = 8,
	.size = {
		.width = 162,
		.height = 122,
	},
	.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
	.bus_flags = DRM_BUS_FLAG_DE_HIGH |
		     DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE |
		     DRM_BUS_FLAG_SYNC_SAMPLE_NEGEDGE,
	.connector_type = DRM_MODE_CONNECTOR_DPI,
};
/* Multi-Inno MI1010AIT-1CP: 1280x800 timing range (min/typ/max). */
static const struct display_timing multi_inno_mi1010ait_1cp_timing = {
	.pixelclock = { 68900000, 70000000, 73400000 },
	.hactive = { 1280, 1280, 1280 },
	.hfront_porch = { 30, 60, 71 },
	.hback_porch = { 30, 60, 71 },
	.hsync_len = { 10, 10, 48 },
	.vactive = { 800, 800, 800 },
	.vfront_porch = { 5, 10, 10 },
	.vback_porch = { 5, 10, 10 },
	.vsync_len = { 5, 6, 13 },
	.flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW |
		 DISPLAY_FLAGS_DE_HIGH,
};
/* Multi-Inno MI1010AIT-1CP 10.1" panel: 217x136 mm, LVDS (SPWG RGB888). */
static const struct panel_desc multi_inno_mi1010ait_1cp = {
	.timings = &multi_inno_mi1010ait_1cp_timing,
	.num_timings = 1,
	.bpc = 8,
	.size = {
		.width = 217,
		.height = 136,
	},
	/* Delays in ms. */
	.delay = {
		.enable = 50,
		.disable = 50,
	},
	.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
	.bus_flags = DRM_BUS_FLAG_DE_HIGH,
	.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
/* NEC NL12880BC20-05: 1280x800 timing range (min/typ/max). */
static const struct display_timing nec_nl12880bc20_05_timing = {
	.pixelclock = { 67000000, 71000000, 75000000 },
	.hactive = { 1280, 1280, 1280 },
	.hfront_porch = { 2, 30, 30 },
	.hback_porch = { 6, 100, 100 },
	.hsync_len = { 2, 30, 30 },
	.vactive = { 800, 800, 800 },
	.vfront_porch = { 5, 5, 5 },
	.vback_porch = { 11, 11, 11 },
	.vsync_len = { 7, 7, 7 },
};
/* NEC NL12880BC20-05 12.1" panel: 261x163 mm, LVDS (SPWG RGB888). */
static const struct panel_desc nec_nl12880bc20_05 = {
	.timings = &nec_nl12880bc20_05_timing,
	.num_timings = 1,
	.bpc = 8,
	.size = {
		.width = 261,
		.height = 163,
	},
	/* Delays in ms. */
	.delay = {
		.enable = 50,
		.disable = 50,
	},
	.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
	.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
/* NEC NL4827HC19-05B: 480x272 timing, 10.87 MHz pixel clock. */
static const struct drm_display_mode nec_nl4827hc19_05b_mode = {
	.clock = 10870,
	.hdisplay = 480,
	.hsync_start = 480 + 2,
	.hsync_end = 480 + 2 + 41,
	.htotal = 480 + 2 + 41 + 2,
	.vdisplay = 272,
	.vsync_start = 272 + 2,
	.vsync_end = 272 + 2 + 4,
	.vtotal = 272 + 2 + 4 + 2,
	.flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
};
/* NEC NL4827HC19-05B 4.3" panel: 95x54 mm, RGB888 parallel. */
static const struct panel_desc nec_nl4827hc19_05b = {
	.modes = &nec_nl4827hc19_05b_mode,
	.num_modes = 1,
	.bpc = 8,
	.size = {
		.width = 95,
		.height = 54,
	},
	.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
	.bus_flags = DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE,
};
/* Netron-DY E231732: 1024x600 timing, 66 MHz pixel clock. */
static const struct drm_display_mode netron_dy_e231732_mode = {
	.clock = 66000,
	.hdisplay = 1024,
	.hsync_start = 1024 + 160,
	.hsync_end = 1024 + 160 + 70,
	.htotal = 1024 + 160 + 70 + 90,
	.vdisplay = 600,
	.vsync_start = 600 + 127,
	.vsync_end = 600 + 127 + 20,
	.vtotal = 600 + 127 + 20 + 3,
};
/* Netron-DY E231732 panel: 154x87 mm, RGB666 parallel. */
static const struct panel_desc netron_dy_e231732 = {
	.modes = &netron_dy_e231732_mode,
	.num_modes = 1,
	.size = {
		.width = 154,
		.height = 87,
	},
	.bus_format = MEDIA_BUS_FMT_RGB666_1X18,
};
/* Newhaven NHD-4.3-480272EF-ATXL: 480x272 timing, 9 MHz pixel clock. */
static const struct drm_display_mode newhaven_nhd_43_480272ef_atxl_mode = {
	.clock = 9000,
	.hdisplay = 480,
	.hsync_start = 480 + 2,
	.hsync_end = 480 + 2 + 41,
	.htotal = 480 + 2 + 41 + 2,
	.vdisplay = 272,
	.vsync_start = 272 + 2,
	.vsync_end = 272 + 2 + 10,
	.vtotal = 272 + 2 + 10 + 2,
	.flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
};
/* Newhaven NHD-4.3-480272EF-ATXL 4.3" panel: 95x54 mm, DPI. */
static const struct panel_desc newhaven_nhd_43_480272ef_atxl = {
	.modes = &newhaven_nhd_43_480272ef_atxl_mode,
	.num_modes = 1,
	.bpc = 8,
	.size = {
		.width = 95,
		.height = 54,
	},
	.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
	.bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE |
		     DRM_BUS_FLAG_SYNC_DRIVE_POSEDGE,
	.connector_type = DRM_MODE_CONNECTOR_DPI,
};
/* NLT NL192108AC18-02D: 1920x1080 timing range (min/typ/max). */
static const struct display_timing nlt_nl192108ac18_02d_timing = {
	.pixelclock = { 130000000, 148350000, 163000000 },
	.hactive = { 1920, 1920, 1920 },
	.hfront_porch = { 80, 100, 100 },
	.hback_porch = { 100, 120, 120 },
	.hsync_len = { 50, 60, 60 },
	.vactive = { 1080, 1080, 1080 },
	.vfront_porch = { 12, 30, 30 },
	.vback_porch = { 4, 10, 10 },
	.vsync_len = { 4, 5, 5 },
};
/* NLT NL192108AC18-02D 15.6" panel: 344x194 mm, LVDS (SPWG RGB888). */
static const struct panel_desc nlt_nl192108ac18_02d = {
	.timings = &nlt_nl192108ac18_02d_timing,
	.num_timings = 1,
	.bpc = 8,
	.size = {
		.width = 344,
		.height = 194,
	},
	.delay = {
		.unprepare = 500,	/* ms */
	},
	.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
	.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
/*
 * NVD 9128: 800x480 timing, 29.5 MHz pixel clock.
 * NOTE(review): htotal/vtotal place a "+ 0" term before the porch/sync
 * values, i.e. the back porch is zero and sync_end == total — presumably
 * intentional per the vendor timing; confirm against the datasheet.
 */
static const struct drm_display_mode nvd_9128_mode = {
	.clock = 29500,
	.hdisplay = 800,
	.hsync_start = 800 + 130,
	.hsync_end = 800 + 130 + 98,
	.htotal = 800 + 0 + 130 + 98,
	.vdisplay = 480,
	.vsync_start = 480 + 10,
	.vsync_end = 480 + 10 + 50,
	.vtotal = 480 + 0 + 10 + 50,
};
/* NVD 9128 panel: 156x88 mm, LVDS (SPWG RGB888). */
static const struct panel_desc nvd_9128 = {
	.modes = &nvd_9128_mode,
	.num_modes = 1,
	.bpc = 8,
	.size = {
		.width = 156,
		.height = 88,
	},
	.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
	.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
/* Okaya RS800480T-7X0GP: 800x480 timing range (min/typ/max). */
static const struct display_timing okaya_rs800480t_7x0gp_timing = {
	.pixelclock = { 30000000, 30000000, 40000000 },
	.hactive = { 800, 800, 800 },
	.hfront_porch = { 40, 40, 40 },
	.hback_porch = { 40, 40, 40 },
	.hsync_len = { 1, 48, 48 },
	.vactive = { 480, 480, 480 },
	.vfront_porch = { 13, 13, 13 },
	.vback_porch = { 29, 29, 29 },
	.vsync_len = { 3, 3, 3 },
	.flags = DISPLAY_FLAGS_DE_HIGH,
};
/* Okaya RS800480T-7X0GP 7" panel: 154x87 mm, RGB666 parallel. */
static const struct panel_desc okaya_rs800480t_7x0gp = {
	.timings = &okaya_rs800480t_7x0gp_timing,
	.num_timings = 1,
	.bpc = 6,
	.size = {
		.width = 154,
		.height = 87,
	},
	/* Delays in ms. */
	.delay = {
		.prepare = 41,
		.enable = 50,
		.unprepare = 41,
		.disable = 50,
	},
	.bus_format = MEDIA_BUS_FMT_RGB666_1X18,
};
/* Olimex LCD-OLinuXino-4.3TS: 480x272 timing, 9 MHz pixel clock. */
static const struct drm_display_mode olimex_lcd_olinuxino_43ts_mode = {
	.clock = 9000,
	.hdisplay = 480,
	.hsync_start = 480 + 5,
	.hsync_end = 480 + 5 + 30,
	.htotal = 480 + 5 + 30 + 10,
	.vdisplay = 272,
	.vsync_start = 272 + 8,
	.vsync_end = 272 + 8 + 5,
	.vtotal = 272 + 8 + 5 + 3,
};
/* Olimex LCD-OLinuXino-4.3TS panel: 95x54 mm, RGB888 parallel. */
static const struct panel_desc olimex_lcd_olinuxino_43ts = {
	.modes = &olimex_lcd_olinuxino_43ts_mode,
	.num_modes = 1,
	.size = {
		.width = 95,
		.height = 54,
	},
	.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
};
/*
 * 800x480 CVT. The panel appears to be quite accepting, at least as far as
 * pixel clocks, but this is the timing that was being used in the Adafruit
 * installation instructions.
 */
static const struct drm_display_mode ontat_yx700wv03_mode = {
	.clock = 29500,
	.hdisplay = 800,
	.hsync_start = 824,
	.hsync_end = 896,
	.htotal = 992,
	.vdisplay = 480,
	.vsync_start = 483,
	.vsync_end = 493,
	.vtotal = 500,
	.flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
};
/*
 * Specification at:
 * https://www.adafruit.com/images/product-files/2406/c3163.pdf
 */
/* On Tat YX700WV03 7" panel: 154x83 mm, RGB666 parallel. */
static const struct panel_desc ontat_yx700wv03 = {
	.modes = &ontat_yx700wv03_mode,
	.num_modes = 1,
	.bpc = 8,
	.size = {
		.width = 154,
		.height = 83,
	},
	.bus_format = MEDIA_BUS_FMT_RGB666_1X18,
};
/* Ortustech COM37H3M: portrait 480x640 timing, 22.23 MHz pixel clock. */
static const struct drm_display_mode ortustech_com37h3m_mode = {
	.clock = 22230,
	.hdisplay = 480,
	.hsync_start = 480 + 40,
	.hsync_end = 480 + 40 + 10,
	.htotal = 480 + 40 + 10 + 40,
	.vdisplay = 640,
	.vsync_start = 640 + 4,
	.vsync_end = 640 + 4 + 2,
	.vtotal = 640 + 4 + 2 + 4,
	.flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
};
/* Ortustech COM37H3M 3.7" panel, RGB888 parallel. */
static const struct panel_desc ortustech_com37h3m = {
	.modes = &ortustech_com37h3m_mode,
	.num_modes = 1,
	.bpc = 8,
	.size = {
		.width = 56,	/* 56.16mm */
		.height = 75,	/* 74.88mm */
	},
	.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
	.bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE |
		     DRM_BUS_FLAG_SYNC_DRIVE_POSEDGE,
};
/* Ortustech COM43H4M85ULC: portrait 480x800 timing, 25 MHz pixel clock. */
static const struct drm_display_mode ortustech_com43h4m85ulc_mode = {
	.clock = 25000,
	.hdisplay = 480,
	.hsync_start = 480 + 10,
	.hsync_end = 480 + 10 + 10,
	.htotal = 480 + 10 + 10 + 15,
	.vdisplay = 800,
	.vsync_start = 800 + 3,
	.vsync_end = 800 + 3 + 3,
	.vtotal = 800 + 3 + 3 + 3,
};
/* Ortustech COM43H4M85ULC 4.3" panel: 56x93 mm, RGB666 DPI. */
static const struct panel_desc ortustech_com43h4m85ulc = {
	.modes = &ortustech_com43h4m85ulc_mode,
	.num_modes = 1,
	.bpc = 6,
	.size = {
		.width = 56,
		.height = 93,
	},
	.bus_format = MEDIA_BUS_FMT_RGB666_1X18,
	.bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE,
	.connector_type = DRM_MODE_CONNECTOR_DPI,
};
/* OSD Displays OSD070T1718-19TS: 800x480 timing, 33 MHz pixel clock. */
static const struct drm_display_mode osddisplays_osd070t1718_19ts_mode = {
	.clock = 33000,
	.hdisplay = 800,
	.hsync_start = 800 + 210,
	.hsync_end = 800 + 210 + 30,
	.htotal = 800 + 210 + 30 + 16,
	.vdisplay = 480,
	.vsync_start = 480 + 22,
	.vsync_end = 480 + 22 + 13,
	.vtotal = 480 + 22 + 13 + 10,
	.flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
};
/* OSD Displays OSD070T1718-19TS 7" panel: 152x91 mm, RGB888 DPI. */
static const struct panel_desc osddisplays_osd070t1718_19ts = {
	.modes = &osddisplays_osd070t1718_19ts_mode,
	.num_modes = 1,
	.bpc = 8,
	.size = {
		.width = 152,
		.height = 91,
	},
	.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
	.bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE |
		     DRM_BUS_FLAG_SYNC_DRIVE_POSEDGE,
	.connector_type = DRM_MODE_CONNECTOR_DPI,
};
/* PDA 91-00156-A0: 800x480 timing, 33.3 MHz pixel clock. */
static const struct drm_display_mode pda_91_00156_a0_mode = {
	.clock = 33300,
	.hdisplay = 800,
	.hsync_start = 800 + 1,
	.hsync_end = 800 + 1 + 64,
	.htotal = 800 + 1 + 64 + 64,
	.vdisplay = 480,
	.vsync_start = 480 + 1,
	.vsync_end = 480 + 1 + 23,
	.vtotal = 480 + 1 + 23 + 22,
};
/* PDA 91-00156-A0 panel: 152x91 mm, RGB888 parallel. */
static const struct panel_desc pda_91_00156_a0 = {
	.modes = &pda_91_00156_a0_mode,
	.num_modes = 1,
	.size = {
		.width = 152,
		.height = 91,
	},
	.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
};
/* Powertip PH800480T013-IDF02: 800x480 timing, 24.75 MHz pixel clock. */
static const struct drm_display_mode powertip_ph800480t013_idf02_mode = {
	.clock = 24750,
	.hdisplay = 800,
	.hsync_start = 800 + 54,
	.hsync_end = 800 + 54 + 2,
	.htotal = 800 + 54 + 2 + 44,
	.vdisplay = 480,
	.vsync_start = 480 + 49,
	.vsync_end = 480 + 49 + 2,
	.vtotal = 480 + 49 + 2 + 22,
	.flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
};
/* Powertip PH800480T013-IDF02 7" panel: 152x91 mm, RGB888 DPI. */
static const struct panel_desc powertip_ph800480t013_idf02 = {
	.modes = &powertip_ph800480t013_idf02_mode,
	.num_modes = 1,
	.bpc = 8,
	.size = {
		.width = 152,
		.height = 91,
	},
	.bus_flags = DRM_BUS_FLAG_DE_HIGH |
		     DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE |
		     DRM_BUS_FLAG_SYNC_SAMPLE_NEGEDGE,
	.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
	.connector_type = DRM_MODE_CONNECTOR_DPI,
};
/* QiaoDian QD43003C0-40: 480x272 timing, 9 MHz pixel clock. */
static const struct drm_display_mode qd43003c0_40_mode = {
	.clock = 9000,
	.hdisplay = 480,
	.hsync_start = 480 + 8,
	.hsync_end = 480 + 8 + 4,
	.htotal = 480 + 8 + 4 + 39,
	.vdisplay = 272,
	.vsync_start = 272 + 4,
	.vsync_end = 272 + 4 + 10,
	.vtotal = 272 + 4 + 10 + 2,
};
/* QiaoDian QD43003C0-40 4.3" panel: 95x53 mm, RGB888 parallel. */
static const struct panel_desc qd43003c0_40 = {
	.modes = &qd43003c0_40_mode,
	.num_modes = 1,
	.bpc = 8,
	.size = {
		.width = 95,
		.height = 53,
	},
	.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
};
/*
 * Qishenglong Gopher 2b: 480x272 at two refresh rates; both entries use
 * the same 10.8 MHz pixel clock and trade porch lengths for frame rate.
 */
static const struct drm_display_mode qishenglong_gopher2b_lcd_modes[] = {
	{ /* 60 Hz */
		.clock = 10800,
		.hdisplay = 480,
		.hsync_start = 480 + 77,
		.hsync_end = 480 + 77 + 41,
		.htotal = 480 + 77 + 41 + 2,
		.vdisplay = 272,
		.vsync_start = 272 + 16,
		.vsync_end = 272 + 16 + 10,
		.vtotal = 272 + 16 + 10 + 2,
		.flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
	},
	{ /* 50 Hz */
		.clock = 10800,
		.hdisplay = 480,
		.hsync_start = 480 + 17,
		.hsync_end = 480 + 17 + 41,
		.htotal = 480 + 17 + 41 + 2,
		.vdisplay = 272,
		.vsync_start = 272 + 116,
		.vsync_end = 272 + 116 + 10,
		.vtotal = 272 + 116 + 10 + 2,
		.flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
	},
};
/* Qishenglong Gopher 2b 4.3" panel: 95x54 mm, RGB888 DPI. */
static const struct panel_desc qishenglong_gopher2b_lcd = {
	.modes = qishenglong_gopher2b_lcd_modes,
	.num_modes = ARRAY_SIZE(qishenglong_gopher2b_lcd_modes),
	.bpc = 8,
	.size = {
		.width = 95,
		.height = 54,
	},
	.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
	.bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE,
	.connector_type = DRM_MODE_CONNECTOR_DPI,
};
/* Rocktech RK043FN48H: 480x272 timing range (min/typ/max). */
static const struct display_timing rocktech_rk043fn48h_timing = {
	.pixelclock = { 6000000, 9000000, 12000000 },
	.hactive = { 480, 480, 480 },
	.hback_porch = { 8, 43, 43 },
	.hfront_porch = { 2, 8, 8 },
	.hsync_len = { 1, 1, 1 },
	.vactive = { 272, 272, 272 },
	.vback_porch = { 2, 12, 12 },
	.vfront_porch = { 1, 4, 4 },
	.vsync_len = { 1, 10, 10 },
	.flags = DISPLAY_FLAGS_VSYNC_LOW | DISPLAY_FLAGS_HSYNC_LOW |
		 DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_PIXDATA_POSEDGE,
};
/* Rocktech RK043FN48H 4.3" panel: 95x54 mm, RGB888 DPI. */
static const struct panel_desc rocktech_rk043fn48h = {
	.timings = &rocktech_rk043fn48h_timing,
	.num_timings = 1,
	.bpc = 8,
	.size = {
		.width = 95,
		.height = 54,
	},
	.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
	.connector_type = DRM_MODE_CONNECTOR_DPI,
};
/* Rocktech RK070ER9427: 800x480 timing range (min/typ/max). */
static const struct display_timing rocktech_rk070er9427_timing = {
	.pixelclock = { 26400000, 33300000, 46800000 },
	.hactive = { 800, 800, 800 },
	.hfront_porch = { 16, 210, 354 },
	.hback_porch = { 46, 46, 46 },
	.hsync_len = { 1, 1, 1 },
	.vactive = { 480, 480, 480 },
	.vfront_porch = { 7, 22, 147 },
	.vback_porch = { 23, 23, 23 },
	.vsync_len = { 1, 1, 1 },
	.flags = DISPLAY_FLAGS_DE_HIGH,
};
/* Rocktech RK070ER9427 7" panel: 154x86 mm, RGB666 parallel. */
static const struct panel_desc rocktech_rk070er9427 = {
	.timings = &rocktech_rk070er9427_timing,
	.num_timings = 1,
	.bpc = 6,
	.size = {
		.width = 154,
		.height = 86,
	},
	/* Delays in ms. */
	.delay = {
		.prepare = 41,
		.enable = 50,
		.unprepare = 41,
		.disable = 50,
	},
	.bus_format = MEDIA_BUS_FMT_RGB666_1X18,
};
/* Rocktech RK101II01D-CT: 1280x800 timing, 71.1 MHz pixel clock. */
static const struct drm_display_mode rocktech_rk101ii01d_ct_mode = {
	.clock = 71100,
	.hdisplay = 1280,
	.hsync_start = 1280 + 48,
	.hsync_end = 1280 + 48 + 32,
	.htotal = 1280 + 48 + 32 + 80,
	.vdisplay = 800,
	.vsync_start = 800 + 2,
	.vsync_end = 800 + 2 + 5,
	.vtotal = 800 + 2 + 5 + 16,
};
/* Rocktech RK101II01D-CT 10.1" panel: 217x136 mm, LVDS (SPWG RGB888). */
static const struct panel_desc rocktech_rk101ii01d_ct = {
	.modes = &rocktech_rk101ii01d_ct_mode,
	.bpc = 8,
	.num_modes = 1,
	.size = {
		.width = 217,
		.height = 136,
	},
	/* Delays in ms. */
	.delay = {
		.prepare = 50,
		.disable = 50,
	},
	.bus_flags = DRM_BUS_FLAG_DE_HIGH,
	.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
	.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
/* Samsung LTL101AL01: 1280x800 timing (fixed 66.663 MHz pixel clock). */
static const struct display_timing samsung_ltl101al01_timing = {
	.pixelclock = { 66663000, 66663000, 66663000 },
	.hactive = { 1280, 1280, 1280 },
	.hfront_porch = { 18, 18, 18 },
	.hback_porch = { 36, 36, 36 },
	.hsync_len = { 16, 16, 16 },
	.vactive = { 800, 800, 800 },
	.vfront_porch = { 4, 4, 4 },
	.vback_porch = { 16, 16, 16 },
	.vsync_len = { 3, 3, 3 },
	.flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW,
};
/* Samsung LTL101AL01 10.1" panel: 217x135 mm, LVDS (SPWG RGB888). */
static const struct panel_desc samsung_ltl101al01 = {
	.timings = &samsung_ltl101al01_timing,
	.num_timings = 1,
	.bpc = 8,
	.size = {
		.width = 217,
		.height = 135,
	},
	/* Delays in ms. */
	.delay = {
		.prepare = 40,
		.enable = 300,
		.disable = 200,
		.unprepare = 600,
	},
	.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
	.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
/* Samsung LTN101NT05: 1024x600 timing, 54.03 MHz pixel clock. */
static const struct drm_display_mode samsung_ltn101nt05_mode = {
	.clock = 54030,
	.hdisplay = 1024,
	.hsync_start = 1024 + 24,
	.hsync_end = 1024 + 24 + 136,
	.htotal = 1024 + 24 + 136 + 160,
	.vdisplay = 600,
	.vsync_start = 600 + 3,
	.vsync_end = 600 + 3 + 6,
	.vtotal = 600 + 3 + 6 + 61,
};
/* Samsung LTN101NT05 10.1" panel: 223x125 mm, LVDS (SPWG RGB666). */
static const struct panel_desc samsung_ltn101nt05 = {
	.modes = &samsung_ltn101nt05_mode,
	.num_modes = 1,
	.bpc = 6,
	.size = {
		.width = 223,
		.height = 125,
	},
	.bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,
	.bus_flags = DRM_BUS_FLAG_DE_HIGH,
	.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
/* Satoz SAT050AT40H12R2: 800x480 timing range (min/typ/max). */
static const struct display_timing satoz_sat050at40h12r2_timing = {
	.pixelclock = {33300000, 33300000, 50000000},
	.hactive = {800, 800, 800},
	.hfront_porch = {16, 210, 354},
	.hback_porch = {46, 46, 46},
	.hsync_len = {1, 1, 40},
	.vactive = {480, 480, 480},
	.vfront_porch = {7, 22, 147},
	.vback_porch = {23, 23, 23},
	.vsync_len = {1, 1, 20},
};
/* Satoz SAT050AT40H12R2 5" panel: 108x65 mm, LVDS (SPWG RGB888). */
static const struct panel_desc satoz_sat050at40h12r2 = {
	.timings = &satoz_sat050at40h12r2_timing,
	.num_timings = 1,
	.bpc = 8,
	.size = {
		.width = 108,
		.height = 65,
	},
	.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
	.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
/*
 * Sharp LQ070Y3DG3B: 800x480 timing, 33.26 MHz pixel clock.
 *
 * Fix: the original set .flags = DISPLAY_FLAGS_PIXDATA_POSEDGE, but
 * DISPLAY_FLAGS_* constants belong to struct display_timing, not to
 * struct drm_display_mode — in mode flags that bit value reads as
 * DRM_MODE_FLAG_CSYNC, which is not what was intended.  The pixel-data
 * sampling edge is already expressed through the panel_desc's bus_flags
 * (DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE below), so the misplaced flag is
 * simply dropped here.
 */
static const struct drm_display_mode sharp_lq070y3dg3b_mode = {
	.clock = 33260,
	.hdisplay = 800,
	.hsync_start = 800 + 64,
	.hsync_end = 800 + 64 + 128,
	.htotal = 800 + 64 + 128 + 64,
	.vdisplay = 480,
	.vsync_start = 480 + 8,
	.vsync_end = 480 + 8 + 2,
	.vtotal = 480 + 8 + 2 + 35,
};
/* Sharp LQ070Y3DG3B 7" panel, RGB888 parallel. */
static const struct panel_desc sharp_lq070y3dg3b = {
	.modes = &sharp_lq070y3dg3b_mode,
	.num_modes = 1,
	.bpc = 8,
	.size = {
		.width = 152,	/* 152.4mm */
		.height = 91,	/* 91.4mm */
	},
	.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
	.bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE |
		     DRM_BUS_FLAG_SYNC_DRIVE_POSEDGE,
};
/* Sharp LQ035Q7DB03: portrait 240x320 timing, 5.5 MHz pixel clock. */
static const struct drm_display_mode sharp_lq035q7db03_mode = {
	.clock = 5500,
	.hdisplay = 240,
	.hsync_start = 240 + 16,
	.hsync_end = 240 + 16 + 7,
	.htotal = 240 + 16 + 7 + 5,
	.vdisplay = 320,
	.vsync_start = 320 + 9,
	.vsync_end = 320 + 9 + 1,
	.vtotal = 320 + 9 + 1 + 7,
};
/* Sharp LQ035Q7DB03 3.5" panel: 54x72 mm, RGB666 parallel. */
static const struct panel_desc sharp_lq035q7db03 = {
	.modes = &sharp_lq035q7db03_mode,
	.num_modes = 1,
	.bpc = 6,
	.size = {
		.width = 54,
		.height = 72,
	},
	.bus_format = MEDIA_BUS_FMT_RGB666_1X18,
};
/* Sharp LQ101K1LY04: 1280x800 timing range (min/typ/max). */
static const struct display_timing sharp_lq101k1ly04_timing = {
	.pixelclock = { 60000000, 65000000, 80000000 },
	.hactive = { 1280, 1280, 1280 },
	.hfront_porch = { 20, 20, 20 },
	.hback_porch = { 20, 20, 20 },
	.hsync_len = { 10, 10, 10 },
	.vactive = { 800, 800, 800 },
	.vfront_porch = { 4, 4, 4 },
	.vback_porch = { 4, 4, 4 },
	.vsync_len = { 4, 4, 4 },
	.flags = DISPLAY_FLAGS_PIXDATA_POSEDGE,
};
/* Sharp LQ101K1LY04 10.1" panel: 217x136 mm, LVDS (JEIDA RGB888). */
static const struct panel_desc sharp_lq101k1ly04 = {
	.timings = &sharp_lq101k1ly04_timing,
	.num_timings = 1,
	.bpc = 8,
	.size = {
		.width = 217,
		.height = 136,
	},
	.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA,
	.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
/*
 * Sharp LS020B1DD01D: 240x160 at two refresh rates; both entries share a
 * 3 MHz pixel clock and differ only in horizontal front porch.
 */
static const struct drm_display_mode sharp_ls020b1dd01d_modes[] = {
	{ /* 50 Hz */
		.clock = 3000,
		.hdisplay = 240,
		.hsync_start = 240 + 58,
		.hsync_end = 240 + 58 + 1,
		.htotal = 240 + 58 + 1 + 1,
		.vdisplay = 160,
		.vsync_start = 160 + 24,
		.vsync_end = 160 + 24 + 10,
		.vtotal = 160 + 24 + 10 + 6,
		.flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC,
	},
	{ /* 60 Hz */
		.clock = 3000,
		.hdisplay = 240,
		.hsync_start = 240 + 8,
		.hsync_end = 240 + 8 + 1,
		.htotal = 240 + 8 + 1 + 1,
		.vdisplay = 160,
		.vsync_start = 160 + 24,
		.vsync_end = 160 + 24 + 10,
		.vtotal = 160 + 24 + 10 + 6,
		.flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC,
	},
};
/* Sharp LS020B1DD01D 2" panel: 42x28 mm, RGB565; uses Sharp signaling. */
static const struct panel_desc sharp_ls020b1dd01d = {
	.modes = sharp_ls020b1dd01d_modes,
	.num_modes = ARRAY_SIZE(sharp_ls020b1dd01d_modes),
	.bpc = 6,
	.size = {
		.width = 42,
		.height = 28,
	},
	.bus_format = MEDIA_BUS_FMT_RGB565_1X16,
	.bus_flags = DRM_BUS_FLAG_DE_HIGH
		   | DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE
		   | DRM_BUS_FLAG_SHARP_SIGNALS,
};
/* Shelly SCA07010-BFN-LNN: 800x480 timing, 33.3 MHz pixel clock. */
static const struct drm_display_mode shelly_sca07010_bfn_lnn_mode = {
	.clock = 33300,
	.hdisplay = 800,
	.hsync_start = 800 + 1,
	.hsync_end = 800 + 1 + 64,
	.htotal = 800 + 1 + 64 + 64,
	.vdisplay = 480,
	.vsync_start = 480 + 1,
	.vsync_end = 480 + 1 + 23,
	.vtotal = 480 + 1 + 23 + 22,
};
/* Shelly SCA07010-BFN-LNN panel: 152x91 mm, RGB666 parallel. */
static const struct panel_desc shelly_sca07010_bfn_lnn = {
	.modes = &shelly_sca07010_bfn_lnn_mode,
	.num_modes = 1,
	.size = {
		.width = 152,
		.height = 91,
	},
	.bus_format = MEDIA_BUS_FMT_RGB666_1X18,
};
/* Starry KR070PE2T: 800x480 timing, 33 MHz pixel clock. */
static const struct drm_display_mode starry_kr070pe2t_mode = {
	.clock = 33000,
	.hdisplay = 800,
	.hsync_start = 800 + 209,
	.hsync_end = 800 + 209 + 1,
	.htotal = 800 + 209 + 1 + 45,
	.vdisplay = 480,
	.vsync_start = 480 + 22,
	.vsync_end = 480 + 22 + 1,
	.vtotal = 480 + 22 + 1 + 22,
};
/* Starry KR070PE2T 7" panel: 152x86 mm, RGB888 DPI. */
static const struct panel_desc starry_kr070pe2t = {
	.modes = &starry_kr070pe2t_mode,
	.num_modes = 1,
	.bpc = 8,
	.size = {
		.width = 152,
		.height = 86,
	},
	.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
	.bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE,
	.connector_type = DRM_MODE_CONNECTOR_DPI,
};
/* Startek KD070WVFPA: 800x480 timing range (min/typ/max). */
static const struct display_timing startek_kd070wvfpa_mode = {
	.pixelclock = { 25200000, 27200000, 30500000 },
	.hactive = { 800, 800, 800 },
	.hfront_porch = { 19, 44, 115 },
	.hback_porch = { 5, 16, 101 },
	.hsync_len = { 1, 2, 100 },
	.vactive = { 480, 480, 480 },
	.vfront_porch = { 5, 43, 67 },
	.vback_porch = { 5, 5, 67 },
	.vsync_len = { 1, 2, 66 },
	.flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW |
		 DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_PIXDATA_POSEDGE |
		 DISPLAY_FLAGS_SYNC_POSEDGE,
};
/* Startek KD070WVFPA 7" panel: 152x91 mm, RGB888 DPI. */
static const struct panel_desc startek_kd070wvfpa = {
	.timings = &startek_kd070wvfpa_mode,
	.num_timings = 1,
	.bpc = 8,
	.size = {
		.width = 152,
		.height = 91,
	},
	/* Delays in ms. */
	.delay = {
		.prepare = 20,
		.enable = 200,
		.disable = 200,
	},
	.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
	.connector_type = DRM_MODE_CONNECTOR_DPI,
	.bus_flags = DRM_BUS_FLAG_DE_HIGH |
		     DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE |
		     DRM_BUS_FLAG_SYNC_SAMPLE_NEGEDGE,
};
/* TSD TST043015CMHX: 480x272 timing range (min/typ/max). */
static const struct display_timing tsd_tst043015cmhx_timing = {
	.pixelclock = { 5000000, 9000000, 12000000 },
	.hactive = { 480, 480, 480 },
	.hfront_porch = { 4, 5, 65 },
	.hback_porch = { 36, 40, 255 },
	.hsync_len = { 1, 1, 1 },
	.vactive = { 272, 272, 272 },
	.vfront_porch = { 2, 8, 97 },
	.vback_porch = { 3, 8, 31 },
	.vsync_len = { 1, 1, 1 },
	.flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW |
		 DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_PIXDATA_POSEDGE,
};
/* TSD TST043015CMHX 4.3" panel: 105x67 mm, RGB888 parallel. */
static const struct panel_desc tsd_tst043015cmhx = {
	.timings = &tsd_tst043015cmhx_timing,
	.num_timings = 1,
	.bpc = 8,
	.size = {
		.width = 105,
		.height = 67,
	},
	.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
	.bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE,
};
/* TFC S9700RTWV43TR-01B: 800x480 timing, 30 MHz pixel clock. */
static const struct drm_display_mode tfc_s9700rtwv43tr_01b_mode = {
	.clock = 30000,
	.hdisplay = 800,
	.hsync_start = 800 + 39,
	.hsync_end = 800 + 39 + 47,
	.htotal = 800 + 39 + 47 + 39,
	.vdisplay = 480,
	.vsync_start = 480 + 13,
	.vsync_end = 480 + 13 + 2,
	.vtotal = 480 + 13 + 2 + 29,
};
/* TFC S9700RTWV43TR-01B panel: 155x90 mm, RGB888 parallel. */
static const struct panel_desc tfc_s9700rtwv43tr_01b = {
	.modes = &tfc_s9700rtwv43tr_01b_mode,
	.num_modes = 1,
	.bpc = 8,
	.size = {
		.width = 155,
		.height = 90,
	},
	.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
	.bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE,
};
/*
 * Tianma TM070JDHG30: 1280x800 timing range (min/typ/max).
 * Also reused by the TM070JVHG33 descriptor below.
 */
static const struct display_timing tianma_tm070jdhg30_timing = {
	.pixelclock = { 62600000, 68200000, 78100000 },
	.hactive = { 1280, 1280, 1280 },
	.hfront_porch = { 15, 64, 159 },
	.hback_porch = { 5, 5, 5 },
	.hsync_len = { 1, 1, 256 },
	.vactive = { 800, 800, 800 },
	.vfront_porch = { 3, 40, 99 },
	.vback_porch = { 2, 2, 2 },
	.vsync_len = { 1, 1, 128 },
	.flags = DISPLAY_FLAGS_DE_HIGH,
};
/* Tianma TM070JDHG30 7" panel: 151x95 mm, LVDS (SPWG RGB888). */
static const struct panel_desc tianma_tm070jdhg30 = {
	.timings = &tianma_tm070jdhg30_timing,
	.num_timings = 1,
	.bpc = 8,
	.size = {
		.width = 151,
		.height = 95,
	},
	.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
	.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
/* Tianma TM070JVHG33: same timing as TM070JDHG30, 150x94 mm glass. */
static const struct panel_desc tianma_tm070jvhg33 = {
	.timings = &tianma_tm070jdhg30_timing,
	.num_timings = 1,
	.bpc = 8,
	.size = {
		.width = 150,
		.height = 94,
	},
	.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
	.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
/* Tianma TM070RVHG71: 800x480 timing range (min/typ/max). */
static const struct display_timing tianma_tm070rvhg71_timing = {
	.pixelclock = { 27700000, 29200000, 39600000 },
	.hactive = { 800, 800, 800 },
	.hfront_porch = { 12, 40, 212 },
	.hback_porch = { 88, 88, 88 },
	.hsync_len = { 1, 1, 40 },
	.vactive = { 480, 480, 480 },
	.vfront_porch = { 1, 13, 88 },
	.vback_porch = { 32, 32, 32 },
	.vsync_len = { 1, 1, 3 },
	.flags = DISPLAY_FLAGS_DE_HIGH,
};
/* Tianma TM070RVHG71 7" panel: 154x86 mm, LVDS (SPWG RGB888). */
static const struct panel_desc tianma_tm070rvhg71 = {
	.timings = &tianma_tm070rvhg71_timing,
	.num_timings = 1,
	.bpc = 8,
	.size = {
		.width = 154,
		.height = 86,
	},
	.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
	.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
/* TI-Nspire CX LCD: 320x240 timing, 10 MHz pixel clock. */
static const struct drm_display_mode ti_nspire_cx_lcd_mode[] = {
	{
		.clock = 10000,
		.hdisplay = 320,
		.hsync_start = 320 + 50,
		.hsync_end = 320 + 50 + 6,
		.htotal = 320 + 50 + 6 + 38,
		.vdisplay = 240,
		.vsync_start = 240 + 3,
		.vsync_end = 240 + 3 + 1,
		.vtotal = 240 + 3 + 1 + 17,
		.flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
	},
};
/* TI-Nspire CX panel: 65x49 mm, RGB888 parallel. */
static const struct panel_desc ti_nspire_cx_lcd_panel = {
	.modes = ti_nspire_cx_lcd_mode,
	/*
	 * Derive the count from the array instead of hard-coding 1, to
	 * match the convention used elsewhere in this file (e.g.
	 * qishenglong_gopher2b_lcd) and stay correct if modes are added.
	 */
	.num_modes = ARRAY_SIZE(ti_nspire_cx_lcd_mode),
	.bpc = 8,
	.size = {
		.width = 65,
		.height = 49,
	},
	.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
	.bus_flags = DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE,
};
/* TI-Nspire classic LCD: 320x240 timing, 10 MHz pixel clock. */
static const struct drm_display_mode ti_nspire_classic_lcd_mode[] = {
	{
		.clock = 10000,
		.hdisplay = 320,
		.hsync_start = 320 + 6,
		.hsync_end = 320 + 6 + 6,
		.htotal = 320 + 6 + 6 + 6,
		.vdisplay = 240,
		.vsync_start = 240 + 0,
		.vsync_end = 240 + 0 + 1,
		.vtotal = 240 + 0 + 1 + 0,
		.flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
	},
};
static const struct panel_desc ti_nspire_classic_lcd_panel = {
	.modes = ti_nspire_classic_lcd_mode,
	/*
	 * Derive the count from the array instead of hard-coding 1, to
	 * match the convention used elsewhere in this file (e.g.
	 * qishenglong_gopher2b_lcd) and stay correct if modes are added.
	 */
	.num_modes = ARRAY_SIZE(ti_nspire_classic_lcd_mode),
	/* The grayscale panel has 8 bit for the color .. Y (black) */
	.bpc = 8,
	.size = {
		.width = 71,
		.height = 53,
	},
	/* This is the grayscale bus format */
	.bus_format = MEDIA_BUS_FMT_Y8_1X8,
	.bus_flags = DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE,
};
/* Toshiba LT089AC29000: 1280x768 timing, 79.5 MHz pixel clock. */
static const struct drm_display_mode toshiba_lt089ac29000_mode = {
	.clock = 79500,
	.hdisplay = 1280,
	.hsync_start = 1280 + 192,
	.hsync_end = 1280 + 192 + 128,
	.htotal = 1280 + 192 + 128 + 64,
	.vdisplay = 768,
	.vsync_start = 768 + 20,
	.vsync_end = 768 + 20 + 7,
	.vtotal = 768 + 20 + 7 + 3,
};
/* Toshiba LT089AC29000 8.9" panel: 194x116 mm, LVDS (JEIDA RGB888). */
static const struct panel_desc toshiba_lt089ac29000 = {
	.modes = &toshiba_lt089ac29000_mode,
	.num_modes = 1,
	.size = {
		.width = 194,
		.height = 116,
	},
	.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA,
	.bus_flags = DRM_BUS_FLAG_DE_HIGH,
	.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
/* TPK F07A-0102: 800x480 timing, 33.26 MHz pixel clock. */
static const struct drm_display_mode tpk_f07a_0102_mode = {
	.clock = 33260,
	.hdisplay = 800,
	.hsync_start = 800 + 40,
	.hsync_end = 800 + 40 + 128,
	.htotal = 800 + 40 + 128 + 88,
	.vdisplay = 480,
	.vsync_start = 480 + 10,
	.vsync_end = 480 + 10 + 2,
	.vtotal = 480 + 10 + 2 + 33,
};
/* TPK F07A-0102 7" panel: 152x91 mm; no bus_format specified. */
static const struct panel_desc tpk_f07a_0102 = {
	.modes = &tpk_f07a_0102_mode,
	.num_modes = 1,
	.size = {
		.width = 152,
		.height = 91,
	},
	.bus_flags = DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE,
};
/* TPK F10A-0102: 1024x600 timing, 45 MHz pixel clock. */
static const struct drm_display_mode tpk_f10a_0102_mode = {
	.clock = 45000,
	.hdisplay = 1024,
	.hsync_start = 1024 + 176,
	.hsync_end = 1024 + 176 + 5,
	.htotal = 1024 + 176 + 5 + 88,
	.vdisplay = 600,
	.vsync_start = 600 + 20,
	.vsync_end = 600 + 20 + 5,
	.vtotal = 600 + 20 + 5 + 25,
};
/* TPK F10A-0102 10.1" panel: 223x125 mm; no bus format/flags specified. */
static const struct panel_desc tpk_f10a_0102 = {
	.modes = &tpk_f10a_0102_mode,
	.num_modes = 1,
	.size = {
		.width = 223,
		.height = 125,
	},
};
/*
 * URT UMSH-8596MD: 800x480 timing (fixed 33.26 MHz pixel clock).
 * The "216 - 128" / "35 - 2" arithmetic keeps the datasheet's total
 * blanking figures visible while subtracting the sync length.
 * Shared by both the LVDS and parallel descriptor variants below.
 */
static const struct display_timing urt_umsh_8596md_timing = {
	.pixelclock = { 33260000, 33260000, 33260000 },
	.hactive = { 800, 800, 800 },
	.hfront_porch = { 41, 41, 41 },
	.hback_porch = { 216 - 128, 216 - 128, 216 - 128 },
	.hsync_len = { 71, 128, 128 },
	.vactive = { 480, 480, 480 },
	.vfront_porch = { 10, 10, 10 },
	.vback_porch = { 35 - 2, 35 - 2, 35 - 2 },
	.vsync_len = { 2, 2, 2 },
	.flags = DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_PIXDATA_NEGEDGE |
		 DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW,
};
/* URT UMSH-8596MD, LVDS variant: 152x91 mm, SPWG RGB666. */
static const struct panel_desc urt_umsh_8596md_lvds = {
	.timings = &urt_umsh_8596md_timing,
	.num_timings = 1,
	.bpc = 6,
	.size = {
		.width = 152,
		.height = 91,
	},
	.bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,
	.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
/* URT UMSH-8596MD, parallel variant: same glass, RGB666 parallel bus. */
static const struct panel_desc urt_umsh_8596md_parallel = {
	.timings = &urt_umsh_8596md_timing,
	.num_timings = 1,
	.bpc = 6,
	.size = {
		.width = 152,
		.height = 91,
	},
	.bus_format = MEDIA_BUS_FMT_RGB666_1X18,
};
/* Vivax TPC-9150: 1024x600 timing, 60 MHz pixel clock. */
static const struct drm_display_mode vivax_tpc9150_panel_mode = {
	.clock = 60000,
	.hdisplay = 1024,
	.hsync_start = 1024 + 160,
	.hsync_end = 1024 + 160 + 100,
	.htotal = 1024 + 160 + 100 + 60,
	.vdisplay = 600,
	.vsync_start = 600 + 12,
	.vsync_end = 600 + 12 + 10,
	.vtotal = 600 + 12 + 10 + 13,
};
/* Vivax TPC-9150 9.1" panel: 200x115 mm, LVDS (SPWG RGB666). */
static const struct panel_desc vivax_tpc9150_panel = {
	.modes = &vivax_tpc9150_panel_mode,
	.num_modes = 1,
	.bpc = 6,
	.size = {
		.width = 200,
		.height = 115,
	},
	.bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,
	.bus_flags = DRM_BUS_FLAG_DE_HIGH,
	.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
/* VXT VL050-8048NT-C01: 800x480 timing, 33.333 MHz pixel clock. */
static const struct drm_display_mode vl050_8048nt_c01_mode = {
	.clock = 33333,
	.hdisplay = 800,
	.hsync_start = 800 + 210,
	.hsync_end = 800 + 210 + 20,
	.htotal = 800 + 210 + 20 + 46,
	.vdisplay = 480,
	.vsync_start = 480 + 22,
	.vsync_end = 480 + 22 + 10,
	.vtotal = 480 + 22 + 10 + 23,
	.flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
};
/* VXT VL050-8048NT-C01 5" panel: 120x76 mm, RGB888 parallel. */
static const struct panel_desc vl050_8048nt_c01 = {
	.modes = &vl050_8048nt_c01_mode,
	.num_modes = 1,
	.bpc = 8,
	.size = {
		.width = 120,
		.height = 76,
	},
	.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
	.bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE,
};
/* Winstar WF35LTIACD: 320x240 timing, 6.41 MHz pixel clock. */
static const struct drm_display_mode winstar_wf35ltiacd_mode = {
	.clock = 6410,
	.hdisplay = 320,
	.hsync_start = 320 + 20,
	.hsync_end = 320 + 20 + 30,
	.htotal = 320 + 20 + 30 + 38,
	.vdisplay = 240,
	.vsync_start = 240 + 4,
	.vsync_end = 240 + 4 + 3,
	.vtotal = 240 + 4 + 3 + 15,
	.flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
};
/* Winstar WF35LTIACD 3.5" panel: 70x53 mm, RGB888 parallel. */
static const struct panel_desc winstar_wf35ltiacd = {
	.modes = &winstar_wf35ltiacd_mode,
	.num_modes = 1,
	.bpc = 8,
	.size = {
		.width = 70,
		.height = 53,
	},
	.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
};
/* Yes Optoelectronics YTC700TLAG-05-201C: 1024x600, 51.2 MHz clock. */
static const struct drm_display_mode yes_optoelectronics_ytc700tlag_05_201c_mode = {
	.clock = 51200,
	.hdisplay = 1024,
	.hsync_start = 1024 + 100,
	.hsync_end = 1024 + 100 + 100,
	.htotal = 1024 + 100 + 100 + 120,
	.vdisplay = 600,
	.vsync_start = 600 + 10,
	.vsync_end = 600 + 10 + 10,
	.vtotal = 600 + 10 + 10 + 15,
	.flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
};
/* YTC700TLAG-05-201C 7" panel: 154x90 mm, LVDS (SPWG RGB888). */
static const struct panel_desc yes_optoelectronics_ytc700tlag_05_201c = {
	.modes = &yes_optoelectronics_ytc700tlag_05_201c_mode,
	.num_modes = 1,
	.bpc = 8,
	.size = {
		.width = 154,
		.height = 90,
	},
	.bus_flags = DRM_BUS_FLAG_DE_HIGH,
	.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
	.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
/* Arm RTSM virtual display: standard 1024x768@60 (65 MHz) timing. */
static const struct drm_display_mode arm_rtsm_mode[] = {
	{
		.clock = 65000,
		.hdisplay = 1024,
		.hsync_start = 1024 + 24,
		.hsync_end = 1024 + 24 + 136,
		.htotal = 1024 + 24 + 136 + 160,
		.vdisplay = 768,
		.vsync_start = 768 + 3,
		.vsync_end = 768 + 3 + 6,
		.vtotal = 768 + 3 + 6 + 29,
		.flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
	},
};
static const struct panel_desc arm_rtsm = {
	.modes = arm_rtsm_mode,
	/*
	 * Derive the count from the array instead of hard-coding 1, to
	 * match the convention used elsewhere in this file (e.g.
	 * qishenglong_gopher2b_lcd) and stay correct if modes are added.
	 */
	.num_modes = ARRAY_SIZE(arm_rtsm_mode),
	.bpc = 8,
	.size = {
		.width = 400,
		.height = 300,
	},
	.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
};
/*
 * DT match table for parallel/LVDS (non-DSI) panels.  Each entry maps a
 * vendor,model compatible string to its panel_desc.  Kept in alphabetical
 * order by compatible; the catch-all "panel-dpi" entry must stay last
 * (before the sentinel) so that specific compatibles always win.
 */
static const struct of_device_id platform_of_match[] = {
	{
		.compatible = "ampire,am-1280800n3tzqw-t00h",
		.data = &ire_am_1280800n3tzqw_t00h,
	}, {
		.compatible = "ampire,am-480272h3tmqw-t01h",
		.data = &ire_am_480272h3tmqw_t01h,
	}, {
		.compatible = "ampire,am-800480l1tmqw-t00h",
		.data = &ire_am_800480l1tmqw_t00h,
	}, {
		.compatible = "ampire,am800480r3tmqwa1h",
		.data = &ire_am800480r3tmqwa1h,
	}, {
		.compatible = "ampire,am800600p5tmqw-tb8h",
		.data = &ire_am800600p5tmqwtb8h,
	}, {
		.compatible = "arm,rtsm-display",
		.data = &arm_rtsm,
	}, {
		.compatible = "armadeus,st0700-adapt",
		.data = &armadeus_st0700_adapt,
	}, {
		.compatible = "auo,b101aw03",
		.data = &auo_b101aw03,
	}, {
		.compatible = "auo,b101xtn01",
		.data = &auo_b101xtn01,
	}, {
		.compatible = "auo,g070vvn01",
		.data = &auo_g070vvn01,
	}, {
		.compatible = "auo,g101evn010",
		.data = &auo_g101evn010,
	}, {
		.compatible = "auo,g104sn02",
		.data = &auo_g104sn02,
	}, {
		.compatible = "auo,g121ean01",
		.data = &auo_g121ean01,
	}, {
		.compatible = "auo,g133han01",
		.data = &auo_g133han01,
	}, {
		.compatible = "auo,g156xtn01",
		.data = &auo_g156xtn01,
	}, {
		.compatible = "auo,g185han01",
		.data = &auo_g185han01,
	}, {
		.compatible = "auo,g190ean01",
		.data = &auo_g190ean01,
	}, {
		.compatible = "auo,p320hvn03",
		.data = &auo_p320hvn03,
	}, {
		.compatible = "auo,t215hvn01",
		.data = &auo_t215hvn01,
	}, {
		.compatible = "avic,tm070ddh03",
		.data = &avic_tm070ddh03,
	}, {
		.compatible = "bananapi,s070wv20-ct16",
		.data = &bananapi_s070wv20_ct16,
	}, {
		.compatible = "boe,ev121wxm-n10-1850",
		.data = &boe_ev121wxm_n10_1850,
	}, {
		.compatible = "boe,hv070wsa-100",
		.data = &boe_hv070wsa
	}, {
		.compatible = "cdtech,s043wq26h-ct7",
		.data = &cdtech_s043wq26h_ct7,
	}, {
		.compatible = "cdtech,s070pws19hp-fc21",
		.data = &cdtech_s070pws19hp_fc21,
	}, {
		.compatible = "cdtech,s070swv29hg-dc44",
		.data = &cdtech_s070swv29hg_dc44,
	}, {
		.compatible = "cdtech,s070wv95-ct16",
		.data = &cdtech_s070wv95_ct16,
	}, {
		.compatible = "chefree,ch101olhlwh-002",
		.data = &chefree_ch101olhlwh_002,
	}, {
		.compatible = "chunghwa,claa070wp03xg",
		.data = &chunghwa_claa070wp03xg,
	}, {
		.compatible = "chunghwa,claa101wa01a",
		.data = &chunghwa_claa101wa01a
	}, {
		.compatible = "chunghwa,claa101wb01",
		.data = &chunghwa_claa101wb01
	}, {
		.compatible = "dataimage,fg040346dsswbg04",
		.data = &dataimage_fg040346dsswbg04,
	}, {
		.compatible = "dataimage,fg1001l0dsswmg01",
		.data = &dataimage_fg1001l0dsswmg01,
	}, {
		.compatible = "dataimage,scf0700c48ggu18",
		.data = &dataimage_scf0700c48ggu18,
	}, {
		.compatible = "dlc,dlc0700yzg-1",
		.data = &dlc_dlc0700yzg_1,
	}, {
		.compatible = "dlc,dlc1010gig",
		.data = &dlc_dlc1010gig,
	}, {
		.compatible = "edt,et035012dm6",
		.data = &edt_et035012dm6,
	}, {
		.compatible = "edt,etm0350g0dh6",
		.data = &edt_etm0350g0dh6,
	}, {
		.compatible = "edt,etm043080dh6gp",
		.data = &edt_etm043080dh6gp,
	}, {
		.compatible = "edt,etm0430g0dh6",
		.data = &edt_etm0430g0dh6,
	}, {
		.compatible = "edt,et057090dhu",
		.data = &edt_et057090dhu,
	}, {
		/* Legacy alias: same hardware as edt,etm0700g0dh6 below */
		.compatible = "edt,et070080dh6",
		.data = &edt_etm0700g0dh6,
	}, {
		.compatible = "edt,etm0700g0dh6",
		.data = &edt_etm0700g0dh6,
	}, {
		.compatible = "edt,etm0700g0bdh6",
		.data = &edt_etm0700g0bdh6,
	}, {
		/* Shares the etm0700g0bdh6 descriptor */
		.compatible = "edt,etm0700g0edh6",
		.data = &edt_etm0700g0bdh6,
	}, {
		.compatible = "edt,etml0700y5dha",
		.data = &edt_etml0700y5dha,
	}, {
		.compatible = "edt,etmv570g2dhu",
		.data = &edt_etmv570g2dhu,
	}, {
		.compatible = "eink,vb3300-kca",
		.data = &eink_vb3300_kca,
	}, {
		.compatible = "evervision,vgg804821",
		.data = &evervision_vgg804821,
	}, {
		.compatible = "foxlink,fl500wvr00-a0t",
		.data = &foxlink_fl500wvr00_a0t,
	}, {
		.compatible = "frida,frd350h54004",
		.data = &frida_frd350h54004,
	}, {
		.compatible = "friendlyarm,hd702e",
		.data = &friendlyarm_hd702e,
	}, {
		.compatible = "giantplus,gpg482739qs5",
		.data = &giantplus_gpg482739qs5
	}, {
		.compatible = "giantplus,gpm940b0",
		.data = &giantplus_gpm940b0,
	}, {
		.compatible = "hannstar,hsd070pww1",
		.data = &hannstar_hsd070pww1,
	}, {
		.compatible = "hannstar,hsd100pxn1",
		.data = &hannstar_hsd100pxn1,
	}, {
		.compatible = "hannstar,hsd101pww2",
		.data = &hannstar_hsd101pww2,
	}, {
		.compatible = "hit,tx23d38vm0caa",
		.data = &hitachi_tx23d38vm0caa
	}, {
		.compatible = "innolux,at043tn24",
		.data = &innolux_at043tn24,
	}, {
		.compatible = "innolux,at070tn92",
		.data = &innolux_at070tn92,
	}, {
		.compatible = "innolux,g070ace-l01",
		.data = &innolux_g070ace_l01,
	}, {
		.compatible = "innolux,g070y2-l01",
		.data = &innolux_g070y2_l01,
	}, {
		.compatible = "innolux,g070y2-t02",
		.data = &innolux_g070y2_t02,
	}, {
		.compatible = "innolux,g101ice-l01",
		.data = &innolux_g101ice_l01
	}, {
		.compatible = "innolux,g121i1-l01",
		.data = &innolux_g121i1_l01
	}, {
		.compatible = "innolux,g121x1-l03",
		.data = &innolux_g121x1_l03,
	}, {
		.compatible = "innolux,g156hce-l01",
		.data = &innolux_g156hce_l01,
	}, {
		.compatible = "innolux,n156bge-l21",
		.data = &innolux_n156bge_l21,
	}, {
		.compatible = "innolux,zj070na-01p",
		.data = &innolux_zj070na_01p,
	}, {
		.compatible = "koe,tx14d24vm1bpa",
		.data = &koe_tx14d24vm1bpa,
	}, {
		.compatible = "koe,tx26d202vm0bwa",
		.data = &koe_tx26d202vm0bwa,
	}, {
		.compatible = "koe,tx31d200vm0baa",
		.data = &koe_tx31d200vm0baa,
	}, {
		.compatible = "kyo,tcg121xglp",
		.data = &kyo_tcg121xglp,
	}, {
		.compatible = "lemaker,bl035-rgb-002",
		.data = &lemaker_bl035_rgb_002,
	}, {
		.compatible = "lg,lb070wv8",
		.data = &lg_lb070wv8,
	}, {
		.compatible = "logicpd,type28",
		.data = &logicpd_type_28,
	}, {
		/* -2nhc and -2nhr variants share one descriptor */
		.compatible = "logictechno,lt161010-2nhc",
		.data = &logictechno_lt161010_2nh,
	}, {
		.compatible = "logictechno,lt161010-2nhr",
		.data = &logictechno_lt161010_2nh,
	}, {
		.compatible = "logictechno,lt170410-2whc",
		.data = &logictechno_lt170410_2whc,
	}, {
		.compatible = "logictechno,lttd800480070-l2rt",
		.data = &logictechno_lttd800480070_l2rt,
	}, {
		.compatible = "logictechno,lttd800480070-l6wh-rt",
		.data = &logictechno_lttd800480070_l6wh_rt,
	}, {
		.compatible = "mitsubishi,aa070mc01-ca1",
		.data = &mitsubishi_aa070mc01,
	}, {
		.compatible = "multi-inno,mi0700s4t-6",
		.data = &multi_inno_mi0700s4t_6,
	}, {
		.compatible = "multi-inno,mi0800ft-9",
		.data = &multi_inno_mi0800ft_9,
	}, {
		.compatible = "multi-inno,mi1010ait-1cp",
		.data = &multi_inno_mi1010ait_1cp,
	}, {
		.compatible = "nec,nl12880bc20-05",
		.data = &nec_nl12880bc20_05,
	}, {
		.compatible = "nec,nl4827hc19-05b",
		.data = &nec_nl4827hc19_05b,
	}, {
		.compatible = "netron-dy,e231732",
		.data = &netron_dy_e231732,
	}, {
		.compatible = "newhaven,nhd-4.3-480272ef-atxl",
		.data = &newhaven_nhd_43_480272ef_atxl,
	}, {
		.compatible = "nlt,nl192108ac18-02d",
		.data = &nlt_nl192108ac18_02d,
	}, {
		.compatible = "nvd,9128",
		.data = &nvd_9128,
	}, {
		.compatible = "okaya,rs800480t-7x0gp",
		.data = &okaya_rs800480t_7x0gp,
	}, {
		.compatible = "olimex,lcd-olinuxino-43-ts",
		.data = &olimex_lcd_olinuxino_43ts,
	}, {
		.compatible = "ontat,yx700wv03",
		.data = &ontat_yx700wv03,
	}, {
		/* com37h3m05dtc and com37h3m99dtc share one descriptor */
		.compatible = "ortustech,com37h3m05dtc",
		.data = &ortustech_com37h3m,
	}, {
		.compatible = "ortustech,com37h3m99dtc",
		.data = &ortustech_com37h3m,
	}, {
		.compatible = "ortustech,com43h4m85ulc",
		.data = &ortustech_com43h4m85ulc,
	}, {
		.compatible = "osddisplays,osd070t1718-19ts",
		.data = &osddisplays_osd070t1718_19ts,
	}, {
		.compatible = "pda,91-00156-a0",
		.data = &pda_91_00156_a0,
	}, {
		.compatible = "powertip,ph800480t013-idf02",
		.data = &powertip_ph800480t013_idf02,
	}, {
		.compatible = "qiaodian,qd43003c0-40",
		.data = &qd43003c0_40,
	}, {
		.compatible = "qishenglong,gopher2b-lcd",
		.data = &qishenglong_gopher2b_lcd,
	}, {
		.compatible = "rocktech,rk043fn48h",
		.data = &rocktech_rk043fn48h,
	}, {
		.compatible = "rocktech,rk070er9427",
		.data = &rocktech_rk070er9427,
	}, {
		.compatible = "rocktech,rk101ii01d-ct",
		.data = &rocktech_rk101ii01d_ct,
	}, {
		.compatible = "samsung,ltl101al01",
		.data = &samsung_ltl101al01,
	}, {
		.compatible = "samsung,ltn101nt05",
		.data = &samsung_ltn101nt05,
	}, {
		.compatible = "satoz,sat050at40h12r2",
		.data = &satoz_sat050at40h12r2,
	}, {
		.compatible = "sharp,lq035q7db03",
		.data = &sharp_lq035q7db03,
	}, {
		.compatible = "sharp,lq070y3dg3b",
		.data = &sharp_lq070y3dg3b,
	}, {
		.compatible = "sharp,lq101k1ly04",
		.data = &sharp_lq101k1ly04,
	}, {
		.compatible = "sharp,ls020b1dd01d",
		.data = &sharp_ls020b1dd01d,
	}, {
		.compatible = "shelly,sca07010-bfn-lnn",
		.data = &shelly_sca07010_bfn_lnn,
	}, {
		.compatible = "starry,kr070pe2t",
		.data = &starry_kr070pe2t,
	}, {
		.compatible = "startek,kd070wvfpa",
		.data = &startek_kd070wvfpa,
	}, {
		.compatible = "team-source-display,tst043015cmhx",
		.data = &tsd_tst043015cmhx,
	}, {
		.compatible = "tfc,s9700rtwv43tr-01b",
		.data = &tfc_s9700rtwv43tr_01b,
	}, {
		.compatible = "tianma,tm070jdhg30",
		.data = &tianma_tm070jdhg30,
	}, {
		.compatible = "tianma,tm070jvhg33",
		.data = &tianma_tm070jvhg33,
	}, {
		.compatible = "tianma,tm070rvhg71",
		.data = &tianma_tm070rvhg71,
	}, {
		.compatible = "ti,nspire-cx-lcd-panel",
		.data = &ti_nspire_cx_lcd_panel,
	}, {
		.compatible = "ti,nspire-classic-lcd-panel",
		.data = &ti_nspire_classic_lcd_panel,
	}, {
		.compatible = "toshiba,lt089ac29000",
		.data = &toshiba_lt089ac29000,
	}, {
		.compatible = "tpk,f07a-0102",
		.data = &tpk_f07a_0102,
	}, {
		.compatible = "tpk,f10a-0102",
		.data = &tpk_f10a_0102,
	}, {
		/* Several umsh-8596md variants map onto two shared descriptors */
		.compatible = "urt,umsh-8596md-t",
		.data = &urt_umsh_8596md_parallel,
	}, {
		.compatible = "urt,umsh-8596md-1t",
		.data = &urt_umsh_8596md_parallel,
	}, {
		.compatible = "urt,umsh-8596md-7t",
		.data = &urt_umsh_8596md_parallel,
	}, {
		.compatible = "urt,umsh-8596md-11t",
		.data = &urt_umsh_8596md_lvds,
	}, {
		.compatible = "urt,umsh-8596md-19t",
		.data = &urt_umsh_8596md_lvds,
	}, {
		.compatible = "urt,umsh-8596md-20t",
		.data = &urt_umsh_8596md_parallel,
	}, {
		.compatible = "vivax,tpc9150-panel",
		.data = &vivax_tpc9150_panel,
	}, {
		.compatible = "vxt,vl050-8048nt-c01",
		.data = &vl050_8048nt_c01,
	}, {
		.compatible = "winstar,wf35ltiacd",
		.data = &winstar_wf35ltiacd,
	}, {
		.compatible = "yes-optoelectronics,ytc700tlag-05-201c",
		.data = &yes_optoelectronics_ytc700tlag_05_201c,
	}, {
		/* Must be the last entry */
		.compatible = "panel-dpi",
		.data = &panel_dpi,
	}, {
		/* sentinel */
	}
};
MODULE_DEVICE_TABLE(of, platform_of_match);
/*
 * Platform-bus probe: look up the panel descriptor matched via the DT
 * compatible string and hand off to the common probe path.
 */
static int panel_simple_platform_probe(struct platform_device *pdev)
{
	const struct panel_desc *panel_desc =
		of_device_get_match_data(&pdev->dev);

	/* No match data means we were bound without a usable descriptor. */
	if (!panel_desc)
		return -ENODEV;

	return panel_simple_probe(&pdev->dev, panel_desc);
}
/* Unbind hook: tear down the panel registered by the platform probe. */
static void panel_simple_platform_remove(struct platform_device *pdev)
{
	panel_simple_remove(&pdev->dev);
}
/* Power the panel down cleanly on system shutdown/reboot. */
static void panel_simple_platform_shutdown(struct platform_device *pdev)
{
	panel_simple_shutdown(&pdev->dev);
}
/*
 * PM callbacks: runtime PM drives panel_simple_suspend/resume directly;
 * system sleep reuses the runtime-PM state via pm_runtime_force_*().
 */
static const struct dev_pm_ops panel_simple_pm_ops = {
	SET_RUNTIME_PM_OPS(panel_simple_suspend, panel_simple_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
};
/* Platform driver for parallel/LVDS panels (see platform_of_match). */
static struct platform_driver panel_simple_platform_driver = {
	.driver = {
		.name = "panel-simple",
		.of_match_table = platform_of_match,
		.pm = &panel_simple_pm_ops,
	},
	.probe = panel_simple_platform_probe,
	.remove_new = panel_simple_platform_remove,
	.shutdown = panel_simple_platform_shutdown,
};
/*
 * DSI panel descriptor: wraps the generic panel_desc with the MIPI DSI
 * host parameters (mode flags, pixel format, lane count) needed to
 * configure the DSI link at probe time.
 */
struct panel_desc_dsi {
	struct panel_desc desc;
	unsigned long flags;
	enum mipi_dsi_pixel_format format;
	unsigned int lanes;
};
/* AUO B080UAN01: single 1200x1920 timing */
static const struct drm_display_mode auo_b080uan01_mode = {
	.clock = 154500,
	.hdisplay = 1200,
	.hsync_start = 1200 + 62,
	.hsync_end = 1200 + 62 + 4,
	.htotal = 1200 + 62 + 4 + 62,
	.vdisplay = 1920,
	.vsync_start = 1920 + 9,
	.vsync_end = 1920 + 9 + 2,
	.vtotal = 1920 + 9 + 2 + 8,
};
/* AUO B080UAN01: 4-lane DSI video mode, non-continuous clock */
static const struct panel_desc_dsi auo_b080uan01 = {
	.desc = {
		.modes = &auo_b080uan01_mode,
		.num_modes = 1,
		.bpc = 8,
		.size = {
			.width = 108,
			.height = 272,
		},
		.connector_type = DRM_MODE_CONNECTOR_DSI,
	},
	.flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_CLOCK_NON_CONTINUOUS,
	.format = MIPI_DSI_FMT_RGB888,
	.lanes = 4,
};
/* BOE TV080WUM-NL0: single 1200x1920 timing, active-low syncs */
static const struct drm_display_mode boe_tv080wum_nl0_mode = {
	.clock = 160000,
	.hdisplay = 1200,
	.hsync_start = 1200 + 120,
	.hsync_end = 1200 + 120 + 20,
	.htotal = 1200 + 120 + 20 + 21,
	.vdisplay = 1920,
	.vsync_start = 1920 + 21,
	.vsync_end = 1920 + 21 + 3,
	.vtotal = 1920 + 21 + 3 + 18,
	.flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
};
/* BOE TV080WUM-NL0: 4-lane DSI burst video mode with sync pulses */
static const struct panel_desc_dsi boe_tv080wum_nl0 = {
	.desc = {
		.modes = &boe_tv080wum_nl0_mode,
		.num_modes = 1,
		.size = {
			.width = 107,
			.height = 172,
		},
		.connector_type = DRM_MODE_CONNECTOR_DSI,
	},
	.flags = MIPI_DSI_MODE_VIDEO |
		 MIPI_DSI_MODE_VIDEO_BURST |
		 MIPI_DSI_MODE_VIDEO_SYNC_PULSE,
	.format = MIPI_DSI_FMT_RGB888,
	.lanes = 4,
};
/* LG LD070WX3-SL01: single 800x1280 timing */
static const struct drm_display_mode lg_ld070wx3_sl01_mode = {
	.clock = 71000,
	.hdisplay = 800,
	.hsync_start = 800 + 32,
	.hsync_end = 800 + 32 + 1,
	.htotal = 800 + 32 + 1 + 57,
	.vdisplay = 1280,
	.vsync_start = 1280 + 28,
	.vsync_end = 1280 + 28 + 1,
	.vtotal = 1280 + 28 + 1 + 14,
};
/* LG LD070WX3-SL01: 4-lane DSI video mode, non-continuous clock */
static const struct panel_desc_dsi lg_ld070wx3_sl01 = {
	.desc = {
		.modes = &lg_ld070wx3_sl01_mode,
		.num_modes = 1,
		.bpc = 8,
		.size = {
			.width = 94,
			.height = 151,
		},
		.connector_type = DRM_MODE_CONNECTOR_DSI,
	},
	.flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_CLOCK_NON_CONTINUOUS,
	.format = MIPI_DSI_FMT_RGB888,
	.lanes = 4,
};
/* LG LH500WX1-SD03: single 720x1280 timing */
static const struct drm_display_mode lg_lh500wx1_sd03_mode = {
	.clock = 67000,
	.hdisplay = 720,
	.hsync_start = 720 + 12,
	.hsync_end = 720 + 12 + 4,
	.htotal = 720 + 12 + 4 + 112,
	.vdisplay = 1280,
	.vsync_start = 1280 + 8,
	.vsync_end = 1280 + 8 + 4,
	.vtotal = 1280 + 8 + 4 + 12,
};
/* LG LH500WX1-SD03: 4-lane DSI video mode */
static const struct panel_desc_dsi lg_lh500wx1_sd03 = {
	.desc = {
		.modes = &lg_lh500wx1_sd03_mode,
		.num_modes = 1,
		.bpc = 8,
		.size = {
			.width = 62,
			.height = 110,
		},
		.connector_type = DRM_MODE_CONNECTOR_DSI,
	},
	.flags = MIPI_DSI_MODE_VIDEO,
	.format = MIPI_DSI_FMT_RGB888,
	.lanes = 4,
};
/* Panasonic VVX10F004B00: single 1920x1200 timing */
static const struct drm_display_mode panasonic_vvx10f004b00_mode = {
	.clock = 157200,
	.hdisplay = 1920,
	.hsync_start = 1920 + 154,
	.hsync_end = 1920 + 154 + 16,
	.htotal = 1920 + 154 + 16 + 32,
	.vdisplay = 1200,
	.vsync_start = 1200 + 17,
	.vsync_end = 1200 + 17 + 2,
	.vtotal = 1200 + 17 + 2 + 16,
};
/* Panasonic VVX10F004B00: 4-lane DSI video mode with sync pulses */
static const struct panel_desc_dsi panasonic_vvx10f004b00 = {
	.desc = {
		.modes = &panasonic_vvx10f004b00_mode,
		.num_modes = 1,
		.bpc = 8,
		.size = {
			.width = 217,
			.height = 136,
		},
		.connector_type = DRM_MODE_CONNECTOR_DSI,
	},
	.flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
		 MIPI_DSI_CLOCK_NON_CONTINUOUS,
	.format = MIPI_DSI_FMT_RGB888,
	.lanes = 4,
};
/* LG ACX467AKM-7: single 1080x1920 timing */
static const struct drm_display_mode lg_acx467akm_7_mode = {
	.clock = 150000,
	.hdisplay = 1080,
	.hsync_start = 1080 + 2,
	.hsync_end = 1080 + 2 + 2,
	.htotal = 1080 + 2 + 2 + 2,
	.vdisplay = 1920,
	.vsync_start = 1920 + 2,
	.vsync_end = 1920 + 2 + 2,
	.vtotal = 1920 + 2 + 2 + 2,
};
/* LG ACX467AKM-7: 4-lane DSI, no special mode flags (command mode) */
static const struct panel_desc_dsi lg_acx467akm_7 = {
	.desc = {
		.modes = &lg_acx467akm_7_mode,
		.num_modes = 1,
		.bpc = 8,
		.size = {
			.width = 62,
			.height = 110,
		},
		.connector_type = DRM_MODE_CONNECTOR_DSI,
	},
	.flags = 0,
	.format = MIPI_DSI_FMT_RGB888,
	.lanes = 4,
};
/* OSD OSD101T2045-53TS: single 1920x1200 timing, active-low syncs */
static const struct drm_display_mode osd101t2045_53ts_mode = {
	.clock = 154500,
	.hdisplay = 1920,
	.hsync_start = 1920 + 112,
	.hsync_end = 1920 + 112 + 16,
	.htotal = 1920 + 112 + 16 + 32,
	.vdisplay = 1200,
	.vsync_start = 1200 + 16,
	.vsync_end = 1200 + 16 + 2,
	.vtotal = 1200 + 16 + 2 + 16,
	.flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
};
/* OSD OSD101T2045-53TS: 4-lane DSI burst video mode, no EoT packets */
static const struct panel_desc_dsi osd101t2045_53ts = {
	.desc = {
		.modes = &osd101t2045_53ts_mode,
		.num_modes = 1,
		.bpc = 8,
		.size = {
			.width = 217,
			.height = 136,
		},
		.connector_type = DRM_MODE_CONNECTOR_DSI,
	},
	.flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
		 MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
		 MIPI_DSI_MODE_NO_EOT_PACKET,
	.format = MIPI_DSI_FMT_RGB888,
	.lanes = 4,
};
/* DT match table for the DSI-attached panels handled by this driver. */
static const struct of_device_id dsi_of_match[] = {
	{
		.compatible = "auo,b080uan01",
		.data = &auo_b080uan01
	}, {
		.compatible = "boe,tv080wum-nl0",
		.data = &boe_tv080wum_nl0
	}, {
		.compatible = "lg,ld070wx3-sl01",
		.data = &lg_ld070wx3_sl01
	}, {
		.compatible = "lg,lh500wx1-sd03",
		.data = &lg_lh500wx1_sd03
	}, {
		.compatible = "panasonic,vvx10f004b00",
		.data = &panasonic_vvx10f004b00
	}, {
		.compatible = "lg,acx467akm-7",
		.data = &lg_acx467akm_7
	}, {
		.compatible = "osddisplays,osd101t2045-53ts",
		.data = &osd101t2045_53ts
	}, {
		/* sentinel */
	}
};
MODULE_DEVICE_TABLE(of, dsi_of_match);
/*
 * DSI probe: run the common panel probe, program the DSI host link
 * parameters from the matched descriptor, then attach to the host.
 * On attach failure the panel registered by panel_simple_probe() is
 * removed again so we do not leak a dangling drm_panel.
 */
static int panel_simple_dsi_probe(struct mipi_dsi_device *dsi)
{
	const struct panel_desc_dsi *dsi_desc;
	int ret;

	dsi_desc = of_device_get_match_data(&dsi->dev);
	if (!dsi_desc)
		return -ENODEV;

	ret = panel_simple_probe(&dsi->dev, &dsi_desc->desc);
	if (ret < 0)
		return ret;

	/* Configure the DSI link before attaching to the host. */
	dsi->mode_flags = dsi_desc->flags;
	dsi->format = dsi_desc->format;
	dsi->lanes = dsi_desc->lanes;

	ret = mipi_dsi_attach(dsi);
	if (ret) {
		struct panel_simple *panel = mipi_dsi_get_drvdata(dsi);

		drm_panel_remove(&panel->base);
	}

	return ret;
}
/* DSI unbind: detach from the host (best effort), then remove the panel. */
static void panel_simple_dsi_remove(struct mipi_dsi_device *dsi)
{
	int err;
	err = mipi_dsi_detach(dsi);
	if (err < 0)
		dev_err(&dsi->dev, "failed to detach from DSI host: %d\n", err);
	panel_simple_remove(&dsi->dev);
}
/* Power the DSI panel down cleanly on system shutdown/reboot. */
static void panel_simple_dsi_shutdown(struct mipi_dsi_device *dsi)
{
	panel_simple_shutdown(&dsi->dev);
}
/* MIPI DSI driver for the panels in dsi_of_match. */
static struct mipi_dsi_driver panel_simple_dsi_driver = {
	.driver = {
		.name = "panel-simple-dsi",
		.of_match_table = dsi_of_match,
		.pm = &panel_simple_pm_ops,
	},
	.probe = panel_simple_dsi_probe,
	.remove = panel_simple_dsi_remove,
	.shutdown = panel_simple_dsi_shutdown,
};
/*
 * Module init: register the platform driver, and the DSI driver as well
 * when DSI support is compiled in.  If DSI registration fails, the
 * platform driver is unregistered again so init is all-or-nothing.
 */
static int __init panel_simple_init(void)
{
	int ret;

	ret = platform_driver_register(&panel_simple_platform_driver);
	if (ret < 0)
		return ret;

	if (!IS_ENABLED(CONFIG_DRM_MIPI_DSI))
		return 0;

	ret = mipi_dsi_driver_register(&panel_simple_dsi_driver);
	if (ret < 0) {
		platform_driver_unregister(&panel_simple_platform_driver);
		return ret;
	}

	return 0;
}
module_init(panel_simple_init);
/* Module exit: unregister drivers in reverse order of registration. */
static void __exit panel_simple_exit(void)
{
	if (IS_ENABLED(CONFIG_DRM_MIPI_DSI))
		mipi_dsi_driver_unregister(&panel_simple_dsi_driver);
	platform_driver_unregister(&panel_simple_platform_driver);
}
module_exit(panel_simple_exit);
MODULE_AUTHOR("Thierry Reding <[email protected]>");
MODULE_DESCRIPTION("DRM Driver for Simple Panels");
MODULE_LICENSE("GPL and additional rights");
| linux-master | drivers/gpu/drm/panel/panel-simple.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Sharp LS037V7DW01 LCD Panel Driver
*
* Copyright (C) 2019 Texas Instruments Incorporated
*
* Based on the omapdrm-specific panel-sharp-ls037v7dw01 driver
*
* Copyright (C) 2013 Texas Instruments Incorporated
* Author: Tomi Valkeinen <[email protected]>
*/
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <drm/drm_connector.h>
#include <drm/drm_modes.h>
#include <drm/drm_panel.h>
/* Per-device state for the Sharp LS037V7DW01 DPI panel. */
struct ls037v7dw01_panel {
	struct drm_panel panel;
	struct platform_device *pdev;
	struct regulator *vdd;		/* "envdd" supply */
	struct gpio_desc *resb_gpio;	/* low = reset active min 20 us */
	struct gpio_desc *ini_gpio;	/* high = power on */
	struct gpio_desc *mo_gpio;	/* low = 480x640, high = 240x320 */
	struct gpio_desc *lr_gpio;	/* high = conventional horizontal scanning */
	struct gpio_desc *ud_gpio;	/* high = conventional vertical scanning */
};
/* Recover the driver state from the embedded drm_panel. */
#define to_ls037v7dw01_device(p) \
	container_of(p, struct ls037v7dw01_panel, panel)
/* Disable: drop INI (power) and assert RESB (reset), then settle. */
static int ls037v7dw01_disable(struct drm_panel *panel)
{
	struct ls037v7dw01_panel *lcd = to_ls037v7dw01_device(panel);
	gpiod_set_value_cansleep(lcd->ini_gpio, 0);
	gpiod_set_value_cansleep(lcd->resb_gpio, 0);
	/* Wait at least 5 vsyncs after disabling the LCD. */
	msleep(100);
	return 0;
}
/* Unprepare: cut the panel supply after the pipeline is off. */
static int ls037v7dw01_unprepare(struct drm_panel *panel)
{
	struct ls037v7dw01_panel *lcd = to_ls037v7dw01_device(panel);
	regulator_disable(lcd->vdd);
	return 0;
}
/*
 * Prepare: switch on the panel supply before the video stream starts.
 * Returns the regulator_enable() result; a failure is also logged.
 */
static int ls037v7dw01_prepare(struct drm_panel *panel)
{
	struct ls037v7dw01_panel *lcd = to_ls037v7dw01_device(panel);
	int ret = regulator_enable(lcd->vdd);

	if (ret < 0)
		dev_err(&lcd->pdev->dev, "%s: failed to enable regulator\n",
			__func__);

	return ret;
}
/* Enable: release reset (RESB high) and raise INI once the stream runs. */
static int ls037v7dw01_enable(struct drm_panel *panel)
{
	struct ls037v7dw01_panel *lcd = to_ls037v7dw01_device(panel);
	/* Wait couple of vsyncs before enabling the LCD. */
	msleep(50);
	gpiod_set_value_cansleep(lcd->resb_gpio, 1);
	gpiod_set_value_cansleep(lcd->ini_gpio, 1);
	return 0;
}
/* Fixed 480x640 portrait timing of the LS037V7DW01, active-low syncs. */
static const struct drm_display_mode ls037v7dw01_mode = {
	.clock = 19200,
	.hdisplay = 480,
	.hsync_start = 480 + 1,
	.hsync_end = 480 + 1 + 2,
	.htotal = 480 + 1 + 2 + 28,
	.vdisplay = 640,
	.vsync_start = 640 + 1,
	.vsync_end = 640 + 1 + 1,
	.vtotal = 640 + 1 + 1 + 1,
	.type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
	.flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
	.width_mm = 56,
	.height_mm = 75,
};
/*
 * Report the single fixed mode plus physical size and bus sampling
 * flags on the connector.  Returns the number of modes added (1), or
 * -ENOMEM if the mode could not be duplicated.
 */
static int ls037v7dw01_get_modes(struct drm_panel *panel,
				 struct drm_connector *connector)
{
	struct drm_display_mode *mode;
	mode = drm_mode_duplicate(connector->dev, &ls037v7dw01_mode);
	if (!mode)
		return -ENOMEM;
	drm_mode_set_name(mode);
	drm_mode_probed_add(connector, mode);
	connector->display_info.width_mm = ls037v7dw01_mode.width_mm;
	connector->display_info.height_mm = ls037v7dw01_mode.height_mm;
	/*
	 * FIXME: According to the datasheet pixel data is sampled on the
	 * rising edge of the clock, but the code running on the SDP3430
	 * indicates sampling on the negative edge. This should be tested on a
	 * real device.
	 */
	connector->display_info.bus_flags = DRM_BUS_FLAG_DE_HIGH
					  | DRM_BUS_FLAG_SYNC_SAMPLE_POSEDGE
					  | DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE;
	return 1;
}
/* drm_panel operations for this panel. */
static const struct drm_panel_funcs ls037v7dw01_funcs = {
	.disable = ls037v7dw01_disable,
	.unprepare = ls037v7dw01_unprepare,
	.prepare = ls037v7dw01_prepare,
	.enable = ls037v7dw01_enable,
	.get_modes = ls037v7dw01_get_modes,
};
/*
 * Bind the panel device: acquire the supply regulator and the five
 * control GPIOs (enable/INI, reset/RESB and the three mode-selection
 * lines), then register the DRM panel.
 *
 * All resource-acquisition error paths now use dev_err_probe(): the
 * three "mode" GPIO paths previously used plain dev_err() + return,
 * which was inconsistent with the regulator/enable/reset paths above
 * and logged spurious errors on -EPROBE_DEFER.
 */
static int ls037v7dw01_probe(struct platform_device *pdev)
{
	struct ls037v7dw01_panel *lcd;

	lcd = devm_kzalloc(&pdev->dev, sizeof(*lcd), GFP_KERNEL);
	if (!lcd)
		return -ENOMEM;

	platform_set_drvdata(pdev, lcd);
	lcd->pdev = pdev;

	lcd->vdd = devm_regulator_get(&pdev->dev, "envdd");
	if (IS_ERR(lcd->vdd))
		return dev_err_probe(&pdev->dev, PTR_ERR(lcd->vdd),
				     "failed to get regulator\n");

	lcd->ini_gpio = devm_gpiod_get(&pdev->dev, "enable", GPIOD_OUT_LOW);
	if (IS_ERR(lcd->ini_gpio))
		return dev_err_probe(&pdev->dev, PTR_ERR(lcd->ini_gpio),
				     "failed to get enable gpio\n");

	lcd->resb_gpio = devm_gpiod_get(&pdev->dev, "reset", GPIOD_OUT_LOW);
	if (IS_ERR(lcd->resb_gpio))
		return dev_err_probe(&pdev->dev, PTR_ERR(lcd->resb_gpio),
				     "failed to get reset gpio\n");

	lcd->mo_gpio = devm_gpiod_get_index(&pdev->dev, "mode", 0,
					    GPIOD_OUT_LOW);
	if (IS_ERR(lcd->mo_gpio))
		return dev_err_probe(&pdev->dev, PTR_ERR(lcd->mo_gpio),
				     "failed to get mode[0] gpio\n");

	lcd->lr_gpio = devm_gpiod_get_index(&pdev->dev, "mode", 1,
					    GPIOD_OUT_LOW);
	if (IS_ERR(lcd->lr_gpio))
		return dev_err_probe(&pdev->dev, PTR_ERR(lcd->lr_gpio),
				     "failed to get mode[1] gpio\n");

	lcd->ud_gpio = devm_gpiod_get_index(&pdev->dev, "mode", 2,
					    GPIOD_OUT_LOW);
	if (IS_ERR(lcd->ud_gpio))
		return dev_err_probe(&pdev->dev, PTR_ERR(lcd->ud_gpio),
				     "failed to get mode[2] gpio\n");

	drm_panel_init(&lcd->panel, &pdev->dev, &ls037v7dw01_funcs,
		       DRM_MODE_CONNECTOR_DPI);

	drm_panel_add(&lcd->panel);

	return 0;
}
/* Unbind: deregister the panel, then make sure it is powered down. */
static void ls037v7dw01_remove(struct platform_device *pdev)
{
	struct ls037v7dw01_panel *lcd = platform_get_drvdata(pdev);
	drm_panel_remove(&lcd->panel);
	drm_panel_disable(&lcd->panel);
	drm_panel_unprepare(&lcd->panel);
}
/* DT match table. */
static const struct of_device_id ls037v7dw01_of_match[] = {
	{ .compatible = "sharp,ls037v7dw01", },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, ls037v7dw01_of_match);
static struct platform_driver ls037v7dw01_driver = {
	.probe = ls037v7dw01_probe,
	.remove_new = ls037v7dw01_remove,
	.driver = {
		.name = "panel-sharp-ls037v7dw01",
		.of_match_table = ls037v7dw01_of_match,
	},
};
module_platform_driver(ls037v7dw01_driver);
MODULE_AUTHOR("Tomi Valkeinen <[email protected]>")
MODULE_DESCRIPTION("Sharp LS037V7DW01 Panel Driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/gpu/drm/panel/panel-sharp-ls037v7dw01.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2015 Red Hat
* Copyright (C) 2015 Sony Mobile Communications Inc.
* Author: Werner Johansson <[email protected]>
*
* Based on AUO panel driver by Rob Clark <[email protected]>
*/
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/regulator/consumer.h>
#include <video/mipi_display.h>
#include <drm/drm_crtc.h>
#include <drm/drm_device.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_panel.h>
/* Per-device state for the Sharp LS043T1LE01 (Novatek NT35565) DSI panel. */
struct sharp_nt_panel {
	struct drm_panel base;
	struct mipi_dsi_device *dsi;
	struct regulator *supply;	/* "avdd" supply */
	struct gpio_desc *reset_gpio;	/* may be NULL if not wired */
	bool prepared;			/* tracks prepare/unprepare state */
};
/* Recover the driver state from the embedded drm_panel. */
static inline struct sharp_nt_panel *to_sharp_nt_panel(struct drm_panel *panel)
{
	return container_of(panel, struct sharp_nt_panel, base);
}
/*
 * Bring the controller out of sleep and program its static
 * configuration.  DCS commands are sent in low-power mode; the command
 * order and the 120 ms post-exit-sleep delay are part of the panel's
 * required init sequence — do not reorder.
 */
static int sharp_nt_panel_init(struct sharp_nt_panel *sharp_nt)
{
	struct mipi_dsi_device *dsi = sharp_nt->dsi;
	int ret;
	dsi->mode_flags |= MIPI_DSI_MODE_LPM;
	ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
	if (ret < 0)
		return ret;
	msleep(120);
	/* Novatek two-lane operation */
	ret = mipi_dsi_dcs_write(dsi, 0xae, (u8[]){ 0x03 }, 1);
	if (ret < 0)
		return ret;
	/* Set both MCU and RGB I/F to 24bpp */
	ret = mipi_dsi_dcs_set_pixel_format(dsi, MIPI_DCS_PIXEL_FMT_24BIT |
					(MIPI_DCS_PIXEL_FMT_24BIT << 4));
	if (ret < 0)
		return ret;
	return 0;
}
/*
 * Turn the display on via DCS, using low-power command transmission.
 * Returns 0 on success or a negative DCS error code.
 */
static int sharp_nt_panel_on(struct sharp_nt_panel *sharp_nt)
{
	struct mipi_dsi_device *dsi = sharp_nt->dsi;
	int ret;

	dsi->mode_flags |= MIPI_DSI_MODE_LPM;

	ret = mipi_dsi_dcs_set_display_on(dsi);
	return ret < 0 ? ret : 0;
}
/*
 * Turn the display off and enter sleep mode, in that order, with
 * high-speed command transmission (LPM cleared).
 */
static int sharp_nt_panel_off(struct sharp_nt_panel *sharp_nt)
{
	struct mipi_dsi_device *dsi = sharp_nt->dsi;
	int ret;
	dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
	ret = mipi_dsi_dcs_set_display_off(dsi);
	if (ret < 0)
		return ret;
	ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
	if (ret < 0)
		return ret;
	return 0;
}
/*
 * Unprepare: shut the panel down via DCS, then remove power and assert
 * reset.  Idempotent via the "prepared" flag.
 */
static int sharp_nt_panel_unprepare(struct drm_panel *panel)
{
	struct sharp_nt_panel *sharp_nt = to_sharp_nt_panel(panel);
	int ret;
	if (!sharp_nt->prepared)
		return 0;
	ret = sharp_nt_panel_off(sharp_nt);
	if (ret < 0) {
		dev_err(panel->dev, "failed to set panel off: %d\n", ret);
		return ret;
	}
	regulator_disable(sharp_nt->supply);
	if (sharp_nt->reset_gpio)
		gpiod_set_value(sharp_nt->reset_gpio, 0);
	sharp_nt->prepared = false;
	return 0;
}
/*
 * Prepare: power the panel, pulse the reset line (release/assert/release
 * with the datasheet delays), then run the init and display-on DCS
 * sequences.  On any failure the supply is cut and reset re-asserted.
 * Idempotent via the "prepared" flag.
 */
static int sharp_nt_panel_prepare(struct drm_panel *panel)
{
	struct sharp_nt_panel *sharp_nt = to_sharp_nt_panel(panel);
	int ret;
	if (sharp_nt->prepared)
		return 0;
	ret = regulator_enable(sharp_nt->supply);
	if (ret < 0)
		return ret;
	msleep(20);
	if (sharp_nt->reset_gpio) {
		gpiod_set_value(sharp_nt->reset_gpio, 1);
		msleep(1);
		gpiod_set_value(sharp_nt->reset_gpio, 0);
		msleep(1);
		gpiod_set_value(sharp_nt->reset_gpio, 1);
		msleep(10);
	}
	ret = sharp_nt_panel_init(sharp_nt);
	if (ret < 0) {
		dev_err(panel->dev, "failed to init panel: %d\n", ret);
		goto poweroff;
	}
	ret = sharp_nt_panel_on(sharp_nt);
	if (ret < 0) {
		dev_err(panel->dev, "failed to set panel on: %d\n", ret);
		goto poweroff;
	}
	sharp_nt->prepared = true;
	return 0;
poweroff:
	regulator_disable(sharp_nt->supply);
	if (sharp_nt->reset_gpio)
		gpiod_set_value(sharp_nt->reset_gpio, 0);
	return ret;
}
/* Fixed 540x960 (qHD) timing; clock computed from totals at 60 Hz. */
static const struct drm_display_mode default_mode = {
	.clock = (540 + 48 + 32 + 80) * (960 + 3 + 10 + 15) * 60 / 1000,
	.hdisplay = 540,
	.hsync_start = 540 + 48,
	.hsync_end = 540 + 48 + 32,
	.htotal = 540 + 48 + 32 + 80,
	.vdisplay = 960,
	.vsync_start = 960 + 3,
	.vsync_end = 960 + 3 + 10,
	.vtotal = 960 + 3 + 10 + 15,
};
/*
 * Report the single fixed mode and the physical panel size.  Returns
 * the number of modes added (1), or -ENOMEM on allocation failure.
 */
static int sharp_nt_panel_get_modes(struct drm_panel *panel,
				    struct drm_connector *connector)
{
	struct drm_display_mode *mode;
	mode = drm_mode_duplicate(connector->dev, &default_mode);
	if (!mode) {
		dev_err(panel->dev, "failed to add mode %ux%u@%u\n",
			default_mode.hdisplay, default_mode.vdisplay,
			drm_mode_vrefresh(&default_mode));
		return -ENOMEM;
	}
	drm_mode_set_name(mode);
	drm_mode_probed_add(connector, mode);
	connector->display_info.width_mm = 54;
	connector->display_info.height_mm = 95;
	return 1;
}
/* drm_panel operations for this panel. */
static const struct drm_panel_funcs sharp_nt_panel_funcs = {
	.unprepare = sharp_nt_panel_unprepare,
	.prepare = sharp_nt_panel_prepare,
	.get_modes = sharp_nt_panel_get_modes,
};
/*
 * Request resources and register the DRM panel.
 *
 * The reset GPIO is optional on some boards, so use
 * devm_gpiod_get_optional(): a missing "reset-gpios" property yields a
 * NULL descriptor (the gpiod_set_value() calls elsewhere skip it),
 * while a real error — including -EPROBE_DEFER — is now propagated.
 * Previously devm_gpiod_get() errors were only logged and the driver
 * silently continued without reset control, which also broke probe
 * deferral.
 */
static int sharp_nt_panel_add(struct sharp_nt_panel *sharp_nt)
{
	struct device *dev = &sharp_nt->dsi->dev;
	int ret;

	sharp_nt->supply = devm_regulator_get(dev, "avdd");
	if (IS_ERR(sharp_nt->supply))
		return PTR_ERR(sharp_nt->supply);

	sharp_nt->reset_gpio = devm_gpiod_get_optional(dev, "reset",
						       GPIOD_OUT_LOW);
	if (IS_ERR(sharp_nt->reset_gpio)) {
		dev_err(dev, "cannot get reset-gpios %ld\n",
			PTR_ERR(sharp_nt->reset_gpio));
		return PTR_ERR(sharp_nt->reset_gpio);
	}

	/* Keep reset asserted until prepare (GPIOD_OUT_LOW starts low). */
	gpiod_set_value(sharp_nt->reset_gpio, 0);

	drm_panel_init(&sharp_nt->base, &sharp_nt->dsi->dev,
		       &sharp_nt_panel_funcs, DRM_MODE_CONNECTOR_DSI);

	ret = drm_panel_of_backlight(&sharp_nt->base);
	if (ret)
		return ret;

	drm_panel_add(&sharp_nt->base);

	return 0;
}
/* Deregister the panel if sharp_nt_panel_add() completed. */
static void sharp_nt_panel_del(struct sharp_nt_panel *sharp_nt)
{
	if (!sharp_nt->base.dev)
		return;

	drm_panel_remove(&sharp_nt->base);
}
/*
 * DSI probe: configure the 2-lane RGB888 video-mode link, allocate the
 * driver state, register the panel and attach to the DSI host.  On
 * attach failure the panel registration is rolled back.
 */
static int sharp_nt_panel_probe(struct mipi_dsi_device *dsi)
{
	struct sharp_nt_panel *sharp_nt;
	int ret;
	dsi->lanes = 2;
	dsi->format = MIPI_DSI_FMT_RGB888;
	dsi->mode_flags = MIPI_DSI_MODE_VIDEO |
			MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
			MIPI_DSI_MODE_VIDEO_HSE |
			MIPI_DSI_CLOCK_NON_CONTINUOUS |
			MIPI_DSI_MODE_NO_EOT_PACKET;
	sharp_nt = devm_kzalloc(&dsi->dev, sizeof(*sharp_nt), GFP_KERNEL);
	if (!sharp_nt)
		return -ENOMEM;
	mipi_dsi_set_drvdata(dsi, sharp_nt);
	sharp_nt->dsi = dsi;
	ret = sharp_nt_panel_add(sharp_nt);
	if (ret < 0)
		return ret;
	ret = mipi_dsi_attach(dsi);
	if (ret < 0) {
		sharp_nt_panel_del(sharp_nt);
		return ret;
	}
	return 0;
}
/* Unbind: disable the panel, detach from the host, drop registration. */
static void sharp_nt_panel_remove(struct mipi_dsi_device *dsi)
{
	struct sharp_nt_panel *sharp_nt = mipi_dsi_get_drvdata(dsi);
	int ret;
	ret = drm_panel_disable(&sharp_nt->base);
	if (ret < 0)
		dev_err(&dsi->dev, "failed to disable panel: %d\n", ret);
	ret = mipi_dsi_detach(dsi);
	if (ret < 0)
		dev_err(&dsi->dev, "failed to detach from DSI host: %d\n", ret);
	sharp_nt_panel_del(sharp_nt);
}
/* Power the panel down cleanly on system shutdown/reboot. */
static void sharp_nt_panel_shutdown(struct mipi_dsi_device *dsi)
{
	struct sharp_nt_panel *sharp_nt = mipi_dsi_get_drvdata(dsi);
	drm_panel_disable(&sharp_nt->base);
}
/* DT match table. */
static const struct of_device_id sharp_nt_of_match[] = {
	{ .compatible = "sharp,ls043t1le01-qhd", },
	{ }
};
MODULE_DEVICE_TABLE(of, sharp_nt_of_match);
static struct mipi_dsi_driver sharp_nt_panel_driver = {
	.driver = {
		.name = "panel-sharp-ls043t1le01-qhd",
		.of_match_table = sharp_nt_of_match,
	},
	.probe = sharp_nt_panel_probe,
	.remove = sharp_nt_panel_remove,
	.shutdown = sharp_nt_panel_shutdown,
};
module_mipi_dsi_driver(sharp_nt_panel_driver);
MODULE_AUTHOR("Werner Johansson <[email protected]>");
MODULE_DESCRIPTION("Sharp LS043T1LE01 NT35565-based qHD (540x960) video mode panel driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* LCD-OLinuXino support for panel driver
*
* Copyright (C) 2018 Olimex Ltd.
* Author: Stefan Mavrodiev <[email protected]>
*/
#include <linux/crc32.h>
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/regulator/consumer.h>
#include <video/videomode.h>
#include <video/display_timing.h>
#include <drm/drm_device.h>
#include <drm/drm_modes.h>
#include <drm/drm_panel.h>
/* Magic value expected in lcd_olinuxino_eeprom.header. */
#define LCD_OLINUXINO_HEADER_MAGIC	0x4F4CB727
/* Total EEPROM payload size in bytes. */
#define LCD_OLINUXINO_DATA_LEN		256
/*
 * One display timing as stored in the panel's EEPROM; several of these
 * are packed back to back in lcd_olinuxino_eeprom.reserved[].
 */
struct lcd_olinuxino_mode {
	u32 pixelclock;
	u32 hactive;
	u32 hfp;
	u32 hbp;
	u32 hpw;
	u32 vactive;
	u32 vfp;
	u32 vbp;
	u32 vpw;
	u32 refresh;
	u32 flags;
};
/* Static panel description from the EEPROM (packed on-wire layout). */
struct lcd_olinuxino_info {
	char name[32];
	u32 width_mm;
	u32 height_mm;
	u32 bpc;
	u32 bus_format;
	u32 bus_flag;
} __attribute__((__packed__));
/* Full EEPROM image: header/id, panel info, modes and CRC (packed). */
struct lcd_olinuxino_eeprom {
	u32 header;	/* must equal LCD_OLINUXINO_HEADER_MAGIC */
	u32 id;
	char revision[4];
	u32 serial;
	struct lcd_olinuxino_info info;
	u32 num_modes;
	u8 reserved[180];	/* holds num_modes lcd_olinuxino_mode entries */
	u32 checksum;
} __attribute__((__packed__));
/* Per-device driver state. */
struct lcd_olinuxino {
	struct drm_panel panel;
	struct device *dev;
	struct i2c_client *client;
	struct mutex mutex;
	bool prepared;	/* power/enable-gpio applied */
	bool enabled;	/* display pipeline running */
	struct regulator *supply;
	struct gpio_desc *enable_gpio;
	struct lcd_olinuxino_eeprom eeprom;
};
/* Recover the driver state from the embedded drm_panel. */
static inline struct lcd_olinuxino *to_lcd_olinuxino(struct drm_panel *panel)
{
	return container_of(panel, struct lcd_olinuxino, panel);
}
/*
 * drm_panel .disable hook.  The panel needs no controller commands to
 * turn off, so only the software state is tracked.
 */
static int lcd_olinuxino_disable(struct drm_panel *panel)
{
	struct lcd_olinuxino *self = to_lcd_olinuxino(panel);

	if (self->enabled)
		self->enabled = false;

	return 0;
}
/*
 * drm_panel .unprepare hook: reverse of lcd_olinuxino_prepare().
 * Drops the enable GPIO before cutting the supply.
 */
static int lcd_olinuxino_unprepare(struct drm_panel *panel)
{
	struct lcd_olinuxino *lcd = to_lcd_olinuxino(panel);

	if (lcd->prepared) {
		gpiod_set_value_cansleep(lcd->enable_gpio, 0);
		regulator_disable(lcd->supply);
		lcd->prepared = false;
	}

	return 0;
}
/*
 * drm_panel .prepare hook: enable the power rail, then assert the
 * enable GPIO.  Idempotent via the 'prepared' flag.
 */
static int lcd_olinuxino_prepare(struct drm_panel *panel)
{
	struct lcd_olinuxino *lcd = to_lcd_olinuxino(panel);
	int err;

	if (lcd->prepared)
		return 0;

	err = regulator_enable(lcd->supply);
	if (err < 0)
		return err;

	gpiod_set_value_cansleep(lcd->enable_gpio, 1);
	lcd->prepared = true;

	return 0;
}
/*
 * drm_panel .enable hook.  No controller commands are required; only
 * the software state is tracked.
 */
static int lcd_olinuxino_enable(struct drm_panel *panel)
{
	struct lcd_olinuxino *self = to_lcd_olinuxino(panel);

	if (!self->enabled)
		self->enabled = true;

	return 0;
}
/*
 * drm_panel .get_modes hook: unpack up to num_modes display modes from
 * the EEPROM "reserved" area and register them on the connector.
 * Returns the number of modes successfully added.
 */
static int lcd_olinuxino_get_modes(struct drm_panel *panel,
				   struct drm_connector *connector)
{
	struct lcd_olinuxino *lcd = to_lcd_olinuxino(panel);
	struct lcd_olinuxino_info *lcd_info = &lcd->eeprom.info;
	struct lcd_olinuxino_mode *lcd_mode;
	struct drm_display_mode *mode;
	u32 i, num = 0;

	for (i = 0; i < lcd->eeprom.num_modes; i++) {
		/* Mode entries are packed back to back in the reserved area. */
		lcd_mode = (struct lcd_olinuxino_mode *)
			   &lcd->eeprom.reserved[i * sizeof(*lcd_mode)];

		mode = drm_mode_create(connector->dev);
		if (!mode) {
			dev_err(panel->dev, "failed to add mode %ux%u@%u\n",
				lcd_mode->hactive,
				lcd_mode->vactive,
				lcd_mode->refresh);
			continue;
		}

		/* Translate porch/sync widths into drm's cumulative timings. */
		mode->clock = lcd_mode->pixelclock;
		mode->hdisplay = lcd_mode->hactive;
		mode->hsync_start = lcd_mode->hactive + lcd_mode->hfp;
		mode->hsync_end = lcd_mode->hactive + lcd_mode->hfp +
				  lcd_mode->hpw;
		mode->htotal = lcd_mode->hactive + lcd_mode->hfp +
			       lcd_mode->hpw + lcd_mode->hbp;
		mode->vdisplay = lcd_mode->vactive;
		mode->vsync_start = lcd_mode->vactive + lcd_mode->vfp;
		mode->vsync_end = lcd_mode->vactive + lcd_mode->vfp +
				  lcd_mode->vpw;
		mode->vtotal = lcd_mode->vactive + lcd_mode->vfp +
			       lcd_mode->vpw + lcd_mode->vbp;

		/* Always make the first mode preferred */
		if (i == 0)
			mode->type |= DRM_MODE_TYPE_PREFERRED;
		mode->type |= DRM_MODE_TYPE_DRIVER;

		drm_mode_set_name(mode);
		drm_mode_probed_add(connector, mode);
		num++;
	}

	/* Propagate the static panel description to the connector. */
	connector->display_info.width_mm = lcd_info->width_mm;
	connector->display_info.height_mm = lcd_info->height_mm;
	connector->display_info.bpc = lcd_info->bpc;

	if (lcd_info->bus_format)
		drm_display_info_set_bus_formats(&connector->display_info,
						 &lcd_info->bus_format, 1);
	connector->display_info.bus_flags = lcd_info->bus_flag;

	return num;
}
/* drm_panel operations for the LCD-OLinuXino panel. */
static const struct drm_panel_funcs lcd_olinuxino_funcs = {
	.disable = lcd_olinuxino_disable,
	.unprepare = lcd_olinuxino_unprepare,
	.prepare = lcd_olinuxino_prepare,
	.enable = lcd_olinuxino_enable,
	.get_modes = lcd_olinuxino_get_modes,
};
/*
 * I2C probe: read and validate the 256-byte configuration EEPROM, then
 * acquire the supply/enable resources and register the drm_panel.
 *
 * Returns 0 on success or a negative errno (-ENODEV for missing I2C
 * functionality, -EINVAL for a corrupt EEPROM, or a propagated error
 * from resource acquisition).
 */
static int lcd_olinuxino_probe(struct i2c_client *client)
{
	struct device *dev = &client->dev;
	struct lcd_olinuxino *lcd;
	u32 checksum, i;
	int ret = 0;

	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C |
				     I2C_FUNC_SMBUS_READ_I2C_BLOCK))
		return -ENODEV;

	lcd = devm_kzalloc(dev, sizeof(*lcd), GFP_KERNEL);
	if (!lcd)
		return -ENOMEM;

	i2c_set_clientdata(client, lcd);
	lcd->dev = dev;
	lcd->client = client;

	mutex_init(&lcd->mutex);

	/* Copy the EEPROM contents, one SMBus block (32 bytes) at a time. */
	for (i = 0; i < LCD_OLINUXINO_DATA_LEN; i += I2C_SMBUS_BLOCK_MAX) {
		mutex_lock(&lcd->mutex);
		ret = i2c_smbus_read_i2c_block_data(client,
						    i,
						    I2C_SMBUS_BLOCK_MAX,
						    (u8 *)&lcd->eeprom + i);
		mutex_unlock(&lcd->mutex);
		if (ret < 0) {
			dev_err(dev, "error reading from device at %02x\n", i);
			return ret;
		}
	}

	/*
	 * Check configuration checksum: CRC32 over everything preceding the
	 * trailing checksum word.  Derive the length from the packed struct
	 * layout instead of hard-coding 252, so it cannot silently go stale
	 * if the layout ever changes.
	 */
	checksum = ~crc32(~0, (u8 *)&lcd->eeprom,
			  sizeof(lcd->eeprom) - sizeof(lcd->eeprom.checksum));
	if (checksum != lcd->eeprom.checksum) {
		dev_err(dev, "configuration checksum does not match!\n");
		return -EINVAL;
	}

	/* Check magic header */
	if (lcd->eeprom.header != LCD_OLINUXINO_HEADER_MAGIC) {
		dev_err(dev, "magic header does not match\n");
		return -EINVAL;
	}

	dev_info(dev, "Detected %s, Rev. %s, Serial: %08x\n",
		 lcd->eeprom.info.name,
		 lcd->eeprom.revision,
		 lcd->eeprom.serial);

	/*
	 * The eeprom can hold up to 4 modes.
	 * If the stored value is bigger, overwrite it.
	 */
	if (lcd->eeprom.num_modes > 4) {
		dev_warn(dev, "invalid number of modes, falling back to 4\n");
		lcd->eeprom.num_modes = 4;
	}

	lcd->enabled = false;
	lcd->prepared = false;

	lcd->supply = devm_regulator_get(dev, "power");
	if (IS_ERR(lcd->supply))
		return PTR_ERR(lcd->supply);

	lcd->enable_gpio = devm_gpiod_get(dev, "enable", GPIOD_OUT_LOW);
	if (IS_ERR(lcd->enable_gpio))
		return PTR_ERR(lcd->enable_gpio);

	drm_panel_init(&lcd->panel, dev, &lcd_olinuxino_funcs,
		       DRM_MODE_CONNECTOR_DPI);

	ret = drm_panel_of_backlight(&lcd->panel);
	if (ret)
		return ret;

	drm_panel_add(&lcd->panel);

	return 0;
}
/*
 * I2C remove: unregister the panel, then power it down.
 * NOTE(review): the panel is removed before disable/unprepare; presumably
 * intentional so no new users can attach while powering down — confirm
 * against drm_panel lifecycle expectations.
 */
static void lcd_olinuxino_remove(struct i2c_client *client)
{
	struct lcd_olinuxino *panel = i2c_get_clientdata(client);

	drm_panel_remove(&panel->panel);

	drm_panel_disable(&panel->panel);
	drm_panel_unprepare(&panel->panel);
}
/* Device tree match table. */
static const struct of_device_id lcd_olinuxino_of_ids[] = {
	{ .compatible = "olimex,lcd-olinuxino" },
	{ }
};
MODULE_DEVICE_TABLE(of, lcd_olinuxino_of_ids);

/* I2C driver glue. */
static struct i2c_driver lcd_olinuxino_driver = {
	.driver = {
		.name = "lcd_olinuxino",
		.of_match_table = lcd_olinuxino_of_ids,
	},
	.probe = lcd_olinuxino_probe,
	.remove = lcd_olinuxino_remove,
};

module_i2c_driver(lcd_olinuxino_driver);

MODULE_AUTHOR("Stefan Mavrodiev <[email protected]>");
MODULE_DESCRIPTION("LCD-OLinuXino driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c |
/*
* Copyright © 2016-2017 Broadcom
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Portions of this file (derived from panel-simple.c) are:
*
* Copyright (C) 2013, NVIDIA Corporation. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sub license,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*
* Raspberry Pi 7" touchscreen panel driver.
*
* The 7" touchscreen consists of a DPI LCD panel, a Toshiba
* TC358762XBG DSI-DPI bridge, and an I2C-connected Atmel ATTINY88-MUR
* controlling power management, the LCD PWM, and initial register
* setup of the Tohsiba.
*
* This driver controls the TC358762 and ATTINY88, presenting a DSI
* device with a drm_panel.
*/
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/media-bus-format.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_graph.h>
#include <linux/pm.h>
#include <drm/drm_crtc.h>
#include <drm/drm_device.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_panel.h>
#define RPI_DSI_DRIVER_NAME "rpi-ts-dsi"
/* I2C registers of the Atmel microcontroller. */
/*
 * I2C register map of the Atmel microcontroller.  Values after REG_ID
 * are sequential, starting at 0x81.
 */
enum REG_ADDR {
	REG_ID = 0x80,
	REG_PORTA, /* BIT(2) for horizontal flip, BIT(3) for vertical flip */
	REG_PORTB, /* bit 0 reads back nPWRDWN state, see prepare() */
	REG_PORTC,
	REG_PORTD,
	REG_POWERON,
	REG_PWM, /* backlight PWM duty cycle, 0-255 */
	REG_DDRA,
	REG_DDRB,
	REG_DDRC,
	REG_DDRD,
	REG_TEST,
	REG_WR_ADDRL,
	REG_WR_ADDRH,
	REG_READH,
	REG_READL,
	REG_WRITEH,
	REG_WRITEL,
	REG_ID2,
};
/* DSI D-PHY Layer Registers */
#define D0W_DPHYCONTTX 0x0004
#define CLW_DPHYCONTRX 0x0020
#define D0W_DPHYCONTRX 0x0024
#define D1W_DPHYCONTRX 0x0028
#define COM_DPHYCONTRX 0x0038
#define CLW_CNTRL 0x0040
#define D0W_CNTRL 0x0044
#define D1W_CNTRL 0x0048
#define DFTMODE_CNTRL 0x0054
/* DSI PPI Layer Registers */
#define PPI_STARTPPI 0x0104
#define PPI_BUSYPPI 0x0108
#define PPI_LINEINITCNT 0x0110
#define PPI_LPTXTIMECNT 0x0114
#define PPI_CLS_ATMR 0x0140
#define PPI_D0S_ATMR 0x0144
#define PPI_D1S_ATMR 0x0148
#define PPI_D0S_CLRSIPOCOUNT 0x0164
#define PPI_D1S_CLRSIPOCOUNT 0x0168
#define CLS_PRE 0x0180
#define D0S_PRE 0x0184
#define D1S_PRE 0x0188
#define CLS_PREP 0x01A0
#define D0S_PREP 0x01A4
#define D1S_PREP 0x01A8
#define CLS_ZERO 0x01C0
#define D0S_ZERO 0x01C4
#define D1S_ZERO 0x01C8
#define PPI_CLRFLG 0x01E0
#define PPI_CLRSIPO 0x01E4
#define HSTIMEOUT 0x01F0
#define HSTIMEOUTENABLE 0x01F4
/* DSI Protocol Layer Registers */
#define DSI_STARTDSI 0x0204
#define DSI_BUSYDSI 0x0208
#define DSI_LANEENABLE 0x0210
# define DSI_LANEENABLE_CLOCK BIT(0)
# define DSI_LANEENABLE_D0 BIT(1)
# define DSI_LANEENABLE_D1 BIT(2)
#define DSI_LANESTATUS0 0x0214
#define DSI_LANESTATUS1 0x0218
#define DSI_INTSTATUS 0x0220
#define DSI_INTMASK 0x0224
#define DSI_INTCLR 0x0228
#define DSI_LPTXTO 0x0230
#define DSI_MODE 0x0260
#define DSI_PAYLOAD0 0x0268
#define DSI_PAYLOAD1 0x026C
#define DSI_SHORTPKTDAT 0x0270
#define DSI_SHORTPKTREQ 0x0274
#define DSI_BTASTA 0x0278
#define DSI_BTACLR 0x027C
/* DSI General Registers */
#define DSIERRCNT 0x0300
#define DSISIGMOD 0x0304
/* DSI Application Layer Registers */
#define APLCTRL 0x0400
#define APLSTAT 0x0404
#define APLERR 0x0408
#define PWRMOD 0x040C
#define RDPKTLN 0x0410
#define PXLFMT 0x0414
#define MEMWRCMD 0x0418
/* LCDC/DPI Host Registers */
#define LCDCTRL 0x0420
#define HSR 0x0424
#define HDISPR 0x0428
#define VSR 0x042C
#define VDISPR 0x0430
#define VFUEN 0x0434
/* DBI-B Host Registers */
#define DBIBCTRL 0x0440
/* SPI Master Registers */
#define SPICMR 0x0450
#define SPITCR 0x0454
/* System Controller Registers */
#define SYSSTAT 0x0460
#define SYSCTRL 0x0464
#define SYSPLL1 0x0468
#define SYSPLL2 0x046C
#define SYSPLL3 0x0470
#define SYSPMCTRL 0x047C
/* GPIO Registers */
#define GPIOC 0x0480
#define GPIOO 0x0484
#define GPIOI 0x0488
/* I2C Registers */
#define I2CCLKCTRL 0x0490
/* Chip/Rev Registers */
#define IDREG 0x04A0
/* Debug Registers */
#define WCMDQUEUE 0x0500
#define RCMDQUEUE 0x0504
/* Per-device state: the panel plus its two control interfaces. */
struct rpi_touchscreen {
	struct drm_panel base;
	struct mipi_dsi_device *dsi;	/* TC358762 bridge, via DSI */
	struct i2c_client *i2c;		/* Atmel power/backlight controller */
};

static const struct drm_display_mode rpi_touchscreen_modes[] = {
	{
		/* Modeline comes from the Raspberry Pi firmware, with HFP=1
		 * plugged in and clock re-computed from that.
		 */
		.clock = 25979400 / 1000,
		.hdisplay = 800,
		.hsync_start = 800 + 1,
		.hsync_end = 800 + 1 + 2,
		.htotal = 800 + 1 + 2 + 46,
		.vdisplay = 480,
		.vsync_start = 480 + 7,
		.vsync_end = 480 + 7 + 2,
		.vtotal = 480 + 7 + 2 + 21,
	},
};

/* Resolve the driver state embedding the given drm_panel. */
static struct rpi_touchscreen *panel_to_ts(struct drm_panel *panel)
{
	return container_of(panel, struct rpi_touchscreen, base);
}

/* Read one byte from the Atmel; returns the value or a negative errno. */
static int rpi_touchscreen_i2c_read(struct rpi_touchscreen *ts, u8 reg)
{
	return i2c_smbus_read_byte_data(ts->i2c, reg);
}
/*
 * Write one byte to the Atmel.  Failures are logged but not propagated;
 * callers have no recovery path for a dead controller.
 */
static void rpi_touchscreen_i2c_write(struct rpi_touchscreen *ts,
				      u8 reg, u8 val)
{
	int ret;

	ret = i2c_smbus_write_byte_data(ts->i2c, reg, val);
	if (ret)
		dev_err(&ts->i2c->dev, "I2C write failed: %d\n", ret);
}
/*
 * Write a 32-bit value to a TC358762 register over DSI generic writes.
 * The payload is the little-endian 16-bit register address followed by
 * the little-endian 32-bit value.
 *
 * Returns 0 on success or a negative errno.  (Previously the result of
 * mipi_dsi_generic_write() was silently discarded and 0 was always
 * returned; existing callers that ignore the return are unaffected.)
 */
static int rpi_touchscreen_write(struct rpi_touchscreen *ts, u16 reg, u32 val)
{
	ssize_t ret;
	u8 msg[] = {
		reg,
		reg >> 8,
		val,
		val >> 8,
		val >> 16,
		val >> 24,
	};

	ret = mipi_dsi_generic_write(ts->dsi, msg, sizeof(msg));

	return ret < 0 ? (int)ret : 0;
}
/* drm_panel .disable hook: backlight off, then cut panel power. */
static int rpi_touchscreen_disable(struct drm_panel *panel)
{
	struct rpi_touchscreen *ts = panel_to_ts(panel);

	rpi_touchscreen_i2c_write(ts, REG_PWM, 0);

	rpi_touchscreen_i2c_write(ts, REG_POWERON, 0);
	udelay(1);

	return 0;
}

/* Shared no-op used for the .unprepare hook. */
static int rpi_touchscreen_noop(struct drm_panel *panel)
{
	return 0;
}
/*
 * drm_panel .prepare hook: power the panel via the Atmel, then bring up
 * the TC358762 DSI-DPI bridge.  The register write order and the two
 * 100 ms settle delays follow the bring-up sequence used by the closed
 * firmware; do not reorder.
 */
static int rpi_touchscreen_prepare(struct drm_panel *panel)
{
	struct rpi_touchscreen *ts = panel_to_ts(panel);
	int i;

	rpi_touchscreen_i2c_write(ts, REG_POWERON, 1);
	/* Wait for nPWRDWN to go low to indicate poweron is done. */
	for (i = 0; i < 100; i++) {
		if (rpi_touchscreen_i2c_read(ts, REG_PORTB) & 1)
			break;
	}

	/* Only the clock lane and data lane 0 are wired up. */
	rpi_touchscreen_write(ts, DSI_LANEENABLE,
			      DSI_LANEENABLE_CLOCK |
			      DSI_LANEENABLE_D0);
	rpi_touchscreen_write(ts, PPI_D0S_CLRSIPOCOUNT, 0x05);
	rpi_touchscreen_write(ts, PPI_D1S_CLRSIPOCOUNT, 0x05);
	rpi_touchscreen_write(ts, PPI_D0S_ATMR, 0x00);
	rpi_touchscreen_write(ts, PPI_D1S_ATMR, 0x00);
	rpi_touchscreen_write(ts, PPI_LPTXTIMECNT, 0x03);

	rpi_touchscreen_write(ts, SPICMR, 0x00);
	rpi_touchscreen_write(ts, LCDCTRL, 0x00100150);
	rpi_touchscreen_write(ts, SYSCTRL, 0x040f);
	msleep(100);

	/* Start the PPI and DSI protocol layers. */
	rpi_touchscreen_write(ts, PPI_STARTPPI, 0x01);
	rpi_touchscreen_write(ts, DSI_STARTDSI, 0x01);
	msleep(100);

	return 0;
}
/* drm_panel .enable hook: backlight on, set default flip orientation. */
static int rpi_touchscreen_enable(struct drm_panel *panel)
{
	struct rpi_touchscreen *ts = panel_to_ts(panel);

	/* Turn on the backlight. */
	rpi_touchscreen_i2c_write(ts, REG_PWM, 255);

	/* Default to the same orientation as the closed source
	 * firmware used for the panel.  Runtime rotation
	 * configuration will be supported using VC4's plane
	 * orientation bits.
	 */
	rpi_touchscreen_i2c_write(ts, REG_PORTA, BIT(2));

	return 0;
}
/*
 * drm_panel .get_modes hook: register the fixed firmware-derived mode
 * list and static display info.  Returns the number of modes added.
 */
static int rpi_touchscreen_get_modes(struct drm_panel *panel,
				     struct drm_connector *connector)
{
	unsigned int i, num = 0;
	static const u32 bus_format = MEDIA_BUS_FMT_RGB888_1X24;

	for (i = 0; i < ARRAY_SIZE(rpi_touchscreen_modes); i++) {
		const struct drm_display_mode *m = &rpi_touchscreen_modes[i];
		struct drm_display_mode *mode;

		mode = drm_mode_duplicate(connector->dev, m);
		if (!mode) {
			dev_err(panel->dev, "failed to add mode %ux%u@%u\n",
				m->hdisplay, m->vdisplay,
				drm_mode_vrefresh(m));
			continue;
		}

		mode->type |= DRM_MODE_TYPE_DRIVER;

		/* The first (and currently only) mode is preferred. */
		if (i == 0)
			mode->type |= DRM_MODE_TYPE_PREFERRED;

		drm_mode_set_name(mode);

		drm_mode_probed_add(connector, mode);
		num++;
	}

	connector->display_info.bpc = 8;
	connector->display_info.width_mm = 154;
	connector->display_info.height_mm = 86;
	drm_display_info_set_bus_formats(&connector->display_info,
					 &bus_format, 1);

	return num;
}
/* drm_panel operations; .unprepare intentionally does nothing. */
static const struct drm_panel_funcs rpi_touchscreen_funcs = {
	.disable = rpi_touchscreen_disable,
	.unprepare = rpi_touchscreen_noop,
	.prepare = rpi_touchscreen_prepare,
	.enable = rpi_touchscreen_enable,
	.get_modes = rpi_touchscreen_get_modes,
};
/*
 * I2C probe: verify the Atmel firmware revision, locate the DSI host
 * through the OF graph, register a child DSI device on it, and add the
 * drm_panel.  Returns 0 on success, -EPROBE_DEFER if the DSI host has
 * not probed yet, or a negative errno.
 */
static int rpi_touchscreen_probe(struct i2c_client *i2c)
{
	struct device *dev = &i2c->dev;
	struct rpi_touchscreen *ts;
	struct device_node *endpoint, *dsi_host_node;
	struct mipi_dsi_host *host;
	int ver;
	struct mipi_dsi_device_info info = {
		.type = RPI_DSI_DRIVER_NAME,
		.channel = 0,
		.node = NULL,
	};

	ts = devm_kzalloc(dev, sizeof(*ts), GFP_KERNEL);
	if (!ts)
		return -ENOMEM;

	i2c_set_clientdata(i2c, ts);

	ts->i2c = i2c;

	/* Sanity-check that we are really talking to the Atmel. */
	ver = rpi_touchscreen_i2c_read(ts, REG_ID);
	if (ver < 0) {
		dev_err(dev, "Atmel I2C read failed: %d\n", ver);
		return -ENODEV;
	}

	switch (ver) {
	case 0xde: /* ver 1 */
	case 0xc3: /* ver 2 */
		break;
	default:
		dev_err(dev, "Unknown Atmel firmware revision: 0x%02x\n", ver);
		return -ENODEV;
	}

	/* Turn off at boot, so we can cleanly sequence powering on. */
	rpi_touchscreen_i2c_write(ts, REG_POWERON, 0);

	/* Look up the DSI host.  It needs to probe before we do. */
	endpoint = of_graph_get_next_endpoint(dev->of_node, NULL);
	if (!endpoint)
		return -ENODEV;

	dsi_host_node = of_graph_get_remote_port_parent(endpoint);
	if (!dsi_host_node)
		goto error;

	host = of_find_mipi_dsi_host_by_node(dsi_host_node);
	of_node_put(dsi_host_node);
	if (!host) {
		/* Host not registered yet: drop the endpoint ref and defer. */
		of_node_put(endpoint);
		return -EPROBE_DEFER;
	}

	info.node = of_graph_get_remote_port(endpoint);
	if (!info.node)
		goto error;

	of_node_put(endpoint);

	ts->dsi = mipi_dsi_device_register_full(host, &info);
	if (IS_ERR(ts->dsi)) {
		dev_err(dev, "DSI device registration failed: %ld\n",
			PTR_ERR(ts->dsi));
		return PTR_ERR(ts->dsi);
	}

	drm_panel_init(&ts->base, dev, &rpi_touchscreen_funcs,
		       DRM_MODE_CONNECTOR_DSI);

	/* This appears last, as it's what will unblock the DSI host
	 * driver's component bind function.
	 */
	drm_panel_add(&ts->base);

	return 0;

error:
	of_node_put(endpoint);
	return -ENODEV;
}
/* I2C remove: detach and unregister the child DSI device and the panel. */
static void rpi_touchscreen_remove(struct i2c_client *i2c)
{
	struct rpi_touchscreen *ts = i2c_get_clientdata(i2c);

	mipi_dsi_detach(ts->dsi);

	drm_panel_remove(&ts->base);

	mipi_dsi_device_unregister(ts->dsi);
}
/*
 * Probe for the child DSI device registered in rpi_touchscreen_probe():
 * configure the link (video mode, RGB888, one data lane) and attach.
 */
static int rpi_touchscreen_dsi_probe(struct mipi_dsi_device *dsi)
{
	int ret;

	dsi->mode_flags = (MIPI_DSI_MODE_VIDEO |
			   MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
			   MIPI_DSI_MODE_LPM);
	dsi->format = MIPI_DSI_FMT_RGB888;
	dsi->lanes = 1;

	ret = mipi_dsi_attach(dsi);
	if (ret)
		dev_err(&dsi->dev, "failed to attach dsi to host: %d\n", ret);

	return ret;
}
/* DSI sub-driver for the child device (no OF match; bound by name). */
static struct mipi_dsi_driver rpi_touchscreen_dsi_driver = {
	.driver.name = RPI_DSI_DRIVER_NAME,
	.probe = rpi_touchscreen_dsi_probe,
};

/* Device tree match table for the I2C (Atmel) side. */
static const struct of_device_id rpi_touchscreen_of_ids[] = {
	{ .compatible = "raspberrypi,7inch-touchscreen-panel" },
	{ } /* sentinel */
};
MODULE_DEVICE_TABLE(of, rpi_touchscreen_of_ids);

static struct i2c_driver rpi_touchscreen_driver = {
	.driver = {
		.name = "rpi_touchscreen",
		.of_match_table = rpi_touchscreen_of_ids,
	},
	.probe = rpi_touchscreen_probe,
	.remove = rpi_touchscreen_remove,
};
/*
 * Module init: register the DSI sub-driver, then the I2C driver.
 *
 * Previously the return value of mipi_dsi_driver_register() was ignored
 * and a failing i2c_add_driver() left the DSI driver registered.  Now
 * both failures are handled and the DSI registration is unwound if the
 * I2C registration fails.
 */
static int __init rpi_touchscreen_init(void)
{
	int ret;

	ret = mipi_dsi_driver_register(&rpi_touchscreen_dsi_driver);
	if (ret)
		return ret;

	ret = i2c_add_driver(&rpi_touchscreen_driver);
	if (ret)
		mipi_dsi_driver_unregister(&rpi_touchscreen_dsi_driver);

	return ret;
}
module_init(rpi_touchscreen_init);
/* Module exit: unregister in reverse order of registration. */
static void __exit rpi_touchscreen_exit(void)
{
	i2c_del_driver(&rpi_touchscreen_driver);
	mipi_dsi_driver_unregister(&rpi_touchscreen_dsi_driver);
}
module_exit(rpi_touchscreen_exit);

MODULE_AUTHOR("Eric Anholt <[email protected]>");
MODULE_DESCRIPTION("Raspberry Pi 7-inch touchscreen driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Driver for panels based on Himax HX8394 controller, such as:
*
* - HannStar HSD060BHW4 5.99" MIPI-DSI panel
*
* Copyright (C) 2021 Kamil Trzciński
*
* Based on drivers/gpu/drm/panel/panel-sitronix-st7703.c
* Copyright (C) Purism SPC 2019
*/
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/media-bus-format.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/regulator/consumer.h>
#include <video/mipi_display.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_modes.h>
#include <drm/drm_panel.h>
#define DRV_NAME "panel-himax-hx8394"
/* Manufacturer specific commands sent via DSI, listed in HX8394-F datasheet */
#define HX8394_CMD_SETSEQUENCE 0xb0
#define HX8394_CMD_SETPOWER 0xb1
#define HX8394_CMD_SETDISP 0xb2
#define HX8394_CMD_SETCYC 0xb4
#define HX8394_CMD_SETVCOM 0xb6
#define HX8394_CMD_SETTE 0xb7
#define HX8394_CMD_SETSENSOR 0xb8
#define HX8394_CMD_SETEXTC 0xb9
#define HX8394_CMD_SETMIPI 0xba
#define HX8394_CMD_SETOTP 0xbb
#define HX8394_CMD_SETREGBANK 0xbd
#define HX8394_CMD_UNKNOWN1 0xc0
#define HX8394_CMD_SETDGCLUT 0xc1
#define HX8394_CMD_SETID 0xc3
#define HX8394_CMD_SETDDB 0xc4
#define HX8394_CMD_UNKNOWN2 0xc6
#define HX8394_CMD_SETCABC 0xc9
#define HX8394_CMD_SETCABCGAIN 0xca
#define HX8394_CMD_SETPANEL 0xcc
#define HX8394_CMD_SETOFFSET 0xd2
#define HX8394_CMD_SETGIP0 0xd3
#define HX8394_CMD_UNKNOWN3 0xd4
#define HX8394_CMD_SETGIP1 0xd5
#define HX8394_CMD_SETGIP2 0xd6
#define HX8394_CMD_SETGPO 0xd6
#define HX8394_CMD_SETSCALING 0xdd
#define HX8394_CMD_SETIDLE 0xdf
#define HX8394_CMD_SETGAMMA 0xe0
#define HX8394_CMD_SETCHEMODE_DYN 0xe4
#define HX8394_CMD_SETCHE 0xe5
#define HX8394_CMD_SETCESEL 0xe6
#define HX8394_CMD_SET_SP_CMD 0xe9
#define HX8394_CMD_SETREADINDEX 0xfe
#define HX8394_CMD_GETSPIREAD 0xff
/* Per-device driver state. */
struct hx8394 {
	struct device *dev;
	struct drm_panel panel;
	struct gpio_desc *reset_gpio;	/* active-low panel reset */
	struct regulator *vcc;
	struct regulator *iovcc;
	bool prepared;	/* rails up and reset deasserted */

	const struct hx8394_panel_desc *desc;
};

/* Static, per-compatible panel description (see hx8394_of_match). */
struct hx8394_panel_desc {
	const struct drm_display_mode *mode;
	unsigned int lanes;
	unsigned long mode_flags;
	enum mipi_dsi_pixel_format format;
	int (*init_sequence)(struct hx8394 *ctx);	/* panel-specific DSI init */
};

/* Resolve the driver state embedding the given drm_panel. */
static inline struct hx8394 *panel_to_hx8394(struct drm_panel *panel)
{
	return container_of(panel, struct hx8394, panel);
}
/*
 * Init sequence for the HannStar HSD060BHW4 panel: unlock the extended
 * command set, then program power, MIPI, display, GIP and gamma
 * registers.  Parameter bytes are vendor-provided; always returns 0
 * (mipi_dsi_dcs_write_seq() failures are not checked here).
 */
static int hsd060bhw4_init_sequence(struct hx8394 *ctx)
{
	struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);

	/* 5.19.8 SETEXTC: Set extension command (B9h) */
	mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETEXTC,
			       0xff, 0x83, 0x94);

	/* 5.19.2 SETPOWER: Set power (B1h) */
	mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETPOWER,
			       0x48, 0x11, 0x71, 0x09, 0x32, 0x24, 0x71, 0x31, 0x55, 0x30);

	/* 5.19.9 SETMIPI: Set MIPI control (BAh) */
	mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETMIPI,
			       0x63, 0x03, 0x68, 0x6b, 0xb2, 0xc0);

	/* 5.19.3 SETDISP: Set display related register (B2h) */
	mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETDISP,
			       0x00, 0x80, 0x78, 0x0c, 0x07);

	/* 5.19.4 SETCYC: Set display waveform cycles (B4h) */
	mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETCYC,
			       0x12, 0x63, 0x12, 0x63, 0x12, 0x63, 0x01, 0x0c, 0x7c, 0x55,
			       0x00, 0x3f, 0x12, 0x6b, 0x12, 0x6b, 0x12, 0x6b, 0x01, 0x0c,
			       0x7c);

	/* 5.19.19 SETGIP0: Set GIP Option0 (D3h) */
	mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETGIP0,
			       0x00, 0x00, 0x00, 0x00, 0x3c, 0x1c, 0x00, 0x00, 0x32, 0x10,
			       0x09, 0x00, 0x09, 0x32, 0x15, 0xad, 0x05, 0xad, 0x32, 0x00,
			       0x00, 0x00, 0x00, 0x37, 0x03, 0x0b, 0x0b, 0x37, 0x00, 0x00,
			       0x00, 0x0c, 0x40);

	/* 5.19.20 Set GIP Option1 (D5h) */
	mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETGIP1,
			       0x19, 0x19, 0x18, 0x18, 0x1b, 0x1b, 0x1a, 0x1a, 0x00, 0x01,
			       0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x20, 0x21, 0x18, 0x18,
			       0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
			       0x24, 0x25, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
			       0x18, 0x18, 0x18, 0x18, 0x18, 0x18);

	/* 5.19.21 Set GIP Option2 (D6h) */
	mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETGIP2,
			       0x18, 0x18, 0x19, 0x19, 0x1b, 0x1b, 0x1a, 0x1a, 0x07, 0x06,
			       0x05, 0x04, 0x03, 0x02, 0x01, 0x00, 0x25, 0x24, 0x18, 0x18,
			       0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
			       0x21, 0x20, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
			       0x18, 0x18, 0x18, 0x18, 0x18, 0x18);

	/* 5.19.25 SETGAMMA: Set gamma curve related setting (E0h) */
	mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETGAMMA,
			       0x00, 0x04, 0x0c, 0x12, 0x14, 0x18, 0x1a, 0x18, 0x31, 0x3f,
			       0x4d, 0x4c, 0x54, 0x65, 0x6b, 0x70, 0x7f, 0x82, 0x7e, 0x8a,
			       0x99, 0x4a, 0x48, 0x49, 0x4b, 0x4a, 0x4c, 0x4b, 0x7f, 0x00,
			       0x04, 0x0c, 0x11, 0x13, 0x17, 0x1a, 0x18, 0x31,
			       0x3f, 0x4d, 0x4c, 0x54, 0x65, 0x6b, 0x70, 0x7f,
			       0x82, 0x7e, 0x8a, 0x99, 0x4a, 0x48, 0x49, 0x4b,
			       0x4a, 0x4c, 0x4b, 0x7f);

	/* 5.19.17 SETPANEL (CCh) */
	mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETPANEL,
			       0x0b);

	/* Unknown command, not listed in the HX8394-F datasheet */
	mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN1,
			       0x1f, 0x31);

	/* 5.19.5 SETVCOM: Set VCOM voltage (B6h) */
	mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETVCOM,
			       0x7d, 0x7d);

	/* Unknown command, not listed in the HX8394-F datasheet */
	mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN3,
			       0x02);

	/* 5.19.11 Set register bank (BDh) */
	mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETREGBANK,
			       0x01);

	/* 5.19.2 SETPOWER: Set power (B1h) */
	mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETPOWER,
			       0x00);

	/* 5.19.11 Set register bank (BDh) */
	mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETREGBANK,
			       0x00);

	/* Unknown command, not listed in the HX8394-F datasheet */
	mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN3,
			       0xed);

	return 0;
}
/* 720x1440 portrait mode for the HannStar HSD060BHW4. */
static const struct drm_display_mode hsd060bhw4_mode = {
	.hdisplay    = 720,
	.hsync_start = 720 + 40,
	.hsync_end   = 720 + 40 + 46,
	.htotal	     = 720 + 40 + 46 + 40,
	.vdisplay    = 1440,
	.vsync_start = 1440 + 9,
	.vsync_end   = 1440 + 9 + 7,
	.vtotal	     = 1440 + 9 + 7 + 7,
	.clock	     = 74250,
	.flags	     = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
	.width_mm    = 68,
	.height_mm   = 136,
};

/* Panel description matched by "hannstar,hsd060bhw4". */
static const struct hx8394_panel_desc hsd060bhw4_desc = {
	.mode = &hsd060bhw4_mode,
	.lanes = 4,
	.mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST,
	.format = MIPI_DSI_FMT_RGB888,
	.init_sequence = hsd060bhw4_init_sequence,
};
/*
 * drm_panel .enable hook: run the panel init sequence, exit sleep mode,
 * and turn the display on.  On a display-on failure, attempts an
 * orderly sleep-in before propagating the error.
 */
static int hx8394_enable(struct drm_panel *panel)
{
	struct hx8394 *ctx = panel_to_hx8394(panel);
	struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
	int ret;

	ret = ctx->desc->init_sequence(ctx);
	if (ret) {
		dev_err(ctx->dev, "Panel init sequence failed: %d\n", ret);
		return ret;
	}

	ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
	if (ret) {
		dev_err(ctx->dev, "Failed to exit sleep mode: %d\n", ret);
		return ret;
	}

	/* Panel is operational 120 msec after reset */
	msleep(120);

	ret = mipi_dsi_dcs_set_display_on(dsi);
	if (ret) {
		dev_err(ctx->dev, "Failed to turn on the display: %d\n", ret);
		goto sleep_in;
	}

	return 0;

sleep_in:
	/* This will probably fail, but let's try orderly power off anyway. */
	ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
	if (!ret)
		msleep(50);

	return ret;
}
/* drm_panel .disable hook: put the controller into sleep mode. */
static int hx8394_disable(struct drm_panel *panel)
{
	struct hx8394 *ctx = panel_to_hx8394(panel);
	struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
	int ret;

	ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
	if (ret) {
		dev_err(ctx->dev, "Failed to enter sleep mode: %d\n", ret);
		return ret;
	}

	msleep(50); /* about 3 frames */

	return 0;
}
/*
 * drm_panel .unprepare hook: assert reset, then drop the rails in
 * reverse order of hx8394_prepare().  Idempotent via 'prepared'.
 */
static int hx8394_unprepare(struct drm_panel *panel)
{
	struct hx8394 *ctx = panel_to_hx8394(panel);

	if (ctx->prepared) {
		gpiod_set_value_cansleep(ctx->reset_gpio, 1);

		regulator_disable(ctx->iovcc);
		regulator_disable(ctx->vcc);

		ctx->prepared = false;
	}

	return 0;
}
/*
 * drm_panel .prepare hook: with reset asserted, bring up vcc then
 * iovcc, release reset and wait for the panel to come out of reset.
 * Unwinds vcc (with reset re-asserted) if iovcc fails.
 */
static int hx8394_prepare(struct drm_panel *panel)
{
	struct hx8394 *ctx = panel_to_hx8394(panel);
	int ret;

	if (ctx->prepared)
		return 0;

	gpiod_set_value_cansleep(ctx->reset_gpio, 1);

	ret = regulator_enable(ctx->vcc);
	if (ret) {
		dev_err(ctx->dev, "Failed to enable vcc supply: %d\n", ret);
		return ret;
	}

	ret = regulator_enable(ctx->iovcc);
	if (ret) {
		dev_err(ctx->dev, "Failed to enable iovcc supply: %d\n", ret);
		goto disable_vcc;
	}

	gpiod_set_value_cansleep(ctx->reset_gpio, 0);
	msleep(180);

	ctx->prepared = true;

	return 0;

disable_vcc:
	gpiod_set_value_cansleep(ctx->reset_gpio, 1);
	regulator_disable(ctx->vcc);
	return ret;
}
/*
 * drm_panel .get_modes hook: register the single fixed mode from the
 * panel description.  Returns 1 on success, -ENOMEM on failure.
 */
static int hx8394_get_modes(struct drm_panel *panel,
			    struct drm_connector *connector)
{
	struct hx8394 *ctx = panel_to_hx8394(panel);
	struct drm_display_mode *mode;

	mode = drm_mode_duplicate(connector->dev, ctx->desc->mode);
	if (!mode) {
		dev_err(ctx->dev, "Failed to add mode %ux%u@%u\n",
			ctx->desc->mode->hdisplay, ctx->desc->mode->vdisplay,
			drm_mode_vrefresh(ctx->desc->mode));
		return -ENOMEM;
	}

	drm_mode_set_name(mode);

	mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
	connector->display_info.width_mm = mode->width_mm;
	connector->display_info.height_mm = mode->height_mm;
	drm_mode_probed_add(connector, mode);

	return 1;
}
/* drm_panel operations for HX8394-based panels. */
static const struct drm_panel_funcs hx8394_drm_funcs = {
	.disable = hx8394_disable,
	.unprepare = hx8394_unprepare,
	.prepare = hx8394_prepare,
	.enable = hx8394_enable,
	.get_modes = hx8394_get_modes,
};
/*
 * DSI probe: acquire reset GPIO and supplies, configure the DSI link
 * from the matched panel description, and register the drm_panel
 * before attaching to the DSI host.  Returns 0 or a negative errno.
 */
static int hx8394_probe(struct mipi_dsi_device *dsi)
{
	struct device *dev = &dsi->dev;
	struct hx8394 *ctx;
	int ret;

	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	/* Request reset asserted so the panel stays off until prepare(). */
	ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
	if (IS_ERR(ctx->reset_gpio))
		return dev_err_probe(dev, PTR_ERR(ctx->reset_gpio),
				     "Failed to get reset gpio\n");

	mipi_dsi_set_drvdata(dsi, ctx);

	ctx->dev = dev;
	ctx->desc = of_device_get_match_data(dev);

	dsi->mode_flags = ctx->desc->mode_flags;
	dsi->format = ctx->desc->format;
	dsi->lanes = ctx->desc->lanes;

	ctx->vcc = devm_regulator_get(dev, "vcc");
	if (IS_ERR(ctx->vcc))
		return dev_err_probe(dev, PTR_ERR(ctx->vcc),
				     "Failed to request vcc regulator\n");

	ctx->iovcc = devm_regulator_get(dev, "iovcc");
	if (IS_ERR(ctx->iovcc))
		return dev_err_probe(dev, PTR_ERR(ctx->iovcc),
				     "Failed to request iovcc regulator\n");

	drm_panel_init(&ctx->panel, dev, &hx8394_drm_funcs,
		       DRM_MODE_CONNECTOR_DSI);

	ret = drm_panel_of_backlight(&ctx->panel);
	if (ret)
		return ret;

	drm_panel_add(&ctx->panel);

	ret = mipi_dsi_attach(dsi);
	if (ret < 0) {
		/* Attach failed: undo the panel registration. */
		dev_err_probe(dev, ret, "mipi_dsi_attach failed\n");
		drm_panel_remove(&ctx->panel);
		return ret;
	}

	dev_dbg(dev, "%ux%u@%u %ubpp dsi %udl - ready\n",
		ctx->desc->mode->hdisplay, ctx->desc->mode->vdisplay,
		drm_mode_vrefresh(ctx->desc->mode),
		mipi_dsi_pixel_format_to_bpp(dsi->format), dsi->lanes);

	return 0;
}
/* Shutdown: disable and unprepare the panel; errors are only logged. */
static void hx8394_shutdown(struct mipi_dsi_device *dsi)
{
	struct hx8394 *ctx = mipi_dsi_get_drvdata(dsi);
	int ret;

	ret = drm_panel_disable(&ctx->panel);
	if (ret < 0)
		dev_err(&dsi->dev, "Failed to disable panel: %d\n", ret);

	ret = drm_panel_unprepare(&ctx->panel);
	if (ret < 0)
		dev_err(&dsi->dev, "Failed to unprepare panel: %d\n", ret);
}
/* Remove: power down via shutdown, detach from the host, drop the panel. */
static void hx8394_remove(struct mipi_dsi_device *dsi)
{
	struct hx8394 *ctx = mipi_dsi_get_drvdata(dsi);
	int ret;

	hx8394_shutdown(dsi);

	ret = mipi_dsi_detach(dsi);
	if (ret < 0)
		dev_err(&dsi->dev, "Failed to detach from DSI host: %d\n", ret);

	drm_panel_remove(&ctx->panel);
}
/* Device tree match table; .data selects the panel description. */
static const struct of_device_id hx8394_of_match[] = {
	{ .compatible = "hannstar,hsd060bhw4", .data = &hsd060bhw4_desc },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, hx8394_of_match);

static struct mipi_dsi_driver hx8394_driver = {
	.probe	= hx8394_probe,
	.remove = hx8394_remove,
	.shutdown = hx8394_shutdown,
	.driver = {
		.name = DRV_NAME,
		.of_match_table = hx8394_of_match,
	},
};
module_mipi_dsi_driver(hx8394_driver);

MODULE_AUTHOR("Kamil Trzciński <[email protected]>");
MODULE_DESCRIPTION("DRM driver for Himax HX8394 based MIPI DSI panels");
MODULE_LICENSE("GPL");
| linux-master | drivers/gpu/drm/panel/panel-himax-hx8394.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Novatek NT35510 panel driver
* Copyright (C) 2020 Linus Walleij <[email protected]>
* Based on code by Robert Teather (C) 2012 Samsung
*
* This display driver (and I refer to the physical component NT35510,
* not this Linux kernel software driver) can handle:
* 480x864, 480x854, 480x800, 480x720 and 480x640 pixel displays.
* It has 480x840x24bit SRAM embedded for storing a frame.
* When powered on the display is by default in 480x800 mode.
*
* The actual panels using this component have different names, but
* the code needed to set up and configure the panel will be similar,
* so they should all use the NT35510 driver with appropriate configuration
* per-panel, e.g. for physical size.
*
* This driver is for the DSI interface to panels using the NT35510.
*
* The NT35510 can also use an RGB (DPI) interface combined with an
* I2C or SPI interface for setting up the NT35510. If this is needed
* this panel driver should be refactored to also support that use
* case.
*/
#include <linux/backlight.h>
#include <linux/bitops.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <video/mipi_display.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_modes.h>
#include <drm/drm_panel.h>
#define MCS_CMD_MAUCCTR 0xF0 /* Manufacturer command enable */
#define MCS_CMD_READ_ID1 0xDA
#define MCS_CMD_READ_ID2 0xDB
#define MCS_CMD_READ_ID3 0xDC
#define MCS_CMD_MTP_READ_SETTING 0xF8 /* Uncertain about name */
#define MCS_CMD_MTP_READ_PARAM 0xFF /* Uncertain about name */
/*
* These manufacturer commands are available after we enable manufacturer
* command set (MCS) for page 0.
*/
#define NT35510_P0_DOPCTR 0xB1
#define NT35510_P0_SDHDTCTR 0xB6
#define NT35510_P0_GSEQCTR 0xB7
#define NT35510_P0_SDEQCTR 0xB8
#define NT35510_P0_SDVPCTR 0xBA
#define NT35510_P0_DPFRCTR1 0xBD
#define NT35510_P0_DPFRCTR2 0xBE
#define NT35510_P0_DPFRCTR3 0xBF
#define NT35510_P0_DPMCTR12 0xCC
#define NT35510_P0_DOPCTR_LEN 2
#define NT35510_P0_GSEQCTR_LEN 2
#define NT35510_P0_SDEQCTR_LEN 4
#define NT35510_P0_SDVPCTR_LEN 1
#define NT35510_P0_DPFRCTR1_LEN 5
#define NT35510_P0_DPFRCTR2_LEN 5
#define NT35510_P0_DPFRCTR3_LEN 5
#define NT35510_P0_DPMCTR12_LEN 3
#define NT35510_DOPCTR_0_RAMKP BIT(7) /* Contents kept in sleep */
#define NT35510_DOPCTR_0_DSITE BIT(6) /* Enable TE signal */
#define NT35510_DOPCTR_0_DSIG BIT(5) /* Enable generic read/write */
#define NT35510_DOPCTR_0_DSIM BIT(4) /* Enable video mode on DSI */
#define NT35510_DOPCTR_0_EOTP BIT(3) /* Support EoTP */
#define NT35510_DOPCTR_0_N565 BIT(2) /* RGB or BGR pixel format */
#define NT35510_DOPCTR_1_TW_PWR_SEL BIT(4) /* TE power selector */
#define NT35510_DOPCTR_1_CRGB BIT(3) /* RGB or BGR byte order */
#define NT35510_DOPCTR_1_CTB BIT(2) /* Vertical scanning direction */
#define NT35510_DOPCTR_1_CRL BIT(1) /* Source driver data shift */
#define NT35510_P0_SDVPCTR_PRG BIT(2) /* 0 = normal operation, 1 = VGLO */
#define NT35510_P0_SDVPCTR_AVDD 0 /* source driver output = AVDD */
#define NT35510_P0_SDVPCTR_OFFCOL 1 /* source driver output = off color */
#define NT35510_P0_SDVPCTR_AVSS 2 /* source driver output = AVSS */
#define NT35510_P0_SDVPCTR_HI_Z 3 /* source driver output = High impedance */
/*
* These manufacturer commands are available after we enable manufacturer
* command set (MCS) for page 1.
*/
#define NT35510_P1_SETAVDD 0xB0
#define NT35510_P1_SETAVEE 0xB1
#define NT35510_P1_SETVCL 0xB2
#define NT35510_P1_SETVGH 0xB3
#define NT35510_P1_SETVRGH 0xB4
#define NT35510_P1_SETVGL 0xB5
#define NT35510_P1_BT1CTR 0xB6
#define NT35510_P1_BT2CTR 0xB7
#define NT35510_P1_BT3CTR 0xB8
#define NT35510_P1_BT4CTR 0xB9 /* VGH boosting times/freq */
#define NT35510_P1_BT5CTR 0xBA
#define NT35510_P1_PFMCTR 0xBB
#define NT35510_P1_SETVGP 0xBC
#define NT35510_P1_SETVGN 0xBD
#define NT35510_P1_SETVCMOFF 0xBE
#define NT35510_P1_VGHCTR 0xBF /* VGH output ctrl */
#define NT35510_P1_SET_GAMMA_RED_POS 0xD1
#define NT35510_P1_SET_GAMMA_GREEN_POS 0xD2
#define NT35510_P1_SET_GAMMA_BLUE_POS 0xD3
#define NT35510_P1_SET_GAMMA_RED_NEG 0xD4
#define NT35510_P1_SET_GAMMA_GREEN_NEG 0xD5
#define NT35510_P1_SET_GAMMA_BLUE_NEG 0xD6
/* AVDD and AVEE setting 3 bytes */
#define NT35510_P1_AVDD_LEN 3
#define NT35510_P1_AVEE_LEN 3
#define NT35510_P1_VGH_LEN 3
#define NT35510_P1_VGL_LEN 3
#define NT35510_P1_VGP_LEN 3
#define NT35510_P1_VGN_LEN 3
/* BT1CTR thru BT5CTR setting 3 bytes */
#define NT35510_P1_BT1CTR_LEN 3
#define NT35510_P1_BT2CTR_LEN 3
#define NT35510_P1_BT4CTR_LEN 3
#define NT35510_P1_BT5CTR_LEN 3
/* 52 gamma parameters times two per color: positive and negative */
#define NT35510_P1_GAMMA_LEN 52
/**
 * struct nt35510_config - the display-specific NT35510 configuration
 *
 * All fields in this struct are programmed verbatim into the display
 * controller over DSI during power-on.
 *
 * Some of the settings provide an array of bytes, A, B C which mean:
 * A = normal / idle off mode
 * B = idle on mode
 * C = partial / idle off mode
 *
 * Gamma correction arrays are 10bit numbers, two consecutive bytes
 * makes out one point on the gamma correction curve. The points are
 * not linearly placed along the X axis, we get points 0, 1, 3, 5
 * 7, 11, 15, 23, 31, 47, 63, 95, 127, 128, 160, 192, 208, 224, 232,
 * 240, 244, 248, 250, 252, 254, 255. The voltages tuples form
 * V0, V1, V3 ... V255, with 0x0000 being the lowest voltage and
 * 0x03FF being the highest voltage.
 *
 * Each value must be strictly higher than the previous value forming
 * a rising curve like this:
 *
 * ^
 * |                                                        V255
 * |                                                V254
 * |                        ....
 * |            V5
 * |         V3
 * |      V1
 * |   V0
 * +------------------------------------------->
 *
 * The details about all settings can be found in the NT35510 Application
 * Note.
 */
struct nt35510_config {
	/**
	 * @width_mm: physical panel width [mm]
	 */
	u32 width_mm;
	/**
	 * @height_mm: physical panel height [mm]
	 */
	u32 height_mm;
	/**
	 * @mode: the display mode. This is only relevant outside the panel
	 * in video mode: in command mode this is configuring the internal
	 * timing in the display controller.
	 */
	const struct drm_display_mode mode;
	/**
	 * @avdd: setting for AVDD ranging from 0x00 = 6.5V to 0x14 = 4.5V
	 * in 0.1V steps the default is 0x05 which means 6.0V
	 */
	u8 avdd[NT35510_P1_AVDD_LEN];
	/**
	 * @bt1ctr: setting for boost power control for the AVDD step-up
	 * circuit (1)
	 * bits 0..2 in the lower nibble controls PCK, the booster clock
	 * frequency for the step-up circuit:
	 * 0 = Hsync/32
	 * 1 = Hsync/16
	 * 2 = Hsync/8
	 * 3 = Hsync/4
	 * 4 = Hsync/2
	 * 5 = Hsync
	 * 6 = Hsync x 2
	 * 7 = Hsync x 4
	 * bits 4..6 in the upper nibble controls BTP, the boosting
	 * amplification for the step-up circuit:
	 * 0 = Disable
	 * 1 = 1.5 x VDDB
	 * 2 = 1.66 x VDDB
	 * 3 = 2 x VDDB
	 * 4 = 2.5 x VDDB
	 * 5 = 3 x VDDB
	 * The defaults are 4 and 4 yielding 0x44
	 */
	u8 bt1ctr[NT35510_P1_BT1CTR_LEN];
	/**
	 * @avee: setting for AVEE ranging from 0x00 = -6.5V to 0x14 = -4.5V
	 * in 0.1V steps the default is 0x05 which means -6.0V
	 */
	u8 avee[NT35510_P1_AVEE_LEN];
	/**
	 * @bt2ctr: setting for boost power control for the AVEE step-up
	 * circuit (2)
	 * bits 0..2 in the lower nibble controls NCK, the booster clock
	 * frequency, the values are the same as for PCK in @bt1ctr.
	 * bits 4..5 in the upper nibble controls BTN, the boosting
	 * amplification for the step-up circuit.
	 * 0 = Disable
	 * 1 = -1.5 x VDDB
	 * 2 = -2 x VDDB
	 * 3 = -2.5 x VDDB
	 * 4 = -3 x VDDB
	 * The defaults are 4 and 3 yielding 0x34
	 */
	u8 bt2ctr[NT35510_P1_BT2CTR_LEN];
	/**
	 * @vgh: setting for VGH ranging from 0x00 = 7.0V to 0x0B = 18.0V
	 * in 1V steps, the default is 0x08 which means 15V
	 */
	u8 vgh[NT35510_P1_VGH_LEN];
	/**
	 * @bt4ctr: setting for boost power control for the VGH step-up
	 * circuit (4)
	 * bits 0..2 in the lower nibble controls HCK, the booster clock
	 * frequency, the values are the same as for PCK in @bt1ctr.
	 * bits 4..5 in the upper nibble controls BTH, the boosting
	 * amplification for the step-up circuit.
	 * 0 = AVDD + VDDB
	 * 1 = AVDD - AVEE
	 * 2 = AVDD - AVEE + VDDB
	 * 3 = AVDD x 2 - AVEE
	 * The defaults are 4 and 3 yielding 0x34
	 */
	u8 bt4ctr[NT35510_P1_BT4CTR_LEN];
	/**
	 * @vgl: setting for VGL ranging from 0x00 = -2V to 0x0f = -15V in
	 * 1V steps, the default is 0x08 which means -10V
	 */
	u8 vgl[NT35510_P1_VGL_LEN];
	/**
	 * @bt5ctr: setting for boost power control for the VGL step-up
	 * circuit (5)
	 * bits 0..2 in the lower nibble controls LCK, the booster clock
	 * frequency, the values are the same as for PCK in @bt1ctr.
	 * bits 4..5 in the upper nibble controls BTL, the boosting
	 * amplification for the step-up circuit.
	 * 0 = AVEE + VCL
	 * 1 = AVEE - AVDD
	 * 2 = AVEE + VCL - AVDD
	 * 3 = AVEE x 2 - AVDD
	 * The defaults are 3 and 2 yielding 0x32
	 */
	u8 bt5ctr[NT35510_P1_BT5CTR_LEN];
	/**
	 * @vgp: setting for VGP, the positive gamma divider voltages
	 * VGMP the high voltage and VGSP the low voltage.
	 * The first byte contains bit 8 of VGMP and VGSP in bits 4 and 0
	 * The second byte contains bit 0..7 of VGMP
	 * The third byte contains bit 0..7 of VGSP
	 * VGMP 0x00 = 3.0V .. 0x108 = 6.3V in steps of 12.5mV
	 * VGSP 0x00 = 0V .. 0x111 = 3.7V in steps of 12.5mV
	 */
	u8 vgp[NT35510_P1_VGP_LEN];
	/**
	 * @vgn: setting for VGN, the negative gamma divider voltages,
	 * same layout of bytes as @vgp.
	 */
	u8 vgn[NT35510_P1_VGN_LEN];
	/**
	 * @sdeqctr: Source driver control settings, first byte is
	 * 0 for mode 1 and 1 for mode 2. Mode 1 uses two steps and
	 * mode 2 uses three steps meaning EQS3 is not used in mode
	 * 1. Mode 2 is default. The last three parameters are EQS1, EQS2
	 * and EQS3, setting the rise time for each equalizer step:
	 * 0x00 = 0.0 us to 0x0f = 7.5 us in steps of 0.5us. The default
	 * is 0x07 = 3.5 us.
	 */
	u8 sdeqctr[NT35510_P0_SDEQCTR_LEN];
	/**
	 * @sdvpctr: power/voltage behaviour during vertical porch time
	 */
	u8 sdvpctr;
	/**
	 * @t1: the number of pixel clocks on one scanline, range
	 * 0x100 (258 ticks) .. 0x3FF (1024 ticks) so the value + 1
	 * clock ticks.
	 */
	u16 t1;
	/**
	 * @vbp: vertical back porch toward the PANEL note: not toward
	 * the DSI host; these are separate interfaces, in from DSI host
	 * and out to the panel.
	 */
	u8 vbp;
	/**
	 * @vfp: vertical front porch toward the PANEL.
	 */
	u8 vfp;
	/**
	 * @psel: pixel clock divisor: 0 = 1, 1 = 2, 2 = 4, 3 = 8.
	 */
	u8 psel;
	/**
	 * @dpmctr12: Display timing control 12
	 * Byte 1 bit 4 selects LVGL voltage level: 0 = VGLX, 1 = VGL_REG
	 * Byte 1 bit 1 selects gate signal mode: 0 = non-overlap, 1 = overlap
	 * Byte 1 bit 0 selects output signal control R/L swap, 0 = normal
	 * 1 = swap all O->E, L->R
	 * Byte 2 is CLW delay clock for CK O/E and CKB O/E signals:
	 * 0x00 = 0us .. 0xFF = 12.75us in 0.05us steps
	 * Byte 3 is FTI_H0 delay time for STP O/E signals:
	 * 0x00 = 0us .. 0xFF = 12.75us in 0.05us steps
	 */
	u8 dpmctr12[NT35510_P0_DPMCTR12_LEN];
	/**
	 * @gamma_corr_pos_r: Red gamma correction parameters, positive
	 */
	u8 gamma_corr_pos_r[NT35510_P1_GAMMA_LEN];
	/**
	 * @gamma_corr_pos_g: Green gamma correction parameters, positive
	 */
	u8 gamma_corr_pos_g[NT35510_P1_GAMMA_LEN];
	/**
	 * @gamma_corr_pos_b: Blue gamma correction parameters, positive
	 */
	u8 gamma_corr_pos_b[NT35510_P1_GAMMA_LEN];
	/**
	 * @gamma_corr_neg_r: Red gamma correction parameters, negative
	 */
	u8 gamma_corr_neg_r[NT35510_P1_GAMMA_LEN];
	/**
	 * @gamma_corr_neg_g: Green gamma correction parameters, negative
	 */
	u8 gamma_corr_neg_g[NT35510_P1_GAMMA_LEN];
	/**
	 * @gamma_corr_neg_b: Blue gamma correction parameters, negative
	 */
	u8 gamma_corr_neg_b[NT35510_P1_GAMMA_LEN];
};
/**
 * struct nt35510 - state container for the NT35510 panel
 *
 * Runtime state for one panel instance; allocated in probe and stored
 * as the DSI device's driver data.
 */
struct nt35510 {
	/**
	 * @dev: the container device
	 */
	struct device *dev;
	/**
	 * @conf: the specific panel configuration, as the NT35510
	 * can be combined with many physical panels, they can have
	 * different physical dimensions and gamma correction etc,
	 * so this is stored in the config.
	 */
	const struct nt35510_config *conf;
	/**
	 * @panel: the DRM panel object for the instance
	 */
	struct drm_panel panel;
	/**
	 * @supplies: regulators supplying the panel: "vdd" and "vddi"
	 */
	struct regulator_bulk_data supplies[2];
	/**
	 * @reset_gpio: the reset line (asserted = panel held in reset)
	 */
	struct gpio_desc *reset_gpio;
};
/* Manufacturer command has strictly this byte sequence */
static const u8 nt35510_mauc_mtp_read_param[] = { 0xAA, 0x55, 0x25, 0x01 };
/* Fixed MTP read setting sequence; exact semantics uncertain (see defines) */
static const u8 nt35510_mauc_mtp_read_setting[] = { 0x01, 0x02, 0x00, 0x20,
						    0x33, 0x13, 0x00, 0x40,
						    0x00, 0x00, 0x23, 0x02 };
/* Magic sequences selecting manufacturer command set (MCS) page 0 or 1 */
static const u8 nt35510_mauc_select_page_0[] = { 0x55, 0xAA, 0x52, 0x08, 0x00 };
static const u8 nt35510_mauc_select_page_1[] = { 0x55, 0xAA, 0x52, 0x08, 0x01 };
/* Single parameter byte enabling VGH output */
static const u8 nt35510_vgh_on[] = { 0x01 };

/* Map a drm_panel back to its containing nt35510 state */
static inline struct nt35510 *panel_to_nt35510(struct drm_panel *panel)
{
	return container_of(panel, struct nt35510, panel);
}

/* Parameter bytes for MIPI_DCS_SET_ADDRESS_MODE at 0 and 180 degrees */
#define NT35510_ROTATE_0_SETTING	0x02
#define NT35510_ROTATE_180_SETTING	0x00
/**
 * nt35510_send_long() - send a long command with payload to the panel
 * @nt: the display instance
 * @dsi: the DSI device to write through
 * @cmd: the DCS command byte introducing the sequence
 * @cmdlen: number of payload bytes in @seq
 * @seq: the payload bytes
 *
 * DSI transfers are limited in size, so the payload is sent in chunks
 * of at most 15 bytes: the first chunk is a DCS write carrying @cmd,
 * and any remaining bytes follow as generic writes which the controller
 * appends to the same command.
 *
 * Return: 0 on success, negative error code on transfer failure.
 */
static int nt35510_send_long(struct nt35510 *nt, struct mipi_dsi_device *dsi,
			     u8 cmd, u8 cmdlen, const u8 *seq)
{
	const u8 *seqp = seq;
	int cmdwritten = 0;
	int ret;

	/*
	 * A do-while so that a zero-length payload still produces one
	 * (empty) DCS write carrying the command byte, like before.
	 */
	do {
		int chunk = cmdlen - cmdwritten;

		/* Never push more than 15 payload bytes per transfer */
		if (chunk > 15)
			chunk = 15;
		if (!cmdwritten) {
			/* First transfer carries the DCS command byte */
			ret = mipi_dsi_dcs_write(dsi, cmd, seqp, chunk);
			if (ret < 0) {
				dev_err(nt->dev, "error sending DCS command seq cmd %02x\n", cmd);
				return ret;
			}
		} else {
			/* Continuation chunks are plain generic writes */
			ret = mipi_dsi_generic_write(dsi, seqp, chunk);
			if (ret < 0) {
				dev_err(nt->dev, "error sending generic write seq %02x\n", cmd);
				return ret;
			}
		}
		cmdwritten += chunk;
		seqp += chunk;
	} while (cmdwritten < cmdlen);

	dev_dbg(nt->dev, "sent command %02x %02x bytes\n", cmd, cmdlen);
	return 0;
}
/**
 * nt35510_read_id() - read and log the panel's MTP ID bytes
 * @nt: the display instance
 *
 * Reads the three one-byte DCS ID registers and logs them. The values
 * are informational only; callers may ignore failures.
 *
 * Return: 0 on success, negative error code if any read fails.
 */
static int nt35510_read_id(struct nt35510 *nt)
{
	struct mipi_dsi_device *dsi = to_mipi_dsi_device(nt->dev);
	u8 id1, id2, id3;
	int ret;

	ret = mipi_dsi_dcs_read(dsi, MCS_CMD_READ_ID1, &id1, 1);
	if (ret < 0) {
		dev_err(nt->dev, "could not read MTP ID1\n");
		return ret;
	}
	ret = mipi_dsi_dcs_read(dsi, MCS_CMD_READ_ID2, &id2, 1);
	if (ret < 0) {
		dev_err(nt->dev, "could not read MTP ID2\n");
		return ret;
	}
	ret = mipi_dsi_dcs_read(dsi, MCS_CMD_READ_ID3, &id3, 1);
	if (ret < 0) {
		dev_err(nt->dev, "could not read MTP ID3\n");
		return ret;
	}
	/*
	 * Multi-Time Programmable (?) memory contains manufacturer
	 * ID (e.g. Hydis 0x55), driver ID (e.g. NT35510 0xc0) and
	 * version.
	 */
	dev_info(nt->dev, "MTP ID manufacturer: %02x version: %02x driver: %02x\n", id1, id2, id3);
	return 0;
}
/**
* nt35510_setup_power() - set up power config in page 1
* @nt: the display instance to set up
*/
static int nt35510_setup_power(struct nt35510 *nt)
{
struct mipi_dsi_device *dsi = to_mipi_dsi_device(nt->dev);
int ret;
ret = nt35510_send_long(nt, dsi, NT35510_P1_SETAVDD,
NT35510_P1_AVDD_LEN,
nt->conf->avdd);
if (ret)
return ret;
ret = nt35510_send_long(nt, dsi, NT35510_P1_BT1CTR,
NT35510_P1_BT1CTR_LEN,
nt->conf->bt1ctr);
if (ret)
return ret;
ret = nt35510_send_long(nt, dsi, NT35510_P1_SETAVEE,
NT35510_P1_AVEE_LEN,
nt->conf->avee);
if (ret)
return ret;
ret = nt35510_send_long(nt, dsi, NT35510_P1_BT2CTR,
NT35510_P1_BT2CTR_LEN,
nt->conf->bt2ctr);
if (ret)
return ret;
ret = nt35510_send_long(nt, dsi, NT35510_P1_SETVGH,
NT35510_P1_VGH_LEN,
nt->conf->vgh);
if (ret)
return ret;
ret = nt35510_send_long(nt, dsi, NT35510_P1_BT4CTR,
NT35510_P1_BT4CTR_LEN,
nt->conf->bt4ctr);
if (ret)
return ret;
ret = nt35510_send_long(nt, dsi, NT35510_P1_VGHCTR,
ARRAY_SIZE(nt35510_vgh_on),
nt35510_vgh_on);
if (ret)
return ret;
ret = nt35510_send_long(nt, dsi, NT35510_P1_SETVGL,
NT35510_P1_VGL_LEN,
nt->conf->vgl);
if (ret)
return ret;
ret = nt35510_send_long(nt, dsi, NT35510_P1_BT5CTR,
NT35510_P1_BT5CTR_LEN,
nt->conf->bt5ctr);
if (ret)
return ret;
ret = nt35510_send_long(nt, dsi, NT35510_P1_SETVGP,
NT35510_P1_VGP_LEN,
nt->conf->vgp);
if (ret)
return ret;
ret = nt35510_send_long(nt, dsi, NT35510_P1_SETVGN,
NT35510_P1_VGN_LEN,
nt->conf->vgn);
if (ret)
return ret;
/* Typically 10 ms */
usleep_range(10000, 20000);
return 0;
}
/**
 * nt35510_setup_display() - set up display config in page 0
 * @nt: the display instance to set up
 *
 * Programs the page-0 manufacturer display settings: operation control,
 * address mode (rotation), source driver timing, and the frame timing
 * registers for active, idle and partial modes.
 *
 * Return: 0 on success, negative error code on transfer failure.
 */
static int nt35510_setup_display(struct nt35510 *nt)
{
	struct mipi_dsi_device *dsi = to_mipi_dsi_device(nt->dev);
	const struct nt35510_config *conf = nt->conf;
	u8 dopctr[NT35510_P0_DOPCTR_LEN];
	u8 gseqctr[NT35510_P0_GSEQCTR_LEN];
	u8 dpfrctr[NT35510_P0_DPFRCTR1_LEN];
	/* FIXME: set up any rotation (assume none for now) */
	u8 addr_mode = NT35510_ROTATE_0_SETTING;
	u8 val;
	int ret;

	/* Enable TE, EoTP and RGB pixel format */
	dopctr[0] = NT35510_DOPCTR_0_DSITE | NT35510_DOPCTR_0_EOTP |
		NT35510_DOPCTR_0_N565;
	dopctr[1] = NT35510_DOPCTR_1_CTB;
	ret = nt35510_send_long(nt, dsi, NT35510_P0_DOPCTR,
				NT35510_P0_DOPCTR_LEN,
				dopctr);
	if (ret)
		return ret;

	ret = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_ADDRESS_MODE, &addr_mode,
				 sizeof(addr_mode));
	if (ret < 0)
		return ret;

	/*
	 * Source data hold time, default 0x05 = 2.5us
	 * 0x00..0x3F = 0 .. 31.5us in steps of 0.5us
	 * 0x0A = 5us
	 */
	val = 0x0A;
	ret = mipi_dsi_dcs_write(dsi, NT35510_P0_SDHDTCTR, &val,
				 sizeof(val));
	if (ret < 0)
		return ret;

	/* EQ control for gate signals, 0x00 = 0 us */
	gseqctr[0] = 0x00;
	gseqctr[1] = 0x00;
	ret = nt35510_send_long(nt, dsi, NT35510_P0_GSEQCTR,
				NT35510_P0_GSEQCTR_LEN,
				gseqctr);
	if (ret)
		return ret;

	/* Source driver equalizer settings from the panel config */
	ret = nt35510_send_long(nt, dsi, NT35510_P0_SDEQCTR,
				NT35510_P0_SDEQCTR_LEN,
				conf->sdeqctr);
	if (ret)
		return ret;

	/* Source driver voltage during vertical porch */
	ret = mipi_dsi_dcs_write(dsi, NT35510_P0_SDVPCTR,
				 &conf->sdvpctr, 1);
	if (ret < 0)
		return ret;

	/*
	 * Display timing control for active and idle off mode:
	 * the first byte contains
	 * the two high bits of T1A and second byte the low 8 bits, and
	 * the valid range is 0x100 (257) to 0x3ff (1023) representing
	 * 258..1024 (+1) pixel clock ticks for one scanline. At 20MHz pixel
	 * clock this covers the range of 12.90us .. 51.20us in steps of
	 * 0.05us, the default is 0x184 (388) representing 389 ticks.
	 * The third byte is VBPDA, vertical back porch display active
	 * and the fourth VFPDA, vertical front porch display active,
	 * both given in number of scanlines in the range 0x02..0xff
	 * for 2..255 scanlines. The fifth byte is 2 bits selecting
	 * PSEL for active and idle off mode, how much the 20MHz clock
	 * is divided by 0..3. This needs to be adjusted to get the right
	 * frame rate.
	 */
	dpfrctr[0] = (conf->t1 >> 8) & 0xFF;
	dpfrctr[1] = conf->t1 & 0xFF;
	/* Vertical back porch */
	dpfrctr[2] = conf->vbp;
	/* Vertical front porch */
	dpfrctr[3] = conf->vfp;
	dpfrctr[4] = conf->psel;
	ret = nt35510_send_long(nt, dsi, NT35510_P0_DPFRCTR1,
				NT35510_P0_DPFRCTR1_LEN,
				dpfrctr);
	if (ret)
		return ret;
	/* For idle and partial idle off mode we decrease front porch by one */
	dpfrctr[3]--;
	ret = nt35510_send_long(nt, dsi, NT35510_P0_DPFRCTR2,
				NT35510_P0_DPFRCTR2_LEN,
				dpfrctr);
	if (ret)
		return ret;
	ret = nt35510_send_long(nt, dsi, NT35510_P0_DPFRCTR3,
				NT35510_P0_DPFRCTR3_LEN,
				dpfrctr);
	if (ret)
		return ret;

	/* Enable TE on vblank */
	ret = mipi_dsi_dcs_set_tear_on(dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
	if (ret)
		return ret;

	/* Turn on the pads? (exact semantics uncertain, see DPMCTR12 bits) */
	ret = nt35510_send_long(nt, dsi, NT35510_P0_DPMCTR12,
				NT35510_P0_DPMCTR12_LEN,
				conf->dpmctr12);
	if (ret)
		return ret;

	return 0;
}
/* Backlight update hook: push the brightness as a one-byte DCS write */
static int nt35510_set_brightness(struct backlight_device *bl)
{
	struct nt35510 *nt = bl_get_data(bl);
	struct mipi_dsi_device *dsi = to_mipi_dsi_device(nt->dev);
	u8 brightness = bl->props.brightness;
	int ret;

	dev_dbg(nt->dev, "set brightness %d\n", brightness);
	ret = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_DISPLAY_BRIGHTNESS,
				 &brightness, sizeof(brightness));

	return ret < 0 ? ret : 0;
}
/* Operations for the internal, DCS-controlled backlight */
static const struct backlight_ops nt35510_bl_ops = {
	.update_status = nt35510_set_brightness,
};
/**
 * nt35510_power_on() - power up and configure the panel
 * @nt: the display instance
 *
 * This power-on sequence enables the supplies, toggles RESET per the
 * datasheet, reads out the MTP ID and then programs the page-1 power
 * settings, the gamma curves and the page-0 display settings.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int nt35510_power_on(struct nt35510 *nt)
{
	struct mipi_dsi_device *dsi = to_mipi_dsi_device(nt->dev);
	int ret;

	ret = regulator_bulk_enable(ARRAY_SIZE(nt->supplies), nt->supplies);
	if (ret < 0) {
		dev_err(nt->dev, "unable to enable regulators\n");
		return ret;
	}

	/* Toggle RESET in accordance with datasheet page 370 */
	if (nt->reset_gpio) {
		gpiod_set_value(nt->reset_gpio, 1);
		/* Active min 10 us according to datasheet, let's say 20 */
		usleep_range(20, 1000);
		gpiod_set_value(nt->reset_gpio, 0);
		/*
		 * 5 ms during sleep mode, 120 ms during sleep out mode
		 * according to datasheet, let's use 120-140 ms.
		 */
		usleep_range(120000, 140000);
	}

	ret = nt35510_send_long(nt, dsi, MCS_CMD_MTP_READ_PARAM,
				ARRAY_SIZE(nt35510_mauc_mtp_read_param),
				nt35510_mauc_mtp_read_param);
	if (ret)
		return ret;

	ret = nt35510_send_long(nt, dsi, MCS_CMD_MTP_READ_SETTING,
				ARRAY_SIZE(nt35510_mauc_mtp_read_setting),
				nt35510_mauc_mtp_read_setting);
	if (ret)
		return ret;

	/* ID read is informational only; failures are not fatal here */
	nt35510_read_id(nt);

	/* Set up stuff in manufacturer control, page 1 */
	ret = nt35510_send_long(nt, dsi, MCS_CMD_MAUCCTR,
				ARRAY_SIZE(nt35510_mauc_select_page_1),
				nt35510_mauc_select_page_1);
	if (ret)
		return ret;

	ret = nt35510_setup_power(nt);
	if (ret)
		return ret;

	/* Program the six gamma correction curves from the panel config */
	ret = nt35510_send_long(nt, dsi, NT35510_P1_SET_GAMMA_RED_POS,
				NT35510_P1_GAMMA_LEN,
				nt->conf->gamma_corr_pos_r);
	if (ret)
		return ret;
	ret = nt35510_send_long(nt, dsi, NT35510_P1_SET_GAMMA_GREEN_POS,
				NT35510_P1_GAMMA_LEN,
				nt->conf->gamma_corr_pos_g);
	if (ret)
		return ret;
	ret = nt35510_send_long(nt, dsi, NT35510_P1_SET_GAMMA_BLUE_POS,
				NT35510_P1_GAMMA_LEN,
				nt->conf->gamma_corr_pos_b);
	if (ret)
		return ret;
	ret = nt35510_send_long(nt, dsi, NT35510_P1_SET_GAMMA_RED_NEG,
				NT35510_P1_GAMMA_LEN,
				nt->conf->gamma_corr_neg_r);
	if (ret)
		return ret;
	ret = nt35510_send_long(nt, dsi, NT35510_P1_SET_GAMMA_GREEN_NEG,
				NT35510_P1_GAMMA_LEN,
				nt->conf->gamma_corr_neg_g);
	if (ret)
		return ret;
	ret = nt35510_send_long(nt, dsi, NT35510_P1_SET_GAMMA_BLUE_NEG,
				NT35510_P1_GAMMA_LEN,
				nt->conf->gamma_corr_neg_b);
	if (ret)
		return ret;

	/* Set up stuff in manufacturer control, page 0 */
	ret = nt35510_send_long(nt, dsi, MCS_CMD_MAUCCTR,
				ARRAY_SIZE(nt35510_mauc_select_page_0),
				nt35510_mauc_select_page_0);
	if (ret)
		return ret;

	ret = nt35510_setup_display(nt);
	if (ret)
		return ret;

	return 0;
}
/* Cut the supplies and assert RESET; counterpart of nt35510_power_on() */
static int nt35510_power_off(struct nt35510 *nt)
{
	int ret = regulator_bulk_disable(ARRAY_SIZE(nt->supplies),
					 nt->supplies);

	if (ret)
		return ret;

	if (nt->reset_gpio)
		gpiod_set_value(nt->reset_gpio, 1);

	return 0;
}
/* drm_panel unprepare: display off, sleep in, then power off */
static int nt35510_unprepare(struct drm_panel *panel)
{
	struct nt35510 *nt = panel_to_nt35510(panel);
	struct mipi_dsi_device *dsi = to_mipi_dsi_device(nt->dev);
	int ret;

	ret = mipi_dsi_dcs_set_display_off(dsi);
	if (ret) {
		dev_err(nt->dev, "failed to turn display off (%d)\n", ret);
		return ret;
	}
	/* Give the display some time to blank before entering sleep */
	usleep_range(10000, 20000);

	/* Enter sleep mode */
	ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
	if (ret) {
		dev_err(nt->dev, "failed to enter sleep mode (%d)\n", ret);
		return ret;
	}

	/* Wait 4 frames, how much is that 5ms in the vendor driver */
	usleep_range(5000, 10000);

	ret = nt35510_power_off(nt);
	if (ret)
		return ret;

	return 0;
}
/* drm_panel prepare: power on and configure, sleep out, display on */
static int nt35510_prepare(struct drm_panel *panel)
{
	struct nt35510 *nt = panel_to_nt35510(panel);
	struct mipi_dsi_device *dsi = to_mipi_dsi_device(nt->dev);
	int ret;

	ret = nt35510_power_on(nt);
	if (ret)
		return ret;

	/* Exit sleep mode */
	ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
	if (ret) {
		dev_err(nt->dev, "failed to exit sleep mode (%d)\n", ret);
		return ret;
	}
	/* Up to 120 ms */
	usleep_range(120000, 150000);

	ret = mipi_dsi_dcs_set_display_on(dsi);
	if (ret) {
		dev_err(nt->dev, "failed to turn display on (%d)\n", ret);
		return ret;
	}
	/* Some 10 ms */
	usleep_range(10000, 20000);

	return 0;
}
static int nt35510_get_modes(struct drm_panel *panel,
struct drm_connector *connector)
{
struct nt35510 *nt = panel_to_nt35510(panel);
struct drm_display_mode *mode;
struct drm_display_info *info;
info = &connector->display_info;
info->width_mm = nt->conf->width_mm;
info->height_mm = nt->conf->height_mm;
mode = drm_mode_duplicate(connector->dev, &nt->conf->mode);
if (!mode) {
dev_err(panel->dev, "bad mode or failed to add mode\n");
return -EINVAL;
}
drm_mode_set_name(mode);
mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
mode->width_mm = nt->conf->width_mm;
mode->height_mm = nt->conf->height_mm;
drm_mode_probed_add(connector, mode);
return 1; /* Number of modes */
}
/* DRM panel operations: prepare/unprepare run the full power sequence */
static const struct drm_panel_funcs nt35510_drm_funcs = {
	.unprepare = nt35510_unprepare,
	.prepare = nt35510_prepare,
	.get_modes = nt35510_get_modes,
};
/*
 * Probe: set up DSI parameters, regulators, reset GPIO, backlight and
 * register the panel with DRM.
 */
static int nt35510_probe(struct mipi_dsi_device *dsi)
{
	struct device *dev = &dsi->dev;
	struct nt35510 *nt;
	int ret;

	nt = devm_kzalloc(dev, sizeof(struct nt35510), GFP_KERNEL);
	if (!nt)
		return -ENOMEM;
	mipi_dsi_set_drvdata(dsi, nt);
	nt->dev = dev;

	dsi->lanes = 2;
	dsi->format = MIPI_DSI_FMT_RGB888;
	/*
	 * Datasheet suggests max HS rate for NT35510 is 250 MHz
	 * (period time 4ns, see figure 7.6.4 page 365) and max LP rate is
	 * 20 MHz (period time 50ns, see figure 7.6.6. page 366).
	 * However these frequencies appear in source code for the Hydis
	 * HVA40WV1 panel and setting up the LP frequency makes the panel
	 * not work.
	 *
	 * TODO: if other panels prove to be closer to the datasheet,
	 * maybe make this a per-panel config in struct nt35510_config?
	 */
	dsi->hs_rate = 349440000;
	dsi->lp_rate = 9600000;
	dsi->mode_flags = MIPI_DSI_CLOCK_NON_CONTINUOUS;

	/*
	 * Every new incarnation of this display must have a unique
	 * data entry for the system in this driver.
	 */
	nt->conf = of_device_get_match_data(dev);
	if (!nt->conf) {
		dev_err(dev, "missing device configuration\n");
		return -ENODEV;
	}

	nt->supplies[0].supply = "vdd"; /* 2.3-4.8 V */
	nt->supplies[1].supply = "vddi"; /* 1.65-3.3V */
	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(nt->supplies),
				      nt->supplies);
	if (ret < 0)
		return ret;
	ret = regulator_set_voltage(nt->supplies[0].consumer,
				    2300000, 4800000);
	if (ret)
		return ret;
	ret = regulator_set_voltage(nt->supplies[1].consumer,
				    1650000, 3300000);
	if (ret)
		return ret;

	nt->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_ASIS);
	if (IS_ERR(nt->reset_gpio)) {
		dev_err(dev, "error getting RESET GPIO\n");
		return PTR_ERR(nt->reset_gpio);
	}

	drm_panel_init(&nt->panel, dev, &nt35510_drm_funcs,
		       DRM_MODE_CONNECTOR_DSI);

	/*
	 * First, try to locate an external backlight (such as on GPIO)
	 * if this fails, assume we will want to use the internal backlight
	 * control.
	 */
	ret = drm_panel_of_backlight(&nt->panel);
	if (ret) {
		dev_err(dev, "error getting external backlight %d\n", ret);
		return ret;
	}
	if (!nt->panel.backlight) {
		struct backlight_device *bl;

		bl = devm_backlight_device_register(dev, "nt35510", dev, nt,
						    &nt35510_bl_ops, NULL);
		if (IS_ERR(bl)) {
			dev_err(dev, "failed to register backlight device\n");
			return PTR_ERR(bl);
		}
		bl->props.max_brightness = 255;
		bl->props.brightness = 255;
		bl->props.power = FB_BLANK_POWERDOWN;
		nt->panel.backlight = bl;
	}

	drm_panel_add(&nt->panel);

	ret = mipi_dsi_attach(dsi);
	if (ret < 0) {
		/*
		 * Bug fix: the attach error must be propagated; the
		 * original returned 0 here, leaving a half-initialized
		 * device bound.
		 */
		drm_panel_remove(&nt->panel);
		return ret;
	}

	return 0;
}
/* Driver removal: detach from DSI, power the panel down, unregister */
static void nt35510_remove(struct mipi_dsi_device *dsi)
{
	struct nt35510 *nt = mipi_dsi_get_drvdata(dsi);

	mipi_dsi_detach(dsi);
	if (nt35510_power_off(nt))
		dev_err(&dsi->dev, "Failed to power off\n");
	drm_panel_remove(&nt->panel);
}
/*
 * These gamma correction values are 10bit tuples, so only bits 0 and 1 are
 * ever used in the first byte. They form a positive and negative gamma
 * correction curve for each color; values must be strictly higher for each
 * step on the curve. As can be seen, these default curves go from 0x0001
 * to 0x03FE.
 */
#define NT35510_GAMMA_POS_DEFAULT 0x00, 0x01, 0x00, 0x43, 0x00, \
0x6B, 0x00, 0x87, 0x00, 0xA3, 0x00, 0xCE, 0x00, 0xF1, 0x01, \
0x27, 0x01, 0x53, 0x01, 0x98, 0x01, 0xCE, 0x02, 0x22, 0x02, \
0x83, 0x02, 0x78, 0x02, 0x9E, 0x02, 0xDD, 0x03, 0x00, 0x03, \
0x2E, 0x03, 0x54, 0x03, 0x7F, 0x03, 0x95, 0x03, 0xB3, 0x03, \
0xC2, 0x03, 0xE1, 0x03, 0xF1, 0x03, 0xFE
#define NT35510_GAMMA_NEG_DEFAULT 0x00, 0x01, 0x00, 0x43, 0x00, \
0x6B, 0x00, 0x87, 0x00, 0xA3, 0x00, 0xCE, 0x00, 0xF1, 0x01, \
0x27, 0x01, 0x53, 0x01, 0x98, 0x01, 0xCE, 0x02, 0x22, 0x02, \
0x43, 0x02, 0x50, 0x02, 0x9E, 0x02, 0xDD, 0x03, 0x00, 0x03, \
0x2E, 0x03, 0x54, 0x03, 0x7F, 0x03, 0x95, 0x03, 0xB3, 0x03, \
0xC2, 0x03, 0xE1, 0x03, 0xF1, 0x03, 0xFE
/*
* The Hydis HVA40WV1 panel
*/
static const struct nt35510_config nt35510_hydis_hva40wv1 = {
	.width_mm = 52,
	.height_mm = 86,
	/*
	 * As the Hydis panel is used in command mode, the porches etc
	 * are settings programmed internally into the NT35510 controller
	 * and generated toward the physical display. As the panel is not
	 * used in video mode, these are not really exposed to the DSI
	 * host.
	 *
	 * Display frame rate control:
	 * Frame rate = (20 MHz / 1) / (389 * (7 + 50 + 800)) ~= 60 Hz
	 */
	.mode = {
		/* The internal pixel clock of the NT35510 is 20 MHz */
		.clock = 20000,
		.hdisplay = 480,
		.hsync_start = 480 + 2, /* HFP = 2 */
		.hsync_end = 480 + 2 + 0, /* HSync = 0 */
		.htotal = 480 + 2 + 0 + 5, /* HBP = 5 */
		.vdisplay = 800,
		.vsync_start = 800 + 2, /* VFP = 2 */
		.vsync_end = 800 + 2 + 0, /* VSync = 0 */
		.vtotal = 800 + 2 + 0 + 5, /* VBP = 5 */
		.flags = 0,
	},
	/* 0x09: AVDD = 5.6V */
	.avdd = { 0x09, 0x09, 0x09 },
	/* 0x34: PCK = Hsync/2, BTP = 2 x VDDB */
	.bt1ctr = { 0x34, 0x34, 0x34 },
	/* 0x09: AVEE = -5.6V */
	.avee = { 0x09, 0x09, 0x09 },
	/* 0x24: NCK = Hsync/2, BTN = -2 x VDDB */
	.bt2ctr = { 0x24, 0x24, 0x24 },
	/* 0x05 = 12V */
	.vgh = { 0x05, 0x05, 0x05 },
	/* 0x24: NCKA = Hsync/2, VGH = 2 x AVDD - AVEE */
	.bt4ctr = { 0x24, 0x24, 0x24 },
	/* 0x0B = -13V */
	.vgl = { 0x0B, 0x0B, 0x0B },
	/* 0x24: LCKA = Hsync, VGL = AVDD + VCL - AVDD */
	.bt5ctr = { 0x24, 0x24, 0x24 },
	/* VGMP: 0x0A3 = 5.0375V, VGSP = 0V */
	.vgp = { 0x00, 0xA3, 0x00 },
	/* VGMP: 0x0A3 = 5.0375V, VGSP = 0V */
	.vgn = { 0x00, 0xA3, 0x00 },
	/* SDEQCTR: source driver EQ mode 2, 2.5 us rise time on each step */
	.sdeqctr = { 0x01, 0x05, 0x05, 0x05 },
	/* SDVPCTR: Normal operation off color during v porch */
	.sdvpctr = 0x01,
	/* T1: number of pixel clocks on one scanline: 0x184 = 389 clocks */
	.t1 = 0x0184,
	/* VBP: vertical back porch toward the panel */
	.vbp = 7,
	/* VFP: vertical front porch toward the panel */
	.vfp = 50,
	/* PSEL: divide pixel clock 20MHz with 1 (no clock downscaling) */
	.psel = 0,
	/* DPTMCTR12: 0x03: LVGL = VGLX, overlap mode, swap R->L O->E */
	.dpmctr12 = { 0x03, 0x00, 0x00, },
	/* Default gamma correction values */
	.gamma_corr_pos_r = { NT35510_GAMMA_POS_DEFAULT },
	.gamma_corr_pos_g = { NT35510_GAMMA_POS_DEFAULT },
	.gamma_corr_pos_b = { NT35510_GAMMA_POS_DEFAULT },
	.gamma_corr_neg_r = { NT35510_GAMMA_NEG_DEFAULT },
	.gamma_corr_neg_g = { NT35510_GAMMA_NEG_DEFAULT },
	.gamma_corr_neg_b = { NT35510_GAMMA_NEG_DEFAULT },
};
/* Each supported panel maps its compatible string to a nt35510_config */
static const struct of_device_id nt35510_of_match[] = {
	{
		.compatible = "hydis,hva40wv1",
		.data = &nt35510_hydis_hva40wv1,
	},
	{ }
};
MODULE_DEVICE_TABLE(of, nt35510_of_match);

/* MIPI DSI driver glue */
static struct mipi_dsi_driver nt35510_driver = {
	.probe = nt35510_probe,
	.remove = nt35510_remove,
	.driver = {
		.name = "panel-novatek-nt35510",
		.of_match_table = nt35510_of_match,
	},
};
module_mipi_dsi_driver(nt35510_driver);
MODULE_AUTHOR("Linus Walleij <[email protected]>");
MODULE_DESCRIPTION("NT35510-based panel driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/gpu/drm/panel/panel-novatek-nt35510.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2020 Linaro Ltd
* Author: Sumit Semwal <[email protected]>
*
* This driver is for the DSI interface to panels using the NT36672A display driver IC
* from Novatek.
* Currently supported are the Tianma FHD+ panels found in some Xiaomi phones, including
* some variants of the Poco F1 phone.
*
 * Panels using the Novatek NT36672A IC should add appropriate configuration per-panel and
* use this driver.
*/
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/gpio/consumer.h>
#include <linux/pinctrl/consumer.h>
#include <linux/regulator/consumer.h>
#include <drm/drm_device.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_modes.h>
#include <drm/drm_panel.h>
#include <video/mipi_display.h>
/* One DCS command: command byte followed by a single parameter byte */
struct nt36672a_panel_cmd {
	const char data[2];
};

/* Supply names, in the same order as the enable loads below */
static const char * const nt36672a_regulator_names[] = {
	"vddio",
	"vddpos",
	"vddneg",
};

/* Enable load currents (uA) for the corresponding regulator above */
static unsigned long const nt36672a_regulator_enable_loads[] = {
	62000,
	100000,
	100000
};

/* Per-panel static description: mode, geometry and command sequences */
struct nt36672a_panel_desc {
	const struct drm_display_mode *display_mode;
	const char *panel_name;

	unsigned int width_mm;
	unsigned int height_mm;

	unsigned long mode_flags;
	enum mipi_dsi_pixel_format format;
	unsigned int lanes;

	unsigned int num_on_cmds_1;
	const struct nt36672a_panel_cmd *on_cmds_1;
	unsigned int num_on_cmds_2;
	const struct nt36672a_panel_cmd *on_cmds_2;

	unsigned int num_off_cmds;
	const struct nt36672a_panel_cmd *off_cmds;
};

/* Runtime state for one panel instance */
struct nt36672a_panel {
	struct drm_panel base;
	struct mipi_dsi_device *link;
	const struct nt36672a_panel_desc *desc;

	struct regulator_bulk_data supplies[ARRAY_SIZE(nt36672a_regulator_names)];

	struct gpio_desc *reset_gpio;

	/* true once the prepare sequence has completed */
	bool prepared;
};

/* Map a drm_panel back to its containing nt36672a_panel state */
static inline struct nt36672a_panel *to_nt36672a_panel(struct drm_panel *panel)
{
	return container_of(panel, struct nt36672a_panel, base);
}
/* Send a list of one-parameter DCS commands, stopping at the first error */
static int nt36672a_send_cmds(struct drm_panel *panel, const struct nt36672a_panel_cmd *cmds,
			      int num)
{
	struct nt36672a_panel *pinfo = to_nt36672a_panel(panel);
	int i;

	for (i = 0; i < num; i++) {
		int err = mipi_dsi_dcs_write(pinfo->link, cmds[i].data[0],
					     &cmds[i].data[1], 1);

		if (err < 0)
			return err;
	}

	return 0;
}
/* Assert reset and cut the panel supplies */
static int nt36672a_panel_power_off(struct drm_panel *panel)
{
	struct nt36672a_panel *pinfo = to_nt36672a_panel(panel);
	int ret;

	/* Hold the panel in reset before dropping its rails */
	gpiod_set_value(pinfo->reset_gpio, 1);

	ret = regulator_bulk_disable(ARRAY_SIZE(pinfo->supplies), pinfo->supplies);
	if (ret)
		dev_err(panel->dev, "regulator_bulk_disable failed %d\n", ret);

	return ret;
}
/*
 * drm_panel unprepare: off commands, display off, sleep in, power off.
 * Errors along the way are logged but the sequence carries on so the
 * panel always ends up powered down.
 */
static int nt36672a_panel_unprepare(struct drm_panel *panel)
{
	struct nt36672a_panel *pinfo = to_nt36672a_panel(panel);
	int ret;

	if (!pinfo->prepared)
		return 0;

	/* send off cmds */
	ret = nt36672a_send_cmds(panel, pinfo->desc->off_cmds,
				 pinfo->desc->num_off_cmds);
	if (ret < 0)
		dev_err(panel->dev, "failed to send DCS off cmds: %d\n", ret);

	ret = mipi_dsi_dcs_set_display_off(pinfo->link);
	if (ret < 0)
		dev_err(panel->dev, "set_display_off cmd failed ret = %d\n", ret);

	/* 120ms delay required here as per DCS spec */
	msleep(120);

	ret = mipi_dsi_dcs_enter_sleep_mode(pinfo->link);
	if (ret < 0)
		dev_err(panel->dev, "enter_sleep cmd failed ret = %d\n", ret);

	/* 0x3C = 60ms delay */
	msleep(60);

	ret = nt36672a_panel_power_off(panel);
	if (ret < 0)
		dev_err(panel->dev, "power_off failed ret = %d\n", ret);

	pinfo->prepared = false;

	return ret;
}
/*
 * Enable the panel supplies and pulse the reset line.
 *
 * Return: 0 on success, negative error code if enabling the regulators
 * fails (in which case nothing was enabled).
 */
static int nt36672a_panel_power_on(struct nt36672a_panel *pinfo)
{
	int ret;

	ret = regulator_bulk_enable(ARRAY_SIZE(pinfo->supplies), pinfo->supplies);
	if (ret < 0)
		return ret;

	/*
	 * As per downstream kernel, Reset sequence of Tianma FHD panel requires the panel to
	 * be out of reset for 10ms, followed by being held in reset for 10ms. But for Android
	 * AOSP, we needed to bump it up to 200ms otherwise we get white screen sometimes.
	 * FIXME: Try to reduce this 200ms to a lesser value.
	 */
	gpiod_set_value(pinfo->reset_gpio, 1);
	msleep(200);
	gpiod_set_value(pinfo->reset_gpio, 0);
	msleep(200);

	return 0;
}
/*
 * Power on the supplies, run the two-stage DCS init sequence and turn the
 * display on.  If any step after a successful power-on fails, both the
 * reset GPIO and the bulk regulators are released again so the regulator
 * enable count stays balanced across repeated prepare attempts.
 */
static int nt36672a_panel_prepare(struct drm_panel *panel)
{
	struct nt36672a_panel *pinfo = to_nt36672a_panel(panel);
	int err;

	if (pinfo->prepared)
		return 0;

	err = nt36672a_panel_power_on(pinfo);
	if (err < 0)
		return err;	/* nothing enabled yet, nothing to undo */

	/* send first part of init cmds */
	err = nt36672a_send_cmds(panel, pinfo->desc->on_cmds_1,
				 pinfo->desc->num_on_cmds_1);
	if (err < 0) {
		dev_err(panel->dev, "failed to send DCS Init 1st Code: %d\n", err);
		goto poweroff;
	}

	err = mipi_dsi_dcs_exit_sleep_mode(pinfo->link);
	if (err < 0) {
		dev_err(panel->dev, "failed to exit sleep mode: %d\n", err);
		goto poweroff;
	}

	/* 0x46 = 70 ms delay */
	msleep(70);

	err = mipi_dsi_dcs_set_display_on(pinfo->link);
	if (err < 0) {
		dev_err(panel->dev, "failed to Set Display ON: %d\n", err);
		goto poweroff;
	}

	/* Send rest of the init cmds */
	err = nt36672a_send_cmds(panel, pinfo->desc->on_cmds_2,
				 pinfo->desc->num_on_cmds_2);
	if (err < 0) {
		dev_err(panel->dev, "failed to send DCS Init 2nd Code: %d\n", err);
		goto poweroff;
	}

	msleep(120);

	pinfo->prepared = true;

	return 0;

poweroff:
	gpiod_set_value(pinfo->reset_gpio, 0);
	/* Undo the regulator_bulk_enable() done in nt36672a_panel_power_on(). */
	regulator_bulk_disable(ARRAY_SIZE(pinfo->supplies), pinfo->supplies);
	return err;
}
/* Expose the single fixed mode from the panel descriptor. */
static int nt36672a_panel_get_modes(struct drm_panel *panel,
				    struct drm_connector *connector)
{
	struct nt36672a_panel *pinfo = to_nt36672a_panel(panel);
	const struct drm_display_mode *desc_mode = pinfo->desc->display_mode;
	struct drm_display_mode *mode;

	mode = drm_mode_duplicate(connector->dev, desc_mode);
	if (!mode) {
		dev_err(panel->dev, "failed to add mode %ux%u@%u\n",
			desc_mode->hdisplay, desc_mode->vdisplay,
			drm_mode_vrefresh(desc_mode));
		return -ENOMEM;
	}

	drm_mode_set_name(mode);
	drm_mode_probed_add(connector, mode);

	connector->display_info.width_mm = pinfo->desc->width_mm;
	connector->display_info.height_mm = pinfo->desc->height_mm;

	return 1;
}
/* drm_panel hooks for this driver. */
static const struct drm_panel_funcs panel_funcs = {
	.unprepare = nt36672a_panel_unprepare,
	.prepare = nt36672a_panel_prepare,
	.get_modes = nt36672a_panel_get_modes,
};

/*
 * First batch of init commands, sent before exit_sleep_mode in
 * nt36672a_panel_prepare().  Each entry is a register/value pair written
 * one byte at a time by nt36672a_send_cmds(); values come from the vendor
 * init sequence and are not individually documented.
 */
static const struct nt36672a_panel_cmd tianma_fhd_video_on_cmds_1[] = {
	/* skin enhancement mode */
	{ .data = {0xFF, 0x22} },
	{ .data = {0x00, 0x40} },
	{ .data = {0x01, 0xC0} },
	{ .data = {0x02, 0x40} },
	{ .data = {0x03, 0x40} },
	{ .data = {0x04, 0x40} },
	{ .data = {0x05, 0x40} },
	{ .data = {0x06, 0x40} },
	{ .data = {0x07, 0x40} },
	{ .data = {0x08, 0x40} },
	{ .data = {0x09, 0x40} },
	{ .data = {0x0A, 0x40} },
	{ .data = {0x0B, 0x40} },
	{ .data = {0x0C, 0x40} },
	{ .data = {0x0D, 0x40} },
	{ .data = {0x0E, 0x40} },
	{ .data = {0x0F, 0x40} },
	{ .data = {0x10, 0x40} },
	{ .data = {0x11, 0x50} },
	{ .data = {0x12, 0x60} },
	{ .data = {0x13, 0x70} },
	{ .data = {0x14, 0x58} },
	{ .data = {0x15, 0x68} },
	{ .data = {0x16, 0x78} },
	{ .data = {0x17, 0x77} },
	{ .data = {0x18, 0x39} },
	{ .data = {0x19, 0x2D} },
	{ .data = {0x1A, 0x2E} },
	{ .data = {0x1B, 0x32} },
	{ .data = {0x1C, 0x37} },
	{ .data = {0x1D, 0x3A} },
	{ .data = {0x1E, 0x40} },
	{ .data = {0x1F, 0x40} },
	{ .data = {0x20, 0x40} },
	{ .data = {0x21, 0x40} },
	{ .data = {0x22, 0x40} },
	{ .data = {0x23, 0x40} },
	{ .data = {0x24, 0x40} },
	{ .data = {0x25, 0x40} },
	{ .data = {0x26, 0x40} },
	{ .data = {0x27, 0x40} },
	{ .data = {0x28, 0x40} },
	{ .data = {0x2D, 0x00} },
	{ .data = {0x2F, 0x40} },
	{ .data = {0x30, 0x40} },
	{ .data = {0x31, 0x40} },
	{ .data = {0x32, 0x40} },
	{ .data = {0x33, 0x40} },
	{ .data = {0x34, 0x40} },
	{ .data = {0x35, 0x40} },
	{ .data = {0x36, 0x40} },
	{ .data = {0x37, 0x40} },
	{ .data = {0x38, 0x40} },
	{ .data = {0x39, 0x40} },
	{ .data = {0x3A, 0x40} },
	{ .data = {0x3B, 0x40} },
	{ .data = {0x3D, 0x40} },
	{ .data = {0x3F, 0x40} },
	{ .data = {0x40, 0x40} },
	{ .data = {0x41, 0x40} },
	{ .data = {0x42, 0x40} },
	{ .data = {0x43, 0x40} },
	{ .data = {0x44, 0x40} },
	{ .data = {0x45, 0x40} },
	{ .data = {0x46, 0x40} },
	{ .data = {0x47, 0x40} },
	{ .data = {0x48, 0x40} },
	{ .data = {0x49, 0x40} },
	{ .data = {0x4A, 0x40} },
	{ .data = {0x4B, 0x40} },
	{ .data = {0x4C, 0x40} },
	{ .data = {0x4D, 0x40} },
	{ .data = {0x4E, 0x40} },
	{ .data = {0x4F, 0x40} },
	{ .data = {0x50, 0x40} },
	{ .data = {0x51, 0x40} },
	{ .data = {0x52, 0x40} },
	{ .data = {0x53, 0x01} },
	{ .data = {0x54, 0x01} },
	{ .data = {0x55, 0xFE} },
	{ .data = {0x56, 0x77} },
	{ .data = {0x58, 0xCD} },
	{ .data = {0x59, 0xD0} },
	{ .data = {0x5A, 0xD0} },
	{ .data = {0x5B, 0x50} },
	{ .data = {0x5C, 0x50} },
	{ .data = {0x5D, 0x50} },
	{ .data = {0x5E, 0x50} },
	{ .data = {0x5F, 0x50} },
	{ .data = {0x60, 0x50} },
	{ .data = {0x61, 0x50} },
	{ .data = {0x62, 0x50} },
	{ .data = {0x63, 0x50} },
	{ .data = {0x64, 0x50} },
	{ .data = {0x65, 0x50} },
	{ .data = {0x66, 0x50} },
	{ .data = {0x67, 0x50} },
	{ .data = {0x68, 0x50} },
	{ .data = {0x69, 0x50} },
	{ .data = {0x6A, 0x50} },
	{ .data = {0x6B, 0x50} },
	{ .data = {0x6C, 0x50} },
	{ .data = {0x6D, 0x50} },
	{ .data = {0x6E, 0x50} },
	{ .data = {0x6F, 0x50} },
	{ .data = {0x70, 0x07} },
	{ .data = {0x71, 0x00} },
	{ .data = {0x72, 0x00} },
	{ .data = {0x73, 0x00} },
	{ .data = {0x74, 0x06} },
	{ .data = {0x75, 0x0C} },
	{ .data = {0x76, 0x03} },
	{ .data = {0x77, 0x09} },
	{ .data = {0x78, 0x0F} },
	{ .data = {0x79, 0x68} },
	{ .data = {0x7A, 0x88} },
	{ .data = {0x7C, 0x80} },
	{ .data = {0x7D, 0x80} },
	{ .data = {0x7E, 0x80} },
	{ .data = {0x7F, 0x00} },
	{ .data = {0x80, 0x00} },
	{ .data = {0x81, 0x00} },
	{ .data = {0x83, 0x01} },
	{ .data = {0x84, 0x00} },
	{ .data = {0x85, 0x80} },
	{ .data = {0x86, 0x80} },
	{ .data = {0x87, 0x80} },
	{ .data = {0x88, 0x40} },
	{ .data = {0x89, 0x91} },
	{ .data = {0x8A, 0x98} },
	{ .data = {0x8B, 0x80} },
	{ .data = {0x8C, 0x80} },
	{ .data = {0x8D, 0x80} },
	{ .data = {0x8E, 0x80} },
	{ .data = {0x8F, 0x80} },
	{ .data = {0x90, 0x80} },
	{ .data = {0x91, 0x80} },
	{ .data = {0x92, 0x80} },
	{ .data = {0x93, 0x80} },
	{ .data = {0x94, 0x80} },
	{ .data = {0x95, 0x80} },
	{ .data = {0x96, 0x80} },
	{ .data = {0x97, 0x80} },
	{ .data = {0x98, 0x80} },
	{ .data = {0x99, 0x80} },
	{ .data = {0x9A, 0x80} },
	{ .data = {0x9B, 0x80} },
	{ .data = {0x9C, 0x80} },
	{ .data = {0x9D, 0x80} },
	{ .data = {0x9E, 0x80} },
	{ .data = {0x9F, 0x80} },
	{ .data = {0xA0, 0x8A} },
	{ .data = {0xA2, 0x80} },
	{ .data = {0xA6, 0x80} },
	{ .data = {0xA7, 0x80} },
	{ .data = {0xA9, 0x80} },
	{ .data = {0xAA, 0x80} },
	{ .data = {0xAB, 0x80} },
	{ .data = {0xAC, 0x80} },
	{ .data = {0xAD, 0x80} },
	{ .data = {0xAE, 0x80} },
	{ .data = {0xAF, 0x80} },
	{ .data = {0xB7, 0x76} },
	{ .data = {0xB8, 0x76} },
	{ .data = {0xB9, 0x05} },
	{ .data = {0xBA, 0x0D} },
	{ .data = {0xBB, 0x14} },
	{ .data = {0xBC, 0x0F} },
	{ .data = {0xBD, 0x18} },
	{ .data = {0xBE, 0x1F} },
	{ .data = {0xBF, 0x05} },
	{ .data = {0xC0, 0x0D} },
	{ .data = {0xC1, 0x14} },
	{ .data = {0xC2, 0x03} },
	{ .data = {0xC3, 0x07} },
	{ .data = {0xC4, 0x0A} },
	{ .data = {0xC5, 0xA0} },
	{ .data = {0xC6, 0x55} },
	{ .data = {0xC7, 0xFF} },
	{ .data = {0xC8, 0x39} },
	{ .data = {0xC9, 0x44} },
	{ .data = {0xCA, 0x12} },
	{ .data = {0xCD, 0x80} },
	{ .data = {0xDB, 0x80} },
	{ .data = {0xDC, 0x80} },
	{ .data = {0xDD, 0x80} },
	{ .data = {0xE0, 0x80} },
	{ .data = {0xE1, 0x80} },
	{ .data = {0xE2, 0x80} },
	{ .data = {0xE3, 0x80} },
	{ .data = {0xE4, 0x80} },
	{ .data = {0xE5, 0x40} },
	{ .data = {0xE6, 0x40} },
	{ .data = {0xE7, 0x40} },
	{ .data = {0xE8, 0x40} },
	{ .data = {0xE9, 0x40} },
	{ .data = {0xEA, 0x40} },
	{ .data = {0xEB, 0x40} },
	{ .data = {0xEC, 0x40} },
	{ .data = {0xED, 0x40} },
	{ .data = {0xEE, 0x40} },
	{ .data = {0xEF, 0x40} },
	{ .data = {0xF0, 0x40} },
	{ .data = {0xF1, 0x40} },
	{ .data = {0xF2, 0x40} },
	{ .data = {0xF3, 0x40} },
	{ .data = {0xF4, 0x40} },
	{ .data = {0xF5, 0x40} },
	{ .data = {0xF6, 0x40} },
	{ .data = {0xFB, 0x1} },
	{ .data = {0xFF, 0x23} },
	{ .data = {0xFB, 0x01} },
	/* dimming enable */
	{ .data = {0x01, 0x84} },
	{ .data = {0x05, 0x2D} },
	{ .data = {0x06, 0x00} },
	/* resolution 1080*2246 */
	{ .data = {0x11, 0x01} },
	{ .data = {0x12, 0x7B} },
	{ .data = {0x15, 0x6F} },
	{ .data = {0x16, 0x0B} },
	/* UI mode */
	{ .data = {0x29, 0x0A} },
	{ .data = {0x30, 0xFF} },
	{ .data = {0x31, 0xFF} },
	{ .data = {0x32, 0xFF} },
	{ .data = {0x33, 0xFF} },
	{ .data = {0x34, 0xFF} },
	{ .data = {0x35, 0xFF} },
	{ .data = {0x36, 0xFF} },
	{ .data = {0x37, 0xFF} },
	{ .data = {0x38, 0xFC} },
	{ .data = {0x39, 0xF8} },
	{ .data = {0x3A, 0xF4} },
	{ .data = {0x3B, 0xF1} },
	{ .data = {0x3D, 0xEE} },
	{ .data = {0x3F, 0xEB} },
	{ .data = {0x40, 0xE8} },
	{ .data = {0x41, 0xE5} },
	/* STILL mode */
	{ .data = {0x2A, 0x13} },
	{ .data = {0x45, 0xFF} },
	{ .data = {0x46, 0xFF} },
	{ .data = {0x47, 0xFF} },
	{ .data = {0x48, 0xFF} },
	{ .data = {0x49, 0xFF} },
	{ .data = {0x4A, 0xFF} },
	{ .data = {0x4B, 0xFF} },
	{ .data = {0x4C, 0xFF} },
	{ .data = {0x4D, 0xED} },
	{ .data = {0x4E, 0xD5} },
	{ .data = {0x4F, 0xBF} },
	{ .data = {0x50, 0xA6} },
	{ .data = {0x51, 0x96} },
	{ .data = {0x52, 0x86} },
	{ .data = {0x53, 0x76} },
	{ .data = {0x54, 0x66} },
	/* MOVING mode */
	{ .data = {0x2B, 0x0E} },
	{ .data = {0x58, 0xFF} },
	{ .data = {0x59, 0xFF} },
	{ .data = {0x5A, 0xFF} },
	{ .data = {0x5B, 0xFF} },
	{ .data = {0x5C, 0xFF} },
	{ .data = {0x5D, 0xFF} },
	{ .data = {0x5E, 0xFF} },
	{ .data = {0x5F, 0xFF} },
	{ .data = {0x60, 0xF6} },
	{ .data = {0x61, 0xEA} },
	{ .data = {0x62, 0xE1} },
	{ .data = {0x63, 0xD8} },
	{ .data = {0x64, 0xCE} },
	{ .data = {0x65, 0xC3} },
	{ .data = {0x66, 0xBA} },
	{ .data = {0x67, 0xB3} },
	{ .data = {0xFF, 0x25} },
	{ .data = {0xFB, 0x01} },
	{ .data = {0x05, 0x04} },
	{ .data = {0xFF, 0x26} },
	{ .data = {0xFB, 0x01} },
	{ .data = {0x1C, 0xAF} },
	{ .data = {0xFF, 0x10} },
	{ .data = {0xFB, 0x01} },
	{ .data = {0x51, 0xFF} },
	{ .data = {0x53, 0x24} },
	{ .data = {0x55, 0x00} },
};

/* Second init batch, sent after set_display_on in nt36672a_panel_prepare(). */
static const struct nt36672a_panel_cmd tianma_fhd_video_on_cmds_2[] = {
	{ .data = {0xFF, 0x24} },
	{ .data = {0xFB, 0x01} },
	{ .data = {0xC3, 0x01} },
	{ .data = {0xC4, 0x54} },
	{ .data = {0xFF, 0x10} },
};

/* Commands sent at the start of nt36672a_panel_unprepare(). */
static const struct nt36672a_panel_cmd tianma_fhd_video_off_cmds[] = {
	{ .data = {0xFF, 0x24} },
	{ .data = {0xFB, 0x01} },
	{ .data = {0xC3, 0x01} },
	{ .data = {0xFF, 0x10} },
};

/* 1080x2246 video mode; pixel clock in kHz. */
static const struct drm_display_mode tianma_fhd_video_panel_default_mode = {
	.clock = 161331,

	.hdisplay = 1080,
	.hsync_start = 1080 + 40,
	.hsync_end = 1080 + 40 + 20,
	.htotal = 1080 + 40 + 20 + 44,

	.vdisplay = 2246,
	.vsync_start = 2246 + 15,
	.vsync_end = 2246 + 15 + 2,
	.vtotal = 2246 + 15 + 2 + 8,

	.type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
};

/* Panel description bound through the of_device_id .data pointer. */
static const struct nt36672a_panel_desc tianma_fhd_video_panel_desc = {
	.display_mode = &tianma_fhd_video_panel_default_mode,

	.width_mm = 68,
	.height_mm = 136,

	.mode_flags = MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_VIDEO
			| MIPI_DSI_MODE_VIDEO_HSE
			| MIPI_DSI_CLOCK_NON_CONTINUOUS
			| MIPI_DSI_MODE_VIDEO_BURST,
	.format = MIPI_DSI_FMT_RGB888,
	.lanes = 4,
	.on_cmds_1 = tianma_fhd_video_on_cmds_1,
	.num_on_cmds_1 = ARRAY_SIZE(tianma_fhd_video_on_cmds_1),
	.on_cmds_2 = tianma_fhd_video_on_cmds_2,
	.num_on_cmds_2 = ARRAY_SIZE(tianma_fhd_video_on_cmds_2),
	.off_cmds = tianma_fhd_video_off_cmds,
	.num_off_cmds = ARRAY_SIZE(tianma_fhd_video_off_cmds),
};
/*
 * Acquire regulators and the reset GPIO, then initialise and register
 * the drm_panel.  All resources are devm-managed.
 */
static int nt36672a_panel_add(struct nt36672a_panel *pinfo)
{
	struct device *dev = &pinfo->link->dev;
	unsigned int i;
	int err;

	for (i = 0; i < ARRAY_SIZE(pinfo->supplies); i++)
		pinfo->supplies[i].supply = nt36672a_regulator_names[i];

	err = devm_regulator_bulk_get(dev, ARRAY_SIZE(pinfo->supplies),
				      pinfo->supplies);
	if (err < 0)
		return dev_err_probe(dev, err, "failed to get regulators\n");

	for (i = 0; i < ARRAY_SIZE(pinfo->supplies); i++) {
		err = regulator_set_load(pinfo->supplies[i].consumer,
					 nt36672a_regulator_enable_loads[i]);
		if (err)
			return dev_err_probe(dev, err, "failed to set regulator enable loads\n");
	}

	pinfo->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
	if (IS_ERR(pinfo->reset_gpio))
		return dev_err_probe(dev, PTR_ERR(pinfo->reset_gpio),
				     "failed to get reset gpio from DT\n");

	drm_panel_init(&pinfo->base, dev, &panel_funcs, DRM_MODE_CONNECTOR_DSI);

	err = drm_panel_of_backlight(&pinfo->base);
	if (err)
		return dev_err_probe(dev, err, "Failed to get backlight\n");

	drm_panel_add(&pinfo->base);

	return 0;
}
/* Probe: allocate state, configure the DSI device from the matched desc,
 * register the panel and attach to the DSI host. */
static int nt36672a_panel_probe(struct mipi_dsi_device *dsi)
{
	const struct nt36672a_panel_desc *desc;
	struct nt36672a_panel *pinfo;
	int ret;

	pinfo = devm_kzalloc(&dsi->dev, sizeof(*pinfo), GFP_KERNEL);
	if (!pinfo)
		return -ENOMEM;

	desc = of_device_get_match_data(&dsi->dev);
	dsi->mode_flags = desc->mode_flags;
	dsi->format = desc->format;
	dsi->lanes = desc->lanes;

	pinfo->desc = desc;
	pinfo->link = dsi;

	mipi_dsi_set_drvdata(dsi, pinfo);

	ret = nt36672a_panel_add(pinfo);
	if (ret < 0)
		return ret;

	ret = mipi_dsi_attach(dsi);
	if (ret < 0)
		goto err_panel_remove;

	return 0;

err_panel_remove:
	drm_panel_remove(&pinfo->base);
	return ret;
}
/* Remove: best-effort power-down, detach from the host, drop the panel. */
static void nt36672a_panel_remove(struct mipi_dsi_device *dsi)
{
	struct nt36672a_panel *pinfo = mipi_dsi_get_drvdata(dsi);
	int ret;

	ret = drm_panel_unprepare(&pinfo->base);
	if (ret < 0)
		dev_err(&dsi->dev, "failed to unprepare panel: %d\n", ret);

	ret = drm_panel_disable(&pinfo->base);
	if (ret < 0)
		dev_err(&dsi->dev, "failed to disable panel: %d\n", ret);

	ret = mipi_dsi_detach(dsi);
	if (ret < 0)
		dev_err(&dsi->dev, "failed to detach from DSI host: %d\n", ret);

	drm_panel_remove(&pinfo->base);
}
/* Best-effort power-down at system shutdown; errors are ignored. */
static void nt36672a_panel_shutdown(struct mipi_dsi_device *dsi)
{
	struct nt36672a_panel *pinfo = mipi_dsi_get_drvdata(dsi);

	drm_panel_disable(&pinfo->base);
	drm_panel_unprepare(&pinfo->base);
}
/* DT match; .data carries the per-panel descriptor used at probe. */
static const struct of_device_id tianma_fhd_video_of_match[] = {
	{ .compatible = "tianma,fhd-video", .data = &tianma_fhd_video_panel_desc },
	{ },
};
MODULE_DEVICE_TABLE(of, tianma_fhd_video_of_match);

/* MIPI-DSI driver registration glue. */
static struct mipi_dsi_driver nt36672a_panel_driver = {
	.driver = {
		.name = "panel-tianma-nt36672a",
		.of_match_table = tianma_fhd_video_of_match,
	},
	.probe = nt36672a_panel_probe,
	.remove = nt36672a_panel_remove,
	.shutdown = nt36672a_panel_shutdown,
};
module_mipi_dsi_driver(nt36672a_panel_driver);

MODULE_AUTHOR("Sumit Semwal <[email protected]>");
MODULE_DESCRIPTION("NOVATEK NT36672A based MIPI-DSI LCD panel driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/gpu/drm/panel/panel-novatek-nt36672a.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2022 Joel Selvaraj <[email protected]>
* Generated with linux-mdss-dsi-panel-driver-generator from vendor device tree:
* Copyright (c) 2013, The Linux Foundation. All rights reserved.
*/
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/regulator/consumer.h>
#include <linux/module.h>
#include <linux/of.h>
#include <video/mipi_display.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_modes.h>
#include <drm/drm_panel.h>
/* Supply names, index-matched with regulator_enable_loads below. */
static const char * const regulator_names[] = {
	"vddio",
	"vddpos",
	"vddneg",
};

/* Per-supply load hints passed to regulator_set_load() at probe. */
static const unsigned long regulator_enable_loads[] = {
	62000,
	100000,
	100000
};
/* Per-panel driver state. */
struct ebbg_ft8719 {
	struct drm_panel panel;
	struct mipi_dsi_device *dsi;
	struct regulator_bulk_data supplies[ARRAY_SIZE(regulator_names)];
	struct gpio_desc *reset_gpio;	/* logical 1 = panel held in reset */
};

/* Map the embedded drm_panel back to the driver state. */
static inline
struct ebbg_ft8719 *to_ebbg_ft8719(struct drm_panel *panel)
{
	return container_of(panel, struct ebbg_ft8719, panel);
}
/*
 * Hardware reset pulse: deassert, assert for 1-2 ms, deassert again and
 * wait 15-16 ms before talking to the controller.
 */
static void ebbg_ft8719_reset(struct ebbg_ft8719 *ctx)
{
	gpiod_set_value_cansleep(ctx->reset_gpio, 0);
	usleep_range(4000, 5000);
	gpiod_set_value_cansleep(ctx->reset_gpio, 1);
	usleep_range(1000, 2000);
	gpiod_set_value_cansleep(ctx->reset_gpio, 0);
	usleep_range(15000, 16000);
}
/*
 * Panel init sequence: set max brightness, enable backlight control,
 * leave sleep mode and switch the display on.  Commands go out in LPM.
 */
static int ebbg_ft8719_on(struct ebbg_ft8719 *ctx)
{
	struct mipi_dsi_device *dsi = ctx->dsi;
	struct device *dev = &dsi->dev;
	int ret;

	dsi->mode_flags |= MIPI_DSI_MODE_LPM;

	/* 0x00ff = full brightness */
	ret = mipi_dsi_dcs_set_display_brightness(dsi, 0x00ff);
	if (ret < 0) {
		dev_err(dev, "Failed to set display brightness: %d\n", ret);
		return ret;
	}

	mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x24);
	mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_POWER_SAVE, 0x00);

	ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
	if (ret < 0) {
		dev_err(dev, "Failed to exit sleep mode: %d\n", ret);
		return ret;
	}
	msleep(90);

	ret = mipi_dsi_dcs_set_display_on(dsi);
	if (ret < 0) {
		dev_err(dev, "Failed to set display on: %d\n", ret);
		return ret;
	}

	return 0;
}
/* Panel shutdown sequence: display off, then sleep-in, with settle delays. */
static int ebbg_ft8719_off(struct ebbg_ft8719 *ctx)
{
	struct mipi_dsi_device *dsi = ctx->dsi;
	struct device *dev = &dsi->dev;
	int ret;

	dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;

	ret = mipi_dsi_dcs_set_display_off(dsi);
	if (ret < 0) {
		dev_err(dev, "Failed to set display off: %d\n", ret);
		return ret;
	}
	usleep_range(10000, 11000);

	ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
	if (ret < 0) {
		dev_err(dev, "Failed to enter sleep mode: %d\n", ret);
		return ret;
	}
	msleep(90);

	return 0;
}
/* Power up the supplies, reset the panel and run the init sequence. */
static int ebbg_ft8719_prepare(struct drm_panel *panel)
{
	struct ebbg_ft8719 *ctx = to_ebbg_ft8719(panel);
	struct device *dev = &ctx->dsi->dev;
	int err;

	err = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
	if (err < 0)
		return err;

	ebbg_ft8719_reset(ctx);

	err = ebbg_ft8719_on(ctx);
	if (err < 0) {
		dev_err(dev, "Failed to initialize panel: %d\n", err);
		/* re-assert reset so the panel is left in a known state */
		gpiod_set_value_cansleep(ctx->reset_gpio, 1);
		return err;
	}

	return 0;
}
/* Reverse of prepare: panel off, assert reset, release the supplies.
 * Failures are logged but never propagated so teardown always finishes. */
static int ebbg_ft8719_unprepare(struct drm_panel *panel)
{
	struct ebbg_ft8719 *ctx = to_ebbg_ft8719(panel);
	struct device *dev = &ctx->dsi->dev;
	int err;

	err = ebbg_ft8719_off(ctx);
	if (err < 0)
		dev_err(dev, "Failed to un-initialize panel: %d\n", err);

	gpiod_set_value_cansleep(ctx->reset_gpio, 1);

	err = regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
	if (err)
		dev_err(panel->dev, "Failed to disable regulators: %d\n", err);

	return 0;
}
/* Single supported 1080x2246@60 mode; clock derived from the timings. */
static const struct drm_display_mode ebbg_ft8719_mode = {
	.clock = (1080 + 28 + 4 + 16) * (2246 + 120 + 4 + 12) * 60 / 1000,
	.hdisplay = 1080,
	.hsync_start = 1080 + 28,
	.hsync_end = 1080 + 28 + 4,
	.htotal = 1080 + 28 + 4 + 16,
	.vdisplay = 2246,
	.vsync_start = 2246 + 120,
	.vsync_end = 2246 + 120 + 4,
	.vtotal = 2246 + 120 + 4 + 12,
	.width_mm = 68,
	.height_mm = 141,
};
/* Report the panel's single fixed mode to the connector. */
static int ebbg_ft8719_get_modes(struct drm_panel *panel,
				 struct drm_connector *connector)
{
	struct drm_display_mode *mode;

	mode = drm_mode_duplicate(connector->dev, &ebbg_ft8719_mode);
	if (!mode)
		return -ENOMEM;

	mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
	drm_mode_set_name(mode);

	connector->display_info.width_mm = mode->width_mm;
	connector->display_info.height_mm = mode->height_mm;
	drm_mode_probed_add(connector, mode);

	return 1;
}
/* drm_panel hooks for this driver. */
static const struct drm_panel_funcs ebbg_ft8719_panel_funcs = {
	.prepare = ebbg_ft8719_prepare,
	.unprepare = ebbg_ft8719_unprepare,
	.get_modes = ebbg_ft8719_get_modes,
};
/*
 * Probe: acquire regulators (with load hints) and the reset GPIO, set up
 * the DSI device, register the panel and attach to the host.
 */
static int ebbg_ft8719_probe(struct mipi_dsi_device *dsi)
{
	struct device *dev = &dsi->dev;
	struct ebbg_ft8719 *ctx;
	unsigned int i;
	int err;

	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	for (i = 0; i < ARRAY_SIZE(ctx->supplies); i++)
		ctx->supplies[i].supply = regulator_names[i];

	err = devm_regulator_bulk_get(dev, ARRAY_SIZE(ctx->supplies),
				      ctx->supplies);
	if (err < 0)
		return dev_err_probe(dev, err, "Failed to get regulators\n");

	for (i = 0; i < ARRAY_SIZE(ctx->supplies); i++) {
		err = regulator_set_load(ctx->supplies[i].consumer,
					 regulator_enable_loads[i]);
		if (err)
			return dev_err_probe(dev, err,
					     "Failed to set regulator load\n");
	}

	/* Start with the panel held in reset. */
	ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
	if (IS_ERR(ctx->reset_gpio))
		return dev_err_probe(dev, PTR_ERR(ctx->reset_gpio),
				     "Failed to get reset-gpios\n");

	ctx->dsi = dsi;
	mipi_dsi_set_drvdata(dsi, ctx);

	dsi->lanes = 4;
	dsi->format = MIPI_DSI_FMT_RGB888;
	dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
			  MIPI_DSI_CLOCK_NON_CONTINUOUS;

	drm_panel_init(&ctx->panel, dev, &ebbg_ft8719_panel_funcs,
		       DRM_MODE_CONNECTOR_DSI);

	err = drm_panel_of_backlight(&ctx->panel);
	if (err)
		return dev_err_probe(dev, err, "Failed to get backlight\n");

	drm_panel_add(&ctx->panel);

	err = mipi_dsi_attach(dsi);
	if (err < 0) {
		dev_err(dev, "Failed to attach to DSI host: %d\n", err);
		drm_panel_remove(&ctx->panel);
		return err;
	}

	return 0;
}
/* Detach from the DSI host and unregister the panel. */
static void ebbg_ft8719_remove(struct mipi_dsi_device *dsi)
{
	struct ebbg_ft8719 *ctx = mipi_dsi_get_drvdata(dsi);
	int err;

	err = mipi_dsi_detach(dsi);
	if (err < 0)
		dev_err(&dsi->dev, "Failed to detach from DSI host: %d\n", err);

	drm_panel_remove(&ctx->panel);
}
/* DT match table. */
static const struct of_device_id ebbg_ft8719_of_match[] = {
	{ .compatible = "ebbg,ft8719" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, ebbg_ft8719_of_match);

/* MIPI-DSI driver registration glue. */
static struct mipi_dsi_driver ebbg_ft8719_driver = {
	.probe = ebbg_ft8719_probe,
	.remove = ebbg_ft8719_remove,
	.driver = {
		.name = "panel-ebbg-ft8719",
		.of_match_table = ebbg_ft8719_of_match,
	},
};
module_mipi_dsi_driver(ebbg_ft8719_driver);

MODULE_AUTHOR("Joel Selvaraj <[email protected]>")
MODULE_DESCRIPTION("DRM driver for EBBG FT8719 video dsi panel");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/gpu/drm/panel/panel-ebbg-ft8719.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2022 Konrad Dybcio <[email protected]>
*
* Generated with linux-mdss-dsi-panel-driver-generator with a
* substantial amount of manual adjustments.
*
* SONY Downstream kernel calls this one:
* - "JDI ID3" for Akari (XZ2)
* - "JDI ID4" for Apollo (XZ2 Compact)
*/
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/regulator/consumer.h>
#include <video/mipi_display.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_modes.h>
#include <drm/drm_panel.h>
/* Panel variants, selected through of_device_id .data. */
enum {
	TYPE_TAMA_60HZ,
	/*
	 * Leaving room for expansion - SONY very often uses
	 * *truly reliably* overclockable panels on their flagships!
	 */
};

/* Per-panel driver state. */
struct sony_td4353_jdi {
	struct drm_panel panel;
	struct mipi_dsi_device *dsi;
	struct regulator_bulk_data supplies[3];	/* vddio, vsp, vsn */
	struct gpio_desc *panel_reset_gpio;
	struct gpio_desc *touch_reset_gpio;	/* toggled together with panel reset */
	bool prepared;	/* guards prepare/unprepare reentry */
	int type;	/* one of the TYPE_* variants above */
};

/* Map the embedded drm_panel back to the driver state. */
static inline struct sony_td4353_jdi *to_sony_td4353_jdi(struct drm_panel *panel)
{
	return container_of(panel, struct sony_td4353_jdi, panel);
}
/*
 * Full DCS init: window setup (1080x2160), tearing effect config, pixel
 * format, partial rows, sleep-out and display-on.  Commands go out in LPM.
 */
static int sony_td4353_jdi_on(struct sony_td4353_jdi *ctx)
{
	struct mipi_dsi_device *dsi = ctx->dsi;
	struct device *dev = &dsi->dev;
	int ret;

	dsi->mode_flags |= MIPI_DSI_MODE_LPM;

	ret = mipi_dsi_dcs_set_column_address(dsi, 0x0000, 1080 - 1);
	if (ret < 0) {
		dev_err(dev, "Failed to set column address: %d\n", ret);
		return ret;
	}

	ret = mipi_dsi_dcs_set_page_address(dsi, 0x0000, 2160 - 1);
	if (ret < 0) {
		dev_err(dev, "Failed to set page address: %d\n", ret);
		return ret;
	}

	ret = mipi_dsi_dcs_set_tear_scanline(dsi, 0);
	if (ret < 0) {
		dev_err(dev, "Failed to set tear scanline: %d\n", ret);
		return ret;
	}

	ret = mipi_dsi_dcs_set_tear_on(dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
	if (ret < 0) {
		dev_err(dev, "Failed to set tear on: %d\n", ret);
		return ret;
	}

	mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_ADDRESS_MODE, 0x00);

	/* 0x77 = 24 bits per pixel */
	ret = mipi_dsi_dcs_set_pixel_format(dsi, 0x77);
	if (ret < 0) {
		dev_err(dev, "Failed to set pixel format: %d\n", ret);
		return ret;
	}

	mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_PARTIAL_ROWS,
			       0x00, 0x00, 0x08, 0x6f);

	ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
	if (ret < 0) {
		dev_err(dev, "Failed to exit sleep mode: %d\n", ret);
		return ret;
	}
	msleep(70);

	mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_MEMORY_START);

	ret = mipi_dsi_dcs_set_display_on(dsi);
	if (ret < 0) {
		dev_err(dev, "Failed to turn display on: %d\n", ret);
		return ret;
	}

	return 0;
}
/* DCS teardown: display off, tearing effect off, sleep-in. */
static int sony_td4353_jdi_off(struct sony_td4353_jdi *ctx)
{
	struct mipi_dsi_device *dsi = ctx->dsi;
	struct device *dev = &dsi->dev;
	int ret;

	dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;

	ret = mipi_dsi_dcs_set_display_off(dsi);
	if (ret < 0) {
		dev_err(dev, "Failed to set display off: %d\n", ret);
		return ret;
	}
	msleep(22);

	ret = mipi_dsi_dcs_set_tear_off(dsi);
	if (ret < 0) {
		dev_err(dev, "Failed to set tear off: %d\n", ret);
		return ret;
	}

	ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
	if (ret < 0) {
		dev_err(dev, "Failed to enter sleep mode: %d\n", ret);
		return ret;
	}
	msleep(80);

	return 0;
}
/*
 * Drive both the touch and panel reset lines to @mode (logical 1 = out of
 * reset as used by the prepare path), then let them settle for ~5 ms.
 */
static void sony_td4353_assert_reset_gpios(struct sony_td4353_jdi *ctx, int mode)
{
	gpiod_set_value_cansleep(ctx->touch_reset_gpio, mode);
	gpiod_set_value_cansleep(ctx->panel_reset_gpio, mode);
	usleep_range(5000, 5100);
}
/* Enable supplies, release reset and run the DCS init sequence. */
static int sony_td4353_jdi_prepare(struct drm_panel *panel)
{
	struct sony_td4353_jdi *ctx = to_sony_td4353_jdi(panel);
	struct device *dev = &ctx->dsi->dev;
	int err;

	if (ctx->prepared)
		return 0;

	err = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
	if (err < 0) {
		dev_err(dev, "Failed to enable regulators: %d\n", err);
		return err;
	}

	msleep(100);

	sony_td4353_assert_reset_gpios(ctx, 1);

	err = sony_td4353_jdi_on(ctx);
	if (err < 0) {
		dev_err(dev, "Failed to power on panel: %d\n", err);
		goto err_power_off;
	}

	ctx->prepared = true;

	return 0;

err_power_off:
	sony_td4353_assert_reset_gpios(ctx, 0);
	regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
	return err;
}
/* Reverse of prepare; DCS-off failures are logged but not propagated. */
static int sony_td4353_jdi_unprepare(struct drm_panel *panel)
{
	struct sony_td4353_jdi *ctx = to_sony_td4353_jdi(panel);
	struct device *dev = &ctx->dsi->dev;
	int err;

	if (!ctx->prepared)
		return 0;

	err = sony_td4353_jdi_off(ctx);
	if (err < 0)
		dev_err(dev, "Failed to power off panel: %d\n", err);

	sony_td4353_assert_reset_gpios(ctx, 0);
	regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);

	ctx->prepared = false;

	return 0;
}
/* 1080x2160@60 timings for the Tama variant; clock derived from totals. */
static const struct drm_display_mode sony_td4353_jdi_mode_tama_60hz = {
	.clock = (1080 + 4 + 8 + 8) * (2160 + 259 + 8 + 8) * 60 / 1000,
	.hdisplay = 1080,
	.hsync_start = 1080 + 4,
	.hsync_end = 1080 + 4 + 8,
	.htotal = 1080 + 4 + 8 + 8,
	.vdisplay = 2160,
	.vsync_start = 2160 + 259,
	.vsync_end = 2160 + 259 + 8,
	.vtotal = 2160 + 259 + 8 + 8,
	.width_mm = 64,
	.height_mm = 128,
};
/* Report the fixed mode matching the probed variant. */
static int sony_td4353_jdi_get_modes(struct drm_panel *panel,
				     struct drm_connector *connector)
{
	struct sony_td4353_jdi *ctx = to_sony_td4353_jdi(panel);
	struct drm_display_mode *mode;

	if (ctx->type != TYPE_TAMA_60HZ)
		return -EINVAL;

	mode = drm_mode_duplicate(connector->dev, &sony_td4353_jdi_mode_tama_60hz);
	if (!mode)
		return -ENOMEM;

	mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
	drm_mode_set_name(mode);

	connector->display_info.width_mm = mode->width_mm;
	connector->display_info.height_mm = mode->height_mm;
	drm_mode_probed_add(connector, mode);

	return 1;
}
/* drm_panel hooks for this driver. */
static const struct drm_panel_funcs sony_td4353_jdi_panel_funcs = {
	.prepare = sony_td4353_jdi_prepare,
	.unprepare = sony_td4353_jdi_unprepare,
	.get_modes = sony_td4353_jdi_get_modes,
};
/*
 * Probe: pick the variant from the OF match data, acquire supplies and
 * both reset GPIOs (left as-is so a firmware-lit panel keeps running),
 * then register the panel and attach to the DSI host.
 */
static int sony_td4353_jdi_probe(struct mipi_dsi_device *dsi)
{
	struct device *dev = &dsi->dev;
	struct sony_td4353_jdi *ctx;
	int err;

	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->type = (uintptr_t)of_device_get_match_data(dev);

	ctx->supplies[0].supply = "vddio";
	ctx->supplies[1].supply = "vsp";
	ctx->supplies[2].supply = "vsn";

	err = devm_regulator_bulk_get(dev, ARRAY_SIZE(ctx->supplies),
				      ctx->supplies);
	if (err < 0)
		return dev_err_probe(dev, err, "Failed to get regulators\n");

	ctx->panel_reset_gpio = devm_gpiod_get(dev, "panel-reset", GPIOD_ASIS);
	if (IS_ERR(ctx->panel_reset_gpio))
		return dev_err_probe(dev, PTR_ERR(ctx->panel_reset_gpio),
				     "Failed to get panel-reset-gpios\n");

	ctx->touch_reset_gpio = devm_gpiod_get(dev, "touch-reset", GPIOD_ASIS);
	if (IS_ERR(ctx->touch_reset_gpio))
		return dev_err_probe(dev, PTR_ERR(ctx->touch_reset_gpio),
				     "Failed to get touch-reset-gpios\n");

	ctx->dsi = dsi;
	mipi_dsi_set_drvdata(dsi, ctx);

	dsi->lanes = 4;
	dsi->format = MIPI_DSI_FMT_RGB888;
	dsi->mode_flags = MIPI_DSI_CLOCK_NON_CONTINUOUS;

	drm_panel_init(&ctx->panel, dev, &sony_td4353_jdi_panel_funcs,
		       DRM_MODE_CONNECTOR_DSI);

	err = drm_panel_of_backlight(&ctx->panel);
	if (err)
		return dev_err_probe(dev, err, "Failed to get backlight\n");

	drm_panel_add(&ctx->panel);

	err = mipi_dsi_attach(dsi);
	if (err < 0) {
		dev_err(dev, "Failed to attach to DSI host: %d\n", err);
		drm_panel_remove(&ctx->panel);
		return err;
	}

	return 0;
}
/* Detach from the DSI host and unregister the panel. */
static void sony_td4353_jdi_remove(struct mipi_dsi_device *dsi)
{
	struct sony_td4353_jdi *ctx = mipi_dsi_get_drvdata(dsi);
	int err;

	err = mipi_dsi_detach(dsi);
	if (err < 0)
		dev_err(&dsi->dev, "Failed to detach from DSI host: %d\n", err);

	drm_panel_remove(&ctx->panel);
}
/* DT match; .data selects the panel variant (see the TYPE_* enum). */
static const struct of_device_id sony_td4353_jdi_of_match[] = {
	{ .compatible = "sony,td4353-jdi-tama", .data = (void *)TYPE_TAMA_60HZ },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sony_td4353_jdi_of_match);

/* MIPI-DSI driver registration glue. */
static struct mipi_dsi_driver sony_td4353_jdi_driver = {
	.probe = sony_td4353_jdi_probe,
	.remove = sony_td4353_jdi_remove,
	.driver = {
		.name = "panel-sony-td4353-jdi",
		.of_match_table = sony_td4353_jdi_of_match,
	},
};
module_mipi_dsi_driver(sony_td4353_jdi_driver);

MODULE_AUTHOR("Konrad Dybcio <[email protected]>");
MODULE_DESCRIPTION("DRM panel driver for SONY Xperia XZ2/XZ2c JDI panel");
MODULE_LICENSE("GPL");
| linux-master | drivers/gpu/drm/panel/panel-sony-td4353-jdi.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2020 Caleb Connolly <[email protected]>
* Generated with linux-mdss-dsi-panel-driver-generator from vendor device tree:
* Copyright (c) 2020, The Linux Foundation. All rights reserved.
*/
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/regulator/consumer.h>
#include <linux/backlight.h>
#include <video/mipi_display.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_modes.h>
#include <drm/drm_panel.h>
/* Per-panel driver state. */
struct sofef00_panel {
	struct drm_panel panel;
	struct mipi_dsi_device *dsi;
	struct regulator *supply;
	struct gpio_desc *reset_gpio;
	const struct drm_display_mode *mode;	/* variant mode used by get_modes */
	bool prepared;	/* guards prepare/unprepare reentry */
};

/* Map the embedded drm_panel back to the driver state. */
static inline
struct sofef00_panel *to_sofef00_panel(struct drm_panel *panel)
{
	return container_of(panel, struct sofef00_panel, panel);
}
/*
 * Hardware reset pulse: deassert, assert for 2-3 ms, deassert and allow
 * 12-13 ms before the first DCS command.
 */
static void sofef00_panel_reset(struct sofef00_panel *ctx)
{
	gpiod_set_value_cansleep(ctx->reset_gpio, 0);
	usleep_range(5000, 6000);
	gpiod_set_value_cansleep(ctx->reset_gpio, 1);
	usleep_range(2000, 3000);
	gpiod_set_value_cansleep(ctx->reset_gpio, 0);
	usleep_range(12000, 13000);
}
/*
 * Vendor init sequence in LPM: sleep-out, tearing effect on, a few
 * undocumented vendor registers (0xf0/0xb0/0xb6 level-2 key writes),
 * backlight control enable, then display on.
 */
static int sofef00_panel_on(struct sofef00_panel *ctx)
{
	struct mipi_dsi_device *dsi = ctx->dsi;
	struct device *dev = &dsi->dev;
	int ret;

	dsi->mode_flags |= MIPI_DSI_MODE_LPM;

	ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
	if (ret < 0) {
		dev_err(dev, "Failed to exit sleep mode: %d\n", ret);
		return ret;
	}
	usleep_range(10000, 11000);

	mipi_dsi_dcs_write_seq(dsi, 0xf0, 0x5a, 0x5a);

	ret = mipi_dsi_dcs_set_tear_on(dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
	if (ret < 0) {
		dev_err(dev, "Failed to set tear on: %d\n", ret);
		return ret;
	}

	mipi_dsi_dcs_write_seq(dsi, 0xf0, 0xa5, 0xa5);
	mipi_dsi_dcs_write_seq(dsi, 0xf0, 0x5a, 0x5a);
	mipi_dsi_dcs_write_seq(dsi, 0xb0, 0x07);
	mipi_dsi_dcs_write_seq(dsi, 0xb6, 0x12);
	mipi_dsi_dcs_write_seq(dsi, 0xf0, 0xa5, 0xa5);
	mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x20);
	mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_POWER_SAVE, 0x00);

	ret = mipi_dsi_dcs_set_display_on(dsi);
	if (ret < 0) {
		dev_err(dev, "Failed to set display on: %d\n", ret);
		return ret;
	}

	return 0;
}
/* Panel shutdown: display off, then sleep-in, with the required delays. */
static int sofef00_panel_off(struct sofef00_panel *ctx)
{
	struct mipi_dsi_device *dsi = ctx->dsi;
	struct device *dev = &dsi->dev;
	int ret;

	dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;

	ret = mipi_dsi_dcs_set_display_off(dsi);
	if (ret < 0) {
		dev_err(dev, "Failed to set display off: %d\n", ret);
		return ret;
	}
	msleep(40);

	ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
	if (ret < 0) {
		dev_err(dev, "Failed to enter sleep mode: %d\n", ret);
		return ret;
	}
	msleep(160);

	return 0;
}
/* Enable the supply, reset the panel and run the init sequence. */
static int sofef00_panel_prepare(struct drm_panel *panel)
{
	struct sofef00_panel *ctx = to_sofef00_panel(panel);
	struct device *dev = &ctx->dsi->dev;
	int err;

	if (ctx->prepared)
		return 0;

	err = regulator_enable(ctx->supply);
	if (err < 0) {
		dev_err(dev, "Failed to enable regulator: %d\n", err);
		return err;
	}

	sofef00_panel_reset(ctx);

	err = sofef00_panel_on(ctx);
	if (err < 0) {
		dev_err(dev, "Failed to initialize panel: %d\n", err);
		/* re-assert reset so the panel is left in a known state */
		gpiod_set_value_cansleep(ctx->reset_gpio, 1);
		return err;
	}

	ctx->prepared = true;

	return 0;
}
/* Reverse of prepare; DCS-off failures are logged but not propagated. */
static int sofef00_panel_unprepare(struct drm_panel *panel)
{
	struct sofef00_panel *ctx = to_sofef00_panel(panel);
	struct device *dev = &ctx->dsi->dev;
	int err;

	if (!ctx->prepared)
		return 0;

	err = sofef00_panel_off(ctx);
	if (err < 0)
		dev_err(dev, "Failed to un-initialize panel: %d\n", err);

	regulator_disable(ctx->supply);

	ctx->prepared = false;

	return 0;
}
/* OnePlus 6 (enchilada) panel: 1080x2280 @ 60 Hz. */
static const struct drm_display_mode enchilada_panel_mode = {
	.clock = (1080 + 112 + 16 + 36) * (2280 + 36 + 8 + 12) * 60 / 1000,
	.hdisplay = 1080,
	.hsync_start = 1080 + 112,
	.hsync_end = 1080 + 112 + 16,
	.htotal = 1080 + 112 + 16 + 36,
	.vdisplay = 2280,
	.vsync_start = 2280 + 36,
	.vsync_end = 2280 + 36 + 8,
	.vtotal = 2280 + 36 + 8 + 12,
	.width_mm = 68,
	.height_mm = 145,
};

/* OnePlus 6T (fajita) panel: 1080x2340 @ 60 Hz. */
static const struct drm_display_mode fajita_panel_mode = {
	.clock = (1080 + 72 + 16 + 36) * (2340 + 32 + 4 + 18) * 60 / 1000,
	.hdisplay = 1080,
	.hsync_start = 1080 + 72,
	.hsync_end = 1080 + 72 + 16,
	.htotal = 1080 + 72 + 16 + 36,
	.vdisplay = 2340,
	.vsync_start = 2340 + 32,
	.vsync_end = 2340 + 32 + 4,
	.vtotal = 2340 + 32 + 4 + 18,
	.width_mm = 68,
	.height_mm = 145,
};
/*
 * drm_panel .get_modes hook: expose the single fixed mode selected at
 * probe time (per-compatible match data) as the preferred mode.
 */
static int sofef00_panel_get_modes(struct drm_panel *panel, struct drm_connector *connector)
{
	struct sofef00_panel *ctx = to_sofef00_panel(panel);
	struct drm_display_mode *dup;

	dup = drm_mode_duplicate(connector->dev, ctx->mode);
	if (!dup)
		return -ENOMEM;

	dup->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
	drm_mode_set_name(dup);

	/* Propagate the physical dimensions to the connector. */
	connector->display_info.width_mm = dup->width_mm;
	connector->display_info.height_mm = dup->height_mm;

	drm_mode_probed_add(connector, dup);

	return 1;
}
static const struct drm_panel_funcs sofef00_panel_panel_funcs = {
	.prepare = sofef00_panel_prepare,
	.unprepare = sofef00_panel_unprepare,
	.get_modes = sofef00_panel_get_modes,
};

/* Backlight .update_status: forward the brightness over DCS (16-bit write). */
static int sofef00_panel_bl_update_status(struct backlight_device *bl)
{
	struct mipi_dsi_device *dsi = bl_get_data(bl);
	int err;
	u16 brightness = (u16)backlight_get_brightness(bl);

	err = mipi_dsi_dcs_set_display_brightness_large(dsi, brightness);
	if (err < 0)
		return err;

	return 0;
}

static const struct backlight_ops sofef00_panel_bl_ops = {
	.update_status = sofef00_panel_bl_update_status,
};

/* Register a 10-bit (0..1023) DCS-controlled backlight device. */
static struct backlight_device *
sofef00_create_backlight(struct mipi_dsi_device *dsi)
{
	struct device *dev = &dsi->dev;
	const struct backlight_properties props = {
		.type = BACKLIGHT_PLATFORM,
		.brightness = 1023,
		.max_brightness = 1023,
	};

	return devm_backlight_device_register(dev, dev_name(dev), dev, dsi,
					      &sofef00_panel_bl_ops, &props);
}
/*
 * mipi_dsi .probe hook: fetch per-compatible mode data, acquire the vddio
 * supply and reset GPIO (requested asserted, GPIOD_OUT_HIGH), register the
 * panel + backlight, then attach to the DSI host.
 */
static int sofef00_panel_probe(struct mipi_dsi_device *dsi)
{
	struct device *dev = &dsi->dev;
	struct sofef00_panel *ctx;
	int ret;

	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	/* Display mode comes from the of_device_id .data pointer. */
	ctx->mode = of_device_get_match_data(dev);
	if (!ctx->mode) {
		dev_err(dev, "Missing device mode\n");
		return -ENODEV;
	}

	ctx->supply = devm_regulator_get(dev, "vddio");
	if (IS_ERR(ctx->supply))
		return dev_err_probe(dev, PTR_ERR(ctx->supply),
				     "Failed to get vddio regulator\n");

	ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
	if (IS_ERR(ctx->reset_gpio))
		return dev_err_probe(dev, PTR_ERR(ctx->reset_gpio),
				     "Failed to get reset-gpios\n");

	ctx->dsi = dsi;
	mipi_dsi_set_drvdata(dsi, ctx);

	dsi->lanes = 4;
	dsi->format = MIPI_DSI_FMT_RGB888;

	drm_panel_init(&ctx->panel, dev, &sofef00_panel_panel_funcs,
		       DRM_MODE_CONNECTOR_DSI);

	ctx->panel.backlight = sofef00_create_backlight(dsi);
	if (IS_ERR(ctx->panel.backlight))
		return dev_err_probe(dev, PTR_ERR(ctx->panel.backlight),
				     "Failed to create backlight\n");

	drm_panel_add(&ctx->panel);

	ret = mipi_dsi_attach(dsi);
	if (ret < 0) {
		dev_err(dev, "Failed to attach to DSI host: %d\n", ret);
		/* Undo drm_panel_add() before failing probe. */
		drm_panel_remove(&ctx->panel);
		return ret;
	}

	return 0;
}
/* mipi_dsi .remove hook: detach from the DSI host and unregister the panel. */
static void sofef00_panel_remove(struct mipi_dsi_device *dsi)
{
	struct sofef00_panel *ctx = mipi_dsi_get_drvdata(dsi);
	int ret;

	ret = mipi_dsi_detach(dsi);
	if (ret < 0)
		dev_err(&dsi->dev, "Failed to detach from DSI host: %d\n", ret);

	drm_panel_remove(&ctx->panel);
}

/* The per-compatible display mode is carried in the match .data pointer. */
static const struct of_device_id sofef00_panel_of_match[] = {
	{ // OnePlus 6 / enchilada
		.compatible = "samsung,sofef00",
		.data = &enchilada_panel_mode,
	},
	{ // OnePlus 6T / fajita
		.compatible = "samsung,s6e3fc2x01",
		.data = &fajita_panel_mode,
	},
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sofef00_panel_of_match);

static struct mipi_dsi_driver sofef00_panel_driver = {
	.probe = sofef00_panel_probe,
	.remove = sofef00_panel_remove,
	.driver = {
		.name = "panel-oneplus6",
		.of_match_table = sofef00_panel_of_match,
	},
};
module_mipi_dsi_driver(sofef00_panel_driver);

MODULE_AUTHOR("Caleb Connolly <[email protected]>");
MODULE_DESCRIPTION("DRM driver for Samsung AMOLED DSI panels found in OnePlus 6/6T phones");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/gpu/drm/panel/panel-samsung-sofef00.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2016 InforceComputing
* Copyright (C) 2016 Linaro Ltd
* Copyright (C) 2023 BayLibre, SAS
*
* Authors:
* - Vinay Simha BN <[email protected]>
* - Sumit Semwal <[email protected]>
* - Guillaume La Roque <[email protected]>
*
*/
#include <linux/backlight.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/regulator/consumer.h>
#include <video/mipi_display.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_modes.h>
#include <drm/drm_panel.h>
/* Vendor (MIPI generic) register addresses used by the init sequence. */
#define DSI_REG_MCAP 0xB0
#define DSI_REG_IS 0xB3 /* Interface Setting */
#define DSI_REG_IIS 0xB4 /* Interface ID Setting */
#define DSI_REG_CTRL 0xB6

/* Indices into stk_panel.supplies. */
enum {
	IOVCC = 0,
	POWER = 1
};

struct stk_panel {
	bool prepared;				/* guards double prepare/unprepare */
	const struct drm_display_mode *mode;	/* fixed native mode */
	struct backlight_device *backlight;
	struct drm_panel base;
	struct gpio_desc *enable_gpio;		/* Power IC supply enable */
	struct gpio_desc *reset_gpio;		/* External reset */
	struct mipi_dsi_device *dsi;
	struct regulator_bulk_data supplies[2];	/* iovcc + power */
};

/* Map a drm_panel back to its containing stk_panel. */
static inline struct stk_panel *to_stk_panel(struct drm_panel *panel)
{
	return container_of(panel, struct stk_panel, base);
}
/*
 * Vendor init sequence: soft reset, exit sleep, interface/brightness/pixel
 * format setup, and full-screen column/page address windows. Delays follow
 * the DCS timing requirements (120 ms after exit-sleep).
 */
static int stk_panel_init(struct stk_panel *stk)
{
	struct mipi_dsi_device *dsi = stk->dsi;
	struct device *dev = &stk->dsi->dev;
	int ret;

	ret = mipi_dsi_dcs_soft_reset(dsi);
	if (ret < 0) {
		dev_err(dev, "failed to mipi_dsi_dcs_soft_reset: %d\n", ret);
		return ret;
	}
	mdelay(5);

	ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
	if (ret < 0) {
		dev_err(dev, "failed to set exit sleep mode: %d\n", ret);
		return ret;
	}
	msleep(120);

	mipi_dsi_generic_write_seq(dsi, DSI_REG_MCAP, 0x04);

	/* Interface setting, video mode */
	mipi_dsi_generic_write_seq(dsi, DSI_REG_IS, 0x14, 0x08, 0x00, 0x22, 0x00);
	mipi_dsi_generic_write_seq(dsi, DSI_REG_IIS, 0x0C, 0x00);
	mipi_dsi_generic_write_seq(dsi, DSI_REG_CTRL, 0x3A, 0xD3);

	ret = mipi_dsi_dcs_set_display_brightness(dsi, 0x77);
	if (ret < 0) {
		dev_err(dev, "failed to write display brightness: %d\n", ret);
		return ret;
	}

	mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_CONTROL_DISPLAY,
			       MIPI_DCS_WRITE_MEMORY_START);

	ret = mipi_dsi_dcs_set_pixel_format(dsi, 0x77);
	if (ret < 0) {
		dev_err(dev, "failed to set pixel format: %d\n", ret);
		return ret;
	}

	ret = mipi_dsi_dcs_set_column_address(dsi, 0, stk->mode->hdisplay - 1);
	if (ret < 0) {
		dev_err(dev, "failed to set column address: %d\n", ret);
		return ret;
	}

	ret = mipi_dsi_dcs_set_page_address(dsi, 0, stk->mode->vdisplay - 1);
	if (ret < 0) {
		dev_err(dev, "failed to set page address: %d\n", ret);
		return ret;
	}

	return 0;
}
/* Turn the display on; a failure is logged and propagated. */
static int stk_panel_on(struct stk_panel *stk)
{
	struct mipi_dsi_device *dsi = stk->dsi;
	struct device *dev = &stk->dsi->dev;
	int ret;

	ret = mipi_dsi_dcs_set_display_on(dsi);
	if (ret < 0)
		dev_err(dev, "failed to set display on: %d\n", ret);

	mdelay(20);

	return ret;
}

/* Display-off + sleep-in; errors are only logged (best effort on teardown). */
static void stk_panel_off(struct stk_panel *stk)
{
	struct mipi_dsi_device *dsi = stk->dsi;
	struct device *dev = &stk->dsi->dev;
	int ret;

	/* Send the shutdown commands in HS mode. */
	dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;

	ret = mipi_dsi_dcs_set_display_off(dsi);
	if (ret < 0)
		dev_err(dev, "failed to set display off: %d\n", ret);

	ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
	if (ret < 0)
		dev_err(dev, "failed to enter sleep mode: %d\n", ret);

	msleep(100);
}
/*
 * drm_panel .unprepare hook: off sequence, cut supplies, park the GPIOs.
 *
 * NOTE(review): enable_gpio is driven to 1 here while prepare() starts with
 * it at 0 and raises it only mid power-up — presumably the line is used
 * inverted during power-down; confirm against the power-off sequence in the
 * panel datasheet.
 */
static int stk_panel_unprepare(struct drm_panel *panel)
{
	struct stk_panel *stk = to_stk_panel(panel);

	if (!stk->prepared)
		return 0;

	stk_panel_off(stk);
	regulator_bulk_disable(ARRAY_SIZE(stk->supplies), stk->supplies);
	gpiod_set_value(stk->reset_gpio, 0);
	gpiod_set_value(stk->enable_gpio, 1);

	stk->prepared = false;
	return 0;
}
/*
 * drm_panel .prepare hook: power-up sequence (iovcc, then power supply,
 * then enable and reset release, with datasheet delays), followed by the
 * init sequence and display-on. On failure, supplies already enabled are
 * rolled back via the goto ladder.
 */
static int stk_panel_prepare(struct drm_panel *panel)
{
	struct stk_panel *stk = to_stk_panel(panel);
	struct device *dev = &stk->dsi->dev;
	int ret;

	if (stk->prepared)
		return 0;

	/* Start with the panel held in reset and disabled. */
	gpiod_set_value(stk->reset_gpio, 0);
	gpiod_set_value(stk->enable_gpio, 0);

	ret = regulator_enable(stk->supplies[IOVCC].consumer);
	if (ret < 0)
		return ret;

	mdelay(8);
	ret = regulator_enable(stk->supplies[POWER].consumer);
	if (ret < 0)
		goto iovccoff;

	mdelay(20);
	gpiod_set_value(stk->enable_gpio, 1);
	mdelay(20);
	gpiod_set_value(stk->reset_gpio, 1);
	mdelay(10);

	ret = stk_panel_init(stk);
	if (ret < 0) {
		dev_err(dev, "failed to init panel: %d\n", ret);
		goto poweroff;
	}

	ret = stk_panel_on(stk);
	if (ret < 0) {
		dev_err(dev, "failed to set panel on: %d\n", ret);
		goto poweroff;
	}

	stk->prepared = true;

	return 0;

poweroff:
	regulator_disable(stk->supplies[POWER].consumer);
iovccoff:
	regulator_disable(stk->supplies[IOVCC].consumer);
	gpiod_set_value(stk->reset_gpio, 0);
	gpiod_set_value(stk->enable_gpio, 0);

	return ret;
}
/* Native mode of the KD070FHFID015: 1200x1920 @ ~60 Hz. */
static const struct drm_display_mode default_mode = {
	.clock = 163204,
	.hdisplay = 1200,
	.hsync_start = 1200 + 144,
	.hsync_end = 1200 + 144 + 16,
	.htotal = 1200 + 144 + 16 + 45,
	.vdisplay = 1920,
	.vsync_start = 1920 + 8,
	.vsync_end = 1920 + 8 + 4,
	.vtotal = 1920 + 8 + 4 + 4,
	.width_mm = 95,
	.height_mm = 151,
};

/* drm_panel .get_modes hook: report the single fixed mode. */
static int stk_panel_get_modes(struct drm_panel *panel,
			       struct drm_connector *connector)
{
	struct drm_display_mode *mode;

	mode = drm_mode_duplicate(connector->dev, &default_mode);
	if (!mode) {
		dev_err(panel->dev, "failed to add mode %ux%ux@%u\n",
			default_mode.hdisplay, default_mode.vdisplay,
			drm_mode_vrefresh(&default_mode));
		return -ENOMEM;
	}

	drm_mode_set_name(mode);
	drm_mode_probed_add(connector, mode);
	connector->display_info.width_mm = default_mode.width_mm;
	connector->display_info.height_mm = default_mode.height_mm;
	return 1;
}
/*
 * Backlight .get_brightness: read the panel brightness over DCS.
 *
 * The DCS read must go out in HS mode, so MIPI_DSI_MODE_LPM is cleared
 * around the transfer. Fix: the original restored the LPM flag only on
 * success, leaving it permanently cleared after a failed read; restore it
 * unconditionally before checking the result.
 */
static int dsi_dcs_bl_get_brightness(struct backlight_device *bl)
{
	struct mipi_dsi_device *dsi = bl_get_data(bl);
	int ret;
	u16 brightness;

	dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
	ret = mipi_dsi_dcs_get_display_brightness(dsi, &brightness);
	dsi->mode_flags |= MIPI_DSI_MODE_LPM;
	if (ret < 0)
		return ret;

	/* Only the low byte is meaningful for this panel. */
	return brightness & 0xff;
}
/*
 * Backlight .update_status: write the requested brightness over DCS.
 *
 * Fix: like the read path, restore MIPI_DSI_MODE_LPM unconditionally —
 * the original left the flag cleared when the DCS write failed.
 */
static int dsi_dcs_bl_update_status(struct backlight_device *bl)
{
	struct mipi_dsi_device *dsi = bl_get_data(bl);
	struct device *dev = &dsi->dev;
	int ret;

	dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
	ret = mipi_dsi_dcs_set_display_brightness(dsi, bl->props.brightness);
	dsi->mode_flags |= MIPI_DSI_MODE_LPM;
	if (ret < 0) {
		dev_err(dev, "failed to set DSI control: %d\n", ret);
		return ret;
	}

	return 0;
}
static const struct backlight_ops dsi_bl_ops = {
	.update_status = dsi_dcs_bl_update_status,
	.get_brightness = dsi_dcs_bl_get_brightness,
};

/* Register an 8-bit (0..255) DCS-controlled backlight device. */
static struct backlight_device *
drm_panel_create_dsi_backlight(struct mipi_dsi_device *dsi)
{
	struct device *dev = &dsi->dev;
	struct backlight_properties props = {
		.type = BACKLIGHT_RAW,
		.brightness = 255,
		.max_brightness = 255,
	};

	return devm_backlight_device_register(dev, dev_name(dev), dev, dsi,
					      &dsi_bl_ops, &props);
}

static const struct drm_panel_funcs stk_panel_funcs = {
	.unprepare = stk_panel_unprepare,
	.prepare = stk_panel_prepare,
	.get_modes = stk_panel_get_modes,
};

static const struct of_device_id stk_of_match[] = {
	{ .compatible = "startek,kd070fhfid015", },
	{ }
};
MODULE_DEVICE_TABLE(of, stk_of_match);
/*
 * Acquire all resources (supplies, GPIOs, backlight) and register the
 * panel with DRM. All resources are devm-managed, so error paths need no
 * explicit cleanup.
 */
static int stk_panel_add(struct stk_panel *stk)
{
	struct device *dev = &stk->dsi->dev;
	int ret;

	stk->mode = &default_mode;

	stk->supplies[IOVCC].supply = "iovcc";
	stk->supplies[POWER].supply = "power";
	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(stk->supplies), stk->supplies);
	if (ret) {
		dev_err(dev, "regulator_bulk failed\n");
		return ret;
	}

	stk->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
	if (IS_ERR(stk->reset_gpio)) {
		ret = PTR_ERR(stk->reset_gpio);
		dev_err(dev, "cannot get reset-gpios %d\n", ret);
		return ret;
	}

	stk->enable_gpio = devm_gpiod_get(dev, "enable", GPIOD_OUT_LOW);
	if (IS_ERR(stk->enable_gpio)) {
		ret = PTR_ERR(stk->enable_gpio);
		dev_err(dev, "cannot get enable-gpio %d\n", ret);
		return ret;
	}

	stk->backlight = drm_panel_create_dsi_backlight(stk->dsi);
	if (IS_ERR(stk->backlight)) {
		ret = PTR_ERR(stk->backlight);
		dev_err(dev, "failed to register backlight %d\n", ret);
		return ret;
	}

	drm_panel_init(&stk->base, &stk->dsi->dev, &stk_panel_funcs,
		       DRM_MODE_CONNECTOR_DSI);
	drm_panel_add(&stk->base);

	return 0;
}
/*
 * mipi_dsi .probe hook: configure the DSI link (4 lanes, RGB888, video +
 * LPM), register the panel and attach to the host.
 *
 * Fix: the original returned 0 even when mipi_dsi_attach() failed, so the
 * device was left bound with no attached DSI channel. Propagate the attach
 * error (the panel is still removed on failure).
 */
static int stk_panel_probe(struct mipi_dsi_device *dsi)
{
	struct stk_panel *stk;
	int ret;

	dsi->lanes = 4;
	dsi->format = MIPI_DSI_FMT_RGB888;
	dsi->mode_flags = (MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_LPM);

	stk = devm_kzalloc(&dsi->dev, sizeof(*stk), GFP_KERNEL);
	if (!stk)
		return -ENOMEM;

	mipi_dsi_set_drvdata(dsi, stk);

	stk->dsi = dsi;

	ret = stk_panel_add(stk);
	if (ret < 0)
		return ret;

	ret = mipi_dsi_attach(dsi);
	if (ret < 0)
		drm_panel_remove(&stk->base);

	return ret;
}
/* mipi_dsi .remove hook: detach from the host and unregister the panel. */
static void stk_panel_remove(struct mipi_dsi_device *dsi)
{
	struct stk_panel *stk = mipi_dsi_get_drvdata(dsi);
	int err;

	err = mipi_dsi_detach(dsi);
	if (err < 0)
		dev_err(&dsi->dev, "failed to detach from DSI host: %d\n",
			err);

	drm_panel_remove(&stk->base);
}

static struct mipi_dsi_driver stk_panel_driver = {
	.driver = {
		.name = "panel-startek-kd070fhfid015",
		.of_match_table = stk_of_match,
	},
	.probe = stk_panel_probe,
	.remove = stk_panel_remove,
};
module_mipi_dsi_driver(stk_panel_driver);

MODULE_AUTHOR("Guillaume La Roque <[email protected]>")
MODULE_DESCRIPTION("STARTEK KD070FHFID015");
MODULE_LICENSE("GPL");
| linux-master | drivers/gpu/drm/panel/panel-startek-kd070fhfid015.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Mantix MLAF057WE51 5.7" MIPI-DSI panel driver
*
* Copyright (C) Purism SPC 2020
*/
#include <linux/backlight.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/media-bus-format.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/regulator/consumer.h>
#include <video/mipi_display.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_modes.h>
#include <drm/drm_panel.h>
#define DRV_NAME "panel-mantix-mlaf057we51"

/* Manufacturer specific Commands send via DSI */
#define MANTIX_CMD_OTP_STOP_RELOAD_MIPI 0x41
#define MANTIX_CMD_INT_CANCEL           0x4C
#define MANTIX_CMD_SPI_FINISH           0x90

struct mantix {
	struct device *dev;
	struct drm_panel panel;

	struct gpio_desc *reset_gpio;	/* panel reset */
	struct gpio_desc *tp_rstn_gpio;	/* touch-panel reset, released with panel */

	struct regulator *avdd;		/* positive analog supply */
	struct regulator *avee;		/* negative analog supply */
	struct regulator *vddi;		/* I/O / logic supply */

	const struct drm_display_mode *default_mode;	/* from match data */
};

/* Map a drm_panel back to its containing mantix context. */
static inline struct mantix *panel_to_mantix(struct drm_panel *panel)
{
	return container_of(panel, struct mantix, panel);
}
/*
 * Send the vendor-supplied init sequence over generic DSI writes.
 * Always returns 0; the write_seq macros return on failure internally.
 */
static int mantix_init_sequence(struct mantix *ctx)
{
	struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
	struct device *dev = ctx->dev;

	/*
	 * Init sequence was supplied by the panel vendor.
	 */
	mipi_dsi_generic_write_seq(dsi, MANTIX_CMD_OTP_STOP_RELOAD_MIPI, 0x5A);

	mipi_dsi_generic_write_seq(dsi, MANTIX_CMD_INT_CANCEL, 0x03);
	mipi_dsi_generic_write_seq(dsi, MANTIX_CMD_OTP_STOP_RELOAD_MIPI, 0x5A, 0x03);
	mipi_dsi_generic_write_seq(dsi, 0x80, 0xA9, 0x00);

	mipi_dsi_generic_write_seq(dsi, MANTIX_CMD_OTP_STOP_RELOAD_MIPI, 0x5A, 0x09);
	mipi_dsi_generic_write_seq(dsi, 0x80, 0x64, 0x00, 0x64, 0x00, 0x00);
	msleep(20);

	mipi_dsi_generic_write_seq(dsi, MANTIX_CMD_SPI_FINISH, 0xA5);
	mipi_dsi_generic_write_seq(dsi, MANTIX_CMD_OTP_STOP_RELOAD_MIPI, 0x00, 0x2F);
	msleep(20);

	dev_dbg(dev, "Panel init sequence done\n");

	return 0;
}
/*
 * drm_panel .enable hook: run the init sequence, exit sleep, turn the
 * display on and enable the DSI peripheral.
 */
static int mantix_enable(struct drm_panel *panel)
{
	struct mantix *ctx = panel_to_mantix(panel);
	struct device *dev = ctx->dev;
	struct mipi_dsi_device *dsi = to_mipi_dsi_device(dev);
	int ret;

	ret = mantix_init_sequence(ctx);
	if (ret < 0) {
		dev_err(ctx->dev, "Panel init sequence failed: %d\n", ret);
		return ret;
	}

	ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
	if (ret < 0) {
		dev_err(dev, "Failed to exit sleep mode\n");
		return ret;
	}
	msleep(20);

	ret = mipi_dsi_dcs_set_display_on(dsi);
	if (ret)
		return ret;
	usleep_range(10000, 12000);

	ret = mipi_dsi_turn_on_peripheral(dsi);
	if (ret < 0) {
		dev_err(dev, "Failed to turn on peripheral\n");
		return ret;
	}

	return 0;
}
/*
 * drm_panel .disable hook: display-off + sleep-in; errors are only logged
 * (best effort — power removal follows in unprepare).
 */
static int mantix_disable(struct drm_panel *panel)
{
	struct mantix *ctx = panel_to_mantix(panel);
	struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
	int ret;

	ret = mipi_dsi_dcs_set_display_off(dsi);
	if (ret < 0)
		dev_err(ctx->dev, "Failed to turn off the display: %d\n", ret);

	ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
	if (ret < 0)
		dev_err(ctx->dev, "Failed to enter sleep mode: %d\n", ret);

	return 0;
}

/*
 * drm_panel .unprepare hook: assert both resets, then drop the supplies in
 * reverse power-up order. T11/T14 delays per the FT8006P datasheet.
 */
static int mantix_unprepare(struct drm_panel *panel)
{
	struct mantix *ctx = panel_to_mantix(panel);

	gpiod_set_value_cansleep(ctx->tp_rstn_gpio, 1);
	usleep_range(5000, 6000);
	gpiod_set_value_cansleep(ctx->reset_gpio, 1);

	regulator_disable(ctx->avee);
	regulator_disable(ctx->avdd);
	/* T11 */
	usleep_range(5000, 6000);
	regulator_disable(ctx->vddi);
	/* T14 */
	msleep(50);

	return 0;
}
/*
 * drm_panel .prepare hook: power up vddi -> avdd -> avee with the FT8006P
 * datasheet delays (T1..T6), then release both reset lines.
 *
 * Fix: the original returned on a failed regulator_enable() without
 * disabling the supplies enabled earlier, leaking their enable counts.
 * Roll back already-enabled regulators on each error path.
 */
static int mantix_prepare(struct drm_panel *panel)
{
	struct mantix *ctx = panel_to_mantix(panel);
	int ret;

	/* Focaltech FT8006P, section 7.3.1 and 7.3.4 */
	dev_dbg(ctx->dev, "Resetting the panel\n");
	ret = regulator_enable(ctx->vddi);
	if (ret < 0) {
		dev_err(ctx->dev, "Failed to enable vddi supply: %d\n", ret);
		return ret;
	}

	/* T1 + T2 */
	usleep_range(8000, 10000);

	ret = regulator_enable(ctx->avdd);
	if (ret < 0) {
		dev_err(ctx->dev, "Failed to enable avdd supply: %d\n", ret);
		goto err_disable_vddi;
	}

	/* T2d */
	usleep_range(3500, 4000);
	ret = regulator_enable(ctx->avee);
	if (ret < 0) {
		dev_err(ctx->dev, "Failed to enable avee supply: %d\n", ret);
		goto err_disable_avdd;
	}

	/* T3 + T4 + time for voltage to become stable: */
	usleep_range(6000, 7000);

	gpiod_set_value_cansleep(ctx->reset_gpio, 0);
	gpiod_set_value_cansleep(ctx->tp_rstn_gpio, 0);

	/* T6 */
	msleep(50);

	return 0;

err_disable_avdd:
	regulator_disable(ctx->avdd);
err_disable_vddi:
	regulator_disable(ctx->vddi);
	return ret;
}
/* Mantix MLAF057WE51-X: 720x1440, ~60 Hz. */
static const struct drm_display_mode default_mode_mantix = {
	.hdisplay    = 720,
	.hsync_start = 720 + 45,
	.hsync_end   = 720 + 45 + 14,
	.htotal	     = 720 + 45 + 14 + 25,
	.vdisplay    = 1440,
	.vsync_start = 1440 + 130,
	.vsync_end   = 1440 + 130 + 8,
	.vtotal	     = 1440 + 130 + 8 + 106,
	.clock	     = 85298,
	.flags	     = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
	.width_mm    = 65,
	.height_mm   = 130,
};

/* YS YS57PSS36BH5GQ variant: same active area, different vertical blanking. */
static const struct drm_display_mode default_mode_ys = {
	.hdisplay    = 720,
	.hsync_start = 720 + 45,
	.hsync_end   = 720 + 45 + 14,
	.htotal	     = 720 + 45 + 14 + 25,
	.vdisplay    = 1440,
	.vsync_start = 1440 + 175,
	.vsync_end   = 1440 + 175 + 8,
	.vtotal	     = 1440 + 175 + 8 + 50,
	.clock	     = 85298,
	.flags	     = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
	.width_mm    = 65,
	.height_mm   = 130,
};

static const u32 mantix_bus_formats[] = {
	MEDIA_BUS_FMT_RGB888_1X24,
};
/*
 * drm_panel .get_modes hook: expose the match-data mode as preferred and
 * advertise the RGB888 bus format.
 */
static int mantix_get_modes(struct drm_panel *panel,
			    struct drm_connector *connector)
{
	struct mantix *ctx = panel_to_mantix(panel);
	struct drm_display_mode *mode;

	mode = drm_mode_duplicate(connector->dev, ctx->default_mode);
	if (!mode) {
		dev_err(ctx->dev, "Failed to add mode %ux%u@%u\n",
			ctx->default_mode->hdisplay, ctx->default_mode->vdisplay,
			drm_mode_vrefresh(ctx->default_mode));
		return -ENOMEM;
	}

	drm_mode_set_name(mode);

	mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
	connector->display_info.width_mm = mode->width_mm;
	connector->display_info.height_mm = mode->height_mm;
	drm_mode_probed_add(connector, mode);

	drm_display_info_set_bus_formats(&connector->display_info,
					 mantix_bus_formats,
					 ARRAY_SIZE(mantix_bus_formats));

	return 1;
}

static const struct drm_panel_funcs mantix_drm_funcs = {
	.disable = mantix_disable,
	.unprepare = mantix_unprepare,
	.prepare = mantix_prepare,
	.enable = mantix_enable,
	.get_modes = mantix_get_modes,
};
/*
 * mipi_dsi .probe hook: fetch match-data mode, acquire reset GPIOs
 * (requested asserted) and the three supplies, set up the DSI link
 * (4 lanes, RGB888, burst video), register panel + backlight and attach.
 */
static int mantix_probe(struct mipi_dsi_device *dsi)
{
	struct device *dev = &dsi->dev;
	struct mantix *ctx;
	int ret;

	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;
	ctx->default_mode = of_device_get_match_data(dev);

	ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
	if (IS_ERR(ctx->reset_gpio)) {
		dev_err(dev, "cannot get reset gpio\n");
		return PTR_ERR(ctx->reset_gpio);
	}

	ctx->tp_rstn_gpio = devm_gpiod_get(dev, "mantix,tp-rstn", GPIOD_OUT_HIGH);
	if (IS_ERR(ctx->tp_rstn_gpio)) {
		dev_err(dev, "cannot get tp-rstn gpio\n");
		return PTR_ERR(ctx->tp_rstn_gpio);
	}

	mipi_dsi_set_drvdata(dsi, ctx);
	ctx->dev = dev;

	dsi->lanes = 4;
	dsi->format = MIPI_DSI_FMT_RGB888;
	dsi->mode_flags = MIPI_DSI_MODE_VIDEO |
		MIPI_DSI_MODE_VIDEO_BURST | MIPI_DSI_MODE_VIDEO_SYNC_PULSE;

	ctx->avdd = devm_regulator_get(dev, "avdd");
	if (IS_ERR(ctx->avdd))
		return dev_err_probe(dev, PTR_ERR(ctx->avdd), "Failed to request avdd regulator\n");

	ctx->avee = devm_regulator_get(dev, "avee");
	if (IS_ERR(ctx->avee))
		return dev_err_probe(dev, PTR_ERR(ctx->avee), "Failed to request avee regulator\n");

	ctx->vddi = devm_regulator_get(dev, "vddi");
	if (IS_ERR(ctx->vddi))
		return dev_err_probe(dev, PTR_ERR(ctx->vddi), "Failed to request vddi regulator\n");

	drm_panel_init(&ctx->panel, dev, &mantix_drm_funcs,
		       DRM_MODE_CONNECTOR_DSI);

	ret = drm_panel_of_backlight(&ctx->panel);
	if (ret)
		return ret;

	drm_panel_add(&ctx->panel);

	ret = mipi_dsi_attach(dsi);
	if (ret < 0) {
		dev_err(dev, "mipi_dsi_attach failed (%d). Is host ready?\n", ret);
		/* Undo drm_panel_add() before failing probe. */
		drm_panel_remove(&ctx->panel);
		return ret;
	}

	dev_info(dev, "%ux%u@%u %ubpp dsi %udl - ready\n",
		 ctx->default_mode->hdisplay, ctx->default_mode->vdisplay,
		 drm_mode_vrefresh(ctx->default_mode),
		 mipi_dsi_pixel_format_to_bpp(dsi->format), dsi->lanes);

	return 0;
}
/*
 * mipi_dsi .shutdown hook: force the panel off on system shutdown.
 * NOTE(review): unprepare is called before disable here, the reverse of
 * the usual DRM ordering — presumably intentional for this panel; confirm.
 */
static void mantix_shutdown(struct mipi_dsi_device *dsi)
{
	struct mantix *ctx = mipi_dsi_get_drvdata(dsi);

	drm_panel_unprepare(&ctx->panel);
	drm_panel_disable(&ctx->panel);
}

/* mipi_dsi .remove hook: power down, detach and unregister. */
static void mantix_remove(struct mipi_dsi_device *dsi)
{
	struct mantix *ctx = mipi_dsi_get_drvdata(dsi);

	mantix_shutdown(dsi);

	mipi_dsi_detach(dsi);
	drm_panel_remove(&ctx->panel);
}

/* Per-compatible display mode is carried in .data. */
static const struct of_device_id mantix_of_match[] = {
	{ .compatible = "mantix,mlaf057we51-x", .data = &default_mode_mantix },
	{ .compatible = "ys,ys57pss36bh5gq", .data = &default_mode_ys },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mantix_of_match);

static struct mipi_dsi_driver mantix_driver = {
	.probe	= mantix_probe,
	.remove = mantix_remove,
	.shutdown = mantix_shutdown,
	.driver = {
		.name = DRV_NAME,
		.of_match_table = mantix_of_match,
	},
};
module_mipi_dsi_driver(mantix_driver);

MODULE_AUTHOR("Guido Günther <[email protected]>");
MODULE_DESCRIPTION("DRM driver for Mantix MLAF057WE51-X MIPI DSI panel");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c |
// SPDX-License-Identifier: GPL-2.0
/*
* NewVision NV3052C IPS LCD panel driver
*
* Copyright (C) 2020, Paul Cercueil <[email protected]>
* Copyright (C) 2022, Christophe Branchereau <[email protected]>
*/
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/gpio/consumer.h>
#include <linux/media-bus-format.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/spi/spi.h>
#include <video/mipi_display.h>
#include <drm/drm_mipi_dbi.h>
#include <drm/drm_modes.h>
#include <drm/drm_panel.h>
/* Per-panel static description selected via the OF match table. */
struct nv3052c_panel_info {
	const struct drm_display_mode *display_modes;
	unsigned int num_modes;
	u16 width_mm, height_mm;	/* physical size */
	u32 bus_format, bus_flags;	/* DPI bus description */
};

/* Driver state: SPI-attached (MIPI DBI) controller driving a DPI panel. */
struct nv3052c {
	struct device *dev;
	struct drm_panel panel;
	struct mipi_dbi dbi;		/* register access over SPI */
	const struct nv3052c_panel_info *panel_info;
	struct regulator *supply;
	struct gpio_desc *reset_gpio;
};

/* One register write in the init table below. */
struct nv3052c_reg {
	u8 cmd;
	u8 val;
};
/*
 * Vendor-supplied init table. Writes to 0xff (x3) select a register page;
 * the register meanings within each page are not publicly documented.
 */
static const struct nv3052c_reg nv3052c_panel_regs[] = {
	/* Select page 1 */
	{ 0xff, 0x30 },
	{ 0xff, 0x52 },
	{ 0xff, 0x01 },
	{ 0xe3, 0x00 },
	{ 0x40, 0x00 },
	{ 0x03, 0x40 },
	{ 0x04, 0x00 },
	{ 0x05, 0x03 },
	{ 0x08, 0x00 },
	{ 0x09, 0x07 },
	{ 0x0a, 0x01 },
	{ 0x0b, 0x32 },
	{ 0x0c, 0x32 },
	{ 0x0d, 0x0b },
	{ 0x0e, 0x00 },
	{ 0x23, 0xa0 },
	{ 0x24, 0x0c },
	{ 0x25, 0x06 },
	{ 0x26, 0x14 },
	{ 0x27, 0x14 },
	{ 0x38, 0xcc },
	{ 0x39, 0xd7 },
	{ 0x3a, 0x4a },
	{ 0x28, 0x40 },
	{ 0x29, 0x01 },
	{ 0x2a, 0xdf },
	{ 0x49, 0x3c },
	{ 0x91, 0x77 },
	{ 0x92, 0x77 },
	{ 0xa0, 0x55 },
	{ 0xa1, 0x50 },
	{ 0xa4, 0x9c },
	{ 0xa7, 0x02 },
	{ 0xa8, 0x01 },
	{ 0xa9, 0x01 },
	{ 0xaa, 0xfc },
	{ 0xab, 0x28 },
	{ 0xac, 0x06 },
	{ 0xad, 0x06 },
	{ 0xae, 0x06 },
	{ 0xaf, 0x03 },
	{ 0xb0, 0x08 },
	{ 0xb1, 0x26 },
	{ 0xb2, 0x28 },
	{ 0xb3, 0x28 },
	{ 0xb4, 0x33 },
	{ 0xb5, 0x08 },
	{ 0xb6, 0x26 },
	{ 0xb7, 0x08 },
	{ 0xb8, 0x26 },
	{ 0xf0, 0x00 },
	{ 0xf6, 0xc0 },
	/* Select page 2 */
	{ 0xff, 0x30 },
	{ 0xff, 0x52 },
	{ 0xff, 0x02 },
	{ 0xb0, 0x0b },
	{ 0xb1, 0x16 },
	{ 0xb2, 0x17 },
	{ 0xb3, 0x2c },
	{ 0xb4, 0x32 },
	{ 0xb5, 0x3b },
	{ 0xb6, 0x29 },
	{ 0xb7, 0x40 },
	{ 0xb8, 0x0d },
	{ 0xb9, 0x05 },
	{ 0xba, 0x12 },
	{ 0xbb, 0x10 },
	{ 0xbc, 0x12 },
	{ 0xbd, 0x15 },
	{ 0xbe, 0x19 },
	{ 0xbf, 0x0e },
	{ 0xc0, 0x16 },
	{ 0xc1, 0x0a },
	{ 0xd0, 0x0c },
	{ 0xd1, 0x17 },
	{ 0xd2, 0x14 },
	{ 0xd3, 0x2e },
	{ 0xd4, 0x32 },
	{ 0xd5, 0x3c },
	{ 0xd6, 0x22 },
	{ 0xd7, 0x3d },
	{ 0xd8, 0x0d },
	{ 0xd9, 0x07 },
	{ 0xda, 0x13 },
	{ 0xdb, 0x13 },
	{ 0xdc, 0x11 },
	{ 0xdd, 0x15 },
	{ 0xde, 0x19 },
	{ 0xdf, 0x10 },
	{ 0xe0, 0x17 },
	{ 0xe1, 0x0a },
	/* Select page 3 */
	{ 0xff, 0x30 },
	{ 0xff, 0x52 },
	{ 0xff, 0x03 },
	{ 0x00, 0x2a },
	{ 0x01, 0x2a },
	{ 0x02, 0x2a },
	{ 0x03, 0x2a },
	{ 0x04, 0x61 },
	{ 0x05, 0x80 },
	{ 0x06, 0xc7 },
	{ 0x07, 0x01 },
	{ 0x08, 0x03 },
	{ 0x09, 0x04 },
	{ 0x70, 0x22 },
	{ 0x71, 0x80 },
	{ 0x30, 0x2a },
	{ 0x31, 0x2a },
	{ 0x32, 0x2a },
	{ 0x33, 0x2a },
	{ 0x34, 0x61 },
	{ 0x35, 0xc5 },
	{ 0x36, 0x80 },
	{ 0x37, 0x23 },
	{ 0x40, 0x03 },
	{ 0x41, 0x04 },
	{ 0x42, 0x05 },
	{ 0x43, 0x06 },
	{ 0x44, 0x11 },
	{ 0x45, 0xe8 },
	{ 0x46, 0xe9 },
	{ 0x47, 0x11 },
	{ 0x48, 0xea },
	{ 0x49, 0xeb },
	{ 0x50, 0x07 },
	{ 0x51, 0x08 },
	{ 0x52, 0x09 },
	{ 0x53, 0x0a },
	{ 0x54, 0x11 },
	{ 0x55, 0xec },
	{ 0x56, 0xed },
	{ 0x57, 0x11 },
	{ 0x58, 0xef },
	{ 0x59, 0xf0 },
	{ 0xb1, 0x01 },
	{ 0xb4, 0x15 },
	{ 0xb5, 0x16 },
	{ 0xb6, 0x09 },
	{ 0xb7, 0x0f },
	{ 0xb8, 0x0d },
	{ 0xb9, 0x0b },
	{ 0xba, 0x00 },
	{ 0xc7, 0x02 },
	{ 0xca, 0x17 },
	{ 0xcb, 0x18 },
	{ 0xcc, 0x0a },
	{ 0xcd, 0x10 },
	{ 0xce, 0x0e },
	{ 0xcf, 0x0c },
	{ 0xd0, 0x00 },
	{ 0x81, 0x00 },
	{ 0x84, 0x15 },
	{ 0x85, 0x16 },
	{ 0x86, 0x10 },
	{ 0x87, 0x0a },
	{ 0x88, 0x0c },
	{ 0x89, 0x0e },
	{ 0x8a, 0x02 },
	{ 0x97, 0x00 },
	{ 0x9a, 0x17 },
	{ 0x9b, 0x18 },
	{ 0x9c, 0x0f },
	{ 0x9d, 0x09 },
	{ 0x9e, 0x0b },
	{ 0x9f, 0x0d },
	{ 0xa0, 0x01 },
	/* Select page 2 */
	{ 0xff, 0x30 },
	{ 0xff, 0x52 },
	{ 0xff, 0x02 },
	{ 0x01, 0x01 },
	{ 0x02, 0xda },
	{ 0x03, 0xba },
	{ 0x04, 0xa8 },
	{ 0x05, 0x9a },
	{ 0x06, 0x70 },
	{ 0x07, 0xff },
	{ 0x08, 0x91 },
	{ 0x09, 0x90 },
	{ 0x0a, 0xff },
	{ 0x0b, 0x8f },
	{ 0x0c, 0x60 },
	{ 0x0d, 0x58 },
	{ 0x0e, 0x48 },
	{ 0x0f, 0x38 },
	{ 0x10, 0x2b },
	/* Select page 0 */
	{ 0xff, 0x30 },
	{ 0xff, 0x52 },
	{ 0xff, 0x00 },
	{ 0x36, 0x0a },
};
/* Map a drm_panel back to its containing nv3052c context. */
static inline struct nv3052c *to_nv3052c(struct drm_panel *panel)
{
	return container_of(panel, struct nv3052c, panel);
}
/*
 * drm_panel .prepare hook: power on, pulse reset, write the vendor init
 * table over DBI and exit sleep mode. The regulator is disabled again on
 * any failure after it was enabled.
 */
static int nv3052c_prepare(struct drm_panel *panel)
{
	struct nv3052c *priv = to_nv3052c(panel);
	struct mipi_dbi *dbi = &priv->dbi;
	unsigned int i;
	int err;

	err = regulator_enable(priv->supply);
	if (err) {
		dev_err(priv->dev, "Failed to enable power supply: %d\n", err);
		return err;
	}

	/* Reset the chip */
	gpiod_set_value_cansleep(priv->reset_gpio, 1);
	usleep_range(10, 1000);
	gpiod_set_value_cansleep(priv->reset_gpio, 0);
	usleep_range(5000, 20000);

	for (i = 0; i < ARRAY_SIZE(nv3052c_panel_regs); i++) {
		err = mipi_dbi_command(dbi, nv3052c_panel_regs[i].cmd,
				       nv3052c_panel_regs[i].val);

		if (err) {
			dev_err(priv->dev, "Unable to set register: %d\n", err);
			goto err_disable_regulator;
		}
	}

	err = mipi_dbi_command(dbi, MIPI_DCS_EXIT_SLEEP_MODE);
	if (err) {
		dev_err(priv->dev, "Unable to exit sleep mode: %d\n", err);
		goto err_disable_regulator;
	}

	return 0;

err_disable_regulator:
	regulator_disable(priv->supply);
	return err;
}
/*
 * drm_panel .unprepare hook: enter sleep mode (best effort), assert reset
 * and cut power.
 */
static int nv3052c_unprepare(struct drm_panel *panel)
{
	struct nv3052c *priv = to_nv3052c(panel);
	struct mipi_dbi *dbi = &priv->dbi;
	int err;

	err = mipi_dbi_command(dbi, MIPI_DCS_ENTER_SLEEP_MODE);
	if (err)
		dev_err(priv->dev, "Unable to enter sleep mode: %d\n", err);

	gpiod_set_value_cansleep(priv->reset_gpio, 1);
	regulator_disable(priv->supply);

	return 0;
}

/* drm_panel .enable hook: display-on, then settle before backlight. */
static int nv3052c_enable(struct drm_panel *panel)
{
	struct nv3052c *priv = to_nv3052c(panel);
	struct mipi_dbi *dbi = &priv->dbi;
	int err;

	err = mipi_dbi_command(dbi, MIPI_DCS_SET_DISPLAY_ON);
	if (err) {
		dev_err(priv->dev, "Unable to enable display: %d\n", err);
		return err;
	}

	if (panel->backlight) {
		/* Wait for the picture to be ready before enabling backlight */
		msleep(120);
	}

	return 0;
}

/* drm_panel .disable hook: display-off. */
static int nv3052c_disable(struct drm_panel *panel)
{
	struct nv3052c *priv = to_nv3052c(panel);
	struct mipi_dbi *dbi = &priv->dbi;
	int err;

	err = mipi_dbi_command(dbi, MIPI_DCS_SET_DISPLAY_OFF);
	if (err) {
		dev_err(priv->dev, "Unable to disable display: %d\n", err);
		return err;
	}

	return 0;
}
/*
 * drm_panel .get_modes hook: add every mode from panel_info; the sole mode
 * is marked preferred only when there is exactly one.
 */
static int nv3052c_get_modes(struct drm_panel *panel,
			     struct drm_connector *connector)
{
	struct nv3052c *priv = to_nv3052c(panel);
	const struct nv3052c_panel_info *panel_info = priv->panel_info;
	struct drm_display_mode *mode;
	unsigned int i;

	for (i = 0; i < panel_info->num_modes; i++) {
		mode = drm_mode_duplicate(connector->dev,
					  &panel_info->display_modes[i]);
		if (!mode)
			return -ENOMEM;

		drm_mode_set_name(mode);

		mode->type = DRM_MODE_TYPE_DRIVER;
		if (panel_info->num_modes == 1)
			mode->type |= DRM_MODE_TYPE_PREFERRED;

		drm_mode_probed_add(connector, mode);
	}

	connector->display_info.bpc = 8;
	connector->display_info.width_mm = panel_info->width_mm;
	connector->display_info.height_mm = panel_info->height_mm;

	drm_display_info_set_bus_formats(&connector->display_info,
					 &panel_info->bus_format, 1);
	connector->display_info.bus_flags = panel_info->bus_flags;

	return panel_info->num_modes;
}

static const struct drm_panel_funcs nv3052c_funcs = {
	.prepare	= nv3052c_prepare,
	.unprepare	= nv3052c_unprepare,
	.enable		= nv3052c_enable,
	.disable	= nv3052c_disable,
	.get_modes	= nv3052c_get_modes,
};
/*
 * spi .probe hook: gather match data, supply, reset GPIO (asserted) and
 * initialize the DBI interface (write-only: read_commands cleared), then
 * register the panel with its backlight.
 */
static int nv3052c_probe(struct spi_device *spi)
{
	struct device *dev = &spi->dev;
	struct nv3052c *priv;
	int err;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->dev = dev;

	priv->panel_info = of_device_get_match_data(dev);
	if (!priv->panel_info)
		return -EINVAL;

	priv->supply = devm_regulator_get(dev, "power");
	if (IS_ERR(priv->supply))
		return dev_err_probe(dev, PTR_ERR(priv->supply), "Failed to get power supply\n");

	priv->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
	if (IS_ERR(priv->reset_gpio))
		return dev_err_probe(dev, PTR_ERR(priv->reset_gpio), "Failed to get reset GPIO\n");

	err = mipi_dbi_spi_init(spi, &priv->dbi, NULL);
	if (err)
		return dev_err_probe(dev, err, "MIPI DBI init failed\n");

	/* The controller is write-only over this interface. */
	priv->dbi.read_commands = NULL;

	spi_set_drvdata(spi, priv);

	drm_panel_init(&priv->panel, dev, &nv3052c_funcs,
		       DRM_MODE_CONNECTOR_DPI);

	err = drm_panel_of_backlight(&priv->panel);
	if (err)
		return dev_err_probe(dev, err, "Failed to attach backlight\n");

	drm_panel_add(&priv->panel);

	return 0;
}
/*
 * spi .remove hook: unregister, then force the panel off.
 * NOTE(review): the panel is removed before disable/unprepare — presumably
 * to stop new atomic commits first; confirm ordering is intended.
 */
static void nv3052c_remove(struct spi_device *spi)
{
	struct nv3052c *priv = spi_get_drvdata(spi);

	drm_panel_remove(&priv->panel);
	drm_panel_disable(&priv->panel);
	drm_panel_unprepare(&priv->panel);
}
static const struct drm_display_mode ltk035c5444t_modes[] = {
{ /* 60 Hz */
.clock = 24000,
.hdisplay = 640,
.hsync_start = 640 + 96,
.hsync_end = 640 + 96 + 16,
.htotal = 640 + 96 + 16 + 48,
.vdisplay = 480,
.vsync_start = 480 + 5,
.vsync_end = 480 + 5 + 2,
.vtotal = 480 + 5 + 2 + 13,
.flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
},
{ /* 50 Hz */
.clock = 18000,
.hdisplay = 640,
.hsync_start = 640 + 39,
.hsync_end = 640 + 39 + 2,
.htotal = 640 + 39 + 2 + 39,
.vdisplay = 480,
.vsync_start = 480 + 5,
.vsync_end = 480 + 5 + 2,
.vtotal = 480 + 5 + 2 + 13,
.flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
},
};
static const struct nv3052c_panel_info ltk035c5444t_panel_info = {
.display_modes = ltk035c5444t_modes,
.num_modes = ARRAY_SIZE(ltk035c5444t_modes),
.width_mm = 77,
.height_mm = 64,
.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
.bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE,
};
static const struct of_device_id nv3052c_of_match[] = {
{ .compatible = "leadtek,ltk035c5444t", .data = <k035c5444t_panel_info },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, nv3052c_of_match);
/* SPI driver glue; devices are matched through the OF table. */
static struct spi_driver nv3052c_driver = {
	.driver = {
		.name = "nv3052c",
		.of_match_table = nv3052c_of_match,
	},
	.probe = nv3052c_probe,
	.remove = nv3052c_remove,
};
module_spi_driver(nv3052c_driver);
MODULE_AUTHOR("Paul Cercueil <[email protected]>");
MODULE_AUTHOR("Christophe Branchereau <[email protected]>");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/gpu/drm/panel/panel-newvision-nv3052c.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2019-2020 Icenowy Zheng <[email protected]>
*/
#include <linux/gpio/consumer.h>
#include <linux/delay.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/regulator/consumer.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_modes.h>
#include <drm/drm_panel.h>
#define K101_IM2BA02_INIT_CMD_LEN 2
/*
 * Supplies required by the panel; they are bulk-enabled together in
 * prepare() and bulk-disabled in unprepare().
 */
static const char * const regulator_names[] = {
	"dvdd",
	"avdd",
	"cvdd"
};
/* Per-device driver state. */
struct k101_im2ba02 {
	struct drm_panel panel;		/* DRM panel instance */
	struct mipi_dsi_device *dsi;	/* DSI link used for all commands */
	struct regulator_bulk_data supplies[ARRAY_SIZE(regulator_names)];
	struct gpio_desc *reset;	/* panel reset line */
};
/* Recover the driver state from the embedded drm_panel. */
static inline struct k101_im2ba02 *panel_to_k101_im2ba02(struct drm_panel *panel)
{
	return container_of(panel, struct k101_im2ba02, panel);
}
/* One fixed-length init command: register/opcode byte plus one data byte. */
struct k101_im2ba02_init_cmd {
	u8 data[K101_IM2BA02_INIT_CMD_LEN];
};
static const struct k101_im2ba02_init_cmd k101_im2ba02_init_cmds[] = {
/* Switch to page 0 */
{ .data = { 0xE0, 0x00 } },
/* Seems to be some password */
{ .data = { 0xE1, 0x93} },
{ .data = { 0xE2, 0x65 } },
{ .data = { 0xE3, 0xF8 } },
/* Lane number, 0x02 - 3 lanes, 0x03 - 4 lanes */
{ .data = { 0x80, 0x03 } },
/* Sequence control */
{ .data = { 0x70, 0x02 } },
{ .data = { 0x71, 0x23 } },
{ .data = { 0x72, 0x06 } },
/* Switch to page 1 */
{ .data = { 0xE0, 0x01 } },
/* Set VCOM */
{ .data = { 0x00, 0x00 } },
{ .data = { 0x01, 0x66 } },
/* Set VCOM_Reverse */
{ .data = { 0x03, 0x00 } },
{ .data = { 0x04, 0x25 } },
/* Set Gamma Power, VG[MS][PN] */
{ .data = { 0x17, 0x00 } },
{ .data = { 0x18, 0x6D } },
{ .data = { 0x19, 0x00 } },
{ .data = { 0x1A, 0x00 } },
{ .data = { 0x1B, 0xBF } }, /* VGMN = -4.5V */
{ .data = { 0x1C, 0x00 } },
/* Set Gate Power */
{ .data = { 0x1F, 0x3E } }, /* VGH_R = 15V */
{ .data = { 0x20, 0x28 } }, /* VGL_R = -11V */
{ .data = { 0x21, 0x28 } }, /* VGL_R2 = -11V */
{ .data = { 0x22, 0x0E } }, /* PA[6:4] = 0, PA[0] = 0 */
/* Set Panel */
{ .data = { 0x37, 0x09 } }, /* SS = 1, BGR = 1 */
/* Set RGBCYC */
{ .data = { 0x38, 0x04 } }, /* JDT = 100 column inversion */
{ .data = { 0x39, 0x08 } }, /* RGB_N_EQ1 */
{ .data = { 0x3A, 0x12 } }, /* RGB_N_EQ2 */
{ .data = { 0x3C, 0x78 } }, /* set EQ3 for TE_H */
{ .data = { 0x3D, 0xFF } }, /* set CHGEN_ON */
{ .data = { 0x3E, 0xFF } }, /* set CHGEN_OFF */
{ .data = { 0x3F, 0x7F } }, /* set CHGEN_OFF2 */
/* Set TCON parameter */
{ .data = { 0x40, 0x06 } }, /* RSO = 800 points */
{ .data = { 0x41, 0xA0 } }, /* LN = 1280 lines */
/* Set power voltage */
{ .data = { 0x55, 0x0F } }, /* DCDCM */
{ .data = { 0x56, 0x01 } },
{ .data = { 0x57, 0x69 } },
{ .data = { 0x58, 0x0A } },
{ .data = { 0x59, 0x0A } },
{ .data = { 0x5A, 0x45 } },
{ .data = { 0x5B, 0x15 } },
/* Set gamma */
{ .data = { 0x5D, 0x7C } },
{ .data = { 0x5E, 0x65 } },
{ .data = { 0x5F, 0x55 } },
{ .data = { 0x60, 0x49 } },
{ .data = { 0x61, 0x44 } },
{ .data = { 0x62, 0x35 } },
{ .data = { 0x63, 0x3A } },
{ .data = { 0x64, 0x23 } },
{ .data = { 0x65, 0x3D } },
{ .data = { 0x66, 0x3C } },
{ .data = { 0x67, 0x3D } },
{ .data = { 0x68, 0x5D } },
{ .data = { 0x69, 0x4D } },
{ .data = { 0x6A, 0x56 } },
{ .data = { 0x6B, 0x48 } },
{ .data = { 0x6C, 0x45 } },
{ .data = { 0x6D, 0x38 } },
{ .data = { 0x6E, 0x25 } },
{ .data = { 0x6F, 0x00 } },
{ .data = { 0x70, 0x7C } },
{ .data = { 0x71, 0x65 } },
{ .data = { 0x72, 0x55 } },
{ .data = { 0x73, 0x49 } },
{ .data = { 0x74, 0x44 } },
{ .data = { 0x75, 0x35 } },
{ .data = { 0x76, 0x3A } },
{ .data = { 0x77, 0x23 } },
{ .data = { 0x78, 0x3D } },
{ .data = { 0x79, 0x3C } },
{ .data = { 0x7A, 0x3D } },
{ .data = { 0x7B, 0x5D } },
{ .data = { 0x7C, 0x4D } },
{ .data = { 0x7D, 0x56 } },
{ .data = { 0x7E, 0x48 } },
{ .data = { 0x7F, 0x45 } },
{ .data = { 0x80, 0x38 } },
{ .data = { 0x81, 0x25 } },
{ .data = { 0x82, 0x00 } },
/* Switch to page 2, for GIP */
{ .data = { 0xE0, 0x02 } },
{ .data = { 0x00, 0x1E } },
{ .data = { 0x01, 0x1E } },
{ .data = { 0x02, 0x41 } },
{ .data = { 0x03, 0x41 } },
{ .data = { 0x04, 0x43 } },
{ .data = { 0x05, 0x43 } },
{ .data = { 0x06, 0x1F } },
{ .data = { 0x07, 0x1F } },
{ .data = { 0x08, 0x1F } },
{ .data = { 0x09, 0x1F } },
{ .data = { 0x0A, 0x1E } },
{ .data = { 0x0B, 0x1E } },
{ .data = { 0x0C, 0x1F } },
{ .data = { 0x0D, 0x47 } },
{ .data = { 0x0E, 0x47 } },
{ .data = { 0x0F, 0x45 } },
{ .data = { 0x10, 0x45 } },
{ .data = { 0x11, 0x4B } },
{ .data = { 0x12, 0x4B } },
{ .data = { 0x13, 0x49 } },
{ .data = { 0x14, 0x49 } },
{ .data = { 0x15, 0x1F } },
{ .data = { 0x16, 0x1E } },
{ .data = { 0x17, 0x1E } },
{ .data = { 0x18, 0x40 } },
{ .data = { 0x19, 0x40 } },
{ .data = { 0x1A, 0x42 } },
{ .data = { 0x1B, 0x42 } },
{ .data = { 0x1C, 0x1F } },
{ .data = { 0x1D, 0x1F } },
{ .data = { 0x1E, 0x1F } },
{ .data = { 0x1F, 0x1f } },
{ .data = { 0x20, 0x1E } },
{ .data = { 0x21, 0x1E } },
{ .data = { 0x22, 0x1f } },
{ .data = { 0x23, 0x46 } },
{ .data = { 0x24, 0x46 } },
{ .data = { 0x25, 0x44 } },
{ .data = { 0x26, 0x44 } },
{ .data = { 0x27, 0x4A } },
{ .data = { 0x28, 0x4A } },
{ .data = { 0x29, 0x48 } },
{ .data = { 0x2A, 0x48 } },
{ .data = { 0x2B, 0x1f } },
{ .data = { 0x2C, 0x1F } },
{ .data = { 0x2D, 0x1F } },
{ .data = { 0x2E, 0x42 } },
{ .data = { 0x2F, 0x42 } },
{ .data = { 0x30, 0x40 } },
{ .data = { 0x31, 0x40 } },
{ .data = { 0x32, 0x1E } },
{ .data = { 0x33, 0x1E } },
{ .data = { 0x34, 0x1F } },
{ .data = { 0x35, 0x1F } },
{ .data = { 0x36, 0x1E } },
{ .data = { 0x37, 0x1E } },
{ .data = { 0x38, 0x1F } },
{ .data = { 0x39, 0x48 } },
{ .data = { 0x3A, 0x48 } },
{ .data = { 0x3B, 0x4A } },
{ .data = { 0x3C, 0x4A } },
{ .data = { 0x3D, 0x44 } },
{ .data = { 0x3E, 0x44 } },
{ .data = { 0x3F, 0x46 } },
{ .data = { 0x40, 0x46 } },
{ .data = { 0x41, 0x1F } },
{ .data = { 0x42, 0x1F } },
{ .data = { 0x43, 0x1F } },
{ .data = { 0x44, 0x43 } },
{ .data = { 0x45, 0x43 } },
{ .data = { 0x46, 0x41 } },
{ .data = { 0x47, 0x41 } },
{ .data = { 0x48, 0x1E } },
{ .data = { 0x49, 0x1E } },
{ .data = { 0x4A, 0x1E } },
{ .data = { 0x4B, 0x1F } },
{ .data = { 0x4C, 0x1E } },
{ .data = { 0x4D, 0x1E } },
{ .data = { 0x4E, 0x1F } },
{ .data = { 0x4F, 0x49 } },
{ .data = { 0x50, 0x49 } },
{ .data = { 0x51, 0x4B } },
{ .data = { 0x52, 0x4B } },
{ .data = { 0x53, 0x45 } },
{ .data = { 0x54, 0x45 } },
{ .data = { 0x55, 0x47 } },
{ .data = { 0x56, 0x47 } },
{ .data = { 0x57, 0x1F } },
{ .data = { 0x58, 0x10 } },
{ .data = { 0x59, 0x00 } },
{ .data = { 0x5A, 0x00 } },
{ .data = { 0x5B, 0x30 } },
{ .data = { 0x5C, 0x02 } },
{ .data = { 0x5D, 0x40 } },
{ .data = { 0x5E, 0x01 } },
{ .data = { 0x5F, 0x02 } },
{ .data = { 0x60, 0x30 } },
{ .data = { 0x61, 0x01 } },
{ .data = { 0x62, 0x02 } },
{ .data = { 0x63, 0x6A } },
{ .data = { 0x64, 0x6A } },
{ .data = { 0x65, 0x05 } },
{ .data = { 0x66, 0x12 } },
{ .data = { 0x67, 0x74 } },
{ .data = { 0x68, 0x04 } },
{ .data = { 0x69, 0x6A } },
{ .data = { 0x6A, 0x6A } },
{ .data = { 0x6B, 0x08 } },
{ .data = { 0x6C, 0x00 } },
{ .data = { 0x6D, 0x04 } },
{ .data = { 0x6E, 0x04 } },
{ .data = { 0x6F, 0x88 } },
{ .data = { 0x70, 0x00 } },
{ .data = { 0x71, 0x00 } },
{ .data = { 0x72, 0x06 } },
{ .data = { 0x73, 0x7B } },
{ .data = { 0x74, 0x00 } },
{ .data = { 0x75, 0x07 } },
{ .data = { 0x76, 0x00 } },
{ .data = { 0x77, 0x5D } },
{ .data = { 0x78, 0x17 } },
{ .data = { 0x79, 0x1F } },
{ .data = { 0x7A, 0x00 } },
{ .data = { 0x7B, 0x00 } },
{ .data = { 0x7C, 0x00 } },
{ .data = { 0x7D, 0x03 } },
{ .data = { 0x7E, 0x7B } },
{ .data = { 0xE0, 0x04 } },
{ .data = { 0x2B, 0x2B } },
{ .data = { 0x2E, 0x44 } },
{ .data = { 0xE0, 0x01 } },
{ .data = { 0x0E, 0x01 } },
{ .data = { 0xE0, 0x03 } },
{ .data = { 0x98, 0x2F } },
{ .data = { 0xE0, 0x00 } },
{ .data = { 0xE6, 0x02 } },
{ .data = { 0xE7, 0x02 } },
{ .data = { 0x11, 0x00 } },
};
/*
 * Commands requiring explicit delays around them. Only timed_cmds[1]
 * (DCS 0x35, set_tear_on) is written, in enable(); display-on is issued
 * there via mipi_dsi_dcs_set_display_on() instead of timed_cmds[0].
 */
static const struct k101_im2ba02_init_cmd timed_cmds[] = {
	{ .data = { 0x29, 0x00 } },
	{ .data = { 0x35, 0x00 } },
};
/*
 * Power up the panel: enable the supplies, toggle the reset line with the
 * delays the panel requires, then send the vendor init sequence (which
 * ends with DCS exit_sleep_mode, 0x11).
 *
 * Returns 0 on success or a negative error code. On a failed init write
 * the panel is put back in reset and the supplies are released; the
 * original write error is propagated (previously the return value of
 * regulator_bulk_disable() clobbered it).
 */
static int k101_im2ba02_prepare(struct drm_panel *panel)
{
	struct k101_im2ba02 *ctx = panel_to_k101_im2ba02(panel);
	struct mipi_dsi_device *dsi = ctx->dsi;
	unsigned int i;
	int ret;

	ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
	if (ret)
		return ret;

	msleep(30);

	/* Reset pulse sequence, then wait for the panel to come out of reset. */
	gpiod_set_value(ctx->reset, 1);
	msleep(50);

	gpiod_set_value(ctx->reset, 0);
	msleep(50);

	gpiod_set_value(ctx->reset, 1);
	msleep(200);

	for (i = 0; i < ARRAY_SIZE(k101_im2ba02_init_cmds); i++) {
		const struct k101_im2ba02_init_cmd *cmd = &k101_im2ba02_init_cmds[i];

		ret = mipi_dsi_dcs_write_buffer(dsi, cmd->data, K101_IM2BA02_INIT_CMD_LEN);
		if (ret < 0)
			goto powerdown;
	}

	return 0;

powerdown:
	gpiod_set_value(ctx->reset, 0);
	msleep(50);

	/* Best-effort shutdown; report the write error that got us here. */
	regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
	return ret;
}
/*
 * Enable: let the init sequence settle, issue DCS display-on, then after
 * a further delay send timed_cmds[1] (DCS 0x35, set_tear_on).
 */
static int k101_im2ba02_enable(struct drm_panel *panel)
{
	struct k101_im2ba02 *ctx = panel_to_k101_im2ba02(panel);
	const struct k101_im2ba02_init_cmd *cmd = &timed_cmds[1];
	int ret;

	msleep(150);

	ret = mipi_dsi_dcs_set_display_on(ctx->dsi);
	if (ret < 0)
		return ret;

	msleep(50);

	return mipi_dsi_dcs_write_buffer(ctx->dsi, cmd->data, K101_IM2BA02_INIT_CMD_LEN);
}
/* Disable: blank the display; power removal happens in unprepare(). */
static int k101_im2ba02_disable(struct drm_panel *panel)
{
	struct k101_im2ba02 *ctx = panel_to_k101_im2ba02(panel);

	return mipi_dsi_dcs_set_display_off(ctx->dsi);
}
/*
 * Power down: display off and sleep-in are best-effort (errors are only
 * logged) before the panel is held in reset and the supplies dropped.
 */
static int k101_im2ba02_unprepare(struct drm_panel *panel)
{
	struct k101_im2ba02 *ctx = panel_to_k101_im2ba02(panel);
	int ret;

	ret = mipi_dsi_dcs_set_display_off(ctx->dsi);
	if (ret < 0)
		dev_err(panel->dev, "failed to set display off: %d\n", ret);

	ret = mipi_dsi_dcs_enter_sleep_mode(ctx->dsi);
	if (ret < 0)
		dev_err(panel->dev, "failed to enter sleep mode: %d\n", ret);

	msleep(200);

	gpiod_set_value(ctx->reset, 0);
	msleep(20);

	return regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
}
/* Single supported mode: 800x1280, 70 MHz pixel clock (~62 Hz). */
static const struct drm_display_mode k101_im2ba02_default_mode = {
	.clock = 70000,
	.hdisplay = 800,
	.hsync_start = 800 + 20,
	.hsync_end = 800 + 20 + 20,
	.htotal = 800 + 20 + 20 + 20,
	.vdisplay = 1280,
	.vsync_start = 1280 + 16,
	.vsync_end = 1280 + 16 + 4,
	.vtotal = 1280 + 16 + 4 + 4,

	.type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
	.width_mm = 136,
	.height_mm = 217,
};
static int k101_im2ba02_get_modes(struct drm_panel *panel,
struct drm_connector *connector)
{
struct k101_im2ba02 *ctx = panel_to_k101_im2ba02(panel);
struct drm_display_mode *mode;
mode = drm_mode_duplicate(connector->dev, &k101_im2ba02_default_mode);
if (!mode) {
dev_err(&ctx->dsi->dev, "failed to add mode %ux%u@%u\n",
k101_im2ba02_default_mode.hdisplay,
k101_im2ba02_default_mode.vdisplay,
drm_mode_vrefresh(&k101_im2ba02_default_mode));
return -ENOMEM;
}
drm_mode_set_name(mode);
mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
connector->display_info.width_mm = mode->width_mm;
connector->display_info.height_mm = mode->height_mm;
drm_mode_probed_add(connector, mode);
return 1;
}
/* DRM panel operations. */
static const struct drm_panel_funcs k101_im2ba02_funcs = {
	.disable = k101_im2ba02_disable,
	.unprepare = k101_im2ba02_unprepare,
	.prepare = k101_im2ba02_prepare,
	.enable = k101_im2ba02_enable,
	.get_modes = k101_im2ba02_get_modes,
};
/*
 * DSI probe: allocate state, acquire the supplies and reset GPIO,
 * register the panel, then attach to the DSI host (4 lanes, RGB888,
 * video mode). The panel is unregistered again if the attach fails.
 */
static int k101_im2ba02_dsi_probe(struct mipi_dsi_device *dsi)
{
	struct k101_im2ba02 *ctx;
	unsigned int i;
	int ret;

	ctx = devm_kzalloc(&dsi->dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	mipi_dsi_set_drvdata(dsi, ctx);
	ctx->dsi = dsi;

	for (i = 0; i < ARRAY_SIZE(ctx->supplies); i++)
		ctx->supplies[i].supply = regulator_names[i];

	ret = devm_regulator_bulk_get(&dsi->dev, ARRAY_SIZE(ctx->supplies),
				      ctx->supplies);
	if (ret < 0)
		return dev_err_probe(&dsi->dev, ret, "Couldn't get regulators\n");

	/* Panel is requested held in reset until prepare() releases it. */
	ctx->reset = devm_gpiod_get(&dsi->dev, "reset", GPIOD_OUT_LOW);
	if (IS_ERR(ctx->reset))
		return dev_err_probe(&dsi->dev, PTR_ERR(ctx->reset),
				     "Couldn't get our reset GPIO\n");

	drm_panel_init(&ctx->panel, &dsi->dev, &k101_im2ba02_funcs,
		       DRM_MODE_CONNECTOR_DSI);

	ret = drm_panel_of_backlight(&ctx->panel);
	if (ret)
		return ret;

	drm_panel_add(&ctx->panel);

	dsi->mode_flags = MIPI_DSI_MODE_VIDEO;
	dsi->format = MIPI_DSI_FMT_RGB888;
	dsi->lanes = 4;

	ret = mipi_dsi_attach(dsi);
	if (ret < 0) {
		drm_panel_remove(&ctx->panel);
		return ret;
	}

	return 0;
}
/* Unbind: detach from the DSI host, then unregister the panel. */
static void k101_im2ba02_dsi_remove(struct mipi_dsi_device *dsi)
{
	struct k101_im2ba02 *ctx = mipi_dsi_get_drvdata(dsi);

	mipi_dsi_detach(dsi);
	drm_panel_remove(&ctx->panel);
}
/* OF match table; a single compatible, no per-device data needed. */
static const struct of_device_id k101_im2ba02_of_match[] = {
	{ .compatible = "feixin,k101-im2ba02", },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, k101_im2ba02_of_match);
/* MIPI-DSI driver glue. */
static struct mipi_dsi_driver k101_im2ba02_driver = {
	.probe = k101_im2ba02_dsi_probe,
	.remove = k101_im2ba02_dsi_remove,
	.driver = {
		.name = "feixin-k101-im2ba02",
		.of_match_table = k101_im2ba02_of_match,
	},
};
module_mipi_dsi_driver(k101_im2ba02_driver);
MODULE_AUTHOR("Icenowy Zheng <[email protected]>");
MODULE_DESCRIPTION("Feixin K101 IM2BA02 MIPI-DSI LCD panel");
MODULE_LICENSE("GPL");
| linux-master | drivers/gpu/drm/panel/panel-feixin-k101-im2ba02.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* MIPI-DSI based s6e3ha2 AMOLED 5.7 inch panel driver.
*
* Copyright (c) 2016 Samsung Electronics Co., Ltd.
* Donghwa Lee <[email protected]>
* Hyungwon Hwang <[email protected]>
* Hoegeun Kwon <[email protected]>
*/
#include <linux/backlight.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/regulator/consumer.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_modes.h>
#include <drm/drm_panel.h>
#define S6E3HA2_MIN_BRIGHTNESS 0
#define S6E3HA2_MAX_BRIGHTNESS 100
#define S6E3HA2_DEFAULT_BRIGHTNESS 80
#define S6E3HA2_NUM_GAMMA_STEPS 46
#define S6E3HA2_GAMMA_CMD_CNT 35
#define S6E3HA2_VINT_STATUS_MAX 10
static const u8 gamma_tbl[S6E3HA2_NUM_GAMMA_STEPS][S6E3HA2_GAMMA_CMD_CNT] = {
{ 0x00, 0xb8, 0x00, 0xc3, 0x00, 0xb1, 0x89, 0x87, 0x87, 0x82, 0x83,
0x85, 0x88, 0x8b, 0x8b, 0x84, 0x88, 0x82, 0x82, 0x89, 0x86, 0x8c,
0x94, 0x84, 0xb1, 0xaf, 0x8e, 0xcf, 0xad, 0xc9, 0x00, 0x00, 0x00,
0x00, 0x00 },
{ 0x00, 0xb8, 0x00, 0xc3, 0x00, 0xb1, 0x89, 0x87, 0x87, 0x84, 0x84,
0x85, 0x87, 0x8b, 0x8a, 0x84, 0x88, 0x82, 0x82, 0x89, 0x86, 0x8a,
0x93, 0x84, 0xb0, 0xae, 0x8e, 0xc9, 0xa8, 0xc5, 0x00, 0x00, 0x00,
0x00, 0x00 },
{ 0x00, 0xb8, 0x00, 0xc3, 0x00, 0xb1, 0x89, 0x87, 0x87, 0x83, 0x83,
0x85, 0x86, 0x8a, 0x8a, 0x84, 0x88, 0x81, 0x84, 0x8a, 0x88, 0x8a,
0x91, 0x84, 0xb1, 0xae, 0x8b, 0xd5, 0xb2, 0xcc, 0x00, 0x00, 0x00,
0x00, 0x00 },
{ 0x00, 0xb8, 0x00, 0xc3, 0x00, 0xb1, 0x89, 0x87, 0x87, 0x83, 0x83,
0x85, 0x86, 0x8a, 0x8a, 0x84, 0x87, 0x81, 0x84, 0x8a, 0x87, 0x8a,
0x91, 0x85, 0xae, 0xac, 0x8a, 0xc3, 0xa3, 0xc0, 0x00, 0x00, 0x00,
0x00, 0x00 },
{ 0x00, 0xb8, 0x00, 0xc3, 0x00, 0xb1, 0x88, 0x86, 0x87, 0x85, 0x85,
0x86, 0x85, 0x88, 0x89, 0x84, 0x89, 0x82, 0x84, 0x87, 0x85, 0x8b,
0x91, 0x88, 0xad, 0xab, 0x8a, 0xb7, 0x9b, 0xb6, 0x00, 0x00, 0x00,
0x00, 0x00 },
{ 0x00, 0xb8, 0x00, 0xc3, 0x00, 0xb1, 0x89, 0x87, 0x87, 0x83, 0x83,
0x85, 0x86, 0x89, 0x8a, 0x84, 0x89, 0x83, 0x83, 0x86, 0x84, 0x8b,
0x90, 0x84, 0xb0, 0xae, 0x8b, 0xce, 0xad, 0xc8, 0x00, 0x00, 0x00,
0x00, 0x00 },
{ 0x00, 0xb8, 0x00, 0xc3, 0x00, 0xb1, 0x89, 0x87, 0x87, 0x83, 0x83,
0x85, 0x87, 0x89, 0x8a, 0x83, 0x87, 0x82, 0x85, 0x88, 0x87, 0x89,
0x8f, 0x84, 0xac, 0xaa, 0x89, 0xb1, 0x98, 0xaf, 0x00, 0x00, 0x00,
0x00, 0x00 },
{ 0x00, 0xb8, 0x00, 0xc3, 0x00, 0xb1, 0x89, 0x87, 0x87, 0x83, 0x83,
0x85, 0x86, 0x88, 0x89, 0x84, 0x88, 0x83, 0x82, 0x85, 0x84, 0x8c,
0x91, 0x86, 0xac, 0xaa, 0x89, 0xc2, 0xa5, 0xbd, 0x00, 0x00, 0x00,
0x00, 0x00 },
{ 0x00, 0xb8, 0x00, 0xc3, 0x00, 0xb1, 0x88, 0x86, 0x87, 0x84, 0x84,
0x85, 0x87, 0x89, 0x8a, 0x83, 0x87, 0x82, 0x85, 0x88, 0x87, 0x88,
0x8b, 0x82, 0xad, 0xaa, 0x8a, 0xc2, 0xa5, 0xbd, 0x00, 0x00, 0x00,
0x00, 0x00 },
{ 0x00, 0xb8, 0x00, 0xc3, 0x00, 0xb1, 0x89, 0x87, 0x87, 0x83, 0x83,
0x85, 0x86, 0x87, 0x89, 0x84, 0x88, 0x83, 0x82, 0x85, 0x84, 0x8a,
0x8e, 0x84, 0xae, 0xac, 0x89, 0xda, 0xb7, 0xd0, 0x00, 0x00, 0x00,
0x00, 0x00 },
{ 0x00, 0xb8, 0x00, 0xc3, 0x00, 0xb1, 0x88, 0x86, 0x87, 0x84, 0x84,
0x85, 0x86, 0x87, 0x89, 0x84, 0x88, 0x83, 0x80, 0x83, 0x82, 0x8b,
0x8e, 0x85, 0xac, 0xaa, 0x89, 0xc8, 0xaa, 0xc1, 0x00, 0x00, 0x00,
0x00, 0x00 },
{ 0x00, 0xb8, 0x00, 0xc3, 0x00, 0xb1, 0x88, 0x86, 0x87, 0x84, 0x84,
0x85, 0x86, 0x87, 0x89, 0x81, 0x85, 0x81, 0x84, 0x86, 0x84, 0x8c,
0x8c, 0x84, 0xa9, 0xa8, 0x87, 0xa3, 0x92, 0xa1, 0x00, 0x00, 0x00,
0x00, 0x00 },
{ 0x00, 0xb8, 0x00, 0xc3, 0x00, 0xb1, 0x88, 0x86, 0x87, 0x84, 0x84,
0x85, 0x86, 0x87, 0x89, 0x84, 0x86, 0x83, 0x80, 0x83, 0x81, 0x8c,
0x8d, 0x84, 0xaa, 0xaa, 0x89, 0xce, 0xaf, 0xc5, 0x00, 0x00, 0x00,
0x00, 0x00 },
{ 0x00, 0xb8, 0x00, 0xc3, 0x00, 0xb1, 0x88, 0x86, 0x87, 0x84, 0x84,
0x85, 0x86, 0x87, 0x89, 0x81, 0x83, 0x80, 0x83, 0x85, 0x85, 0x8c,
0x8c, 0x84, 0xa8, 0xa8, 0x88, 0xb5, 0x9f, 0xb0, 0x00, 0x00, 0x00,
0x00, 0x00 },
{ 0x00, 0xb8, 0x00, 0xc3, 0x00, 0xb1, 0x88, 0x86, 0x87, 0x84, 0x84,
0x86, 0x86, 0x87, 0x88, 0x81, 0x83, 0x80, 0x83, 0x85, 0x85, 0x8c,
0x8b, 0x84, 0xab, 0xa8, 0x86, 0xd4, 0xb4, 0xc9, 0x00, 0x00, 0x00,
0x00, 0x00 },
{ 0x00, 0xb8, 0x00, 0xc3, 0x00, 0xb1, 0x88, 0x86, 0x87, 0x84, 0x84,
0x86, 0x86, 0x87, 0x88, 0x81, 0x83, 0x80, 0x84, 0x84, 0x85, 0x8b,
0x8a, 0x83, 0xa6, 0xa5, 0x84, 0xbb, 0xa4, 0xb3, 0x00, 0x00, 0x00,
0x00, 0x00 },
{ 0x00, 0xb8, 0x00, 0xc3, 0x00, 0xb1, 0x88, 0x86, 0x87, 0x84, 0x84,
0x86, 0x85, 0x86, 0x86, 0x82, 0x85, 0x81, 0x82, 0x83, 0x84, 0x8e,
0x8b, 0x83, 0xa4, 0xa3, 0x8a, 0xa1, 0x93, 0x9d, 0x00, 0x00, 0x00,
0x00, 0x00 },
{ 0x00, 0xb8, 0x00, 0xc3, 0x00, 0xb1, 0x88, 0x86, 0x87, 0x83, 0x83,
0x85, 0x86, 0x87, 0x87, 0x82, 0x85, 0x81, 0x82, 0x82, 0x84, 0x8e,
0x8b, 0x83, 0xa4, 0xa2, 0x86, 0xc1, 0xa9, 0xb7, 0x00, 0x00, 0x00,
0x00, 0x00 },
{ 0x00, 0xb8, 0x00, 0xc3, 0x00, 0xb1, 0x88, 0x86, 0x87, 0x83, 0x83,
0x85, 0x86, 0x87, 0x87, 0x82, 0x85, 0x81, 0x82, 0x82, 0x84, 0x8d,
0x89, 0x82, 0xa2, 0xa1, 0x84, 0xa7, 0x98, 0xa1, 0x00, 0x00, 0x00,
0x00, 0x00 },
{ 0x00, 0xb8, 0x00, 0xc3, 0x00, 0xb1, 0x88, 0x86, 0x87, 0x83, 0x83,
0x85, 0x86, 0x87, 0x87, 0x82, 0x85, 0x81, 0x83, 0x83, 0x85, 0x8c,
0x87, 0x7f, 0xa2, 0x9d, 0x88, 0x8d, 0x88, 0x8b, 0x00, 0x00, 0x00,
0x00, 0x00 },
{ 0x00, 0xbb, 0x00, 0xc5, 0x00, 0xb4, 0x87, 0x86, 0x86, 0x84, 0x83,
0x86, 0x87, 0x87, 0x87, 0x80, 0x82, 0x7f, 0x86, 0x86, 0x88, 0x8a,
0x84, 0x7e, 0x9d, 0x9c, 0x82, 0x8d, 0x88, 0x8b, 0x00, 0x00, 0x00,
0x00, 0x00 },
{ 0x00, 0xbd, 0x00, 0xc7, 0x00, 0xb7, 0x87, 0x85, 0x85, 0x84, 0x83,
0x86, 0x86, 0x86, 0x88, 0x81, 0x83, 0x80, 0x83, 0x84, 0x85, 0x8a,
0x85, 0x7e, 0x9c, 0x9b, 0x85, 0x80, 0x80, 0x80, 0x00, 0x00, 0x00,
0x00, 0x00 },
{ 0x00, 0xc0, 0x00, 0xca, 0x00, 0xbb, 0x87, 0x86, 0x85, 0x83, 0x83,
0x85, 0x86, 0x86, 0x88, 0x81, 0x83, 0x80, 0x84, 0x85, 0x86, 0x89,
0x83, 0x7d, 0x9c, 0x99, 0x87, 0x7b, 0x7b, 0x7c, 0x00, 0x00, 0x00,
0x00, 0x00 },
{ 0x00, 0xc4, 0x00, 0xcd, 0x00, 0xbe, 0x87, 0x86, 0x85, 0x83, 0x83,
0x86, 0x85, 0x85, 0x87, 0x81, 0x82, 0x80, 0x82, 0x82, 0x83, 0x8a,
0x85, 0x7f, 0x9f, 0x9b, 0x86, 0xb4, 0xa1, 0xac, 0x00, 0x00, 0x00,
0x00, 0x00 },
{ 0x00, 0xc7, 0x00, 0xd0, 0x00, 0xc2, 0x87, 0x85, 0x85, 0x83, 0x82,
0x85, 0x85, 0x85, 0x86, 0x82, 0x83, 0x80, 0x82, 0x82, 0x84, 0x87,
0x86, 0x80, 0x9e, 0x9a, 0x87, 0xa7, 0x98, 0xa1, 0x00, 0x00, 0x00,
0x00, 0x00 },
{ 0x00, 0xca, 0x00, 0xd2, 0x00, 0xc5, 0x87, 0x85, 0x84, 0x82, 0x82,
0x84, 0x85, 0x85, 0x86, 0x81, 0x82, 0x7f, 0x82, 0x82, 0x84, 0x88,
0x86, 0x81, 0x9d, 0x98, 0x86, 0x8d, 0x88, 0x8b, 0x00, 0x00, 0x00,
0x00, 0x00 },
{ 0x00, 0xce, 0x00, 0xd6, 0x00, 0xca, 0x86, 0x85, 0x84, 0x83, 0x83,
0x85, 0x84, 0x84, 0x85, 0x81, 0x82, 0x80, 0x81, 0x81, 0x82, 0x89,
0x86, 0x81, 0x9c, 0x97, 0x86, 0xa7, 0x98, 0xa1, 0x00, 0x00, 0x00,
0x00, 0x00 },
{ 0x00, 0xd1, 0x00, 0xd9, 0x00, 0xce, 0x86, 0x84, 0x83, 0x83, 0x82,
0x85, 0x85, 0x85, 0x86, 0x81, 0x83, 0x81, 0x82, 0x82, 0x83, 0x86,
0x83, 0x7f, 0x99, 0x95, 0x86, 0xbb, 0xa4, 0xb3, 0x00, 0x00, 0x00,
0x00, 0x00 },
{ 0x00, 0xd4, 0x00, 0xdb, 0x00, 0xd1, 0x86, 0x85, 0x83, 0x83, 0x82,
0x85, 0x84, 0x84, 0x85, 0x80, 0x83, 0x82, 0x80, 0x80, 0x81, 0x87,
0x84, 0x81, 0x98, 0x93, 0x85, 0xae, 0x9c, 0xa8, 0x00, 0x00, 0x00,
0x00, 0x00 },
{ 0x00, 0xd8, 0x00, 0xde, 0x00, 0xd6, 0x86, 0x84, 0x83, 0x81, 0x81,
0x83, 0x85, 0x85, 0x85, 0x82, 0x83, 0x81, 0x81, 0x81, 0x83, 0x86,
0x84, 0x80, 0x98, 0x91, 0x85, 0x7b, 0x7b, 0x7c, 0x00, 0x00, 0x00,
0x00, 0x00 },
{ 0x00, 0xdc, 0x00, 0xe2, 0x00, 0xda, 0x85, 0x84, 0x83, 0x82, 0x82,
0x84, 0x84, 0x84, 0x85, 0x81, 0x82, 0x82, 0x80, 0x80, 0x81, 0x83,
0x82, 0x7f, 0x99, 0x93, 0x86, 0x94, 0x8b, 0x92, 0x00, 0x00, 0x00,
0x00, 0x00 },
{ 0x00, 0xdf, 0x00, 0xe5, 0x00, 0xde, 0x85, 0x84, 0x82, 0x82, 0x82,
0x84, 0x83, 0x83, 0x84, 0x81, 0x81, 0x80, 0x83, 0x82, 0x84, 0x82,
0x81, 0x7f, 0x99, 0x92, 0x86, 0x7b, 0x7b, 0x7c, 0x00, 0x00, 0x00,
0x00, 0x00 },
{ 0x00, 0xe4, 0x00, 0xe9, 0x00, 0xe3, 0x84, 0x83, 0x82, 0x81, 0x81,
0x82, 0x83, 0x83, 0x84, 0x80, 0x81, 0x80, 0x83, 0x83, 0x84, 0x80,
0x81, 0x7c, 0x99, 0x92, 0x87, 0xa1, 0x93, 0x9d, 0x00, 0x00, 0x00,
0x00, 0x00 },
{ 0x00, 0xe4, 0x00, 0xe9, 0x00, 0xe3, 0x85, 0x84, 0x83, 0x81, 0x81,
0x82, 0x82, 0x82, 0x83, 0x80, 0x81, 0x80, 0x81, 0x80, 0x82, 0x83,
0x82, 0x80, 0x91, 0x8d, 0x83, 0x9a, 0x90, 0x96, 0x00, 0x00, 0x00,
0x00, 0x00 },
{ 0x00, 0xe4, 0x00, 0xe9, 0x00, 0xe3, 0x84, 0x83, 0x82, 0x81, 0x81,
0x82, 0x83, 0x83, 0x84, 0x80, 0x81, 0x80, 0x81, 0x80, 0x82, 0x83,
0x81, 0x7f, 0x91, 0x8c, 0x82, 0x8d, 0x88, 0x8b, 0x00, 0x00, 0x00,
0x00, 0x00 },
{ 0x00, 0xe4, 0x00, 0xe9, 0x00, 0xe3, 0x84, 0x83, 0x82, 0x81, 0x81,
0x82, 0x83, 0x83, 0x83, 0x82, 0x82, 0x81, 0x81, 0x80, 0x82, 0x82,
0x82, 0x7f, 0x94, 0x89, 0x84, 0x80, 0x80, 0x80, 0x00, 0x00, 0x00,
0x00, 0x00 },
{ 0x00, 0xe4, 0x00, 0xe9, 0x00, 0xe3, 0x84, 0x83, 0x82, 0x81, 0x81,
0x82, 0x83, 0x83, 0x83, 0x82, 0x82, 0x81, 0x81, 0x80, 0x82, 0x83,
0x82, 0x7f, 0x91, 0x85, 0x81, 0x80, 0x80, 0x80, 0x00, 0x00, 0x00,
0x00, 0x00 },
{ 0x00, 0xe4, 0x00, 0xe9, 0x00, 0xe3, 0x84, 0x83, 0x82, 0x81, 0x81,
0x82, 0x83, 0x83, 0x83, 0x80, 0x80, 0x7f, 0x83, 0x82, 0x84, 0x83,
0x82, 0x7f, 0x90, 0x84, 0x81, 0x9a, 0x90, 0x96, 0x00, 0x00, 0x00,
0x00, 0x00 },
{ 0x00, 0xe4, 0x00, 0xe9, 0x00, 0xe3, 0x84, 0x83, 0x82, 0x80, 0x80,
0x82, 0x83, 0x83, 0x83, 0x80, 0x80, 0x7f, 0x80, 0x80, 0x81, 0x81,
0x82, 0x83, 0x7e, 0x80, 0x7c, 0xa4, 0x97, 0x9f, 0x00, 0x00, 0x00,
0x00, 0x00 },
{ 0x00, 0xe9, 0x00, 0xec, 0x00, 0xe8, 0x84, 0x83, 0x82, 0x81, 0x81,
0x82, 0x82, 0x82, 0x83, 0x7f, 0x7f, 0x7f, 0x81, 0x80, 0x82, 0x83,
0x83, 0x84, 0x79, 0x7c, 0x79, 0xb1, 0xa0, 0xaa, 0x00, 0x00, 0x00,
0x00, 0x00 },
{ 0x00, 0xed, 0x00, 0xf0, 0x00, 0xec, 0x83, 0x83, 0x82, 0x80, 0x80,
0x81, 0x82, 0x82, 0x82, 0x7f, 0x7f, 0x7e, 0x81, 0x81, 0x82, 0x80,
0x81, 0x81, 0x84, 0x84, 0x83, 0x80, 0x80, 0x80, 0x00, 0x00, 0x00,
0x00, 0x00 },
{ 0x00, 0xf1, 0x00, 0xf4, 0x00, 0xf1, 0x83, 0x82, 0x82, 0x80, 0x80,
0x81, 0x82, 0x82, 0x82, 0x80, 0x80, 0x80, 0x80, 0x80, 0x81, 0x7d,
0x7e, 0x7f, 0x84, 0x84, 0x83, 0x80, 0x80, 0x80, 0x00, 0x00, 0x00,
0x00, 0x00 },
{ 0x00, 0xf6, 0x00, 0xf7, 0x00, 0xf5, 0x82, 0x82, 0x81, 0x80, 0x80,
0x80, 0x82, 0x82, 0x82, 0x80, 0x80, 0x80, 0x7f, 0x7f, 0x7f, 0x82,
0x82, 0x82, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x00, 0x00, 0x00,
0x00, 0x00 },
{ 0x00, 0xfa, 0x00, 0xfb, 0x00, 0xfa, 0x81, 0x81, 0x81, 0x80, 0x80,
0x80, 0x82, 0x82, 0x82, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80,
0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x00, 0x00, 0x00,
0x00, 0x00 },
{ 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0x80, 0x80, 0x80, 0x80, 0x80,
0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80,
0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x00, 0x00, 0x00,
0x00, 0x00 },
{ 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0x80, 0x80, 0x80, 0x80, 0x80,
0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80,
0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x00, 0x00, 0x00,
0x00, 0x00 }
};
/* VINT register values, indexed by brightness bucket (see s6e3ha2_set_vint). */
static const unsigned char vint_table[S6E3HA2_VINT_STATUS_MAX] = {
	0x18, 0x19, 0x1a, 0x1b, 0x1c,
	0x1d, 0x1e, 0x1f, 0x20, 0x21
};
enum s6e3ha2_type {
HA2_TYPE,
HF2_TYPE,
};
struct s6e3ha2_panel_desc {
const struct drm_display_mode *mode;
enum s6e3ha2_type type;
};
struct s6e3ha2 {
struct device *dev;
struct drm_panel panel;
struct backlight_device *bl_dev;
struct regulator_bulk_data supplies[2];
struct gpio_desc *reset_gpio;
struct gpio_desc *enable_gpio;
const struct s6e3ha2_panel_desc *desc;
};
/* Send a raw DCS buffer over the panel's DSI link. */
static int s6e3ha2_dcs_write(struct s6e3ha2 *ctx, const void *data, size_t len)
{
	struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);

	return mipi_dsi_dcs_write_buffer(dsi, data, len);
}
/*
 * Send a constant DCS byte sequence. NOTE: returns from the *enclosing*
 * function on write failure, so it may only be used in int-returning
 * functions.
 */
#define s6e3ha2_dcs_write_seq_static(ctx, seq...) do { \
	static const u8 d[] = { seq }; \
	int ret; \
	ret = s6e3ha2_dcs_write(ctx, d, ARRAY_SIZE(d)); \
	if (ret < 0) \
		return ret; \
} while (0)
/* Evaluate func into ret; returns from the enclosing function on error. */
#define s6e3ha2_call_write_func(ret, func) do { \
	ret = (func); \
	if (ret < 0) \
		return ret; \
} while (0)
static int s6e3ha2_test_key_on_f0(struct s6e3ha2 *ctx)
{
s6e3ha2_dcs_write_seq_static(ctx, 0xf0, 0x5a, 0x5a);
return 0;
}
static int s6e3ha2_test_key_off_f0(struct s6e3ha2 *ctx)
{
s6e3ha2_dcs_write_seq_static(ctx, 0xf0, 0xa5, 0xa5);
return 0;
}
static int s6e3ha2_test_key_on_fc(struct s6e3ha2 *ctx)
{
s6e3ha2_dcs_write_seq_static(ctx, 0xfc, 0x5a, 0x5a);
return 0;
}
static int s6e3ha2_test_key_off_fc(struct s6e3ha2 *ctx)
{
s6e3ha2_dcs_write_seq_static(ctx, 0xfc, 0xa5, 0xa5);
return 0;
}
static int s6e3ha2_single_dsi_set(struct s6e3ha2 *ctx)
{
s6e3ha2_dcs_write_seq_static(ctx, 0xf2, 0x67);
s6e3ha2_dcs_write_seq_static(ctx, 0xf9, 0x09);
return 0;
}
static int s6e3ha2_freq_calibration(struct s6e3ha2 *ctx)
{
s6e3ha2_dcs_write_seq_static(ctx, 0xfd, 0x1c);
if (ctx->desc->type == HF2_TYPE)
s6e3ha2_dcs_write_seq_static(ctx, 0xf2, 0x67, 0x40, 0xc5);
s6e3ha2_dcs_write_seq_static(ctx, 0xfe, 0x20, 0x39);
s6e3ha2_dcs_write_seq_static(ctx, 0xfe, 0xa0);
s6e3ha2_dcs_write_seq_static(ctx, 0xfe, 0x20);
if (ctx->desc->type == HA2_TYPE)
s6e3ha2_dcs_write_seq_static(ctx, 0xce, 0x03, 0x3b, 0x12, 0x62,
0x40, 0x80, 0xc0, 0x28, 0x28,
0x28, 0x28, 0x39, 0xc5);
else
s6e3ha2_dcs_write_seq_static(ctx, 0xce, 0x03, 0x3b, 0x14, 0x6d,
0x40, 0x80, 0xc0, 0x28, 0x28,
0x28, 0x28, 0x39, 0xc5);
return 0;
}
static int s6e3ha2_aor_control(struct s6e3ha2 *ctx)
{
s6e3ha2_dcs_write_seq_static(ctx, 0xb2, 0x03, 0x10);
return 0;
}
static int s6e3ha2_caps_elvss_set(struct s6e3ha2 *ctx)
{
s6e3ha2_dcs_write_seq_static(ctx, 0xb6, 0x9c, 0x0a);
return 0;
}
static int s6e3ha2_acl_off(struct s6e3ha2 *ctx)
{
s6e3ha2_dcs_write_seq_static(ctx, 0x55, 0x00);
return 0;
}
static int s6e3ha2_acl_off_opr(struct s6e3ha2 *ctx)
{
s6e3ha2_dcs_write_seq_static(ctx, 0xb5, 0x40);
return 0;
}
static int s6e3ha2_test_global(struct s6e3ha2 *ctx)
{
s6e3ha2_dcs_write_seq_static(ctx, 0xb0, 0x07);
return 0;
}
static int s6e3ha2_test(struct s6e3ha2 *ctx)
{
s6e3ha2_dcs_write_seq_static(ctx, 0xb8, 0x19);
return 0;
}
static int s6e3ha2_touch_hsync_on1(struct s6e3ha2 *ctx)
{
s6e3ha2_dcs_write_seq_static(ctx, 0xbd, 0x33, 0x11, 0x02,
0x16, 0x02, 0x16);
return 0;
}
static int s6e3ha2_pentile_control(struct s6e3ha2 *ctx)
{
s6e3ha2_dcs_write_seq_static(ctx, 0xc0, 0x00, 0x00, 0xd8, 0xd8);
return 0;
}
static int s6e3ha2_poc_global(struct s6e3ha2 *ctx)
{
s6e3ha2_dcs_write_seq_static(ctx, 0xb0, 0x20);
return 0;
}
static int s6e3ha2_poc_setting(struct s6e3ha2 *ctx)
{
s6e3ha2_dcs_write_seq_static(ctx, 0xfe, 0x08);
return 0;
}
static int s6e3ha2_pcd_set_off(struct s6e3ha2 *ctx)
{
s6e3ha2_dcs_write_seq_static(ctx, 0xcc, 0x40, 0x51);
return 0;
}
static int s6e3ha2_err_fg_set(struct s6e3ha2 *ctx)
{
s6e3ha2_dcs_write_seq_static(ctx, 0xed, 0x44);
return 0;
}
static int s6e3ha2_hbm_off(struct s6e3ha2 *ctx)
{
s6e3ha2_dcs_write_seq_static(ctx, 0x53, 0x00);
return 0;
}
static int s6e3ha2_te_start_setting(struct s6e3ha2 *ctx)
{
s6e3ha2_dcs_write_seq_static(ctx, 0xb9, 0x10, 0x09, 0xff, 0x00, 0x09);
return 0;
}
/* Latch previously written gamma data: pulse the update bit (0xf7). */
static int s6e3ha2_gamma_update(struct s6e3ha2 *ctx)
{
	s6e3ha2_dcs_write_seq_static(ctx, 0xf7, 0x03);
	ndelay(100); /* need for 100ns delay */
	s6e3ha2_dcs_write_seq_static(ctx, 0xf7, 0x00);

	return 0;
}
/* Backlight op: report the cached brightness (no hardware readback). */
static int s6e3ha2_get_brightness(struct backlight_device *bl_dev)
{
	return bl_dev->props.brightness;
}
/*
 * Program the VINT setting (register 0xf4) scaled linearly from the
 * current brightness. Assumes brightness <= S6E3HA2_MAX_BRIGHTNESS,
 * which set_brightness() validates before calling here.
 */
static int s6e3ha2_set_vint(struct s6e3ha2 *ctx)
{
	struct backlight_device *bl_dev = ctx->bl_dev;
	unsigned int brightness = bl_dev->props.brightness;
	unsigned char data[] = { 0xf4, 0x8b,
			vint_table[brightness * (S6E3HA2_VINT_STATUS_MAX - 1) /
			S6E3HA2_MAX_BRIGHTNESS] };

	return s6e3ha2_dcs_write(ctx, data, ARRAY_SIZE(data));
}
static unsigned int s6e3ha2_get_brightness_index(unsigned int brightness)
{
return (brightness * (S6E3HA2_NUM_GAMMA_STEPS - 1)) /
S6E3HA2_MAX_BRIGHTNESS;
}
/*
 * Write the gamma table row matching the requested brightness (register
 * 0xca), latch it, and cache the new brightness on success.
 */
static int s6e3ha2_update_gamma(struct s6e3ha2 *ctx, unsigned int brightness)
{
	struct backlight_device *bl_dev = ctx->bl_dev;
	unsigned int index = s6e3ha2_get_brightness_index(brightness);
	u8 data[S6E3HA2_GAMMA_CMD_CNT + 1] = { 0xca, };
	int ret;

	/* copy the selected gamma_tbl row after the 0xca command byte */
	memcpy(data + 1, gamma_tbl + index, S6E3HA2_GAMMA_CMD_CNT);
	s6e3ha2_call_write_func(ret,
				s6e3ha2_dcs_write(ctx, data, ARRAY_SIZE(data)));

	s6e3ha2_call_write_func(ret, s6e3ha2_gamma_update(ctx));
	bl_dev->props.brightness = brightness;

	return 0;
}
/*
 * Backlight op: validate the requested brightness, then push gamma, AOR
 * and VINT settings inside a test-key-protected sequence.
 *
 * brightness is unsigned and S6E3HA2_MIN_BRIGHTNESS is 0, so only the
 * upper bound can be violated; the former "< MIN" comparison was always
 * false and has been dropped.
 *
 * Returns 0 on success, -EINVAL for an out-of-range brightness, -EPERM
 * when the panel is powered down, or a negative DSI write error.
 */
static int s6e3ha2_set_brightness(struct backlight_device *bl_dev)
{
	struct s6e3ha2 *ctx = bl_get_data(bl_dev);
	unsigned int brightness = bl_dev->props.brightness;
	int ret;

	if (brightness > bl_dev->props.max_brightness) {
		dev_err(ctx->dev, "Invalid brightness: %u\n", brightness);
		return -EINVAL;
	}

	if (bl_dev->props.power > FB_BLANK_NORMAL)
		return -EPERM;

	s6e3ha2_call_write_func(ret, s6e3ha2_test_key_on_f0(ctx));
	s6e3ha2_call_write_func(ret, s6e3ha2_update_gamma(ctx, brightness));
	s6e3ha2_call_write_func(ret, s6e3ha2_aor_control(ctx));
	s6e3ha2_call_write_func(ret, s6e3ha2_set_vint(ctx));
	s6e3ha2_call_write_func(ret, s6e3ha2_test_key_off_f0(ctx));

	return 0;
}
static const struct backlight_ops s6e3ha2_bl_ops = {
.get_brightness = s6e3ha2_get_brightness,
.update_status = s6e3ha2_set_brightness,
};
static int s6e3ha2_panel_init(struct s6e3ha2 *ctx)
{
struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
int ret;
s6e3ha2_call_write_func(ret, mipi_dsi_dcs_exit_sleep_mode(dsi));
usleep_range(5000, 6000);
s6e3ha2_call_write_func(ret, s6e3ha2_test_key_on_f0(ctx));
s6e3ha2_call_write_func(ret, s6e3ha2_single_dsi_set(ctx));
s6e3ha2_call_write_func(ret, s6e3ha2_test_key_on_fc(ctx));
s6e3ha2_call_write_func(ret, s6e3ha2_freq_calibration(ctx));
s6e3ha2_call_write_func(ret, s6e3ha2_test_key_off_fc(ctx));
s6e3ha2_call_write_func(ret, s6e3ha2_test_key_off_f0(ctx));
return 0;
}
/* Drop both panel supplies. */
static int s6e3ha2_power_off(struct s6e3ha2 *ctx)
{
	return regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
}
/*
 * Disable: put the panel in sleep mode and blank the display, then mark
 * the backlight device as blanked.
 */
static int s6e3ha2_disable(struct drm_panel *panel)
{
	struct s6e3ha2 *ctx = container_of(panel, struct s6e3ha2, panel);
	struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
	int ret;

	s6e3ha2_call_write_func(ret, mipi_dsi_dcs_enter_sleep_mode(dsi));
	s6e3ha2_call_write_func(ret, mipi_dsi_dcs_set_display_off(dsi));

	msleep(40);
	ctx->bl_dev->props.power = FB_BLANK_NORMAL;

	return 0;
}
/* Unprepare: remove power from the panel. */
static int s6e3ha2_unprepare(struct drm_panel *panel)
{
	struct s6e3ha2 *ctx = container_of(panel, struct s6e3ha2, panel);

	return s6e3ha2_power_off(ctx);
}
/*
 * Power-on sequence: enable both supplies, wait for them to settle,
 * then cycle the enable GPIO and pulse reset with the required delays.
 */
static int s6e3ha2_power_on(struct s6e3ha2 *ctx)
{
	int ret;

	ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
	if (ret < 0)
		return ret;

	msleep(120);

	gpiod_set_value(ctx->enable_gpio, 0);
	usleep_range(5000, 6000);
	gpiod_set_value(ctx->enable_gpio, 1);

	gpiod_set_value(ctx->reset_gpio, 1);
	usleep_range(5000, 6000);
	gpiod_set_value(ctx->reset_gpio, 0);
	usleep_range(5000, 6000);

	return 0;
}
/*
 * Prepare: power the panel on and run the one-time init sequence;
 * power is removed again if init fails.
 */
static int s6e3ha2_prepare(struct drm_panel *panel)
{
	struct s6e3ha2 *ctx = container_of(panel, struct s6e3ha2, panel);
	int ret;

	ret = s6e3ha2_power_on(ctx);
	if (ret < 0)
		return ret;

	ret = s6e3ha2_panel_init(ctx);
	if (ret < 0)
		goto err;

	ctx->bl_dev->props.power = FB_BLANK_NORMAL;

	return 0;

err:
	s6e3ha2_power_off(ctx);
	return ret;
}
/*
 * drm_panel .enable hook: program the display settings and turn it on.
 *
 * Runs the vendor "common setting" sequence (tear-on, test-key gated
 * registers, pentile/POC/hsync setup), applies the current brightness
 * configuration, then issues DCS set_display_on.  Each step goes through
 * s6e3ha2_call_write_func(), which returns the error from this function
 * immediately if a write fails — so a partial sequence is possible on error.
 */
static int s6e3ha2_enable(struct drm_panel *panel)
{
	struct s6e3ha2 *ctx = container_of(panel, struct s6e3ha2, panel);
	struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
	int ret;

	/* common setting */
	s6e3ha2_call_write_func(ret,
		mipi_dsi_dcs_set_tear_on(dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK));

	s6e3ha2_call_write_func(ret, s6e3ha2_test_key_on_f0(ctx));
	s6e3ha2_call_write_func(ret, s6e3ha2_test_key_on_fc(ctx));
	s6e3ha2_call_write_func(ret, s6e3ha2_touch_hsync_on1(ctx));
	s6e3ha2_call_write_func(ret, s6e3ha2_pentile_control(ctx));
	s6e3ha2_call_write_func(ret, s6e3ha2_poc_global(ctx));
	s6e3ha2_call_write_func(ret, s6e3ha2_poc_setting(ctx));
	s6e3ha2_call_write_func(ret, s6e3ha2_test_key_off_fc(ctx));

	/* pcd setting off for TB */
	s6e3ha2_call_write_func(ret, s6e3ha2_pcd_set_off(ctx));
	s6e3ha2_call_write_func(ret, s6e3ha2_err_fg_set(ctx));
	s6e3ha2_call_write_func(ret, s6e3ha2_te_start_setting(ctx));

	/* brightness setting */
	s6e3ha2_call_write_func(ret, s6e3ha2_set_brightness(ctx->bl_dev));
	s6e3ha2_call_write_func(ret, s6e3ha2_aor_control(ctx));
	s6e3ha2_call_write_func(ret, s6e3ha2_caps_elvss_set(ctx));
	s6e3ha2_call_write_func(ret, s6e3ha2_gamma_update(ctx));
	s6e3ha2_call_write_func(ret, s6e3ha2_acl_off(ctx));
	s6e3ha2_call_write_func(ret, s6e3ha2_acl_off_opr(ctx));
	s6e3ha2_call_write_func(ret, s6e3ha2_hbm_off(ctx));

	/* elvss temp compensation */
	s6e3ha2_call_write_func(ret, s6e3ha2_test_global(ctx));
	s6e3ha2_call_write_func(ret, s6e3ha2_test(ctx));
	s6e3ha2_call_write_func(ret, s6e3ha2_test_key_off_f0(ctx));

	s6e3ha2_call_write_func(ret, mipi_dsi_dcs_set_display_on(dsi));
	ctx->bl_dev->props.power = FB_BLANK_UNBLANK;

	return 0;
}
/* Fixed native mode of the S6E3HA2 panel: 1440x2560, minimal blanking. */
static const struct drm_display_mode s6e3ha2_mode = {
	.clock = 222372,
	.hdisplay = 1440,
	.hsync_start = 1440 + 1,
	.hsync_end = 1440 + 1 + 1,
	.htotal = 1440 + 1 + 1 + 1,
	.vdisplay = 2560,
	.vsync_start = 2560 + 1,
	.vsync_end = 2560 + 1 + 1,
	.vtotal = 2560 + 1 + 1 + 15,
	.flags = 0,
};

static const struct s6e3ha2_panel_desc samsung_s6e3ha2 = {
	.mode = &s6e3ha2_mode,
	.type = HA2_TYPE,
};

/* Fixed native mode of the S6E3HF2 variant: wider 1600x2560 active area. */
static const struct drm_display_mode s6e3hf2_mode = {
	.clock = 247856,
	.hdisplay = 1600,
	.hsync_start = 1600 + 1,
	.hsync_end = 1600 + 1 + 1,
	.htotal = 1600 + 1 + 1 + 1,
	.vdisplay = 2560,
	.vsync_start = 2560 + 1,
	.vsync_end = 2560 + 1 + 1,
	.vtotal = 2560 + 1 + 1 + 15,
	.flags = 0,
};

static const struct s6e3hf2_panel_desc samsung_s6e3hf2 = {
	.mode = &s6e3hf2_mode,
	.type = HF2_TYPE,
};
static int s6e3ha2_get_modes(struct drm_panel *panel,
struct drm_connector *connector)
{
struct s6e3ha2 *ctx = container_of(panel, struct s6e3ha2, panel);
struct drm_display_mode *mode;
mode = drm_mode_duplicate(connector->dev, ctx->desc->mode);
if (!mode) {
dev_err(panel->dev, "failed to add mode %ux%u@%u\n",
ctx->desc->mode->hdisplay, ctx->desc->mode->vdisplay,
drm_mode_vrefresh(ctx->desc->mode));
return -ENOMEM;
}
drm_mode_set_name(mode);
mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
drm_mode_probed_add(connector, mode);
connector->display_info.width_mm = 71;
connector->display_info.height_mm = 125;
return 1;
}
/* drm_panel operations; prepare/enable and disable/unprepare are paired. */
static const struct drm_panel_funcs s6e3ha2_drm_funcs = {
	.disable = s6e3ha2_disable,
	.unprepare = s6e3ha2_unprepare,
	.prepare = s6e3ha2_prepare,
	.enable = s6e3ha2_enable,
	.get_modes = s6e3ha2_get_modes,
};
/*
 * DSI probe: allocate driver state, acquire regulators and GPIOs, register
 * the backlight device and the DRM panel, then attach to the DSI host.
 *
 * Registration order matters: the panel must be added before
 * mipi_dsi_attach() so the host can find it, and the error path unwinds
 * panel and backlight registration in reverse order.
 */
static int s6e3ha2_probe(struct mipi_dsi_device *dsi)
{
	struct device *dev = &dsi->dev;
	struct s6e3ha2 *ctx;
	int ret;

	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	mipi_dsi_set_drvdata(dsi, ctx);

	ctx->dev = dev;
	/* Variant description (mode + HA2/HF2 type) from the OF match table. */
	ctx->desc = of_device_get_match_data(dev);

	dsi->lanes = 4;
	dsi->format = MIPI_DSI_FMT_RGB888;
	dsi->mode_flags = MIPI_DSI_CLOCK_NON_CONTINUOUS |
		MIPI_DSI_MODE_VIDEO_NO_HFP | MIPI_DSI_MODE_VIDEO_NO_HBP |
		MIPI_DSI_MODE_VIDEO_NO_HSA | MIPI_DSI_MODE_NO_EOT_PACKET;

	ctx->supplies[0].supply = "vdd3";
	ctx->supplies[1].supply = "vci";

	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(ctx->supplies),
				      ctx->supplies);
	if (ret < 0) {
		dev_err(dev, "failed to get regulators: %d\n", ret);
		return ret;
	}

	ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
	if (IS_ERR(ctx->reset_gpio)) {
		dev_err(dev, "cannot get reset-gpios %ld\n",
			PTR_ERR(ctx->reset_gpio));
		return PTR_ERR(ctx->reset_gpio);
	}

	ctx->enable_gpio = devm_gpiod_get(dev, "enable", GPIOD_OUT_HIGH);
	if (IS_ERR(ctx->enable_gpio)) {
		dev_err(dev, "cannot get enable-gpios %ld\n",
			PTR_ERR(ctx->enable_gpio));
		return PTR_ERR(ctx->enable_gpio);
	}

	/* Non-devm registration: must be unregistered in remove/error paths. */
	ctx->bl_dev = backlight_device_register("s6e3ha2", dev, ctx,
						&s6e3ha2_bl_ops, NULL);
	if (IS_ERR(ctx->bl_dev)) {
		dev_err(dev, "failed to register backlight device\n");
		return PTR_ERR(ctx->bl_dev);
	}

	ctx->bl_dev->props.max_brightness = S6E3HA2_MAX_BRIGHTNESS;
	ctx->bl_dev->props.brightness = S6E3HA2_DEFAULT_BRIGHTNESS;
	ctx->bl_dev->props.power = FB_BLANK_POWERDOWN;

	drm_panel_init(&ctx->panel, dev, &s6e3ha2_drm_funcs,
		       DRM_MODE_CONNECTOR_DSI);
	/* The DSI host must call prepare() before pre_enable of the bridge. */
	ctx->panel.prepare_prev_first = true;

	drm_panel_add(&ctx->panel);

	ret = mipi_dsi_attach(dsi);
	if (ret < 0)
		goto remove_panel;

	return ret;

remove_panel:
	drm_panel_remove(&ctx->panel);
	backlight_device_unregister(ctx->bl_dev);

	return ret;
}
/* DSI remove: tear down in reverse order of probe (detach, panel, backlight). */
static void s6e3ha2_remove(struct mipi_dsi_device *dsi)
{
	struct s6e3ha2 *ctx = mipi_dsi_get_drvdata(dsi);

	mipi_dsi_detach(dsi);
	drm_panel_remove(&ctx->panel);
	backlight_device_unregister(ctx->bl_dev);
}
/* Both supported variants share the driver; .data selects mode/type. */
static const struct of_device_id s6e3ha2_of_match[] = {
	{ .compatible = "samsung,s6e3ha2", .data = &samsung_s6e3ha2 },
	{ .compatible = "samsung,s6e3hf2", .data = &samsung_s6e3hf2 },
	{ }
};
MODULE_DEVICE_TABLE(of, s6e3ha2_of_match);

static struct mipi_dsi_driver s6e3ha2_driver = {
	.probe = s6e3ha2_probe,
	.remove = s6e3ha2_remove,
	.driver = {
		.name = "panel-samsung-s6e3ha2",
		.of_match_table = s6e3ha2_of_match,
	},
};
module_mipi_dsi_driver(s6e3ha2_driver);

MODULE_AUTHOR("Donghwa Lee <[email protected]>");
MODULE_AUTHOR("Hyungwon Hwang <[email protected]>");
MODULE_AUTHOR("Hoegeun Kwon <[email protected]>");
MODULE_DESCRIPTION("MIPI-DSI based s6e3ha2 AMOLED Panel Driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2017 Free Electrons
*/
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/media-bus-format.h>
#include <linux/module.h>
#include <linux/regulator/consumer.h>
#include <linux/spi/spi.h>
#include <linux/string.h>

#include <video/mipi_display.h>

#include <drm/drm_device.h>
#include <drm/drm_modes.h>
#include <drm/drm_panel.h>
#define ST7789V_RAMCTRL_CMD 0xb0
#define ST7789V_RAMCTRL_RM_RGB BIT(4)
#define ST7789V_RAMCTRL_DM_RGB BIT(0)
#define ST7789V_RAMCTRL_MAGIC (3 << 6)
#define ST7789V_RAMCTRL_EPF(n) (((n) & 3) << 4)
#define ST7789V_RGBCTRL_CMD 0xb1
#define ST7789V_RGBCTRL_WO BIT(7)
#define ST7789V_RGBCTRL_RCM(n) (((n) & 3) << 5)
#define ST7789V_RGBCTRL_VSYNC_HIGH BIT(3)
#define ST7789V_RGBCTRL_HSYNC_HIGH BIT(2)
#define ST7789V_RGBCTRL_PCLK_FALLING BIT(1)
#define ST7789V_RGBCTRL_DE_LOW BIT(0)
#define ST7789V_RGBCTRL_VBP(n) ((n) & 0x7f)
#define ST7789V_RGBCTRL_HBP(n) ((n) & 0x1f)
#define ST7789V_PORCTRL_CMD 0xb2
#define ST7789V_PORCTRL_IDLE_BP(n) (((n) & 0xf) << 4)
#define ST7789V_PORCTRL_IDLE_FP(n) ((n) & 0xf)
#define ST7789V_PORCTRL_PARTIAL_BP(n) (((n) & 0xf) << 4)
#define ST7789V_PORCTRL_PARTIAL_FP(n) ((n) & 0xf)
#define ST7789V_GCTRL_CMD 0xb7
#define ST7789V_GCTRL_VGHS(n) (((n) & 7) << 4)
#define ST7789V_GCTRL_VGLS(n) ((n) & 7)
#define ST7789V_VCOMS_CMD 0xbb
#define ST7789V_LCMCTRL_CMD 0xc0
#define ST7789V_LCMCTRL_XBGR BIT(5)
#define ST7789V_LCMCTRL_XMX BIT(3)
#define ST7789V_LCMCTRL_XMH BIT(2)
#define ST7789V_VDVVRHEN_CMD 0xc2
#define ST7789V_VDVVRHEN_CMDEN BIT(0)
#define ST7789V_VRHS_CMD 0xc3
#define ST7789V_VDVS_CMD 0xc4
#define ST7789V_FRCTRL2_CMD 0xc6
#define ST7789V_PWCTRL1_CMD 0xd0
#define ST7789V_PWCTRL1_MAGIC 0xa4
#define ST7789V_PWCTRL1_AVDD(n) (((n) & 3) << 6)
#define ST7789V_PWCTRL1_AVCL(n) (((n) & 3) << 4)
#define ST7789V_PWCTRL1_VDS(n) ((n) & 3)
#define ST7789V_PVGAMCTRL_CMD 0xe0
#define ST7789V_PVGAMCTRL_JP0(n) (((n) & 3) << 4)
#define ST7789V_PVGAMCTRL_JP1(n) (((n) & 3) << 4)
#define ST7789V_PVGAMCTRL_VP0(n) ((n) & 0xf)
#define ST7789V_PVGAMCTRL_VP1(n) ((n) & 0x3f)
#define ST7789V_PVGAMCTRL_VP2(n) ((n) & 0x3f)
#define ST7789V_PVGAMCTRL_VP4(n) ((n) & 0x1f)
#define ST7789V_PVGAMCTRL_VP6(n) ((n) & 0x1f)
#define ST7789V_PVGAMCTRL_VP13(n) ((n) & 0xf)
#define ST7789V_PVGAMCTRL_VP20(n) ((n) & 0x7f)
#define ST7789V_PVGAMCTRL_VP27(n) ((n) & 7)
#define ST7789V_PVGAMCTRL_VP36(n) (((n) & 7) << 4)
#define ST7789V_PVGAMCTRL_VP43(n) ((n) & 0x7f)
#define ST7789V_PVGAMCTRL_VP50(n) ((n) & 0xf)
#define ST7789V_PVGAMCTRL_VP57(n) ((n) & 0x1f)
#define ST7789V_PVGAMCTRL_VP59(n) ((n) & 0x1f)
#define ST7789V_PVGAMCTRL_VP61(n) ((n) & 0x3f)
#define ST7789V_PVGAMCTRL_VP62(n) ((n) & 0x3f)
#define ST7789V_PVGAMCTRL_VP63(n) (((n) & 0xf) << 4)
#define ST7789V_NVGAMCTRL_CMD 0xe1
#define ST7789V_NVGAMCTRL_JN0(n) (((n) & 3) << 4)
#define ST7789V_NVGAMCTRL_JN1(n) (((n) & 3) << 4)
#define ST7789V_NVGAMCTRL_VN0(n) ((n) & 0xf)
#define ST7789V_NVGAMCTRL_VN1(n) ((n) & 0x3f)
#define ST7789V_NVGAMCTRL_VN2(n) ((n) & 0x3f)
#define ST7789V_NVGAMCTRL_VN4(n) ((n) & 0x1f)
#define ST7789V_NVGAMCTRL_VN6(n) ((n) & 0x1f)
#define ST7789V_NVGAMCTRL_VN13(n) ((n) & 0xf)
#define ST7789V_NVGAMCTRL_VN20(n) ((n) & 0x7f)
#define ST7789V_NVGAMCTRL_VN27(n) ((n) & 7)
#define ST7789V_NVGAMCTRL_VN36(n) (((n) & 7) << 4)
#define ST7789V_NVGAMCTRL_VN43(n) ((n) & 0x7f)
#define ST7789V_NVGAMCTRL_VN50(n) ((n) & 0xf)
#define ST7789V_NVGAMCTRL_VN57(n) ((n) & 0x1f)
#define ST7789V_NVGAMCTRL_VN59(n) ((n) & 0x1f)
#define ST7789V_NVGAMCTRL_VN61(n) ((n) & 0x3f)
#define ST7789V_NVGAMCTRL_VN62(n) ((n) & 0x3f)
#define ST7789V_NVGAMCTRL_VN63(n) (((n) & 0xf) << 4)
/*
 * Evaluate @func, store its status in @val and, on any non-zero status,
 * return it from the *enclosing* function.  Beware the hidden control
 * flow: this macro can make the caller exit early mid-sequence.
 */
#define ST7789V_TEST(val, func) \
	do { \
		if ((val = (func))) \
			return val; \
	} while (0)

/* Bytes a genuine ST7789V reports for MIPI_DCS_GET_DISPLAY_ID. */
#define ST7789V_IDS { 0x85, 0x85, 0x52 }
#define ST7789V_IDS_SIZE 3
/* Per-model description of a panel wired to the ST7789V controller. */
struct st7789_panel_info {
	const struct drm_display_mode *mode;	/* fixed native mode */
	u32 bus_format;		/* MEDIA_BUS_FMT_* of the parallel bus */
	u32 bus_flags;		/* DRM_BUS_FLAG_* (DE level, sample edge) */
	bool invert_mode;	/* issue DCS ENTER_INVERT_MODE on prepare */
	bool partial_mode;	/* drive only a sub-window of the GRAM */
	u16 partial_start;	/* first visible row (partial mode only) */
	u16 partial_end;	/* one past the last visible row */
};

/* Driver state: DRM panel plus the SPI control channel and supplies. */
struct st7789v {
	struct drm_panel panel;
	const struct st7789_panel_info *info;
	struct spi_device *spi;
	struct gpio_desc *reset;
	struct regulator *power;
	enum drm_panel_orientation orientation;
};

/* 9th SPI bit: selects whether the following byte is a command or data. */
enum st7789v_prefix {
	ST7789V_COMMAND = 0,
	ST7789V_DATA = 1,
};

static inline struct st7789v *panel_to_st7789v(struct drm_panel *panel)
{
	return container_of(panel, struct st7789v, panel);
}
/*
 * Ship one 9-bit frame to the controller: the D/C flag in bit 8 and the
 * payload byte in bits 7..0 (the SPI device is set up for 9 bits_per_word).
 */
static int st7789v_spi_write(struct st7789v *ctx, enum st7789v_prefix prefix,
			     u8 data)
{
	u16 frame = ((prefix & 1) << 8) | data;
	struct spi_transfer xfer = {
		.tx_buf = &frame,
		.len = sizeof(frame),
	};

	return spi_sync_transfer(ctx->spi, &xfer, 1);
}
/* Send one command byte (D/C flag = command). */
static int st7789v_write_command(struct st7789v *ctx, u8 cmd)
{
	return st7789v_spi_write(ctx, ST7789V_COMMAND, cmd);
}

/* Send one parameter/data byte (D/C flag = data). */
static int st7789v_write_data(struct st7789v *ctx, u8 cmd)
{
	return st7789v_spi_write(ctx, ST7789V_DATA, cmd);
}
/*
 * Issue command @cmd and read back @len reply bytes.
 *
 * The bus runs 9-bit words, so each received 16-bit word carries the reply
 * shifted by one extra bit per word; the loop below realigns the stream,
 * carrying the spilled low bits ("bit9") from one word into the next byte.
 * Only reply lengths the controller actually produces (1, 3 or 4 bytes)
 * are supported.  NOTE(review): the realignment arithmetic assumes the
 * controller's dummy-clock behaviour on 9-bit reads — confirm against the
 * ST7789V datasheet read-sequence diagrams.
 */
static int st7789v_read_data(struct st7789v *ctx, u8 cmd, u8 *buf,
			     unsigned int len)
{
	struct spi_transfer xfer[2] = { };
	struct spi_message msg;
	u16 txbuf = ((ST7789V_COMMAND & 1) << 8) | cmd;
	u16 rxbuf[4] = {};
	u8 bit9 = 0;
	int ret, i;

	switch (len) {
	case 1:
	case 3:
	case 4:
		break;
	default:
		return -EOPNOTSUPP;
	}

	spi_message_init(&msg);

	xfer[0].tx_buf = &txbuf;
	xfer[0].len = sizeof(txbuf);
	spi_message_add_tail(&xfer[0], &msg);

	xfer[1].rx_buf = rxbuf;
	xfer[1].len = len * 2;
	spi_message_add_tail(&xfer[1], &msg);

	ret = spi_sync(ctx->spi, &msg);
	if (ret)
		return ret;

	for (i = 0; i < len; i++) {
		/* Combine this word's high bits with the previous carry. */
		buf[i] = rxbuf[i] >> i | (bit9 << (9 - i));
		if (i)
			bit9 = rxbuf[i] & GENMASK(i - 1, 0);
	}

	return 0;
}
/*
 * Read the 3-byte display ID and compare it against the values a genuine
 * ST7789V reports.
 *
 * Returns 0 when the IDs match, or when the SPI bus is write-only (3-wire
 * without MISO) and the check cannot be performed; a negative errno on a
 * transfer error; -EIO when the controller answered with unexpected IDs.
 *
 * Fix: ids[] is now sized by ST7789V_IDS_SIZE instead of a hard-coded 3,
 * and the open-coded comparison loop is replaced by memcmp().
 */
static int st7789v_check_id(struct drm_panel *panel)
{
	const u8 st7789v_ids[ST7789V_IDS_SIZE] = ST7789V_IDS;
	struct st7789v *ctx = panel_to_st7789v(panel);
	u8 ids[ST7789V_IDS_SIZE];
	int ret;

	/* Nothing to verify on a unidirectional bus. */
	if (ctx->spi->mode & SPI_NO_RX)
		return 0;

	ret = st7789v_read_data(ctx, MIPI_DCS_GET_DISPLAY_ID, ids,
				ST7789V_IDS_SIZE);
	if (ret)
		return ret;

	if (memcmp(ids, st7789v_ids, ST7789V_IDS_SIZE))
		return -EIO;

	return 0;
}
/* Generic 240x320 module timing (61x103 mm active area). */
static const struct drm_display_mode default_mode = {
	.clock = 7000,
	.hdisplay = 240,
	.hsync_start = 240 + 38,
	.hsync_end = 240 + 38 + 10,
	.htotal = 240 + 38 + 10 + 10,
	.vdisplay = 320,
	.vsync_start = 320 + 8,
	.vsync_end = 320 + 8 + 4,
	.vtotal = 320 + 8 + 4 + 4,
	.width_mm = 61,
	.height_mm = 103,
	.flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
};

/* Inanbo T28CP45TN89: same geometry, slower clock, smaller glass. */
static const struct drm_display_mode t28cp45tn89_mode = {
	.clock = 6008,
	.hdisplay = 240,
	.hsync_start = 240 + 38,
	.hsync_end = 240 + 38 + 10,
	.htotal = 240 + 38 + 10 + 10,
	.vdisplay = 320,
	.vsync_start = 320 + 8,
	.vsync_end = 320 + 8 + 4,
	.vtotal = 320 + 8 + 4 + 4,
	.width_mm = 43,
	.height_mm = 57,
	.flags = DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC,
};

/* EDT ET028013DMA: 240x320 at a 3 MHz pixel clock. */
static const struct drm_display_mode et028013dma_mode = {
	.clock = 3000,
	.hdisplay = 240,
	.hsync_start = 240 + 38,
	.hsync_end = 240 + 38 + 10,
	.htotal = 240 + 38 + 10 + 10,
	.vdisplay = 320,
	.vsync_start = 320 + 8,
	.vsync_end = 320 + 8 + 4,
	.vtotal = 320 + 8 + 4 + 4,
	.width_mm = 43,
	.height_mm = 58,
	.flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
};

/* Jasonic JT240MHQS: 240x280 visible window of the 240x320 GRAM. */
static const struct drm_display_mode jt240mhqs_hwt_ek_e3_mode = {
	.clock = 6000,
	.hdisplay = 240,
	.hsync_start = 240 + 28,
	.hsync_end = 240 + 28 + 10,
	.htotal = 240 + 28 + 10 + 10,
	.vdisplay = 280,
	.vsync_start = 280 + 8,
	.vsync_end = 280 + 8 + 4,
	.vtotal = 280 + 8 + 4 + 4,
	.width_mm = 43,
	.height_mm = 37,
	.flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
};
/* Per-compatible panel descriptions referenced by the match tables below. */
static const struct st7789_panel_info default_panel = {
	.mode = &default_mode,
	.invert_mode = true,
	.bus_format = MEDIA_BUS_FMT_RGB666_1X18,
	.bus_flags = DRM_BUS_FLAG_DE_HIGH |
		     DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE,
};

static const struct st7789_panel_info t28cp45tn89_panel = {
	.mode = &t28cp45tn89_mode,
	.invert_mode = false,
	.bus_format = MEDIA_BUS_FMT_RGB565_1X16,
	.bus_flags = DRM_BUS_FLAG_DE_HIGH |
		     DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE,
};

static const struct st7789_panel_info et028013dma_panel = {
	.mode = &et028013dma_mode,
	.invert_mode = true,
	.bus_format = MEDIA_BUS_FMT_RGB666_1X18,
	.bus_flags = DRM_BUS_FLAG_DE_HIGH |
		     DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE,
};

/* Only this panel uses the controller's partial mode (rows 38..317). */
static const struct st7789_panel_info jt240mhqs_hwt_ek_e3_panel = {
	.mode = &jt240mhqs_hwt_ek_e3_mode,
	.invert_mode = true,
	.bus_format = MEDIA_BUS_FMT_RGB666_1X18,
	.bus_flags = DRM_BUS_FLAG_DE_HIGH |
		     DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE,
	.partial_mode = true,
	.partial_start = 38,
	.partial_end = 318,
};
static int st7789v_get_modes(struct drm_panel *panel,
struct drm_connector *connector)
{
struct st7789v *ctx = panel_to_st7789v(panel);
struct drm_display_mode *mode;
mode = drm_mode_duplicate(connector->dev, ctx->info->mode);
if (!mode) {
dev_err(panel->dev, "failed to add mode %ux%u@%u\n",
ctx->info->mode->hdisplay, ctx->info->mode->vdisplay,
drm_mode_vrefresh(ctx->info->mode));
return -ENOMEM;
}
drm_mode_set_name(mode);
mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
drm_mode_probed_add(connector, mode);
connector->display_info.bpc = 6;
connector->display_info.width_mm = ctx->info->mode->width_mm;
connector->display_info.height_mm = ctx->info->mode->height_mm;
connector->display_info.bus_flags = ctx->info->bus_flags;
drm_display_info_set_bus_formats(&connector->display_info,
&ctx->info->bus_format, 1);
/*
* TODO: Remove once all drm drivers call
* drm_connector_set_orientation_from_panel()
*/
drm_connector_set_panel_orientation(connector, ctx->orientation);
return 1;
}
static enum drm_panel_orientation st7789v_get_orientation(struct drm_panel *p)
{
struct st7789v *ctx = panel_to_st7789v(p);
return ctx->orientation;
}
/*
 * drm_panel .prepare hook: power up, reset and fully configure the
 * controller (pixel format, porches, gamma tables, voltage settings,
 * RGB-interface polarity, optional inversion and partial mode).
 *
 * Almost every step goes through ST7789V_TEST(), which returns from this
 * function immediately on a failed SPI write, so on error the regulator is
 * left enabled until unprepare() — matching the original behaviour.
 * Register values presumably come from the vendor init code — TODO confirm
 * against the ST7789V datasheet where changing them.
 */
static int st7789v_prepare(struct drm_panel *panel)
{
	struct st7789v *ctx = panel_to_st7789v(panel);
	u8 mode, pixel_fmt, polarity;
	int ret;

	/* In partial mode the display scans from RAM, so clear WO. */
	if (!ctx->info->partial_mode)
		mode = ST7789V_RGBCTRL_WO;
	else
		mode = 0;

	switch (ctx->info->bus_format) {
	case MEDIA_BUS_FMT_RGB666_1X18:
		pixel_fmt = MIPI_DCS_PIXEL_FMT_18BIT;
		break;
	case MEDIA_BUS_FMT_RGB565_1X16:
		pixel_fmt = MIPI_DCS_PIXEL_FMT_16BIT;
		break;
	default:
		dev_err(panel->dev, "unsupported bus format: %d\n",
			ctx->info->bus_format);
		return -EINVAL;
	}

	/* Same format for both the RGB interface and the memory interface. */
	pixel_fmt = (pixel_fmt << 4) | pixel_fmt;

	/* Translate the DRM mode/bus flags into RGBCTRL polarity bits. */
	polarity = 0;
	if (ctx->info->mode->flags & DRM_MODE_FLAG_PVSYNC)
		polarity |= ST7789V_RGBCTRL_VSYNC_HIGH;
	if (ctx->info->mode->flags & DRM_MODE_FLAG_PHSYNC)
		polarity |= ST7789V_RGBCTRL_HSYNC_HIGH;
	if (ctx->info->bus_flags & DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE)
		polarity |= ST7789V_RGBCTRL_PCLK_FALLING;
	if (ctx->info->bus_flags & DRM_BUS_FLAG_DE_LOW)
		polarity |= ST7789V_RGBCTRL_DE_LOW;

	ret = regulator_enable(ctx->power);
	if (ret)
		return ret;

	/* Hardware reset pulse, then wait for the controller to come up. */
	gpiod_set_value(ctx->reset, 1);
	msleep(30);
	gpiod_set_value(ctx->reset, 0);
	msleep(120);

	/*
	 * Avoid failing if the IDs are invalid in case the Rx bus width
	 * description is missing.
	 */
	ret = st7789v_check_id(panel);
	if (ret)
		dev_warn(panel->dev, "Unrecognized panel IDs");

	ST7789V_TEST(ret, st7789v_write_command(ctx, MIPI_DCS_EXIT_SLEEP_MODE));

	/* We need to wait 120ms after a sleep out command */
	msleep(120);

	ST7789V_TEST(ret, st7789v_write_command(ctx,
						MIPI_DCS_SET_ADDRESS_MODE));
	ST7789V_TEST(ret, st7789v_write_data(ctx, 0));

	ST7789V_TEST(ret, st7789v_write_command(ctx,
						MIPI_DCS_SET_PIXEL_FORMAT));
	ST7789V_TEST(ret, st7789v_write_data(ctx, pixel_fmt));

	/* Porch control: normal, idle and partial mode porches. */
	ST7789V_TEST(ret, st7789v_write_command(ctx, ST7789V_PORCTRL_CMD));
	ST7789V_TEST(ret, st7789v_write_data(ctx, 0xc));
	ST7789V_TEST(ret, st7789v_write_data(ctx, 0xc));
	ST7789V_TEST(ret, st7789v_write_data(ctx, 0));
	ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_PORCTRL_IDLE_BP(3) |
					     ST7789V_PORCTRL_IDLE_FP(3)));
	ST7789V_TEST(ret, st7789v_write_data(ctx,
					     ST7789V_PORCTRL_PARTIAL_BP(3) |
					     ST7789V_PORCTRL_PARTIAL_FP(3)));

	/* Gate driver voltages. */
	ST7789V_TEST(ret, st7789v_write_command(ctx, ST7789V_GCTRL_CMD));
	ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_GCTRL_VGLS(5) |
					     ST7789V_GCTRL_VGHS(3)));

	ST7789V_TEST(ret, st7789v_write_command(ctx, ST7789V_VCOMS_CMD));
	ST7789V_TEST(ret, st7789v_write_data(ctx, 0x2b));

	ST7789V_TEST(ret, st7789v_write_command(ctx, ST7789V_LCMCTRL_CMD));
	ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_LCMCTRL_XMH |
					     ST7789V_LCMCTRL_XMX |
					     ST7789V_LCMCTRL_XBGR));

	ST7789V_TEST(ret, st7789v_write_command(ctx, ST7789V_VDVVRHEN_CMD));
	ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_VDVVRHEN_CMDEN));

	ST7789V_TEST(ret, st7789v_write_command(ctx, ST7789V_VRHS_CMD));
	ST7789V_TEST(ret, st7789v_write_data(ctx, 0xf));

	ST7789V_TEST(ret, st7789v_write_command(ctx, ST7789V_VDVS_CMD));
	ST7789V_TEST(ret, st7789v_write_data(ctx, 0x20));

	ST7789V_TEST(ret, st7789v_write_command(ctx, ST7789V_FRCTRL2_CMD));
	ST7789V_TEST(ret, st7789v_write_data(ctx, 0xf));

	ST7789V_TEST(ret, st7789v_write_command(ctx, ST7789V_PWCTRL1_CMD));
	ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_PWCTRL1_MAGIC));
	ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_PWCTRL1_AVDD(2) |
					     ST7789V_PWCTRL1_AVCL(2) |
					     ST7789V_PWCTRL1_VDS(1)));

	/* Positive gamma correction curve. */
	ST7789V_TEST(ret, st7789v_write_command(ctx, ST7789V_PVGAMCTRL_CMD));
	ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_PVGAMCTRL_VP63(0xd)));
	ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_PVGAMCTRL_VP1(0xca)));
	ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_PVGAMCTRL_VP2(0xe)));
	ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_PVGAMCTRL_VP4(8)));
	ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_PVGAMCTRL_VP6(9)));
	ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_PVGAMCTRL_VP13(7)));
	ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_PVGAMCTRL_VP20(0x2d)));
	ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_PVGAMCTRL_VP27(0xb) |
					     ST7789V_PVGAMCTRL_VP36(3)));
	ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_PVGAMCTRL_VP43(0x3d)));
	ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_PVGAMCTRL_JP1(3) |
					     ST7789V_PVGAMCTRL_VP50(4)));
	ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_PVGAMCTRL_VP57(0xa)));
	ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_PVGAMCTRL_VP59(0xa)));
	ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_PVGAMCTRL_VP61(0x1b)));
	ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_PVGAMCTRL_VP62(0x28)));

	/* Negative gamma correction curve. */
	ST7789V_TEST(ret, st7789v_write_command(ctx, ST7789V_NVGAMCTRL_CMD));
	ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_NVGAMCTRL_VN63(0xd)));
	ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_NVGAMCTRL_VN1(0xca)));
	ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_NVGAMCTRL_VN2(0xf)));
	ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_NVGAMCTRL_VN4(8)));
	ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_NVGAMCTRL_VN6(8)));
	ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_NVGAMCTRL_VN13(7)));
	ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_NVGAMCTRL_VN20(0x2e)));
	ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_NVGAMCTRL_VN27(0xc) |
					     ST7789V_NVGAMCTRL_VN36(5)));
	ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_NVGAMCTRL_VN43(0x40)));
	ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_NVGAMCTRL_JN1(3) |
					     ST7789V_NVGAMCTRL_VN50(4)));
	ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_NVGAMCTRL_VN57(9)));
	ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_NVGAMCTRL_VN59(0xb)));
	ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_NVGAMCTRL_VN61(0x1b)));
	ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_NVGAMCTRL_VN62(0x28)));

	if (ctx->info->invert_mode) {
		ST7789V_TEST(ret, st7789v_write_command(ctx,
						MIPI_DCS_ENTER_INVERT_MODE));
	} else {
		ST7789V_TEST(ret, st7789v_write_command(ctx,
						MIPI_DCS_EXIT_INVERT_MODE));
	}

	if (ctx->info->partial_mode) {
		u8 area_data[4] = {
			(ctx->info->partial_start >> 8) & 0xff,
			(ctx->info->partial_start >> 0) & 0xff,
			((ctx->info->partial_end - 1) >> 8) & 0xff,
			((ctx->info->partial_end - 1) >> 0) & 0xff,
		};

		/* Caution: if userspace ever pushes a mode different from the
		 * expected one (i.e., the one advertised by get_modes), we'll
		 * add margins.
		 */

		ST7789V_TEST(ret, st7789v_write_command(
					  ctx, MIPI_DCS_ENTER_PARTIAL_MODE));

		ST7789V_TEST(ret, st7789v_write_command(
					  ctx, MIPI_DCS_SET_PAGE_ADDRESS));
		ST7789V_TEST(ret, st7789v_write_data(ctx, area_data[0]));
		ST7789V_TEST(ret, st7789v_write_data(ctx, area_data[1]));
		ST7789V_TEST(ret, st7789v_write_data(ctx, area_data[2]));
		ST7789V_TEST(ret, st7789v_write_data(ctx, area_data[3]));

		ST7789V_TEST(ret, st7789v_write_command(
					  ctx, MIPI_DCS_SET_PARTIAL_ROWS));
		ST7789V_TEST(ret, st7789v_write_data(ctx, area_data[0]));
		ST7789V_TEST(ret, st7789v_write_data(ctx, area_data[1]));
		ST7789V_TEST(ret, st7789v_write_data(ctx, area_data[2]));
		ST7789V_TEST(ret, st7789v_write_data(ctx, area_data[3]));
	}

	/* Route pixel data from the RGB interface into display RAM. */
	ST7789V_TEST(ret, st7789v_write_command(ctx, ST7789V_RAMCTRL_CMD));
	ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_RAMCTRL_DM_RGB |
					     ST7789V_RAMCTRL_RM_RGB));
	ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_RAMCTRL_EPF(3) |
					     ST7789V_RAMCTRL_MAGIC));

	ST7789V_TEST(ret, st7789v_write_command(ctx, ST7789V_RGBCTRL_CMD));
	ST7789V_TEST(ret, st7789v_write_data(ctx, mode |
					     ST7789V_RGBCTRL_RCM(2) |
					     polarity));
	ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_RGBCTRL_VBP(8)));
	ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_RGBCTRL_HBP(20)));

	return 0;
}
/* drm_panel .enable hook: the panel was configured in prepare(); turn it on. */
static int st7789v_enable(struct drm_panel *panel)
{
	struct st7789v *st = panel_to_st7789v(panel);

	return st7789v_write_command(st, MIPI_DCS_SET_DISPLAY_ON);
}
/*
 * drm_panel .disable hook: blank the display.  The original returned the
 * write status via ST7789V_TEST and 0 otherwise, which is equivalent to
 * returning the status directly.
 */
static int st7789v_disable(struct drm_panel *panel)
{
	struct st7789v *st = panel_to_st7789v(panel);

	return st7789v_write_command(st, MIPI_DCS_SET_DISPLAY_OFF);
}
/*
 * drm_panel .unprepare hook: put the controller to sleep and drop its
 * supply.  If the sleep command fails the regulator is intentionally left
 * on, matching the original early-return behaviour.
 */
static int st7789v_unprepare(struct drm_panel *panel)
{
	struct st7789v *st = panel_to_st7789v(panel);
	int err;

	err = st7789v_write_command(st, MIPI_DCS_ENTER_SLEEP_MODE);
	if (err)
		return err;

	regulator_disable(st->power);

	return 0;
}
/* drm_panel operations table. */
static const struct drm_panel_funcs st7789v_drm_funcs = {
	.disable = st7789v_disable,
	.enable = st7789v_enable,
	.get_modes = st7789v_get_modes,
	.get_orientation = st7789v_get_orientation,
	.prepare = st7789v_prepare,
	.unprepare = st7789v_unprepare,
};
static int st7789v_probe(struct spi_device *spi)
{
struct device *dev = &spi->dev;
struct st7789v *ctx;
int ret;
ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
spi_set_drvdata(spi, ctx);
ctx->spi = spi;
spi->bits_per_word = 9;
ret = spi_setup(spi);
if (ret < 0)
return dev_err_probe(&spi->dev, ret, "Failed to setup spi\n");
ctx->info = device_get_match_data(&spi->dev);
drm_panel_init(&ctx->panel, dev, &st7789v_drm_funcs,
DRM_MODE_CONNECTOR_DPI);
ctx->power = devm_regulator_get(dev, "power");
ret = PTR_ERR_OR_ZERO(ctx->power);
if (ret)
return dev_err_probe(dev, ret, "Failed to get regulator\n");
ctx->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
ret = PTR_ERR_OR_ZERO(ctx->reset);
if (ret)
return dev_err_probe(dev, ret, "Failed to get reset line\n");
ret = drm_panel_of_backlight(&ctx->panel);
if (ret)
return dev_err_probe(dev, ret, "Failed to get backlight\n");
of_drm_get_panel_orientation(spi->dev.of_node, &ctx->orientation);
drm_panel_add(&ctx->panel);
return 0;
}
/* SPI remove: unregister the panel; everything else is devm-managed. */
static void st7789v_remove(struct spi_device *spi)
{
	struct st7789v *st = spi_get_drvdata(spi);

	drm_panel_remove(&st->panel);
}
/* Legacy SPI id table; driver_data mirrors the OF match data below. */
static const struct spi_device_id st7789v_spi_id[] = {
	{ "st7789v", (unsigned long) &default_panel },
	{ "t28cp45tn89-v17", (unsigned long) &t28cp45tn89_panel },
	{ "et028013dma", (unsigned long) &et028013dma_panel },
	{ "jt240mhqs-hwt-ek-e3", (unsigned long) &jt240mhqs_hwt_ek_e3_panel },
	{ }
};
MODULE_DEVICE_TABLE(spi, st7789v_spi_id);

/* Device-tree match table; .data selects the per-panel description. */
static const struct of_device_id st7789v_of_match[] = {
	{ .compatible = "sitronix,st7789v", .data = &default_panel },
	{ .compatible = "inanbo,t28cp45tn89-v17", .data = &t28cp45tn89_panel },
	{ .compatible = "edt,et028013dma", .data = &et028013dma_panel },
	{ .compatible = "jasonic,jt240mhqs-hwt-ek-e3",
	  .data = &jt240mhqs_hwt_ek_e3_panel },
	{ }
};
MODULE_DEVICE_TABLE(of, st7789v_of_match);
static struct spi_driver st7789v_driver = {
	.probe = st7789v_probe,
	.remove = st7789v_remove,
	.id_table = st7789v_spi_id,
	.driver = {
		.name = "st7789v",
		.of_match_table = st7789v_of_match,
	},
};
module_spi_driver(st7789v_driver);

MODULE_AUTHOR("Maxime Ripard <[email protected]>")
MODULE_AUTHOR("Maxime Ripard <[email protected]>");
MODULE_DESCRIPTION("Sitronix st7789v LCD Driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/gpu/drm/panel/panel-sitronix-st7789v.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2015-2018 Etnaviv Project
*/
#include <linux/component.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/uaccess.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_of.h>
#include <drm/drm_prime.h>
#include "etnaviv_cmdbuf.h"
#include "etnaviv_drv.h"
#include "etnaviv_gpu.h"
#include "etnaviv_gem.h"
#include "etnaviv_mmu.h"
#include "etnaviv_perfmon.h"
/*
* DRM operations:
*/
/*
 * Initialise every probed GPU core; a core whose init fails is dropped
 * from the pipe array so the rest of the driver ignores it.
 */
static void load_gpu(struct drm_device *dev)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	unsigned int pipe;

	for (pipe = 0; pipe < ETNA_MAX_PIPES; pipe++) {
		struct etnaviv_gpu *g = priv->gpu[pipe];

		if (!g)
			continue;

		if (etnaviv_gpu_init(g) != 0)
			priv->gpu[pipe] = NULL;
	}
}
/*
 * DRM file open: allocate the per-file context, publish it in the
 * active_contexts xarray, create a per-file MMU context and one scheduler
 * entity per present GPU pipe.
 *
 * Fix: if etnaviv_iommu_context_init() failed, the context was freed while
 * still registered in priv->active_contexts (xa_alloc_cyclic() had already
 * succeeded), leaving a dangling pointer in the xarray.  Erase the entry on
 * that error path before freeing.
 */
static int etnaviv_open(struct drm_device *dev, struct drm_file *file)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct etnaviv_file_private *ctx;
	int ret, i;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ret = xa_alloc_cyclic(&priv->active_contexts, &ctx->id, ctx,
			      xa_limit_32b, &priv->next_context_id, GFP_KERNEL);
	if (ret < 0)
		goto out_free;

	ctx->mmu = etnaviv_iommu_context_init(priv->mmu_global,
					      priv->cmdbuf_suballoc);
	if (!ctx->mmu) {
		ret = -ENOMEM;
		goto out_erase;
	}

	for (i = 0; i < ETNA_MAX_PIPES; i++) {
		struct etnaviv_gpu *gpu = priv->gpu[i];
		struct drm_gpu_scheduler *sched;

		if (gpu) {
			sched = &gpu->sched;
			drm_sched_entity_init(&ctx->sched_entity[i],
					      DRM_SCHED_PRIORITY_NORMAL, &sched,
					      1, NULL);
		}
	}

	file->driver_priv = ctx;

	return 0;

out_erase:
	xa_erase(&priv->active_contexts, ctx->id);
out_free:
	kfree(ctx);
	return ret;
}
/*
 * DRM file close: destroy the per-pipe scheduler entities, release the
 * per-file MMU context, unpublish and free the context.
 */
static void etnaviv_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct etnaviv_file_private *ctx = file->driver_priv;
	struct etnaviv_drm_private *priv = dev->dev_private;
	unsigned int pipe;

	for (pipe = 0; pipe < ETNA_MAX_PIPES; pipe++) {
		if (!priv->gpu[pipe])
			continue;

		drm_sched_entity_destroy(&ctx->sched_entity[pipe]);
	}

	etnaviv_iommu_context_put(ctx->mmu);

	xa_erase(&priv->active_contexts, ctx->id);

	kfree(ctx);
}
/*
* DRM debugfs:
*/
#ifdef CONFIG_DEBUG_FS
/* debugfs "gem": dump all active GEM objects. */
static int etnaviv_gem_show(struct drm_device *dev, struct seq_file *m)
{
	etnaviv_gem_describe_objects(dev->dev_private, m);

	return 0;
}
static int etnaviv_mm_show(struct drm_device *dev, struct seq_file *m)
{
struct drm_printer p = drm_seq_file_printer(m);
read_lock(&dev->vma_offset_manager->vm_lock);
drm_mm_print(&dev->vma_offset_manager->vm_addr_space_mm, &p);
read_unlock(&dev->vma_offset_manager->vm_lock);
return 0;
}
/* debugfs "mmu": print the MMU address space of the GPU's current context. */
static int etnaviv_mmu_show(struct etnaviv_gpu *gpu, struct seq_file *m)
{
	struct drm_printer p = drm_seq_file_printer(m);
	struct etnaviv_iommu_context *mmu_context;

	seq_printf(m, "Active Objects (%s):\n", dev_name(gpu->dev));

	/*
	 * Lock the GPU to avoid a MMU context switch just now and elevate
	 * the refcount of the current context to avoid it disappearing from
	 * under our feet.
	 */
	mutex_lock(&gpu->lock);
	mmu_context = gpu->mmu_context;
	if (mmu_context)
		etnaviv_iommu_context_get(mmu_context);
	mutex_unlock(&gpu->lock);

	/* No context bound yet — nothing to print. */
	if (!mmu_context)
		return 0;

	/* The context's own lock protects its drm_mm while we print it. */
	mutex_lock(&mmu_context->lock);
	drm_mm_print(&mmu_context->mm, &p);
	mutex_unlock(&mmu_context->lock);

	/* Drop the reference taken above. */
	etnaviv_iommu_context_put(mmu_context);

	return 0;
}
/*
 * Hex-dump the GPU ring buffer: a header with its virtual/physical address
 * and remaining space, then the contents, four 32-bit words per line.
 * Caller must hold gpu->lock.
 */
static void etnaviv_buffer_dump(struct etnaviv_gpu *gpu, struct seq_file *m)
{
	struct etnaviv_cmdbuf *buf = &gpu->buffer;
	u32 size = buf->size;
	u32 *ptr = buf->vaddr;
	u32 i;

	seq_printf(m, "virt %p - phys 0x%llx - free 0x%08x\n",
		   buf->vaddr, (u64)etnaviv_cmdbuf_get_pa(buf),
		   size - buf->user_size);

	for (i = 0; i < size / 4; i++) {
		if (i % 4 == 0) {
			/* Newline before every row except the first. */
			if (i)
				seq_puts(m, "\n");
			seq_printf(m, "\t0x%p: ", ptr + i);
		}
		seq_printf(m, "%08x ", *(ptr + i));
	}

	seq_puts(m, "\n");
}
/* debugfs "ring": dump one GPU's ring buffer under the gpu lock. */
static int etnaviv_ring_show(struct etnaviv_gpu *gpu, struct seq_file *m)
{
	seq_printf(m, "Ring Buffer (%s): ", dev_name(gpu->dev));

	mutex_lock(&gpu->lock);
	etnaviv_buffer_dump(gpu, m);
	mutex_unlock(&gpu->lock);

	return 0;
}
/* Dispatch a device-scope debugfs entry: .data holds the show callback. */
static int show_unlocked(struct seq_file *m, void *arg)
{
	struct drm_info_node *node = m->private;
	int (*show)(struct drm_device *dev, struct seq_file *m) =
		node->info_ent->data;

	return show(node->minor->dev, m);
}
/*
 * Dispatch a per-GPU debugfs entry: run the show callback stored in .data
 * for every present pipe, stopping at the first negative return.
 */
static int show_each_gpu(struct seq_file *m, void *arg)
{
	struct drm_info_node *node = m->private;
	struct etnaviv_drm_private *priv = node->minor->dev->dev_private;
	int (*show)(struct etnaviv_gpu *gpu, struct seq_file *m) =
		node->info_ent->data;
	unsigned int pipe;
	int ret = 0;

	for (pipe = 0; pipe < ETNA_MAX_PIPES && ret >= 0; pipe++) {
		struct etnaviv_gpu *gpu = priv->gpu[pipe];

		if (gpu)
			ret = show(gpu, m);
	}

	return ret;
}
/*
 * debugfs entries; .data carries the typed show callback consumed by
 * show_unlocked()/show_each_gpu().  (Fix: the "mm" entry used a different
 * brace-spacing style than its siblings.)
 */
static struct drm_info_list etnaviv_debugfs_list[] = {
	{"gpu", show_each_gpu, 0, etnaviv_gpu_debugfs},
	{"gem", show_unlocked, 0, etnaviv_gem_show},
	{"mm", show_unlocked, 0, etnaviv_mm_show},
	{"mmu", show_each_gpu, 0, etnaviv_mmu_show},
	{"ring", show_each_gpu, 0, etnaviv_ring_show},
};
/* Register all etnaviv debugfs entries on the DRM minor. */
static void etnaviv_debugfs_init(struct drm_minor *minor)
{
	drm_debugfs_create_files(etnaviv_debugfs_list,
				 ARRAY_SIZE(etnaviv_debugfs_list),
				 minor->debugfs_root, minor);
}
#endif
/*
* DRM ioctls:
*/
/* ioctl GET_PARAM: query a parameter of the selected GPU pipe. */
static int etnaviv_ioctl_get_param(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_etnaviv_param *args = data;
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct etnaviv_gpu *gpu;

	/* Reject out-of-range pipe indices from userspace. */
	if (args->pipe >= ETNA_MAX_PIPES)
		return -EINVAL;

	/* The pipe index may be valid uapi-wise but unpopulated on this SoC. */
	gpu = priv->gpu[args->pipe];

	return gpu ? etnaviv_gpu_get_param(gpu, args->param, &args->value)
		   : -ENXIO;
}
static int etnaviv_ioctl_gem_new(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_etnaviv_gem_new *args = data;
if (args->flags & ~(ETNA_BO_CACHED | ETNA_BO_WC | ETNA_BO_UNCACHED |
ETNA_BO_FORCE_MMU))
return -EINVAL;
return etnaviv_gem_new_handle(dev, file, args->size,
args->flags, &args->handle);
}
static int etnaviv_ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_etnaviv_gem_cpu_prep *args = data;
struct drm_gem_object *obj;
int ret;
if (args->op & ~(ETNA_PREP_READ | ETNA_PREP_WRITE | ETNA_PREP_NOSYNC))
return -EINVAL;
obj = drm_gem_object_lookup(file, args->handle);
if (!obj)
return -ENOENT;
ret = etnaviv_gem_cpu_prep(obj, args->op, &args->timeout);
drm_gem_object_put(obj);
return ret;
}
static int etnaviv_ioctl_gem_cpu_fini(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_etnaviv_gem_cpu_fini *args = data;
struct drm_gem_object *obj;
int ret;
if (args->flags)
return -EINVAL;
obj = drm_gem_object_lookup(file, args->handle);
if (!obj)
return -ENOENT;
ret = etnaviv_gem_cpu_fini(obj);
drm_gem_object_put(obj);
return ret;
}
/* ETNAVIV_GEM_INFO: return the fake mmap offset for a BO handle. */
static int etnaviv_ioctl_gem_info(struct drm_device *dev, void *data,
	struct drm_file *file)
{
	struct drm_etnaviv_gem_info *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (args->pad)
		return -EINVAL;

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	ret = etnaviv_gem_mmap_offset(obj, &args->offset);
	drm_gem_object_put(obj);

	return ret;
}

/* ETNAVIV_WAIT_FENCE: wait for a fence on the selected pipe to signal. */
static int etnaviv_ioctl_wait_fence(struct drm_device *dev, void *data,
	struct drm_file *file)
{
	struct drm_etnaviv_wait_fence *args = data;
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct drm_etnaviv_timespec *timeout = &args->timeout;
	struct etnaviv_gpu *gpu;

	if (args->flags & ~(ETNA_WAIT_NONBLOCK))
		return -EINVAL;

	if (args->pipe >= ETNA_MAX_PIPES)
		return -EINVAL;

	gpu = priv->gpu[args->pipe];
	if (!gpu)
		return -ENXIO;

	/* NONBLOCK means poll: a NULL timeout makes the wait non-blocking */
	if (args->flags & ETNA_WAIT_NONBLOCK)
		timeout = NULL;

	return etnaviv_gpu_wait_fence_interruptible(gpu, args->fence,
						    timeout);
}
/*
 * ETNAVIV_GEM_USERPTR: wrap a page-aligned range of user memory in a GEM
 * object. The flags must request at least one of read/write access.
 */
static int etnaviv_ioctl_gem_userptr(struct drm_device *dev, void *data,
	struct drm_file *file)
{
	struct drm_etnaviv_gem_userptr *args = data;

	if (args->flags & ~(ETNA_USERPTR_READ|ETNA_USERPTR_WRITE) ||
	    args->flags == 0)
		return -EINVAL;

	/*
	 * Pointer and size must be page aligned and fit the internal
	 * (uintptr_t/u32) representations; a zero-sized object is
	 * rejected as it cannot back any mapping. The previous
	 * "user_ptr & ~PAGE_MASK" test was redundant with the
	 * offset_in_page() check and did not catch size 0.
	 */
	if (offset_in_page(args->user_ptr | args->user_size) ||
	    (uintptr_t)args->user_ptr != args->user_ptr ||
	    (u32)args->user_size != args->user_size ||
	    args->user_size == 0)
		return -EINVAL;

	if (!access_ok((void __user *)(unsigned long)args->user_ptr,
		       args->user_size))
		return -EFAULT;

	return etnaviv_gem_new_userptr(dev, file, args->user_ptr,
				       args->user_size, args->flags,
				       &args->handle);
}
/* ETNAVIV_GEM_WAIT: wait until a BO is idle on the selected pipe. */
static int etnaviv_ioctl_gem_wait(struct drm_device *dev, void *data,
	struct drm_file *file)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct drm_etnaviv_gem_wait *args = data;
	struct drm_etnaviv_timespec *timeout = &args->timeout;
	struct drm_gem_object *obj;
	struct etnaviv_gpu *gpu;
	int ret;

	if (args->flags & ~(ETNA_WAIT_NONBLOCK))
		return -EINVAL;

	if (args->pipe >= ETNA_MAX_PIPES)
		return -EINVAL;

	gpu = priv->gpu[args->pipe];
	if (!gpu)
		return -ENXIO;

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/* NULL timeout turns the wait into a non-blocking poll */
	if (args->flags & ETNA_WAIT_NONBLOCK)
		timeout = NULL;

	ret = etnaviv_gem_wait_bo(gpu, obj, timeout);

	drm_gem_object_put(obj);

	return ret;
}

/* ETNAVIV_PM_QUERY_DOM: enumerate performance monitor domains of a pipe. */
static int etnaviv_ioctl_pm_query_dom(struct drm_device *dev, void *data,
	struct drm_file *file)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct drm_etnaviv_pm_domain *args = data;
	struct etnaviv_gpu *gpu;

	if (args->pipe >= ETNA_MAX_PIPES)
		return -EINVAL;

	gpu = priv->gpu[args->pipe];
	if (!gpu)
		return -ENXIO;

	return etnaviv_pm_query_dom(gpu, args);
}

/* ETNAVIV_PM_QUERY_SIG: enumerate signals of a perfmon domain. */
static int etnaviv_ioctl_pm_query_sig(struct drm_device *dev, void *data,
	struct drm_file *file)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct drm_etnaviv_pm_signal *args = data;
	struct etnaviv_gpu *gpu;

	if (args->pipe >= ETNA_MAX_PIPES)
		return -EINVAL;

	gpu = priv->gpu[args->pipe];
	if (!gpu)
		return -ENXIO;

	return etnaviv_pm_query_sig(gpu, args);
}
/* ioctl table; all entries are usable on render nodes. */
static const struct drm_ioctl_desc etnaviv_ioctls[] = {
#define ETNA_IOCTL(n, func, flags) \
	DRM_IOCTL_DEF_DRV(ETNAVIV_##n, etnaviv_ioctl_##func, flags)
	ETNA_IOCTL(GET_PARAM,    get_param,    DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_NEW,      gem_new,      DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_INFO,     gem_info,     DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_CPU_PREP, gem_cpu_prep, DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_CPU_FINI, gem_cpu_fini, DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_SUBMIT,   gem_submit,   DRM_RENDER_ALLOW),
	ETNA_IOCTL(WAIT_FENCE,   wait_fence,   DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_USERPTR,  gem_userptr,  DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_WAIT,     gem_wait,     DRM_RENDER_ALLOW),
	ETNA_IOCTL(PM_QUERY_DOM, pm_query_dom, DRM_RENDER_ALLOW),
	ETNA_IOCTL(PM_QUERY_SIG, pm_query_sig, DRM_RENDER_ALLOW),
};

DEFINE_DRM_GEM_FOPS(fops);

/* DRM driver description: GEM + render node support. */
static const struct drm_driver etnaviv_drm_driver = {
	.driver_features    = DRIVER_GEM | DRIVER_RENDER,
	.open               = etnaviv_open,
	.postclose           = etnaviv_postclose,
	.gem_prime_import_sg_table = etnaviv_gem_prime_import_sg_table,
#ifdef CONFIG_DEBUG_FS
	.debugfs_init       = etnaviv_debugfs_init,
#endif
	.ioctls             = etnaviv_ioctls,
	.num_ioctls         = DRM_ETNAVIV_NUM_IOCTLS,
	.fops               = &fops,
	.name               = "etnaviv",
	.desc               = "etnaviv DRM",
	.date               = "20151214",
	.major              = 1,
	.minor              = 3,
};
/*
* Platform driver:
*/
/*
 * Component master bind: allocate the DRM device and driver-private
 * state, create the cmdbuf suballocator, bind all GPU core components
 * and register the DRM device. Unwinds fully on any failure.
 */
static int etnaviv_bind(struct device *dev)
{
	struct etnaviv_drm_private *priv;
	struct drm_device *drm;
	int ret;

	drm = drm_dev_alloc(&etnaviv_drm_driver, dev);
	if (IS_ERR(drm))
		return PTR_ERR(drm);

	/* no error message here: the allocator already warns on failure */
	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		ret = -ENOMEM;
		goto out_put;
	}

	drm->dev_private = priv;

	dma_set_max_seg_size(dev, SZ_2G);

	xa_init_flags(&priv->active_contexts, XA_FLAGS_ALLOC);

	mutex_init(&priv->gem_lock);
	INIT_LIST_HEAD(&priv->gem_list);
	priv->num_gpus = 0;
	priv->shm_gfp_mask = GFP_HIGHUSER | __GFP_RETRY_MAYFAIL | __GFP_NOWARN;

	priv->cmdbuf_suballoc = etnaviv_cmdbuf_suballoc_new(drm->dev);
	if (IS_ERR(priv->cmdbuf_suballoc)) {
		dev_err(drm->dev, "Failed to create cmdbuf suballocator\n");
		ret = PTR_ERR(priv->cmdbuf_suballoc);
		goto out_free_priv;
	}

	dev_set_drvdata(dev, drm);

	ret = component_bind_all(dev, drm);
	if (ret < 0)
		goto out_destroy_suballoc;

	load_gpu(drm);

	ret = drm_dev_register(drm, 0);
	if (ret)
		goto out_unbind;

	return 0;

out_unbind:
	component_unbind_all(dev, drm);
out_destroy_suballoc:
	etnaviv_cmdbuf_suballoc_destroy(priv->cmdbuf_suballoc);
out_free_priv:
	kfree(priv);
out_put:
	drm_dev_put(drm);

	return ret;
}
/* Component master unbind: tear down in reverse order of etnaviv_bind(). */
static void etnaviv_unbind(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);
	struct etnaviv_drm_private *priv = drm->dev_private;

	drm_dev_unregister(drm);

	component_unbind_all(dev, drm);

	etnaviv_cmdbuf_suballoc_destroy(priv->cmdbuf_suballoc);

	xa_destroy(&priv->active_contexts);

	drm->dev_private = NULL;
	kfree(priv);

	drm_dev_put(drm);
}

static const struct component_master_ops etnaviv_master_ops = {
	.bind = etnaviv_bind,
	.unbind = etnaviv_unbind,
};
/*
 * Probe the virtual etnaviv platform device: collect all available
 * "vivante,gc" GPU cores (or platform-data-supplied device names) as
 * component match entries, configure DMA masks and register the
 * component master.
 */
static int etnaviv_pdev_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *first_node = NULL;
	struct component_match *match = NULL;

	if (!dev->platform_data) {
		struct device_node *core_node;

		for_each_compatible_node(core_node, NULL, "vivante,gc") {
			if (!of_device_is_available(core_node))
				continue;

			if (!first_node)
				first_node = core_node;

			drm_of_component_match_add(&pdev->dev, &match,
						   component_compare_of, core_node);
		}
	} else {
		char **names = dev->platform_data;
		unsigned i;

		for (i = 0; names[i]; i++)
			component_match_add(dev, &match, component_compare_dev_name, names[i]);
	}

	/*
	 * PTA and MTLB can have 40 bit base addresses, but
	 * unfortunately, an entry in the MTLB can only point to a
	 * 32 bit base address of a STLB. Moreover, to initialize the
	 * MMU we need a command buffer with a 32 bit address because
	 * without an MMU there is only an identity mapping between
	 * the internal 32 bit addresses and the bus addresses.
	 *
	 * To make things easy, we set the dma_coherent_mask to 32
	 * bit to make sure we are allocating the command buffers and
	 * TLBs in the lower 4 GiB address space.
	 */
	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(40)) ||
	    dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32))) {
		dev_dbg(&pdev->dev, "No suitable DMA available\n");
		return -ENODEV;
	}

	/*
	 * Apply the same DMA configuration to the virtual etnaviv
	 * device as the GPU we found. This assumes that all Vivante
	 * GPUs in the system share the same DMA constraints.
	 */
	if (first_node)
		of_dma_configure(&pdev->dev, first_node, true);

	return component_master_add_with_match(dev, &etnaviv_master_ops, match);
}
/* Remove the virtual platform device: drop the component master. */
static int etnaviv_pdev_remove(struct platform_device *pdev)
{
	component_master_del(&pdev->dev, &etnaviv_master_ops);

	return 0;
}

static struct platform_driver etnaviv_platform_driver = {
	.probe      = etnaviv_pdev_probe,
	.remove     = etnaviv_pdev_remove,
	.driver     = {
		.name   = "etnaviv",
	},
};
/* The virtual platform device instantiated when a GPU node is found. */
static struct platform_device *etnaviv_drm;

/*
 * Module init: register the GPU core driver and the virtual platform
 * driver, then create the single "etnaviv" platform device if at least
 * one available "vivante,gc" node exists in the DT.
 */
static int __init etnaviv_init(void)
{
	struct platform_device *pdev;
	int ret;
	struct device_node *np;

	etnaviv_validate_init();

	ret = platform_driver_register(&etnaviv_gpu_driver);
	if (ret != 0)
		return ret;

	ret = platform_driver_register(&etnaviv_platform_driver);
	if (ret != 0)
		goto unregister_gpu_driver;

	/*
	 * If the DT contains at least one available GPU device, instantiate
	 * the DRM platform device.
	 */
	for_each_compatible_node(np, NULL, "vivante,gc") {
		if (!of_device_is_available(np))
			continue;

		pdev = platform_device_alloc("etnaviv", PLATFORM_DEVID_NONE);
		if (!pdev) {
			ret = -ENOMEM;
			of_node_put(np);
			goto unregister_platform_driver;
		}

		ret = platform_device_add(pdev);
		if (ret) {
			platform_device_put(pdev);
			of_node_put(np);
			goto unregister_platform_driver;
		}

		/* one virtual device serves all GPU cores */
		etnaviv_drm = pdev;
		of_node_put(np);
		break;
	}

	return 0;

unregister_platform_driver:
	platform_driver_unregister(&etnaviv_platform_driver);
unregister_gpu_driver:
	platform_driver_unregister(&etnaviv_gpu_driver);
	return ret;
}
module_init(etnaviv_init);

/* Module exit: tear down in reverse order of etnaviv_init(). */
static void __exit etnaviv_exit(void)
{
	platform_device_unregister(etnaviv_drm);
	platform_driver_unregister(&etnaviv_platform_driver);
	platform_driver_unregister(&etnaviv_gpu_driver);
}
module_exit(etnaviv_exit);
MODULE_AUTHOR("Christian Gmeiner <[email protected]>");
MODULE_AUTHOR("Russell King <[email protected]>");
MODULE_AUTHOR("Lucas Stach <[email protected]>");
MODULE_DESCRIPTION("etnaviv DRM Driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:etnaviv");
| linux-master | drivers/gpu/drm/etnaviv/etnaviv_drv.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2014-2018 Etnaviv Project
*/
#include <linux/bitops.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"
#include "state_hi.xml.h"
#define PT_SIZE SZ_2M
#define PT_ENTRIES (PT_SIZE / sizeof(u32))
#define GPU_MEM_START 0x80000000
/* MMUv1 context: one flat page table covering the whole GPU VA window. */
struct etnaviv_iommuv1_context {
	struct etnaviv_iommu_context base;
	u32 *pgtable_cpu;	/* CPU mapping of the page table */
	dma_addr_t pgtable_dma;	/* bus address programmed into the MC */
};

static struct etnaviv_iommuv1_context *
to_v1_context(struct etnaviv_iommu_context *context)
{
	return container_of(context, struct etnaviv_iommuv1_context, base);
}

/* Free the context: release the address space and the page table. */
static void etnaviv_iommuv1_free(struct etnaviv_iommu_context *context)
{
	struct etnaviv_iommuv1_context *v1_context = to_v1_context(context);

	drm_mm_takedown(&context->mm);

	dma_free_wc(context->global->dev, PT_SIZE, v1_context->pgtable_cpu,
		    v1_context->pgtable_dma);

	/* this was the single shared v1 context; clear the global slot */
	context->global->v1.shared_context = NULL;

	kfree(v1_context);
}

/* Map one 4K page: write the physical address into the flat table. */
static int etnaviv_iommuv1_map(struct etnaviv_iommu_context *context,
			       unsigned long iova, phys_addr_t paddr,
			       size_t size, int prot)
{
	struct etnaviv_iommuv1_context *v1_context = to_v1_context(context);
	unsigned int index = (iova - GPU_MEM_START) / SZ_4K;

	if (size != SZ_4K)
		return -EINVAL;

	v1_context->pgtable_cpu[index] = paddr;

	return 0;
}

/*
 * Unmap one 4K page by pointing the entry back at the scapegoat bad page.
 * NOTE(review): returns -EINVAL through a size_t on a size mismatch, which
 * yields a huge positive value to the caller — confirm callers only ever
 * pass SZ_4K.
 */
static size_t etnaviv_iommuv1_unmap(struct etnaviv_iommu_context *context,
	unsigned long iova, size_t size)
{
	struct etnaviv_iommuv1_context *v1_context = to_v1_context(context);
	unsigned int index = (iova - GPU_MEM_START) / SZ_4K;

	if (size != SZ_4K)
		return -EINVAL;

	v1_context->pgtable_cpu[index] = context->global->bad_page_dma;

	return SZ_4K;
}

/* Coredump helpers: dump the entire flat page table. */
static size_t etnaviv_iommuv1_dump_size(struct etnaviv_iommu_context *context)
{
	return PT_SIZE;
}

static void etnaviv_iommuv1_dump(struct etnaviv_iommu_context *context,
				 void *buf)
{
	struct etnaviv_iommuv1_context *v1_context = to_v1_context(context);

	memcpy(buf, v1_context->pgtable_cpu, PT_SIZE);
}
/*
 * Make @context current on @gpu: swap the GPU's context reference and
 * program memory base and page table addresses into the memory controller.
 */
static void etnaviv_iommuv1_restore(struct etnaviv_gpu *gpu,
				    struct etnaviv_iommu_context *context)
{
	struct etnaviv_iommuv1_context *v1_context = to_v1_context(context);
	u32 pgtable;

	if (gpu->mmu_context)
		etnaviv_iommu_context_put(gpu->mmu_context);
	gpu->mmu_context = etnaviv_iommu_context_get(context);

	/* set base addresses */
	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_RA, context->global->memory_base);
	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_FE, context->global->memory_base);
	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_TX, context->global->memory_base);
	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_PEZ, context->global->memory_base);
	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_PE, context->global->memory_base);

	/* set page table address in MC */
	pgtable = (u32)v1_context->pgtable_dma;

	gpu_write(gpu, VIVS_MC_MMU_FE_PAGE_TABLE, pgtable);
	gpu_write(gpu, VIVS_MC_MMU_TX_PAGE_TABLE, pgtable);
	gpu_write(gpu, VIVS_MC_MMU_PE_PAGE_TABLE, pgtable);
	gpu_write(gpu, VIVS_MC_MMU_PEZ_PAGE_TABLE, pgtable);
	gpu_write(gpu, VIVS_MC_MMU_RA_PAGE_TABLE, pgtable);
}

/* MMUv1 backend operations. */
const struct etnaviv_iommu_ops etnaviv_iommuv1_ops = {
	.free = etnaviv_iommuv1_free,
	.map = etnaviv_iommuv1_map,
	.unmap = etnaviv_iommuv1_unmap,
	.dump_size = etnaviv_iommuv1_dump_size,
	.dump = etnaviv_iommuv1_dump,
	.restore = etnaviv_iommuv1_restore,
};
/*
 * Get (or lazily create) the single shared MMUv1 context. Returns a
 * referenced context, or NULL on allocation failure.
 */
struct etnaviv_iommu_context *
etnaviv_iommuv1_context_alloc(struct etnaviv_iommu_global *global)
{
	struct etnaviv_iommuv1_context *v1_context;
	struct etnaviv_iommu_context *context;

	mutex_lock(&global->lock);

	/*
	 * MMUv1 does not support switching between different contexts without
	 * a stop the world operation, so we only support a single shared
	 * context with this version.
	 */
	if (global->v1.shared_context) {
		context = global->v1.shared_context;
		etnaviv_iommu_context_get(context);
		mutex_unlock(&global->lock);
		return context;
	}

	v1_context = kzalloc(sizeof(*v1_context), GFP_KERNEL);
	if (!v1_context) {
		mutex_unlock(&global->lock);
		return NULL;
	}

	v1_context->pgtable_cpu = dma_alloc_wc(global->dev, PT_SIZE,
					       &v1_context->pgtable_dma,
					       GFP_KERNEL);
	if (!v1_context->pgtable_cpu)
		goto out_free;

	/* point every entry at the scapegoat page until something is mapped */
	memset32(v1_context->pgtable_cpu, global->bad_page_dma, PT_ENTRIES);

	context = &v1_context->base;
	context->global = global;
	kref_init(&context->refcount);
	mutex_init(&context->lock);
	INIT_LIST_HEAD(&context->mappings);
	drm_mm_init(&context->mm, GPU_MEM_START, PT_ENTRIES * SZ_4K);
	context->global->v1.shared_context = context;

	mutex_unlock(&global->lock);

	return context;

out_free:
	mutex_unlock(&global->lock);
	kfree(v1_context);
	return NULL;
}
| linux-master | drivers/gpu/drm/etnaviv/etnaviv_iommu.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2015-2018 Etnaviv Project
*/
#include <drm/drm_prime.h>
#include <linux/dma-mapping.h>
#include <linux/shmem_fs.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include "etnaviv_drv.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"
static struct lock_class_key etnaviv_shm_lock_class;
static struct lock_class_key etnaviv_userptr_lock_class;
/*
 * DMA-map the object's sg table for cached/uncached (non-WC) BOs.
 * NOTE(review): the dma_map_sgtable() return value is ignored here —
 * confirm a mapping failure cannot leave a stale sgt behind.
 */
static void etnaviv_gem_scatter_map(struct etnaviv_gem_object *etnaviv_obj)
{
	struct drm_device *dev = etnaviv_obj->base.dev;
	struct sg_table *sgt = etnaviv_obj->sgt;

	/*
	 * For non-cached buffers, ensure the new pages are clean
	 * because display controller, GPU, etc. are not coherent.
	 */
	if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
		dma_map_sgtable(dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
}

/* Undo etnaviv_gem_scatter_map(). */
static void etnaviv_gem_scatterlist_unmap(struct etnaviv_gem_object *etnaviv_obj)
{
	struct drm_device *dev = etnaviv_obj->base.dev;
	struct sg_table *sgt = etnaviv_obj->sgt;

	/*
	 * For non-cached buffers, ensure the new pages are clean
	 * because display controller, GPU, etc. are not coherent:
	 *
	 * WARNING: The DMA API does not support concurrent CPU
	 * and device access to the memory area. With BIDIRECTIONAL,
	 * we will clean the cache lines which overlap the region,
	 * and invalidate all cache lines (partially) contained in
	 * the region.
	 *
	 * If you have dirty data in the overlapping cache lines,
	 * that will corrupt the GPU-written data. If you have
	 * written into the remainder of the region, this can
	 * discard those writes.
	 */
	if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
		dma_unmap_sgtable(dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
}

/* called with etnaviv_obj->lock held */
static int etnaviv_gem_shmem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	struct drm_device *dev = etnaviv_obj->base.dev;
	struct page **p = drm_gem_get_pages(&etnaviv_obj->base);

	if (IS_ERR(p)) {
		dev_dbg(dev->dev, "could not get pages: %ld\n", PTR_ERR(p));
		return PTR_ERR(p);
	}

	etnaviv_obj->pages = p;

	return 0;
}

/* Release the DMA mapping and the shmem pages of the object. */
static void put_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	if (etnaviv_obj->sgt) {
		etnaviv_gem_scatterlist_unmap(etnaviv_obj);
		sg_free_table(etnaviv_obj->sgt);
		kfree(etnaviv_obj->sgt);
		etnaviv_obj->sgt = NULL;
	}
	if (etnaviv_obj->pages) {
		/* mark pages dirty but not accessed on release */
		drm_gem_put_pages(&etnaviv_obj->base, etnaviv_obj->pages,
				  true, false);

		etnaviv_obj->pages = NULL;
	}
}
/*
 * Return the object's backing pages, populating pages and the DMA-mapped
 * sg table on first use. Caller must hold etnaviv_obj->lock.
 */
struct page **etnaviv_gem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	int ret;

	lockdep_assert_held(&etnaviv_obj->lock);

	if (!etnaviv_obj->pages) {
		/* backend-specific page acquisition (shmem or userptr) */
		ret = etnaviv_obj->ops->get_pages(etnaviv_obj);
		if (ret < 0)
			return ERR_PTR(ret);
	}

	if (!etnaviv_obj->sgt) {
		struct drm_device *dev = etnaviv_obj->base.dev;
		int npages = etnaviv_obj->base.size >> PAGE_SHIFT;
		struct sg_table *sgt;

		sgt = drm_prime_pages_to_sg(etnaviv_obj->base.dev,
					    etnaviv_obj->pages, npages);
		if (IS_ERR(sgt)) {
			dev_err(dev->dev, "failed to allocate sgt: %ld\n",
				PTR_ERR(sgt));
			return ERR_CAST(sgt);
		}

		etnaviv_obj->sgt = sgt;

		etnaviv_gem_scatter_map(etnaviv_obj);
	}

	return etnaviv_obj->pages;
}

/* Counterpart of etnaviv_gem_get_pages(); currently a no-op. */
void etnaviv_gem_put_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	lockdep_assert_held(&etnaviv_obj->lock);
	/* when we start tracking the pin count, then do something here */
}
/*
 * Set up a userspace mapping of the object according to its caching
 * flags; pages are faulted in lazily via etnaviv_gem_fault().
 */
static int etnaviv_gem_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
		struct vm_area_struct *vma)
{
	pgprot_t vm_page_prot;

	vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);

	vm_page_prot = vm_get_page_prot(vma->vm_flags);

	if (etnaviv_obj->flags & ETNA_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_page_prot);
	} else if (etnaviv_obj->flags & ETNA_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_page_prot);
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		vma->vm_pgoff = 0;
		vma_set_file(vma, etnaviv_obj->base.filp);

		vma->vm_page_prot = vm_page_prot;
	}

	return 0;
}

/* GEM object mmap hook: dispatch to the backend (shmem or userptr). */
static int etnaviv_gem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	return etnaviv_obj->ops->mmap(etnaviv_obj, vma);
}

/* Page fault handler: pin the backing pages and insert the faulted PFN. */
static vm_fault_t etnaviv_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct page **pages;
	unsigned long pfn;
	pgoff_t pgoff;
	int err;

	/*
	 * Make sure we don't parallel update on a fault, nor move or remove
	 * something from beneath our feet. Note that vmf_insert_page() is
	 * specifically coded to take care of this, so we don't have to.
	 */
	err = mutex_lock_interruptible(&etnaviv_obj->lock);
	if (err)
		return VM_FAULT_NOPAGE;

	/* make sure we have pages attached now */
	pages = etnaviv_gem_get_pages(etnaviv_obj);
	mutex_unlock(&etnaviv_obj->lock);

	if (IS_ERR(pages)) {
		err = PTR_ERR(pages);
		return vmf_error(err);
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	pfn = page_to_pfn(pages[pgoff]);

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
	     pfn, pfn << PAGE_SHIFT);

	return vmf_insert_pfn(vma, vmf->address, pfn);
}
/* Create (if needed) and return the fake mmap offset of @obj. */
int etnaviv_gem_mmap_offset(struct drm_gem_object *obj, u64 *offset)
{
	int ret;

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		dev_err(obj->dev->dev, "could not allocate mmap offset\n");
	else
		*offset = drm_vma_node_offset_addr(&obj->vma_node);

	return ret;
}
/*
 * Find the VRAM mapping of @obj that belongs to @context, or NULL.
 * A NULL @context matches reaped (ownerless) mappings.
 */
static struct etnaviv_vram_mapping *
etnaviv_gem_get_vram_mapping(struct etnaviv_gem_object *obj,
			     struct etnaviv_iommu_context *context)
{
	struct etnaviv_vram_mapping *cur;

	list_for_each_entry(cur, &obj->vram_list, obj_node) {
		if (cur->context != context)
			continue;

		return cur;
	}

	return NULL;
}
/*
 * Drop one use reference from @mapping and the GEM object reference
 * taken by etnaviv_gem_mapping_get().
 */
void etnaviv_gem_mapping_unreference(struct etnaviv_vram_mapping *mapping)
{
	struct etnaviv_gem_object *etnaviv_obj = mapping->object;

	mutex_lock(&etnaviv_obj->lock);
	WARN_ON(mapping->use == 0);
	mapping->use -= 1;
	mutex_unlock(&etnaviv_obj->lock);

	drm_gem_object_put(&etnaviv_obj->base);
}
/*
 * Look up or create the GPU VA mapping of @obj in @mmu_context and take
 * a use reference on it. A non-zero @va requests a specific (softpin)
 * address. Returns the mapping with a GEM object reference held, or an
 * ERR_PTR on failure.
 */
struct etnaviv_vram_mapping *etnaviv_gem_mapping_get(
	struct drm_gem_object *obj, struct etnaviv_iommu_context *mmu_context,
	u64 va)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct etnaviv_vram_mapping *mapping;
	struct page **pages;
	int ret = 0;

	mutex_lock(&etnaviv_obj->lock);
	mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, mmu_context);
	if (mapping) {
		/*
		 * Holding the object lock prevents the use count changing
		 * beneath us. If the use count is zero, the MMU might be
		 * reaping this object, so take the lock and re-check that
		 * the MMU owns this mapping to close this race.
		 */
		if (mapping->use == 0) {
			mutex_lock(&mmu_context->lock);
			/* braces added: the dangling else made this hard to read */
			if (mapping->context == mmu_context) {
				if (va && mapping->iova != va) {
					/* wrong softpin address, drop the mapping */
					etnaviv_iommu_reap_mapping(mapping);
					mapping = NULL;
				} else {
					mapping->use += 1;
				}
			} else {
				mapping = NULL;
			}
			mutex_unlock(&mmu_context->lock);
			if (mapping)
				goto out;
		} else {
			mapping->use += 1;
			goto out;
		}
	}

	pages = etnaviv_gem_get_pages(etnaviv_obj);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out;
	}

	/*
	 * See if we have a reaped vram mapping we can re-use before
	 * allocating a fresh mapping.
	 */
	mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, NULL);
	if (!mapping) {
		mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
		if (!mapping) {
			ret = -ENOMEM;
			goto out;
		}

		INIT_LIST_HEAD(&mapping->scan_node);
		mapping->object = etnaviv_obj;
	} else {
		list_del(&mapping->obj_node);
	}

	mapping->use = 1;

	ret = etnaviv_iommu_map_gem(mmu_context, etnaviv_obj,
				    mmu_context->global->memory_base,
				    mapping, va);
	if (ret < 0)
		kfree(mapping);
	else
		list_add_tail(&mapping->obj_node, &etnaviv_obj->vram_list);

out:
	mutex_unlock(&etnaviv_obj->lock);

	if (ret)
		return ERR_PTR(ret);

	/* Take a reference on the object */
	drm_gem_object_get(obj);
	return mapping;
}
/*
 * Return (creating on first use) a kernel virtual mapping of the object.
 * NOTE(review): the unlocked fast-path read of ->vaddr is a
 * double-checked-locking pattern — confirm vaddr is never torn down
 * while the object is alive.
 */
void *etnaviv_gem_vmap(struct drm_gem_object *obj)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	if (etnaviv_obj->vaddr)
		return etnaviv_obj->vaddr;

	mutex_lock(&etnaviv_obj->lock);
	/*
	 * Need to check again, as we might have raced with another thread
	 * while waiting for the mutex.
	 */
	if (!etnaviv_obj->vaddr)
		etnaviv_obj->vaddr = etnaviv_obj->ops->vmap(etnaviv_obj);
	mutex_unlock(&etnaviv_obj->lock);

	return etnaviv_obj->vaddr;
}

/* Default ->vmap backend: vmap all backing pages write-combined. */
static void *etnaviv_gem_vmap_impl(struct etnaviv_gem_object *obj)
{
	struct page **pages;

	lockdep_assert_held(&obj->lock);

	pages = etnaviv_gem_get_pages(obj);
	if (IS_ERR(pages))
		return NULL;

	return vmap(pages, obj->base.size >> PAGE_SHIFT,
			VM_MAP, pgprot_writecombine(PAGE_KERNEL));
}
/* Translate an ETNA_PREP_* access op into the matching DMA direction. */
static inline enum dma_data_direction etnaviv_op_to_dma_dir(u32 op)
{
	if (op & ETNA_PREP_READ)
		return DMA_FROM_DEVICE;
	if (op & ETNA_PREP_WRITE)
		return DMA_TO_DEVICE;

	return DMA_BIDIRECTIONAL;
}
/*
 * Begin a CPU access window on @obj: optionally wait for GPU access to
 * finish (or poll with ETNA_PREP_NOSYNC) and sync CPU caches for cached
 * BOs. Returns 0, -EBUSY/-ETIMEDOUT, or a wait error.
 */
int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
		struct drm_etnaviv_timespec *timeout)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct drm_device *dev = obj->dev;
	bool write = !!(op & ETNA_PREP_WRITE);
	int ret;

	if (!etnaviv_obj->sgt) {
		/* renamed from "ret": the old name shadowed the outer int ret */
		struct page **pages;

		mutex_lock(&etnaviv_obj->lock);
		pages = etnaviv_gem_get_pages(etnaviv_obj);
		mutex_unlock(&etnaviv_obj->lock);
		if (IS_ERR(pages))
			return PTR_ERR(pages);
	}

	if (op & ETNA_PREP_NOSYNC) {
		if (!dma_resv_test_signaled(obj->resv,
					    dma_resv_usage_rw(write)))
			return -EBUSY;
	} else {
		unsigned long remain = etnaviv_timeout_to_jiffies(timeout);

		ret = dma_resv_wait_timeout(obj->resv, dma_resv_usage_rw(write),
					    true, remain);
		if (ret <= 0)
			return ret == 0 ? -ETIMEDOUT : ret;
	}

	if (etnaviv_obj->flags & ETNA_BO_CACHED) {
		dma_sync_sgtable_for_cpu(dev->dev, etnaviv_obj->sgt,
					 etnaviv_op_to_dma_dir(op));
		/* remembered so cpu_fini can sync back in the same direction */
		etnaviv_obj->last_cpu_prep_op = op;
	}

	return 0;
}
/* End a CPU access window: sync caches back for device access. */
int etnaviv_gem_cpu_fini(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	if (etnaviv_obj->flags & ETNA_BO_CACHED) {
		/* fini without a prep is almost certainly a userspace error */
		WARN_ON(etnaviv_obj->last_cpu_prep_op == 0);
		dma_sync_sgtable_for_device(dev->dev, etnaviv_obj->sgt,
			etnaviv_op_to_dma_dir(etnaviv_obj->last_cpu_prep_op));
		etnaviv_obj->last_cpu_prep_op = 0;
	}

	return 0;
}

/* Wait until @obj is no longer in use by @gpu. */
int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj,
	struct drm_etnaviv_timespec *timeout)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	return etnaviv_gpu_wait_obj_inactive(gpu, etnaviv_obj, timeout);
}
#ifdef CONFIG_DEBUG_FS
/* debugfs: print one line describing @obj plus its reservation state. */
static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct dma_resv *robj = obj->resv;
	unsigned long off = drm_vma_node_start(&obj->vma_node);
	int r;

	seq_printf(m, "%08x: %c %2d (%2d) %08lx %p %zd\n",
			etnaviv_obj->flags, is_active(etnaviv_obj) ? 'A' : 'I',
			obj->name, kref_read(&obj->refcount),
			off, etnaviv_obj->vaddr, obj->size);

	r = dma_resv_lock(robj, NULL);
	if (r)
		return;

	dma_resv_describe(robj, m);
	dma_resv_unlock(robj);
}

/* debugfs: describe every GEM object of the device plus a total. */
void etnaviv_gem_describe_objects(struct etnaviv_drm_private *priv,
	struct seq_file *m)
{
	struct etnaviv_gem_object *etnaviv_obj;
	int count = 0;
	size_t size = 0;

	mutex_lock(&priv->gem_lock);
	list_for_each_entry(etnaviv_obj, &priv->gem_list, gem_node) {
		struct drm_gem_object *obj = &etnaviv_obj->base;

		seq_puts(m, "   ");
		etnaviv_gem_describe(obj, m);
		count++;
		size += obj->size;
	}
	mutex_unlock(&priv->gem_lock);

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif
/* shmem backend release: drop the vmap and the backing pages. */
static void etnaviv_gem_shmem_release(struct etnaviv_gem_object *etnaviv_obj)
{
	vunmap(etnaviv_obj->vaddr);
	put_pages(etnaviv_obj);
}

static const struct etnaviv_gem_ops etnaviv_gem_shmem_ops = {
	.get_pages = etnaviv_gem_shmem_get_pages,
	.release = etnaviv_gem_shmem_release,
	.vmap = etnaviv_gem_vmap_impl,
	.mmap = etnaviv_gem_mmap_obj,
};

/* Final GEM free: unlink, tear down all MMU mappings, release backend. */
void etnaviv_gem_free_object(struct drm_gem_object *obj)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct etnaviv_drm_private *priv = obj->dev->dev_private;
	struct etnaviv_vram_mapping *mapping, *tmp;

	/* object should not be active */
	WARN_ON(is_active(etnaviv_obj));

	mutex_lock(&priv->gem_lock);
	list_del(&etnaviv_obj->gem_node);
	mutex_unlock(&priv->gem_lock);

	list_for_each_entry_safe(mapping, tmp, &etnaviv_obj->vram_list,
				 obj_node) {
		struct etnaviv_iommu_context *context = mapping->context;

		WARN_ON(mapping->use);

		/* reaped mappings have a NULL context and need no unmap */
		if (context)
			etnaviv_iommu_unmap_gem(context, mapping);

		list_del(&mapping->obj_node);
		kfree(mapping);
	}

	etnaviv_obj->ops->release(etnaviv_obj);
	drm_gem_object_release(obj);

	kfree(etnaviv_obj);
}

/* Add a newly created object to the device-wide GEM list. */
void etnaviv_gem_obj_add(struct drm_device *dev, struct drm_gem_object *obj)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	mutex_lock(&priv->gem_lock);
	list_add_tail(&etnaviv_obj->gem_node, &priv->gem_list);
	mutex_unlock(&priv->gem_lock);
}

static const struct vm_operations_struct vm_ops = {
	.fault = etnaviv_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static const struct drm_gem_object_funcs etnaviv_gem_object_funcs = {
	.free = etnaviv_gem_free_object,
	.pin = etnaviv_gem_prime_pin,
	.unpin = etnaviv_gem_prime_unpin,
	.get_sg_table = etnaviv_gem_prime_get_sg_table,
	.vmap = etnaviv_gem_prime_vmap,
	.mmap = etnaviv_gem_mmap,
	.vm_ops = &vm_ops,
};
/*
 * Common object construction: validate the caching flags, allocate the
 * etnaviv GEM object and wire up ops and funcs. On success *obj points
 * at the embedded drm_gem_object.
 */
static int etnaviv_gem_new_impl(struct drm_device *dev, u32 size, u32 flags,
	const struct etnaviv_gem_ops *ops, struct drm_gem_object **obj)
{
	struct etnaviv_gem_object *etnaviv_obj;

	/* validate flags: exactly one known caching mode must be set */
	switch (flags & ETNA_BO_CACHE_MASK) {
	case ETNA_BO_UNCACHED:
	case ETNA_BO_CACHED:
	case ETNA_BO_WC:
		break;
	default:
		dev_err(dev->dev, "invalid cache flag: %x\n",
			(flags & ETNA_BO_CACHE_MASK));
		return -EINVAL;
	}

	etnaviv_obj = kzalloc(sizeof(*etnaviv_obj), GFP_KERNEL);
	if (!etnaviv_obj)
		return -ENOMEM;

	etnaviv_obj->flags = flags;
	etnaviv_obj->ops = ops;

	mutex_init(&etnaviv_obj->lock);
	INIT_LIST_HEAD(&etnaviv_obj->vram_list);

	*obj = &etnaviv_obj->base;
	(*obj)->funcs = &etnaviv_gem_object_funcs;

	return 0;
}
/* convenience method to construct a GEM buffer object, and userspace handle */
int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file,
	u32 size, u32 flags, u32 *handle)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct drm_gem_object *obj = NULL;
	int ret;

	size = PAGE_ALIGN(size);

	ret = etnaviv_gem_new_impl(dev, size, flags,
				   &etnaviv_gem_shmem_ops, &obj);
	if (ret)
		/*
		 * No object exists yet, so return directly instead of
		 * jumping to the fail path, which would put a NULL obj.
		 */
		return ret;

	lockdep_set_class(&to_etnaviv_bo(obj)->lock, &etnaviv_shm_lock_class);

	ret = drm_gem_object_init(dev, obj, size);
	if (ret)
		goto fail;

	/*
	 * Our buffers are kept pinned, so allocating them from the MOVABLE
	 * zone is a really bad idea, and conflicts with CMA. See comments
	 * above new_inode() why this is required _and_ expected if you're
	 * going to pin these pages.
	 */
	mapping_set_gfp_mask(obj->filp->f_mapping, priv->shm_gfp_mask);

	etnaviv_gem_obj_add(dev, obj);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
fail:
	drm_gem_object_put(obj);

	return ret;
}
/* Construct a private (no shmem backing) etnaviv GEM object. */
int etnaviv_gem_new_private(struct drm_device *dev, size_t size, u32 flags,
	const struct etnaviv_gem_ops *ops, struct etnaviv_gem_object **res)
{
	struct drm_gem_object *obj;
	int ret;

	ret = etnaviv_gem_new_impl(dev, size, flags, ops, &obj);
	if (ret)
		return ret;

	drm_gem_private_object_init(dev, obj, size);

	*res = to_etnaviv_bo(obj);

	return 0;
}

/*
 * userptr backend ->get_pages: long-term pin the user pages backing the
 * object. Only the creating process (same mm) may trigger this.
 */
static int etnaviv_gem_userptr_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	struct page **pvec = NULL;
	struct etnaviv_gem_userptr *userptr = &etnaviv_obj->userptr;
	int ret, pinned = 0, npages = etnaviv_obj->base.size >> PAGE_SHIFT;
	unsigned int gup_flags = FOLL_LONGTERM;

	might_lock_read(&current->mm->mmap_lock);

	if (userptr->mm != current->mm)
		return -EPERM;

	pvec = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!pvec)
		return -ENOMEM;

	if (!userptr->ro)
		gup_flags |= FOLL_WRITE;

	do {
		unsigned num_pages = npages - pinned;
		uint64_t ptr = userptr->ptr + pinned * PAGE_SIZE;
		struct page **pages = pvec + pinned;

		ret = pin_user_pages_fast(ptr, num_pages, gup_flags, pages);
		if (ret < 0) {
			/* unwind the partial pin before bailing out */
			unpin_user_pages(pvec, pinned);
			kvfree(pvec);
			return ret;
		}

		pinned += ret;

	} while (pinned < npages);

	etnaviv_obj->pages = pvec;

	return 0;
}

/* userptr backend release: unmap, unpin and free the page array. */
static void etnaviv_gem_userptr_release(struct etnaviv_gem_object *etnaviv_obj)
{
	if (etnaviv_obj->sgt) {
		etnaviv_gem_scatterlist_unmap(etnaviv_obj);
		sg_free_table(etnaviv_obj->sgt);
		kfree(etnaviv_obj->sgt);
	}
	if (etnaviv_obj->pages) {
		int npages = etnaviv_obj->base.size >> PAGE_SHIFT;

		unpin_user_pages(etnaviv_obj->pages, npages);
		kvfree(etnaviv_obj->pages);
	}
}

/* userptr objects cannot be mmap'ed through the GEM interface. */
static int etnaviv_gem_userptr_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
		struct vm_area_struct *vma)
{
	return -EINVAL;
}

static const struct etnaviv_gem_ops etnaviv_gem_userptr_ops = {
	.get_pages = etnaviv_gem_userptr_get_pages,
	.release = etnaviv_gem_userptr_release,
	.vmap = etnaviv_gem_vmap_impl,
	.mmap = etnaviv_gem_userptr_mmap_obj,
};

/* Create a userptr-backed GEM object and a userspace handle for it. */
int etnaviv_gem_new_userptr(struct drm_device *dev, struct drm_file *file,
	uintptr_t ptr, u32 size, u32 flags, u32 *handle)
{
	struct etnaviv_gem_object *etnaviv_obj;
	int ret;

	/* userptr objects are always CPU-cached */
	ret = etnaviv_gem_new_private(dev, size, ETNA_BO_CACHED,
				      &etnaviv_gem_userptr_ops, &etnaviv_obj);
	if (ret)
		return ret;

	lockdep_set_class(&etnaviv_obj->lock, &etnaviv_userptr_lock_class);

	etnaviv_obj->userptr.ptr = ptr;
	etnaviv_obj->userptr.mm = current->mm;
	etnaviv_obj->userptr.ro = !(flags & ETNA_USERPTR_WRITE);

	etnaviv_gem_obj_add(dev, &etnaviv_obj->base);

	ret = drm_gem_handle_create(file, &etnaviv_obj->base, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(&etnaviv_obj->base);

	return ret;
}
/* linux-master: drivers/gpu/drm/etnaviv/etnaviv_gem.c */
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2017 Etnaviv Project
* Copyright (C) 2017 Zodiac Inflight Innovations
*/
#include "common.xml.h"
#include "etnaviv_gpu.h"
#include "etnaviv_perfmon.h"
#include "state_hi.xml.h"
struct etnaviv_pm_domain;

/*
 * One hardware performance counter: a printable name, the register
 * address or profile-config selector used to access it, and the
 * function used to sample it.
 */
struct etnaviv_pm_signal {
	char name[64];
	u32 data;

	u32 (*sample)(struct etnaviv_gpu *gpu,
		      const struct etnaviv_pm_domain *domain,
		      const struct etnaviv_pm_signal *signal);
};
/*
 * A group of signals sampled through one profile config/read register
 * pair of a GPU unit (HI, PE, SH, ...).
 */
struct etnaviv_pm_domain {
	char name[64];

	/* profile register */
	u32 profile_read;
	u32 profile_config;

	u8 nr_signals;
	const struct etnaviv_pm_signal *signal;
};
/* Ties a domain array to the GPU feature bit that makes it available. */
struct etnaviv_pm_domain_meta {
	unsigned int feature;
	const struct etnaviv_pm_domain *domains;
	u32 nr_domains;
};
/* Select a signal via the domain's config register, then read it back. */
static u32 perf_reg_read(struct etnaviv_gpu *gpu,
	const struct etnaviv_pm_domain *domain,
	const struct etnaviv_pm_signal *signal)
{
	gpu_write(gpu, domain->profile_config, signal->data);

	return gpu_read(gpu, domain->profile_read);
}
/* Route the debug/profile register window to the given pixel pipe. */
static inline void pipe_select(struct etnaviv_gpu *gpu, u32 clock, unsigned pipe)
{
	clock &= ~(VIVS_HI_CLOCK_CONTROL_DEBUG_PIXEL_PIPE__MASK);
	clock |= VIVS_HI_CLOCK_CONTROL_DEBUG_PIXEL_PIPE(pipe);

	gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, clock);
}
/* Sum a profile-selected counter over all pixel pipes of the GPU. */
static u32 pipe_perf_reg_read(struct etnaviv_gpu *gpu,
	const struct etnaviv_pm_domain *domain,
	const struct etnaviv_pm_signal *signal)
{
	u32 clock = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);
	u32 value = 0;
	unsigned i;

	for (i = 0; i < gpu->identity.pixel_pipes; i++) {
		pipe_select(gpu, clock, i);
		value += perf_reg_read(gpu, domain, signal);
	}

	/* switch back to pixel pipe 0 to prevent GPU hang */
	pipe_select(gpu, clock, 0);

	return value;
}
/*
 * Sum a plain register over all pixel pipes; here signal->data holds
 * the register address itself rather than a profile-config selector.
 */
static u32 pipe_reg_read(struct etnaviv_gpu *gpu,
	const struct etnaviv_pm_domain *domain,
	const struct etnaviv_pm_signal *signal)
{
	u32 clock = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);
	u32 value = 0;
	unsigned i;

	for (i = 0; i < gpu->identity.pixel_pipes; i++) {
		pipe_select(gpu, clock, i);
		value += gpu_read(gpu, signal->data);
	}

	/* switch back to pixel pipe 0 to prevent GPU hang */
	pipe_select(gpu, clock, 0);

	return value;
}
/*
 * Read the total cycle counter; GC880/GC2000/GC2100 expose it at a
 * different register than other cores.
 */
static u32 hi_total_cycle_read(struct etnaviv_gpu *gpu,
	const struct etnaviv_pm_domain *domain,
	const struct etnaviv_pm_signal *signal)
{
	u32 reg = VIVS_HI_PROFILE_TOTAL_CYCLES;

	if (gpu->identity.model == chipModel_GC880 ||
	    gpu->identity.model == chipModel_GC2000 ||
	    gpu->identity.model == chipModel_GC2100)
		reg = VIVS_MC_PROFILE_CYCLE_COUNTER;

	return gpu_read(gpu, reg);
}
/*
 * Read the idle cycle counter; on GC880/GC2000/GC2100 the idle count
 * lives where other cores keep the total cycle count.
 */
static u32 hi_total_idle_cycle_read(struct etnaviv_gpu *gpu,
	const struct etnaviv_pm_domain *domain,
	const struct etnaviv_pm_signal *signal)
{
	u32 reg = VIVS_HI_PROFILE_IDLE_CYCLES;

	if (gpu->identity.model == chipModel_GC880 ||
	    gpu->identity.model == chipModel_GC2000 ||
	    gpu->identity.model == chipModel_GC2100)
		reg = VIVS_HI_PROFILE_TOTAL_CYCLES;

	return gpu_read(gpu, reg);
}
/* Perfmon domains exposed on 3D-capable pipes. */
static const struct etnaviv_pm_domain doms_3d[] = {
	{
		.name = "HI",
		.profile_read = VIVS_MC_PROFILE_HI_READ,
		.profile_config = VIVS_MC_PROFILE_CONFIG2,
		.nr_signals = 7,
		.signal = (const struct etnaviv_pm_signal[]) {
			{
				"TOTAL_READ_BYTES8",
				VIVS_HI_PROFILE_READ_BYTES8,
				&pipe_reg_read,
			},
			{
				"TOTAL_WRITE_BYTES8",
				VIVS_HI_PROFILE_WRITE_BYTES8,
				&pipe_reg_read,
			},
			{
				"TOTAL_CYCLES",
				0,
				&hi_total_cycle_read
			},
			{
				"IDLE_CYCLES",
				0,
				&hi_total_idle_cycle_read
			},
			{
				"AXI_CYCLES_READ_REQUEST_STALLED",
				VIVS_MC_PROFILE_CONFIG2_HI_AXI_CYCLES_READ_REQUEST_STALLED,
				&perf_reg_read
			},
			{
				"AXI_CYCLES_WRITE_REQUEST_STALLED",
				VIVS_MC_PROFILE_CONFIG2_HI_AXI_CYCLES_WRITE_REQUEST_STALLED,
				&perf_reg_read
			},
			{
				"AXI_CYCLES_WRITE_DATA_STALLED",
				VIVS_MC_PROFILE_CONFIG2_HI_AXI_CYCLES_WRITE_DATA_STALLED,
				&perf_reg_read
			}
		}
	},
	{
		.name = "PE",
		.profile_read = VIVS_MC_PROFILE_PE_READ,
		.profile_config = VIVS_MC_PROFILE_CONFIG0,
		.nr_signals = 4,
		.signal = (const struct etnaviv_pm_signal[]) {
			{
				"PIXEL_COUNT_KILLED_BY_COLOR_PIPE",
				VIVS_MC_PROFILE_CONFIG0_PE_PIXEL_COUNT_KILLED_BY_COLOR_PIPE,
				&pipe_perf_reg_read
			},
			{
				"PIXEL_COUNT_KILLED_BY_DEPTH_PIPE",
				VIVS_MC_PROFILE_CONFIG0_PE_PIXEL_COUNT_KILLED_BY_DEPTH_PIPE,
				&pipe_perf_reg_read
			},
			{
				"PIXEL_COUNT_DRAWN_BY_COLOR_PIPE",
				VIVS_MC_PROFILE_CONFIG0_PE_PIXEL_COUNT_DRAWN_BY_COLOR_PIPE,
				&pipe_perf_reg_read
			},
			{
				"PIXEL_COUNT_DRAWN_BY_DEPTH_PIPE",
				VIVS_MC_PROFILE_CONFIG0_PE_PIXEL_COUNT_DRAWN_BY_DEPTH_PIPE,
				&pipe_perf_reg_read
			}
		}
	},
	{
		.name = "SH",
		.profile_read = VIVS_MC_PROFILE_SH_READ,
		.profile_config = VIVS_MC_PROFILE_CONFIG0,
		.nr_signals = 9,
		.signal = (const struct etnaviv_pm_signal[]) {
			{
				"SHADER_CYCLES",
				VIVS_MC_PROFILE_CONFIG0_SH_SHADER_CYCLES,
				&perf_reg_read
			},
			{
				"PS_INST_COUNTER",
				VIVS_MC_PROFILE_CONFIG0_SH_PS_INST_COUNTER,
				&perf_reg_read
			},
			{
				"RENDERED_PIXEL_COUNTER",
				VIVS_MC_PROFILE_CONFIG0_SH_RENDERED_PIXEL_COUNTER,
				&perf_reg_read
			},
			{
				"VS_INST_COUNTER",
				VIVS_MC_PROFILE_CONFIG0_SH_VS_INST_COUNTER,
				&pipe_perf_reg_read
			},
			{
				"RENDERED_VERTICE_COUNTER",
				VIVS_MC_PROFILE_CONFIG0_SH_RENDERED_VERTICE_COUNTER,
				&pipe_perf_reg_read
			},
			{
				"VTX_BRANCH_INST_COUNTER",
				VIVS_MC_PROFILE_CONFIG0_SH_VTX_BRANCH_INST_COUNTER,
				&pipe_perf_reg_read
			},
			{
				"VTX_TEXLD_INST_COUNTER",
				VIVS_MC_PROFILE_CONFIG0_SH_VTX_TEXLD_INST_COUNTER,
				&pipe_perf_reg_read
			},
			{
				"PXL_BRANCH_INST_COUNTER",
				VIVS_MC_PROFILE_CONFIG0_SH_PXL_BRANCH_INST_COUNTER,
				&pipe_perf_reg_read
			},
			{
				"PXL_TEXLD_INST_COUNTER",
				VIVS_MC_PROFILE_CONFIG0_SH_PXL_TEXLD_INST_COUNTER,
				&pipe_perf_reg_read
			}
		}
	},
	{
		.name = "PA",
		.profile_read = VIVS_MC_PROFILE_PA_READ,
		.profile_config = VIVS_MC_PROFILE_CONFIG1,
		.nr_signals = 6,
		.signal = (const struct etnaviv_pm_signal[]) {
			{
				"INPUT_VTX_COUNTER",
				VIVS_MC_PROFILE_CONFIG1_PA_INPUT_VTX_COUNTER,
				&perf_reg_read
			},
			{
				"INPUT_PRIM_COUNTER",
				VIVS_MC_PROFILE_CONFIG1_PA_INPUT_PRIM_COUNTER,
				&perf_reg_read
			},
			{
				"OUTPUT_PRIM_COUNTER",
				VIVS_MC_PROFILE_CONFIG1_PA_OUTPUT_PRIM_COUNTER,
				&perf_reg_read
			},
			{
				"DEPTH_CLIPPED_COUNTER",
				VIVS_MC_PROFILE_CONFIG1_PA_DEPTH_CLIPPED_COUNTER,
				&pipe_perf_reg_read
			},
			{
				"TRIVIAL_REJECTED_COUNTER",
				VIVS_MC_PROFILE_CONFIG1_PA_TRIVIAL_REJECTED_COUNTER,
				&pipe_perf_reg_read
			},
			{
				"CULLED_COUNTER",
				VIVS_MC_PROFILE_CONFIG1_PA_CULLED_COUNTER,
				&pipe_perf_reg_read
			}
		}
	},
	{
		.name = "SE",
		.profile_read = VIVS_MC_PROFILE_SE_READ,
		.profile_config = VIVS_MC_PROFILE_CONFIG1,
		.nr_signals = 2,
		.signal = (const struct etnaviv_pm_signal[]) {
			{
				"CULLED_TRIANGLE_COUNT",
				VIVS_MC_PROFILE_CONFIG1_SE_CULLED_TRIANGLE_COUNT,
				&perf_reg_read
			},
			{
				"CULLED_LINES_COUNT",
				VIVS_MC_PROFILE_CONFIG1_SE_CULLED_LINES_COUNT,
				&perf_reg_read
			}
		}
	},
	{
		.name = "RA",
		.profile_read = VIVS_MC_PROFILE_RA_READ,
		.profile_config = VIVS_MC_PROFILE_CONFIG1,
		.nr_signals = 7,
		.signal = (const struct etnaviv_pm_signal[]) {
			{
				"VALID_PIXEL_COUNT",
				VIVS_MC_PROFILE_CONFIG1_RA_VALID_PIXEL_COUNT,
				&perf_reg_read
			},
			{
				"TOTAL_QUAD_COUNT",
				VIVS_MC_PROFILE_CONFIG1_RA_TOTAL_QUAD_COUNT,
				&perf_reg_read
			},
			{
				"VALID_QUAD_COUNT_AFTER_EARLY_Z",
				VIVS_MC_PROFILE_CONFIG1_RA_VALID_QUAD_COUNT_AFTER_EARLY_Z,
				&perf_reg_read
			},
			{
				"TOTAL_PRIMITIVE_COUNT",
				VIVS_MC_PROFILE_CONFIG1_RA_TOTAL_PRIMITIVE_COUNT,
				&perf_reg_read
			},
			{
				"PIPE_CACHE_MISS_COUNTER",
				VIVS_MC_PROFILE_CONFIG1_RA_PIPE_CACHE_MISS_COUNTER,
				&perf_reg_read
			},
			{
				"PREFETCH_CACHE_MISS_COUNTER",
				VIVS_MC_PROFILE_CONFIG1_RA_PREFETCH_CACHE_MISS_COUNTER,
				&perf_reg_read
			},
			{
				"CULLED_QUAD_COUNT",
				VIVS_MC_PROFILE_CONFIG1_RA_CULLED_QUAD_COUNT,
				&perf_reg_read
			}
		}
	},
	{
		.name = "TX",
		.profile_read = VIVS_MC_PROFILE_TX_READ,
		.profile_config = VIVS_MC_PROFILE_CONFIG1,
		.nr_signals = 9,
		.signal = (const struct etnaviv_pm_signal[]) {
			{
				"TOTAL_BILINEAR_REQUESTS",
				VIVS_MC_PROFILE_CONFIG1_TX_TOTAL_BILINEAR_REQUESTS,
				&perf_reg_read
			},
			{
				"TOTAL_TRILINEAR_REQUESTS",
				VIVS_MC_PROFILE_CONFIG1_TX_TOTAL_TRILINEAR_REQUESTS,
				&perf_reg_read
			},
			{
				"TOTAL_DISCARDED_TEXTURE_REQUESTS",
				VIVS_MC_PROFILE_CONFIG1_TX_TOTAL_DISCARDED_TEXTURE_REQUESTS,
				&perf_reg_read
			},
			{
				"TOTAL_TEXTURE_REQUESTS",
				VIVS_MC_PROFILE_CONFIG1_TX_TOTAL_TEXTURE_REQUESTS,
				&perf_reg_read
			},
			{
				"MEM_READ_COUNT",
				VIVS_MC_PROFILE_CONFIG1_TX_MEM_READ_COUNT,
				&perf_reg_read
			},
			{
				"MEM_READ_IN_8B_COUNT",
				VIVS_MC_PROFILE_CONFIG1_TX_MEM_READ_IN_8B_COUNT,
				&perf_reg_read
			},
			{
				"CACHE_MISS_COUNT",
				VIVS_MC_PROFILE_CONFIG1_TX_CACHE_MISS_COUNT,
				&perf_reg_read
			},
			{
				"CACHE_HIT_TEXEL_COUNT",
				VIVS_MC_PROFILE_CONFIG1_TX_CACHE_HIT_TEXEL_COUNT,
				&perf_reg_read
			},
			{
				"CACHE_MISS_TEXEL_COUNT",
				VIVS_MC_PROFILE_CONFIG1_TX_CACHE_MISS_TEXEL_COUNT,
				&perf_reg_read
			}
		}
	},
	{
		.name = "MC",
		.profile_read = VIVS_MC_PROFILE_MC_READ,
		.profile_config = VIVS_MC_PROFILE_CONFIG2,
		.nr_signals = 3,
		.signal = (const struct etnaviv_pm_signal[]) {
			{
				"TOTAL_READ_REQ_8B_FROM_PIPELINE",
				VIVS_MC_PROFILE_CONFIG2_MC_TOTAL_READ_REQ_8B_FROM_PIPELINE,
				&perf_reg_read
			},
			{
				"TOTAL_READ_REQ_8B_FROM_IP",
				VIVS_MC_PROFILE_CONFIG2_MC_TOTAL_READ_REQ_8B_FROM_IP,
				&perf_reg_read
			},
			{
				"TOTAL_WRITE_REQ_8B_FROM_PIPELINE",
				VIVS_MC_PROFILE_CONFIG2_MC_TOTAL_WRITE_REQ_8B_FROM_PIPELINE,
				&perf_reg_read
			}
		}
	}
};
/* Perfmon domains exposed on 2D-capable pipes. */
static const struct etnaviv_pm_domain doms_2d[] = {
	{
		.name = "PE",
		.profile_read = VIVS_MC_PROFILE_PE_READ,
		.profile_config = VIVS_MC_PROFILE_CONFIG0,
		.nr_signals = 1,
		.signal = (const struct etnaviv_pm_signal[]) {
			{
				"PIXELS_RENDERED_2D",
				VIVS_MC_PROFILE_CONFIG0_PE_PIXELS_RENDERED_2D,
				&pipe_perf_reg_read
			}
		}
	}
};
/* No perfmon domains are implemented for the VG pipe yet. */
static const struct etnaviv_pm_domain doms_vg[] = {
};
/*
 * All domain groups, keyed by pipe feature bit. Note: order matters,
 * the flat domain index seen by userspace walks this array in order.
 */
static const struct etnaviv_pm_domain_meta doms_meta[] = {
	{
		.feature = chipFeatures_PIPE_3D,
		.nr_domains = ARRAY_SIZE(doms_3d),
		.domains = &doms_3d[0]
	},
	{
		.feature = chipFeatures_PIPE_2D,
		.nr_domains = ARRAY_SIZE(doms_2d),
		.domains = &doms_2d[0]
	},
	{
		.feature = chipFeatures_PIPE_VG,
		.nr_domains = ARRAY_SIZE(doms_vg),
		.domains = &doms_vg[0]
	}
};
/*
 * Number of perfmon domains this GPU exposes: the sum of the domain
 * counts of every feature group the GPU actually supports.
 */
static unsigned int num_pm_domains(const struct etnaviv_gpu *gpu)
{
	unsigned int total = 0;
	unsigned int idx;

	for (idx = 0; idx < ARRAY_SIZE(doms_meta); idx++) {
		const struct etnaviv_pm_domain_meta *meta = &doms_meta[idx];

		if (gpu->identity.features & meta->feature)
			total += meta->nr_domains;
	}

	return total;
}
/*
 * Map a flat domain index (as exposed to userspace) back to the
 * etnaviv_pm_domain it refers to, skipping over domain groups whose
 * pipe feature the GPU lacks. Returns NULL for out-of-range indices.
 */
static const struct etnaviv_pm_domain *pm_domain(const struct etnaviv_gpu *gpu,
	unsigned int index)
{
	const struct etnaviv_pm_domain *domain = NULL;
	unsigned int offset = 0, i;

	for (i = 0; i < ARRAY_SIZE(doms_meta); i++) {
		const struct etnaviv_pm_domain_meta *meta = &doms_meta[i];

		if (!(gpu->identity.features & meta->feature))
			continue;

		/* not in this group: skip its domains and keep looking */
		if (index - offset >= meta->nr_domains) {
			offset += meta->nr_domains;
			continue;
		}

		domain = meta->domains + (index - offset);
		/*
		 * Stop at the first match. Without this break a later
		 * feature group (e.g. 2D on a combined 2D/3D core) would
		 * overwrite the correct result, since offset is not
		 * advanced past the matched group.
		 */
		break;
	}

	return domain;
}
/*
 * Fill in info about the perfmon domain selected by domain->iter and
 * advance the iterator; 0xff marks the end of iteration for userspace.
 * Returns -EINVAL for an out-of-range iterator.
 */
int etnaviv_pm_query_dom(struct etnaviv_gpu *gpu,
	struct drm_etnaviv_pm_domain *domain)
{
	const unsigned int nr_domains = num_pm_domains(gpu);
	const struct etnaviv_pm_domain *dom;

	if (domain->iter >= nr_domains)
		return -EINVAL;

	dom = pm_domain(gpu, domain->iter);
	if (!dom)
		return -EINVAL;

	domain->id = domain->iter;
	domain->nr_signals = dom->nr_signals;
	/* NOTE(review): strncpy does not NUL-terminate a maximal-length
	 * source; dom->name is 64 bytes - confirm sizeof(domain->name)
	 * in the uapi struct covers this. */
	strncpy(domain->name, dom->name, sizeof(domain->name));

	domain->iter++;
	if (domain->iter == nr_domains)
		domain->iter = 0xff;

	return 0;
}
/*
 * Fill in info about one signal of a perfmon domain and advance the
 * signal iterator; 0xffff marks the end of iteration for userspace.
 * Both the domain and signal indices are range-checked.
 */
int etnaviv_pm_query_sig(struct etnaviv_gpu *gpu,
	struct drm_etnaviv_pm_signal *signal)
{
	const unsigned int nr_domains = num_pm_domains(gpu);
	const struct etnaviv_pm_domain *dom;
	const struct etnaviv_pm_signal *sig;

	if (signal->domain >= nr_domains)
		return -EINVAL;

	dom = pm_domain(gpu, signal->domain);
	if (!dom)
		return -EINVAL;

	if (signal->iter >= dom->nr_signals)
		return -EINVAL;

	sig = &dom->signal[signal->iter];

	signal->id = signal->iter;
	/* NOTE(review): strncpy does not NUL-terminate a maximal-length
	 * source - confirm sizeof(signal->name) in the uapi struct. */
	strncpy(signal->name, sig->name, sizeof(signal->name));

	signal->iter++;
	if (signal->iter == dom->nr_signals)
		signal->iter = 0xffff;

	return 0;
}
/*
 * Validate a perfmon request coming in with a submit: the exec_state
 * (selecting the domain group) and the domain/signal indices all come
 * from userspace and must be range-checked before they are used to
 * index the tables.
 */
int etnaviv_pm_req_validate(const struct drm_etnaviv_gem_submit_pmr *r,
	u32 exec_state)
{
	const struct etnaviv_pm_domain_meta *meta;
	const struct etnaviv_pm_domain *dom;

	/* guard the doms_meta lookup against an out-of-bounds read */
	if (exec_state >= ARRAY_SIZE(doms_meta))
		return -EINVAL;

	meta = &doms_meta[exec_state];

	if (r->domain >= meta->nr_domains)
		return -EINVAL;

	dom = meta->domains + r->domain;

	if (r->signal >= dom->nr_signals)
		return -EINVAL;

	return 0;
}
/*
 * Sample one requested perfmon signal and store the value at the
 * requested word offset inside the BO. The indices were range-checked
 * by etnaviv_pm_req_validate() at submit time.
 */
void etnaviv_perfmon_process(struct etnaviv_gpu *gpu,
	const struct etnaviv_perfmon_request *pmr, u32 exec_state)
{
	const struct etnaviv_pm_domain_meta *meta = &doms_meta[exec_state];
	const struct etnaviv_pm_domain *dom;
	const struct etnaviv_pm_signal *sig;
	u32 *bo = pmr->bo_vma;
	u32 val;

	dom = meta->domains + pmr->domain;
	sig = &dom->signal[pmr->signal];
	val = sig->sample(gpu, dom, sig);

	*(bo + pmr->offset) = val;
}
/* linux-master: drivers/gpu/drm/etnaviv/etnaviv_perfmon.c */
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2015-2018 Etnaviv Project
*/
#include <linux/devcoredump.h>
#include <linux/moduleparam.h>
#include "etnaviv_cmdbuf.h"
#include "etnaviv_dump.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"
#include "etnaviv_sched.h"
#include "state.xml.h"
#include "state_hi.xml.h"
/* Dump a devcoredump on the first GPU hang; re-arm via module param. */
static bool etnaviv_dump_core = true;
module_param_named(dump_core, etnaviv_dump_core, bool, 0600);

/*
 * Walks the dump buffer: the header array sits at the start, payload
 * data is appended behind it as objects are emitted.
 */
struct core_dump_iterator {
	void *start;
	struct etnaviv_dump_object_header *hdr;
	void *data;
};
/* GPU registers captured into the register section of a core dump. */
static const unsigned short etnaviv_dump_registers[] = {
	VIVS_HI_AXI_STATUS,
	VIVS_HI_CLOCK_CONTROL,
	VIVS_HI_IDLE_STATE,
	VIVS_HI_AXI_CONFIG,
	VIVS_HI_INTR_ENBL,
	VIVS_HI_CHIP_IDENTITY,
	VIVS_HI_CHIP_FEATURE,
	VIVS_HI_CHIP_MODEL,
	VIVS_HI_CHIP_REV,
	VIVS_HI_CHIP_DATE,
	VIVS_HI_CHIP_TIME,
	VIVS_HI_CHIP_MINOR_FEATURE_0,
	VIVS_HI_CACHE_CONTROL,
	VIVS_HI_AXI_CONTROL,
	VIVS_PM_POWER_CONTROLS,
	VIVS_PM_MODULE_CONTROLS,
	VIVS_PM_MODULE_STATUS,
	VIVS_PM_PULSE_EATER,
	VIVS_MC_MMU_FE_PAGE_TABLE,
	VIVS_MC_MMU_TX_PAGE_TABLE,
	VIVS_MC_MMU_PE_PAGE_TABLE,
	VIVS_MC_MMU_PEZ_PAGE_TABLE,
	VIVS_MC_MMU_RA_PAGE_TABLE,
	VIVS_MC_DEBUG_MEMORY,
	VIVS_MC_MEMORY_BASE_ADDR_RA,
	VIVS_MC_MEMORY_BASE_ADDR_FE,
	VIVS_MC_MEMORY_BASE_ADDR_TX,
	VIVS_MC_MEMORY_BASE_ADDR_PEZ,
	VIVS_MC_MEMORY_BASE_ADDR_PE,
	VIVS_MC_MEMORY_TIMING_CONTROL,
	VIVS_MC_BUS_CONFIG,
	VIVS_FE_DMA_STATUS,
	VIVS_FE_DMA_DEBUG_STATE,
	VIVS_FE_DMA_ADDRESS,
	VIVS_FE_DMA_LOW,
	VIVS_FE_DMA_HIGH,
	VIVS_FE_AUTO_FLUSH,
};
/*
 * Finish the current dump object: fill in its header (offset and size
 * are relative to the start of the dump file, data_end points just
 * past the object's payload) and advance the iterator to the next
 * header slot and data position.
 */
static void etnaviv_core_dump_header(struct core_dump_iterator *iter,
	u32 type, void *data_end)
{
	struct etnaviv_dump_object_header *hdr = iter->hdr;

	hdr->magic = cpu_to_le32(ETDUMP_MAGIC);
	hdr->type = cpu_to_le32(type);
	hdr->file_offset = cpu_to_le32(iter->data - iter->start);
	hdr->file_size = cpu_to_le32(data_end - iter->data);

	iter->hdr++;
	iter->data += le32_to_cpu(hdr->file_size);
}
/*
 * Dump the register snapshot. Power registers may live at a shifted
 * address on some cores, so the read address is remapped while the
 * original address is recorded in the dump.
 */
static void etnaviv_core_dump_registers(struct core_dump_iterator *iter,
	struct etnaviv_gpu *gpu)
{
	struct etnaviv_dump_registers *reg = iter->data;
	unsigned int i;
	u32 read_addr;

	for (i = 0; i < ARRAY_SIZE(etnaviv_dump_registers); i++, reg++) {
		read_addr = etnaviv_dump_registers[i];
		if (read_addr >= VIVS_PM_POWER_CONTROLS &&
		    read_addr <= VIVS_PM_PULSE_EATER)
			read_addr = gpu_fix_power_address(gpu, read_addr);
		reg->reg = cpu_to_le32(etnaviv_dump_registers[i]);
		reg->value = cpu_to_le32(gpu_read(gpu, read_addr));
	}

	etnaviv_core_dump_header(iter, ETDUMP_BUF_REG, reg);
}
/* Dump the MMU page tables; mmu_size was sized via etnaviv_iommu_dump_size(). */
static void etnaviv_core_dump_mmu(struct core_dump_iterator *iter,
	struct etnaviv_iommu_context *mmu, size_t mmu_size)
{
	etnaviv_iommu_dump(mmu, iter->data);

	etnaviv_core_dump_header(iter, ETDUMP_BUF_MMU, iter->data + mmu_size);
}
/* Copy a memory buffer into the dump and record its GPU virtual address. */
static void etnaviv_core_dump_mem(struct core_dump_iterator *iter, u32 type,
	void *ptr, size_t size, u64 iova)
{
	memcpy(iter->data, ptr, size);

	iter->hdr->iova = cpu_to_le64(iova);

	etnaviv_core_dump_header(iter, type, iter->data + size);
}
/*
 * Create a devcoredump for a hung submit: registers, MMU tables, ring
 * buffer, the hanging command buffer, all its buffer objects (plus a
 * bomap table of their physical pages) and an end marker. The size of
 * the dump is computed up front so one vmalloc allocation suffices.
 */
void etnaviv_core_dump(struct etnaviv_gem_submit *submit)
{
	struct etnaviv_gpu *gpu = submit->gpu;
	struct core_dump_iterator iter;
	struct etnaviv_gem_object *obj;
	unsigned int n_obj, n_bomap_pages;
	size_t file_size, mmu_size;
	__le64 *bomap, *bomap_start;
	int i;

	/* Only catch the first event, or when manually re-armed */
	if (!etnaviv_dump_core)
		return;
	etnaviv_dump_core = false;

	mutex_lock(&submit->mmu_context->lock);

	mmu_size = etnaviv_iommu_dump_size(submit->mmu_context);

	/* We always dump registers, mmu, ring, hanging cmdbuf and end marker */
	n_obj = 5;
	n_bomap_pages = 0;
	file_size = ARRAY_SIZE(etnaviv_dump_registers) *
			sizeof(struct etnaviv_dump_registers) +
		    mmu_size + gpu->buffer.size + submit->cmdbuf.size;

	/* Add in the active buffer objects */
	for (i = 0; i < submit->nr_bos; i++) {
		obj = submit->bos[i].obj;
		file_size += obj->base.size;
		n_bomap_pages += obj->base.size >> PAGE_SHIFT;
		n_obj++;
	}

	/* If we have any buffer objects, add a bomap object */
	if (n_bomap_pages) {
		file_size += n_bomap_pages * sizeof(__le64);
		n_obj++;
	}

	/* Add the size of the headers */
	file_size += sizeof(*iter.hdr) * n_obj;

	/* Allocate the file in vmalloc memory, it's likely to be big */
	iter.start = __vmalloc(file_size, GFP_KERNEL | __GFP_NOWARN |
			__GFP_NORETRY);
	if (!iter.start) {
		mutex_unlock(&submit->mmu_context->lock);
		dev_warn(gpu->dev, "failed to allocate devcoredump file\n");
		return;
	}

	/* Point the data member after the headers */
	iter.hdr = iter.start;
	iter.data = &iter.hdr[n_obj];

	memset(iter.hdr, 0, iter.data - iter.start);

	etnaviv_core_dump_registers(&iter, gpu);
	etnaviv_core_dump_mmu(&iter, submit->mmu_context, mmu_size);
	etnaviv_core_dump_mem(&iter, ETDUMP_BUF_RING, gpu->buffer.vaddr,
			      gpu->buffer.size,
			      etnaviv_cmdbuf_get_va(&gpu->buffer,
					&submit->mmu_context->cmdbuf_mapping));

	etnaviv_core_dump_mem(&iter, ETDUMP_BUF_CMD,
			      submit->cmdbuf.vaddr, submit->cmdbuf.size,
			      etnaviv_cmdbuf_get_va(&submit->cmdbuf,
					&submit->mmu_context->cmdbuf_mapping));

	mutex_unlock(&submit->mmu_context->lock);

	/* Reserve space for the bomap */
	if (n_bomap_pages) {
		bomap_start = bomap = iter.data;
		memset(bomap, 0, sizeof(*bomap) * n_bomap_pages);
		etnaviv_core_dump_header(&iter, ETDUMP_BUF_BOMAP,
					 bomap + n_bomap_pages);
	} else {
		/* Silence warning */
		bomap_start = bomap = NULL;
	}

	for (i = 0; i < submit->nr_bos; i++) {
		struct etnaviv_vram_mapping *vram;
		struct page **pages;
		void *vaddr;

		obj = submit->bos[i].obj;
		vram = submit->bos[i].mapping;

		mutex_lock(&obj->lock);
		pages = etnaviv_gem_get_pages(obj);
		mutex_unlock(&obj->lock);
		if (!IS_ERR(pages)) {
			int j;

			/* record the BO's slice of the bomap table */
			iter.hdr->data[0] = cpu_to_le32((bomap - bomap_start));

			for (j = 0; j < obj->base.size >> PAGE_SHIFT; j++)
				*bomap++ = cpu_to_le64(page_to_phys(*pages++));
		}

		iter.hdr->iova = cpu_to_le64(vram->iova);

		vaddr = etnaviv_gem_vmap(&obj->base);
		if (vaddr)
			memcpy(iter.data, vaddr, obj->base.size);

		etnaviv_core_dump_header(&iter, ETDUMP_BUF_BO, iter.data +
					 obj->base.size);
	}

	etnaviv_core_dump_header(&iter, ETDUMP_BUF_END, iter.data);

	dev_coredumpv(gpu->dev, iter.start, iter.data - iter.start, GFP_KERNEL);
}
/* linux-master: drivers/gpu/drm/etnaviv/etnaviv_dump.c */
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2015-2018 Etnaviv Project
*/
#include <linux/kernel.h>
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "cmdstream.xml.h"
/* Extract a bitfield using the generated __MASK/__SHIFT definitions. */
#define EXTRACT(val, field) (((val) & field##__MASK) >> field##__SHIFT)

/* Tracks progress walking a command stream and its relocation list. */
struct etna_validation_state {
	struct etnaviv_gpu *gpu;
	const struct drm_etnaviv_gem_submit_reloc *relocs;
	unsigned int num_relocs;
	u32 *start;
};
/*
 * State addresses that may carry GPU pointers and therefore must be
 * covered by a relocation entry. Offsets are in 32-bit state words
 * (register address >> 2). Only read by the __init constructor below,
 * hence __initconst.
 */
static const struct {
	u16 offset;
	u16 size;
} etnaviv_sensitive_states[] __initconst = {
#define ST(start, num) { (start) >> 2, (num) }
	/* 2D */
	ST(0x1200, 1),
	ST(0x1228, 1),
	ST(0x1238, 1),
	ST(0x1284, 1),
	ST(0x128c, 1),
	ST(0x1304, 1),
	ST(0x1310, 1),
	ST(0x1318, 1),
	ST(0x12800, 4),
	ST(0x128a0, 4),
	ST(0x128c0, 4),
	ST(0x12970, 4),
	ST(0x12a00, 8),
	ST(0x12b40, 8),
	ST(0x12b80, 8),
	ST(0x12ce0, 8),
	/* 3D */
	ST(0x0644, 1),
	ST(0x064c, 1),
	ST(0x0680, 8),
	ST(0x086c, 1),
	ST(0x1028, 1),
	ST(0x1410, 1),
	ST(0x1430, 1),
	ST(0x1458, 1),
	ST(0x1460, 8),
	ST(0x1480, 8),
	ST(0x1500, 8),
	ST(0x1520, 8),
	ST(0x1608, 1),
	ST(0x1610, 1),
	ST(0x1658, 1),
	ST(0x165c, 1),
	ST(0x1664, 1),
	ST(0x1668, 1),
	ST(0x16a4, 1),
	ST(0x16c0, 8),
	ST(0x16e0, 8),
	ST(0x1740, 8),
	ST(0x17c0, 8),
	ST(0x17e0, 8),
	ST(0x2400, 14 * 16),
	ST(0x3824, 1),
	ST(0x10800, 32 * 16),
	ST(0x14600, 16),
	ST(0x14800, 8 * 8),
#undef ST
};
#define ETNAVIV_STATES_SIZE (VIV_FE_LOAD_STATE_HEADER_OFFSET__MASK + 1u)
/* One bit per state word; set bits mark sensitive (relocatable) states. */
static DECLARE_BITMAP(etnaviv_states, ETNAVIV_STATES_SIZE);

/* Build the sensitive-state bitmap once at module init. */
void __init etnaviv_validate_init(void)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(etnaviv_sensitive_states); i++)
		bitmap_set(etnaviv_states, etnaviv_sensitive_states[i].offset,
			   etnaviv_sensitive_states[i].size);
}
/*
 * Warn (once) about and consume relocation entries that point before
 * buf_offset, i.e. relocations userspace attached to non-sensitive
 * state words, so the reloc list stays in sync with the stream walk.
 */
static void etnaviv_warn_if_non_sensitive(struct etna_validation_state *state,
	unsigned int buf_offset, unsigned int state_addr)
{
	if (state->num_relocs && state->relocs->submit_offset < buf_offset) {
		dev_warn_once(state->gpu->dev,
			      "%s: relocation for non-sensitive state 0x%x at offset %u\n",
			      __func__, state_addr,
			      state->relocs->submit_offset);
		while (state->num_relocs &&
		       state->relocs->submit_offset < buf_offset) {
			state->relocs++;
			state->num_relocs--;
		}
	}
}
/*
 * Check a LOAD_STATE command: every sensitive state word it touches
 * must have a matching relocation entry, otherwise the submit is
 * rejected. ptr points at the first state value, state_offset is the
 * starting state word, num the number of words written.
 * Returns true if the command is acceptable.
 */
static bool etnaviv_validate_load_state(struct etna_validation_state *state,
	u32 *ptr, unsigned int state_offset, unsigned int num)
{
	unsigned int size = min(ETNAVIV_STATES_SIZE, state_offset + num);
	unsigned int st_offset = state_offset, buf_offset;

	for_each_set_bit_from(st_offset, etnaviv_states, size) {
		/* byte offset of this state value within the stream */
		buf_offset = (ptr - state->start +
			      st_offset - state_offset) * 4;

		etnaviv_warn_if_non_sensitive(state, buf_offset, st_offset * 4);
		if (state->num_relocs &&
		    state->relocs->submit_offset == buf_offset) {
			state->relocs++;
			state->num_relocs--;
			continue;
		}

		dev_warn_ratelimited(state->gpu->dev,
				     "%s: load state touches restricted state 0x%x at offset %u\n",
				     __func__, st_offset * 4, buf_offset);
		return false;
	}

	if (state->num_relocs) {
		buf_offset = (ptr - state->start + num) * 4;
		etnaviv_warn_if_non_sensitive(state, buf_offset, st_offset * 4 +
					      state->relocs->submit_offset -
					      buf_offset);
	}

	return true;
}
/*
 * Lengths (in 32-bit words) of the FE opcodes userspace may submit;
 * a zero entry means the opcode is not permitted.
 */
static uint8_t cmd_length[32] = {
	[FE_OPCODE_DRAW_PRIMITIVES] = 4,
	[FE_OPCODE_DRAW_INDEXED_PRIMITIVES] = 6,
	[FE_OPCODE_DRAW_INSTANCED] = 4,
	[FE_OPCODE_NOP] = 2,
	[FE_OPCODE_STALL] = 2,
};
/*
 * Validate a userspace command stream of 'size' 32-bit words: only
 * whitelisted opcodes are allowed, LOAD_STATE commands must not touch
 * restricted states without a relocation, and no command may run past
 * the end of the buffer. Returns true if the stream is acceptable.
 */
bool etnaviv_cmd_validate_one(struct etnaviv_gpu *gpu, u32 *stream,
			      unsigned int size,
			      struct drm_etnaviv_gem_submit_reloc *relocs,
			      unsigned int reloc_size)
{
	struct etna_validation_state state;
	u32 *buf = stream;
	u32 *end = buf + size;

	state.gpu = gpu;
	state.relocs = relocs;
	state.num_relocs = reloc_size;
	state.start = stream;

	while (buf < end) {
		u32 cmd = *buf;
		unsigned int len, n, off;
		unsigned int op = cmd >> 27;	/* FE opcode in bits 31:27 */

		switch (op) {
		case FE_OPCODE_LOAD_STATE:
			n = EXTRACT(cmd, VIV_FE_LOAD_STATE_HEADER_COUNT);
			len = ALIGN(1 + n, 2);
			if (buf + len > end)
				break;

			off = EXTRACT(cmd, VIV_FE_LOAD_STATE_HEADER_OFFSET);
			if (!etnaviv_validate_load_state(&state, buf + 1,
							 off, n))
				return false;
			break;

		case FE_OPCODE_DRAW_2D:
			n = EXTRACT(cmd, VIV_FE_DRAW_2D_HEADER_COUNT);
			if (n == 0)
				n = 256;	/* a zero count encodes 256 rectangles */
			len = 2 + n * 2;
			break;

		default:
			len = cmd_length[op];
			if (len == 0) {
				dev_err(gpu->dev, "%s: op %u not permitted at offset %tu\n",
					__func__, op, buf - state.start);
				return false;
			}
			break;
		}

		buf += len;
	}

	if (buf > end) {
		dev_err(gpu->dev, "%s: commands overflow end of buffer: %tu > %u\n",
			__func__, buf - state.start, size);
		return false;
	}

	return true;
}
/* linux-master: drivers/gpu/drm/etnaviv/etnaviv_cmd_parser.c */
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2016-2018 Etnaviv Project
*/
#include <linux/bitops.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "etnaviv_cmdbuf.h"
#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"
#include "state.xml.h"
#include "state_hi.xml.h"
/* MMUv2 page table entry flag bits */
#define MMUv2_PTE_PRESENT BIT(0)
#define MMUv2_PTE_EXCEPTION BIT(1)
#define MMUv2_PTE_WRITEABLE BIT(2)

/* IOVA layout: 10 bits MTLB index, 10 bits STLB index, 12 bits page offset */
#define MMUv2_MTLB_MASK 0xffc00000
#define MMUv2_MTLB_SHIFT 22
#define MMUv2_STLB_MASK 0x003ff000
#define MMUv2_STLB_SHIFT 12

#define MMUv2_MAX_STLB_ENTRIES 1024

/* An MMUv2 address space: two-level pagetable plus its PTA slot id. */
struct etnaviv_iommuv2_context {
	struct etnaviv_iommu_context base;
	unsigned short id;
	/* M(aster) TLB aka first level pagetable */
	u32 *mtlb_cpu;
	dma_addr_t mtlb_dma;
	/* S(lave) TLB aka second level pagetable */
	u32 *stlb_cpu[MMUv2_MAX_STLB_ENTRIES];
	dma_addr_t stlb_dma[MMUv2_MAX_STLB_ENTRIES];
};
/* Downcast from the generic context to the MMUv2 implementation. */
static struct etnaviv_iommuv2_context *
to_v2_context(struct etnaviv_iommu_context *context)
{
	return container_of(context, struct etnaviv_iommuv2_context, base);
}
/*
 * Free an MMUv2 context: the address space manager, all allocated
 * second-level tables, the master table, and its PTA slot.
 */
static void etnaviv_iommuv2_free(struct etnaviv_iommu_context *context)
{
	struct etnaviv_iommuv2_context *v2_context = to_v2_context(context);
	int i;

	drm_mm_takedown(&context->mm);

	for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++) {
		if (v2_context->stlb_cpu[i])
			dma_free_wc(context->global->dev, SZ_4K,
				    v2_context->stlb_cpu[i],
				    v2_context->stlb_dma[i]);
	}

	dma_free_wc(context->global->dev, SZ_4K, v2_context->mtlb_cpu,
		    v2_context->mtlb_dma);

	clear_bit(v2_context->id, context->global->v2.pta_alloc);

	vfree(v2_context);
}
/*
 * Lazily allocate the second-level table for one MTLB slot, fill it
 * with exception entries and hook it into the master table.
 * Returns 0 on success (including when it already exists) or -ENOMEM.
 */
static int
etnaviv_iommuv2_ensure_stlb(struct etnaviv_iommuv2_context *v2_context,
			    int stlb)
{
	if (v2_context->stlb_cpu[stlb])
		return 0;

	v2_context->stlb_cpu[stlb] =
			dma_alloc_wc(v2_context->base.global->dev, SZ_4K,
				     &v2_context->stlb_dma[stlb],
				     GFP_KERNEL);

	if (!v2_context->stlb_cpu[stlb])
		return -ENOMEM;

	memset32(v2_context->stlb_cpu[stlb], MMUv2_PTE_EXCEPTION,
		 SZ_4K / sizeof(u32));

	v2_context->mtlb_cpu[stlb] =
			v2_context->stlb_dma[stlb] | MMUv2_PTE_PRESENT;

	return 0;
}
/*
 * Map one 4K page at iova to paddr. 40-bit physical addresses are
 * supported by stashing PA bits 39:32 in PTE bits 11:4.
 * Returns 0 on success, -EINVAL for a non-4K size, or -ENOMEM.
 */
static int etnaviv_iommuv2_map(struct etnaviv_iommu_context *context,
			       unsigned long iova, phys_addr_t paddr,
			       size_t size, int prot)
{
	struct etnaviv_iommuv2_context *v2_context = to_v2_context(context);
	int mtlb_entry, stlb_entry, ret;
	u32 entry = lower_32_bits(paddr) | MMUv2_PTE_PRESENT;

	if (size != SZ_4K)
		return -EINVAL;

	if (IS_ENABLED(CONFIG_PHYS_ADDR_T_64BIT))
		entry |= (upper_32_bits(paddr) & 0xff) << 4;

	if (prot & ETNAVIV_PROT_WRITE)
		entry |= MMUv2_PTE_WRITEABLE;

	mtlb_entry = (iova & MMUv2_MTLB_MASK) >> MMUv2_MTLB_SHIFT;
	stlb_entry = (iova & MMUv2_STLB_MASK) >> MMUv2_STLB_SHIFT;

	ret = etnaviv_iommuv2_ensure_stlb(v2_context, mtlb_entry);
	if (ret)
		return ret;

	v2_context->stlb_cpu[mtlb_entry][stlb_entry] = entry;

	return 0;
}
/*
 * Unmap one 4K page by restoring the exception marker in its STLB
 * slot. Returns the number of bytes unmapped.
 * NOTE(review): the -EINVAL on a non-4K size is returned through a
 * size_t and thus shows up as a huge "unmapped" count to the caller;
 * callers only ever pass SZ_4K, but this is worth confirming.
 */
static size_t etnaviv_iommuv2_unmap(struct etnaviv_iommu_context *context,
				    unsigned long iova, size_t size)
{
	struct etnaviv_iommuv2_context *etnaviv_domain = to_v2_context(context);
	int mtlb_entry, stlb_entry;

	if (size != SZ_4K)
		return -EINVAL;

	mtlb_entry = (iova & MMUv2_MTLB_MASK) >> MMUv2_MTLB_SHIFT;
	stlb_entry = (iova & MMUv2_STLB_MASK) >> MMUv2_STLB_SHIFT;

	etnaviv_domain->stlb_cpu[mtlb_entry][stlb_entry] = MMUv2_PTE_EXCEPTION;

	return SZ_4K;
}
/*
 * Size needed to dump this MMU context: one page for the master TLB
 * plus one page per second-level table currently present.
 */
static size_t etnaviv_iommuv2_dump_size(struct etnaviv_iommu_context *context)
{
	struct etnaviv_iommuv2_context *ctx = to_v2_context(context);
	size_t bytes = SZ_4K;	/* the MTLB itself */
	int idx;

	for (idx = 0; idx < MMUv2_MAX_STLB_ENTRIES; idx++) {
		if (ctx->mtlb_cpu[idx] & MMUv2_PTE_PRESENT)
			bytes += SZ_4K;
	}

	return bytes;
}
/*
 * Copy the page tables into buf: master table first, then every
 * present second-level table in index order. buf must be at least
 * etnaviv_iommuv2_dump_size() bytes.
 */
static void etnaviv_iommuv2_dump(struct etnaviv_iommu_context *context, void *buf)
{
	struct etnaviv_iommuv2_context *v2_context = to_v2_context(context);
	int i;

	memcpy(buf, v2_context->mtlb_cpu, SZ_4K);
	buf += SZ_4K;
	for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++)
		if (v2_context->mtlb_cpu[i] & MMUv2_PTE_PRESENT) {
			memcpy(buf, v2_context->stlb_cpu[i], SZ_4K);
			buf += SZ_4K;
		}
}
/*
 * Program the non-secure MMUv2 with this context's page tables via a
 * small command stream executed by the FE, then enable the MMU. Takes
 * a reference on the context and drops the previous one.
 */
static void etnaviv_iommuv2_restore_nonsec(struct etnaviv_gpu *gpu,
	struct etnaviv_iommu_context *context)
{
	struct etnaviv_iommuv2_context *v2_context = to_v2_context(context);
	u16 prefetch;

	/* If the MMU is already enabled the state is still there. */
	if (gpu_read(gpu, VIVS_MMUv2_CONTROL) & VIVS_MMUv2_CONTROL_ENABLE)
		return;

	if (gpu->mmu_context)
		etnaviv_iommu_context_put(gpu->mmu_context);
	gpu->mmu_context = etnaviv_iommu_context_get(context);

	prefetch = etnaviv_buffer_config_mmuv2(gpu,
				(u32)v2_context->mtlb_dma,
				(u32)context->global->bad_page_dma);
	etnaviv_gpu_start_fe(gpu, (u32)etnaviv_cmdbuf_get_pa(&gpu->buffer),
			     prefetch);
	etnaviv_gpu_wait_idle(gpu, 100);

	gpu_write(gpu, VIVS_MMUv2_CONTROL, VIVS_MMUv2_CONTROL_ENABLE);
}
/*
 * Program the MMUv2 in kernel-secure mode: point the hardware at the
 * global PTA, set up safe (fault) addresses, publish this context's
 * master table in its PTA slot and trigger a PTA load through the FE
 * before enabling the secure MMU. Takes a reference on the context
 * and drops the previous one.
 */
static void etnaviv_iommuv2_restore_sec(struct etnaviv_gpu *gpu,
	struct etnaviv_iommu_context *context)
{
	struct etnaviv_iommuv2_context *v2_context = to_v2_context(context);
	u16 prefetch;

	/* If the MMU is already enabled the state is still there. */
	if (gpu_read(gpu, VIVS_MMUv2_SEC_CONTROL) & VIVS_MMUv2_SEC_CONTROL_ENABLE)
		return;

	if (gpu->mmu_context)
		etnaviv_iommu_context_put(gpu->mmu_context);
	gpu->mmu_context = etnaviv_iommu_context_get(context);

	gpu_write(gpu, VIVS_MMUv2_PTA_ADDRESS_LOW,
		  lower_32_bits(context->global->v2.pta_dma));
	gpu_write(gpu, VIVS_MMUv2_PTA_ADDRESS_HIGH,
		  upper_32_bits(context->global->v2.pta_dma));
	gpu_write(gpu, VIVS_MMUv2_PTA_CONTROL, VIVS_MMUv2_PTA_CONTROL_ENABLE);

	/* faulting accesses are redirected to the global bad page */
	gpu_write(gpu, VIVS_MMUv2_NONSEC_SAFE_ADDR_LOW,
		  lower_32_bits(context->global->bad_page_dma));
	gpu_write(gpu, VIVS_MMUv2_SEC_SAFE_ADDR_LOW,
		  lower_32_bits(context->global->bad_page_dma));
	gpu_write(gpu, VIVS_MMUv2_SAFE_ADDRESS_CONFIG,
		  VIVS_MMUv2_SAFE_ADDRESS_CONFIG_NON_SEC_SAFE_ADDR_HIGH(
		  upper_32_bits(context->global->bad_page_dma)) |
		  VIVS_MMUv2_SAFE_ADDRESS_CONFIG_SEC_SAFE_ADDR_HIGH(
		  upper_32_bits(context->global->bad_page_dma)));

	context->global->v2.pta_cpu[v2_context->id] = v2_context->mtlb_dma |
				 	 VIVS_MMUv2_CONFIGURATION_MODE_MODE4_K;

	/* trigger a PTA load through the FE */
	prefetch = etnaviv_buffer_config_pta(gpu, v2_context->id);
	etnaviv_gpu_start_fe(gpu, (u32)etnaviv_cmdbuf_get_pa(&gpu->buffer),
			     prefetch);
	etnaviv_gpu_wait_idle(gpu, 100);

	gpu_write(gpu, VIVS_MMUv2_SEC_CONTROL, VIVS_MMUv2_SEC_CONTROL_ENABLE);
}
/* DMA address of the context's master TLB (first level pagetable). */
u32 etnaviv_iommuv2_get_mtlb_addr(struct etnaviv_iommu_context *context)
{
	struct etnaviv_iommuv2_context *v2_context = to_v2_context(context);

	return v2_context->mtlb_dma;
}
/* PTA slot id assigned to this context at allocation time. */
unsigned short etnaviv_iommuv2_get_pta_id(struct etnaviv_iommu_context *context)
{
	struct etnaviv_iommuv2_context *v2_context = to_v2_context(context);

	return v2_context->id;
}
/* Dispatch the MMU restore to the variant matching the GPU's security mode. */
static void etnaviv_iommuv2_restore(struct etnaviv_gpu *gpu,
	struct etnaviv_iommu_context *context)
{
	switch (gpu->sec_mode) {
	case ETNA_SEC_NONE:
		etnaviv_iommuv2_restore_nonsec(gpu, context);
		break;
	case ETNA_SEC_KERNEL:
		etnaviv_iommuv2_restore_sec(gpu, context);
		break;
	default:
		WARN(1, "unhandled GPU security mode\n");
		break;
	}
}
/* MMUv2 function table plugged into the generic etnaviv MMU layer. */
const struct etnaviv_iommu_ops etnaviv_iommuv2_ops = {
	.free = etnaviv_iommuv2_free,
	.map = etnaviv_iommuv2_map,
	.unmap = etnaviv_iommuv2_unmap,
	.dump_size = etnaviv_iommuv2_dump_size,
	.dump = etnaviv_iommuv2_dump,
	.restore = etnaviv_iommuv2_restore,
};
/*
 * Allocate an MMUv2 context: grab a free PTA slot, allocate and
 * initialise the master TLB with exception entries, publish it in the
 * global PTA and set up the per-context address space manager
 * (a 4GiB space minus the first page). Returns NULL on failure.
 */
struct etnaviv_iommu_context *
etnaviv_iommuv2_context_alloc(struct etnaviv_iommu_global *global)
{
	struct etnaviv_iommuv2_context *v2_context;
	struct etnaviv_iommu_context *context;

	v2_context = vzalloc(sizeof(*v2_context));
	if (!v2_context)
		return NULL;

	mutex_lock(&global->lock);
	v2_context->id = find_first_zero_bit(global->v2.pta_alloc,
					     ETNAVIV_PTA_ENTRIES);
	if (v2_context->id < ETNAVIV_PTA_ENTRIES) {
		set_bit(v2_context->id, global->v2.pta_alloc);
	} else {
		/* no free PTA slot left */
		mutex_unlock(&global->lock);
		goto out_free;
	}
	mutex_unlock(&global->lock);

	v2_context->mtlb_cpu = dma_alloc_wc(global->dev, SZ_4K,
					    &v2_context->mtlb_dma, GFP_KERNEL);
	if (!v2_context->mtlb_cpu)
		goto out_free_id;

	memset32(v2_context->mtlb_cpu, MMUv2_PTE_EXCEPTION,
		 MMUv2_MAX_STLB_ENTRIES);

	global->v2.pta_cpu[v2_context->id] = v2_context->mtlb_dma;

	context = &v2_context->base;
	context->global = global;
	kref_init(&context->refcount);
	mutex_init(&context->lock);
	INIT_LIST_HEAD(&context->mappings);
	drm_mm_init(&context->mm, SZ_4K, (u64)SZ_1G * 4 - SZ_4K);

	return context;

out_free_id:
	clear_bit(v2_context->id, global->v2.pta_alloc);
out_free:
	vfree(v2_context);
	return NULL;
}
/* linux-master: drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c */
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2015-2018 Etnaviv Project
*/
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include "common.xml.h"
#include "etnaviv_cmdbuf.h"
#include "etnaviv_drv.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"
/*
 * Unmap an iova range page by page through the backend ops; stops
 * early if the backend refuses a page. Range must be 4K aligned.
 */
static void etnaviv_context_unmap(struct etnaviv_iommu_context *context,
				  unsigned long iova, size_t size)
{
	size_t unmapped_page, unmapped = 0;
	size_t pgsize = SZ_4K;

	if (!IS_ALIGNED(iova | size, pgsize)) {
		pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%zx\n",
		       iova, size, pgsize);
		return;
	}

	while (unmapped < size) {
		unmapped_page = context->global->ops->unmap(context, iova,
							    pgsize);
		if (!unmapped_page)
			break;

		iova += unmapped_page;
		unmapped += unmapped_page;
	}
}
/*
 * Map the physically contiguous range [paddr, paddr + size) at @iova in
 * 4K page steps through the backend map op.  If any page fails to map,
 * all pages mapped so far are unmapped again and the error is returned.
 */
static int etnaviv_context_map(struct etnaviv_iommu_context *context,
			      unsigned long iova, phys_addr_t paddr,
			      size_t size, int prot)
{
	size_t pgsize = SZ_4K;
	size_t mapped = 0;
	int ret = 0;

	if (!IS_ALIGNED(iova | paddr | size, pgsize)) {
		pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%zx\n",
		       iova, &paddr, size, pgsize);
		return -EINVAL;
	}

	while (mapped < size) {
		ret = context->global->ops->map(context, iova + mapped,
						paddr + mapped, pgsize, prot);
		if (ret)
			break;

		mapped += pgsize;
	}

	/* unroll mapping in case something went wrong */
	if (ret)
		etnaviv_context_unmap(context, iova, mapped);

	return ret;
}
/*
 * Map every DMA segment of @sgt back-to-back into the GPU address space
 * starting at @iova.  On failure everything mapped so far is torn down
 * again before returning the error.
 */
static int etnaviv_iommu_map(struct etnaviv_iommu_context *context, u32 iova,
			     struct sg_table *sgt, unsigned len, int prot)
{
	struct scatterlist *sg;
	unsigned int addr = iova;
	unsigned int i;
	int ret;

	if (!context || !sgt)
		return -EINVAL;

	for_each_sgtable_dma_sg(sgt, sg, i) {
		/* extend the mapping to cover the segment's page offset */
		phys_addr_t pa = sg_dma_address(sg) - sg->offset;
		size_t bytes = sg_dma_len(sg) + sg->offset;

		VERB("map[%d]: %08x %pap(%zx)", i, iova, &pa, bytes);

		ret = etnaviv_context_map(context, addr, pa, bytes, prot);
		if (ret)
			goto fail;

		addr += bytes;
	}

	context->flush_seq++;

	return 0;

fail:
	etnaviv_context_unmap(context, iova, addr - iova);
	return ret;
}
/*
 * Remove the GPU mappings of all DMA segments of @sgt, starting at @iova.
 */
static void etnaviv_iommu_unmap(struct etnaviv_iommu_context *context, u32 iova,
				struct sg_table *sgt, unsigned len)
{
	struct scatterlist *sg;
	unsigned int addr = iova;
	int i;

	for_each_sgtable_dma_sg(sgt, sg, i) {
		size_t bytes = sg_dma_len(sg) + sg->offset;

		etnaviv_context_unmap(context, addr, bytes);

		VERB("unmap[%d]: %08x(%zx)", i, iova, bytes);

		BUG_ON(!PAGE_ALIGNED(bytes));

		addr += bytes;
	}

	context->flush_seq++;
}
/*
 * Unmap the object's pages from the GPU address space and give the address
 * range back to the context's range manager.  Caller must hold the context
 * lock.
 */
static void etnaviv_iommu_remove_mapping(struct etnaviv_iommu_context *context,
					 struct etnaviv_vram_mapping *mapping)
{
	struct etnaviv_gem_object *etnaviv_obj = mapping->object;

	lockdep_assert_held(&context->lock);

	etnaviv_iommu_unmap(context, mapping->vram_node.start,
			    etnaviv_obj->sgt, etnaviv_obj->base.size);
	drm_mm_remove_node(&mapping->vram_node);
}
/*
 * Reclaim an idle mapping: unmap it, drop its context reference and take
 * it off the context's mapping list.  Caller must hold the context lock
 * and the mapping must not be in active use.
 */
void etnaviv_iommu_reap_mapping(struct etnaviv_vram_mapping *mapping)
{
	struct etnaviv_iommu_context *context = mapping->context;

	lockdep_assert_held(&context->lock);
	WARN_ON(mapping->use);

	etnaviv_iommu_remove_mapping(context, mapping);
	etnaviv_iommu_context_put(mapping->context);
	/* clearing the context marks the mapping as reaped for other threads */
	mapping->context = NULL;
	list_del_init(&mapping->mmu_node);
}
/*
 * Find a free range of GPU virtual address space of @size bytes and insert
 * @node there.  If the address space is full, use the drm_mm eviction scan
 * to reap idle mappings until the allocation fits, then retry with
 * DRM_MM_INSERT_EVICT.  Caller must hold the context lock.  Returns 0 on
 * success or a negative errno.
 */
static int etnaviv_iommu_find_iova(struct etnaviv_iommu_context *context,
				   struct drm_mm_node *node, size_t size)
{
	struct etnaviv_vram_mapping *free = NULL;
	enum drm_mm_insert_mode mode = DRM_MM_INSERT_LOW;
	int ret;

	lockdep_assert_held(&context->lock);

	while (1) {
		struct etnaviv_vram_mapping *m, *n;
		struct drm_mm_scan scan;
		struct list_head list;
		bool found;

		ret = drm_mm_insert_node_in_range(&context->mm, node,
						  size, 0, 0, 0, U64_MAX, mode);
		if (ret != -ENOSPC)
			break;

		/* Try to retire some entries */
		drm_mm_scan_init(&scan, &context->mm, size, 0, 0, mode);

		found = 0;
		INIT_LIST_HEAD(&list);
		list_for_each_entry(free, &context->mappings, mmu_node) {
			/* If this vram node has not been used, skip this. */
			if (!free->vram_node.mm)
				continue;

			/*
			 * If the iova is pinned, then it's in-use,
			 * so we must keep its mapping.
			 */
			if (free->use)
				continue;

			list_add(&free->scan_node, &list);
			if (drm_mm_scan_add_block(&scan, &free->vram_node)) {
				found = true;
				break;
			}
		}

		if (!found) {
			/* Nothing found, clean up and fail */
			list_for_each_entry_safe(m, n, &list, scan_node)
				BUG_ON(drm_mm_scan_remove_block(&scan, &m->vram_node));
			break;
		}

		/*
		 * drm_mm does not allow any other operations while
		 * scanning, so we have to remove all blocks first.
		 * If drm_mm_scan_remove_block() returns false, we
		 * can leave the block pinned.
		 */
		list_for_each_entry_safe(m, n, &list, scan_node)
			if (!drm_mm_scan_remove_block(&scan, &m->vram_node))
				list_del_init(&m->scan_node);

		/*
		 * Unmap the blocks which need to be reaped from the MMU.
		 * Clear the mmu pointer to prevent the mapping_get finding
		 * this mapping.
		 */
		list_for_each_entry_safe(m, n, &list, scan_node) {
			etnaviv_iommu_reap_mapping(m);
			list_del_init(&m->scan_node);
		}

		mode = DRM_MM_INSERT_EVICT;

		/*
		 * We removed enough mappings so that the new allocation will
		 * succeed, retry the allocation one more time.
		 */
	}

	return ret;
}
/*
 * Insert @node at the exact GPU virtual address @va (softpin).  If the
 * range is occupied by idle mappings they are reaped first; if any
 * blocking mapping is still in use, -ENOSPC is returned.  Caller must
 * hold the context lock.
 */
static int etnaviv_iommu_insert_exact(struct etnaviv_iommu_context *context,
		   struct drm_mm_node *node, size_t size, u64 va)
{
	struct etnaviv_vram_mapping *m, *n;
	struct drm_mm_node *scan_node;
	LIST_HEAD(scan_list);
	int ret;

	lockdep_assert_held(&context->lock);

	ret = drm_mm_insert_node_in_range(&context->mm, node, size, 0, 0, va,
					  va + size, DRM_MM_INSERT_LOWEST);
	if (ret != -ENOSPC)
		return ret;

	/*
	 * When we can't insert the node, due to a existing mapping blocking
	 * the address space, there are two possible reasons:
	 * 1. Userspace genuinely messed up and tried to reuse address space
	 * before the last job using this VMA has finished executing.
	 * 2. The existing buffer mappings are idle, but the buffers are not
	 * destroyed yet (likely due to being referenced by another context) in
	 * which case the mappings will not be cleaned up and we must reap them
	 * here to make space for the new mapping.
	 */
	drm_mm_for_each_node_in_range(scan_node, &context->mm, va, va + size) {
		m = container_of(scan_node, struct etnaviv_vram_mapping,
				 vram_node);

		if (m->use)
			return -ENOSPC;

		list_add(&m->scan_node, &scan_list);
	}

	list_for_each_entry_safe(m, n, &scan_list, scan_node) {
		etnaviv_iommu_reap_mapping(m);
		list_del_init(&m->scan_node);
	}

	/* retry now that the blocking idle mappings are gone */
	return drm_mm_insert_node_in_range(&context->mm, node, size, 0, 0, va,
					   va + size, DRM_MM_INSERT_LOWEST);
}
/*
 * Map a GEM object into the GPU address space of @context.  If @va is
 * non-zero the object is softpinned at exactly that address, otherwise a
 * free range is found.  On success the mapping holds a reference on the
 * context and is added to its mapping list.  Caller must hold the object
 * lock.  Returns 0 on success or a negative errno.
 */
int etnaviv_iommu_map_gem(struct etnaviv_iommu_context *context,
	struct etnaviv_gem_object *etnaviv_obj, u32 memory_base,
	struct etnaviv_vram_mapping *mapping, u64 va)
{
	struct sg_table *sgt = etnaviv_obj->sgt;
	struct drm_mm_node *node;
	int ret;

	lockdep_assert_held(&etnaviv_obj->lock);

	mutex_lock(&context->lock);

	/* v1 MMU can optimize single entry (contiguous) scatterlists */
	if (context->global->version == ETNAVIV_IOMMU_V1 &&
	    sgt->nents == 1 && !(etnaviv_obj->flags & ETNA_BO_FORCE_MMU)) {
		u32 iova;

		iova = sg_dma_address(sgt->sgl) - memory_base;
		/* entire buffer must fit inside the 2GiB linear window */
		if (iova < 0x80000000 - sg_dma_len(sgt->sgl)) {
			mapping->iova = iova;
			mapping->context = etnaviv_iommu_context_get(context);
			list_add_tail(&mapping->mmu_node, &context->mappings);
			ret = 0;
			goto unlock;
		}
	}

	node = &mapping->vram_node;

	if (va)
		ret = etnaviv_iommu_insert_exact(context, node,
						 etnaviv_obj->base.size, va);
	else
		ret = etnaviv_iommu_find_iova(context, node,
					      etnaviv_obj->base.size);
	if (ret < 0)
		goto unlock;

	mapping->iova = node->start;
	ret = etnaviv_iommu_map(context, node->start, sgt, etnaviv_obj->base.size,
				ETNAVIV_PROT_READ | ETNAVIV_PROT_WRITE);

	if (ret < 0) {
		drm_mm_remove_node(node);
		goto unlock;
	}

	mapping->context = etnaviv_iommu_context_get(context);
	list_add_tail(&mapping->mmu_node, &context->mappings);
unlock:
	mutex_unlock(&context->lock);

	return ret;
}
/*
 * Undo etnaviv_iommu_map_gem(): unmap the object from the GPU address
 * space, remove the mapping from the context's list and drop the context
 * reference the mapping held.  Safe to call if the mapping was already
 * reaped by another thread.
 */
void etnaviv_iommu_unmap_gem(struct etnaviv_iommu_context *context,
	struct etnaviv_vram_mapping *mapping)
{
	WARN_ON(mapping->use);

	mutex_lock(&context->lock);

	/* Bail if the mapping has been reaped by another thread */
	if (!mapping->context) {
		mutex_unlock(&context->lock);
		return;
	}

	/* If the vram node is on the mm, unmap and remove the node */
	if (mapping->vram_node.mm == &context->mm)
		etnaviv_iommu_remove_mapping(context, mapping);

	list_del(&mapping->mmu_node);
	mutex_unlock(&context->lock);
	etnaviv_iommu_context_put(context);
}
/*
 * kref release callback: unmap the context's command buffer suballocation
 * and hand the context back to the backend for freeing.
 */
static void etnaviv_iommu_context_free(struct kref *kref)
{
	struct etnaviv_iommu_context *context =
		container_of(kref, struct etnaviv_iommu_context, refcount);

	etnaviv_cmdbuf_suballoc_unmap(context, &context->cmdbuf_mapping);

	context->global->ops->free(context);
}
/* Drop a context reference, freeing the context when it hits zero. */
void etnaviv_iommu_context_put(struct etnaviv_iommu_context *context)
{
	kref_put(&context->refcount, etnaviv_iommu_context_free);
}
/*
 * Create a new address space context for the MMU version recorded in
 * @global and map the shared command buffer suballocation into it.  On
 * MMUv1 the command buffer must additionally land inside the 2GiB linear
 * apperture.  Returns the new context or NULL on failure.
 */
struct etnaviv_iommu_context *
etnaviv_iommu_context_init(struct etnaviv_iommu_global *global,
			   struct etnaviv_cmdbuf_suballoc *suballoc)
{
	struct etnaviv_iommu_context *ctx;
	int ret;

	if (global->version == ETNAVIV_IOMMU_V1)
		ctx = etnaviv_iommuv1_context_alloc(global);
	else
		ctx = etnaviv_iommuv2_context_alloc(global);

	if (!ctx)
		return NULL;

	ret = etnaviv_cmdbuf_suballoc_map(suballoc, ctx, &ctx->cmdbuf_mapping,
					  global->memory_base);
	if (ret)
		goto out_free;

	if (global->version == ETNAVIV_IOMMU_V1 &&
	    ctx->cmdbuf_mapping.iova > 0x80000000) {
		dev_err(global->dev,
			"command buffer outside valid memory window\n");
		goto out_unmap;
	}

	return ctx;

out_unmap:
	etnaviv_cmdbuf_suballoc_unmap(ctx, &ctx->cmdbuf_mapping);
out_free:
	global->ops->free(ctx);
	return NULL;
}
/* Program the GPU MMU with the page tables of @context via the backend. */
void etnaviv_iommu_restore(struct etnaviv_gpu *gpu,
			   struct etnaviv_iommu_context *context)
{
	context->global->ops->restore(gpu, context);
}
/*
 * Get (or reuse) a GPU virtual address for a suballocator region.  The
 * mapping is use-counted: a second caller just bumps the count.  Returns
 * 0 on success or a negative errno.
 */
int etnaviv_iommu_get_suballoc_va(struct etnaviv_iommu_context *context,
				  struct etnaviv_vram_mapping *mapping,
				  u32 memory_base, dma_addr_t paddr,
				  size_t size)
{
	mutex_lock(&context->lock);

	/* region already mapped — just take another use reference */
	if (mapping->use > 0) {
		mapping->use++;
		mutex_unlock(&context->lock);
		return 0;
	}

	/*
	 * For MMUv1 we don't add the suballoc region to the pagetables, as
	 * those GPUs can only work with cmdbufs accessed through the linear
	 * window. Instead we manufacture a mapping to make it look uniform
	 * to the upper layers.
	 */
	if (context->global->version == ETNAVIV_IOMMU_V1) {
		mapping->iova = paddr - memory_base;
	} else {
		struct drm_mm_node *node = &mapping->vram_node;
		int ret;

		ret = etnaviv_iommu_find_iova(context, node, size);
		if (ret < 0) {
			mutex_unlock(&context->lock);
			return ret;
		}

		mapping->iova = node->start;
		/* command buffers only need to be readable by the GPU */
		ret = etnaviv_context_map(context, node->start, paddr, size,
					  ETNAVIV_PROT_READ);
		if (ret < 0) {
			drm_mm_remove_node(node);
			mutex_unlock(&context->lock);
			return ret;
		}

		context->flush_seq++;
	}

	list_add_tail(&mapping->mmu_node, &context->mappings);
	mapping->use = 1;

	mutex_unlock(&context->lock);

	return 0;
}
/*
 * Drop a use reference on a suballocator mapping; when it reaches zero
 * the region is unmapped and its address range released.  MMUv1 mappings
 * are manufactured and never actually unmapped.
 */
void etnaviv_iommu_put_suballoc_va(struct etnaviv_iommu_context *context,
		  struct etnaviv_vram_mapping *mapping)
{
	struct drm_mm_node *node = &mapping->vram_node;

	mutex_lock(&context->lock);
	mapping->use--;

	if (mapping->use > 0 || context->global->version == ETNAVIV_IOMMU_V1) {
		mutex_unlock(&context->lock);
		return;
	}

	etnaviv_context_unmap(context, node->start, node->size);
	drm_mm_remove_node(node);
	mutex_unlock(&context->lock);
}
/* Size in bytes needed to dump this context's page tables (for devcoredump). */
size_t etnaviv_iommu_dump_size(struct etnaviv_iommu_context *context)
{
	return context->global->ops->dump_size(context);
}
/* Dump this context's page tables into @buf (sized by etnaviv_iommu_dump_size). */
void etnaviv_iommu_dump(struct etnaviv_iommu_context *context, void *buf)
{
	context->global->ops->dump(context, buf);
}
/*
 * Set up (or attach to) the per-device global MMU state shared by all GPU
 * cores: detect the MMU version, allocate the bad-page sink and, for
 * MMUv2, the page table array.  All cores of one device must report the
 * same MMU version.  Returns 0 on success or a negative errno.
 */
int etnaviv_iommu_global_init(struct etnaviv_gpu *gpu)
{
	enum etnaviv_iommu_version version = ETNAVIV_IOMMU_V1;
	struct etnaviv_drm_private *priv = gpu->drm->dev_private;
	struct etnaviv_iommu_global *global;
	struct device *dev = gpu->drm->dev;

	if (gpu->identity.minor_features1 & chipMinorFeatures1_MMU_VERSION)
		version = ETNAVIV_IOMMU_V2;

	/* another core already initialized the global state — share it */
	if (priv->mmu_global) {
		if (priv->mmu_global->version != version) {
			dev_err(gpu->dev,
				"MMU version doesn't match global version\n");
			return -ENXIO;
		}

		priv->mmu_global->use++;
		return 0;
	}

	global = kzalloc(sizeof(*global), GFP_KERNEL);
	if (!global)
		return -ENOMEM;

	/* page all faulting accesses are redirected to, filled with a marker */
	global->bad_page_cpu = dma_alloc_wc(dev, SZ_4K, &global->bad_page_dma,
					    GFP_KERNEL);
	if (!global->bad_page_cpu)
		goto free_global;

	memset32(global->bad_page_cpu, 0xdead55aa, SZ_4K / sizeof(u32));

	if (version == ETNAVIV_IOMMU_V2) {
		global->v2.pta_cpu = dma_alloc_wc(dev, ETNAVIV_PTA_SIZE,
					       &global->v2.pta_dma, GFP_KERNEL);
		if (!global->v2.pta_cpu)
			goto free_bad_page;
	}

	global->dev = dev;
	global->version = version;
	global->use = 1;
	mutex_init(&global->lock);

	if (version == ETNAVIV_IOMMU_V1)
		global->ops = &etnaviv_iommuv1_ops;
	else
		global->ops = &etnaviv_iommuv2_ops;

	priv->mmu_global = global;

	return 0;

free_bad_page:
	dma_free_wc(dev, SZ_4K, global->bad_page_cpu, global->bad_page_dma);
free_global:
	kfree(global);

	return -ENOMEM;
}
/*
 * Release one core's reference on the global MMU state and free it when
 * the last core detaches.
 */
void etnaviv_iommu_global_fini(struct etnaviv_gpu *gpu)
{
	struct etnaviv_drm_private *priv = gpu->drm->dev_private;
	struct etnaviv_iommu_global *global = priv->mmu_global;

	if (!global)
		return;

	if (--global->use > 0)
		return;

	if (global->v2.pta_cpu)
		dma_free_wc(global->dev, ETNAVIV_PTA_SIZE,
			    global->v2.pta_cpu, global->v2.pta_dma);

	if (global->bad_page_cpu)
		dma_free_wc(global->dev, SZ_4K,
			    global->bad_page_cpu, global->bad_page_dma);

	mutex_destroy(&global->lock);
	kfree(global);

	priv->mmu_global = NULL;
}
| linux-master | drivers/gpu/drm/etnaviv/etnaviv_mmu.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2015-2018 Etnaviv Project
*/
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/delay.h>
#include <linux/dma-fence.h>
#include <linux/dma-mapping.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
#include <linux/thermal.h>
#include "etnaviv_cmdbuf.h"
#include "etnaviv_dump.h"
#include "etnaviv_gpu.h"
#include "etnaviv_gem.h"
#include "etnaviv_mmu.h"
#include "etnaviv_perfmon.h"
#include "etnaviv_sched.h"
#include "common.xml.h"
#include "state.xml.h"
#include "state_hi.xml.h"
#include "cmdstream.xml.h"
/* Platform device id table for the legacy (non-DT) 2D core binding. */
static const struct platform_device_id gpu_ids[] = {
	{ .name = "etnaviv-gpu,2d" },
	{ },
};
/*
* Driver functions:
*/
/*
 * Return the requested device/identity parameter in *value, for the
 * GET_PARAM userspace ioctl.  Returns 0 on success or -EINVAL for an
 * unknown parameter.
 */
int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value)
{
	struct etnaviv_drm_private *priv = gpu->drm->dev_private;

	switch (param) {
	case ETNAVIV_PARAM_GPU_MODEL:
		*value = gpu->identity.model;
		break;
	case ETNAVIV_PARAM_GPU_REVISION:
		*value = gpu->identity.revision;
		break;
	case ETNAVIV_PARAM_GPU_FEATURES_0:
		*value = gpu->identity.features;
		break;
	case ETNAVIV_PARAM_GPU_FEATURES_1:
		*value = gpu->identity.minor_features0;
		break;
	case ETNAVIV_PARAM_GPU_FEATURES_2:
		*value = gpu->identity.minor_features1;
		break;
	case ETNAVIV_PARAM_GPU_FEATURES_3:
		*value = gpu->identity.minor_features2;
		break;
	case ETNAVIV_PARAM_GPU_FEATURES_4:
		*value = gpu->identity.minor_features3;
		break;
	case ETNAVIV_PARAM_GPU_FEATURES_5:
		*value = gpu->identity.minor_features4;
		break;
	case ETNAVIV_PARAM_GPU_FEATURES_6:
		*value = gpu->identity.minor_features5;
		break;
	case ETNAVIV_PARAM_GPU_FEATURES_7:
		*value = gpu->identity.minor_features6;
		break;
	case ETNAVIV_PARAM_GPU_FEATURES_8:
		*value = gpu->identity.minor_features7;
		break;
	case ETNAVIV_PARAM_GPU_FEATURES_9:
		*value = gpu->identity.minor_features8;
		break;
	case ETNAVIV_PARAM_GPU_FEATURES_10:
		*value = gpu->identity.minor_features9;
		break;
	case ETNAVIV_PARAM_GPU_FEATURES_11:
		*value = gpu->identity.minor_features10;
		break;
	case ETNAVIV_PARAM_GPU_FEATURES_12:
		*value = gpu->identity.minor_features11;
		break;
	case ETNAVIV_PARAM_GPU_STREAM_COUNT:
		*value = gpu->identity.stream_count;
		break;
	case ETNAVIV_PARAM_GPU_REGISTER_MAX:
		*value = gpu->identity.register_max;
		break;
	case ETNAVIV_PARAM_GPU_THREAD_COUNT:
		*value = gpu->identity.thread_count;
		break;
	case ETNAVIV_PARAM_GPU_VERTEX_CACHE_SIZE:
		*value = gpu->identity.vertex_cache_size;
		break;
	case ETNAVIV_PARAM_GPU_SHADER_CORE_COUNT:
		*value = gpu->identity.shader_core_count;
		break;
	case ETNAVIV_PARAM_GPU_PIXEL_PIPES:
		*value = gpu->identity.pixel_pipes;
		break;
	case ETNAVIV_PARAM_GPU_VERTEX_OUTPUT_BUFFER_SIZE:
		*value = gpu->identity.vertex_output_buffer_size;
		break;
	case ETNAVIV_PARAM_GPU_BUFFER_SIZE:
		*value = gpu->identity.buffer_size;
		break;
	case ETNAVIV_PARAM_GPU_INSTRUCTION_COUNT:
		*value = gpu->identity.instruction_count;
		break;
	case ETNAVIV_PARAM_GPU_NUM_CONSTANTS:
		*value = gpu->identity.num_constants;
		break;
	case ETNAVIV_PARAM_GPU_NUM_VARYINGS:
		*value = gpu->identity.varyings_count;
		break;
	case ETNAVIV_PARAM_SOFTPIN_START_ADDR:
		/* softpin is only available with a per-process MMUv2 space */
		if (priv->mmu_global->version == ETNAVIV_IOMMU_V2)
			*value = ETNAVIV_SOFTPIN_START_ADDRESS;
		else
			*value = ~0ULL;
		break;
	case ETNAVIV_PARAM_GPU_PRODUCT_ID:
		*value = gpu->identity.product_id;
		break;
	case ETNAVIV_PARAM_GPU_CUSTOMER_ID:
		*value = gpu->identity.customer_id;
		break;
	case ETNAVIV_PARAM_GPU_ECO_ID:
		*value = gpu->identity.eco_id;
		break;
	default:
		DBG("%s: invalid param: %u", dev_name(gpu->dev), param);
		return -EINVAL;
	}

	return 0;
}
/* True if the GPU matches the given model and exact revision. */
#define etnaviv_is_model_rev(gpu, mod, rev) \
	((gpu)->identity.model == chipModel_##mod && \
	 (gpu)->identity.revision == rev)
/* Extract a named bitfield from a register value via generated masks. */
#define etnaviv_field(val, field) \
	(((val) & field##__MASK) >> field##__SHIFT)
static void etnaviv_hw_specs(struct etnaviv_gpu *gpu)
{
if (gpu->identity.minor_features0 &
chipMinorFeatures0_MORE_MINOR_FEATURES) {
u32 specs[4];
unsigned int streams;
specs[0] = gpu_read(gpu, VIVS_HI_CHIP_SPECS);
specs[1] = gpu_read(gpu, VIVS_HI_CHIP_SPECS_2);
specs[2] = gpu_read(gpu, VIVS_HI_CHIP_SPECS_3);
specs[3] = gpu_read(gpu, VIVS_HI_CHIP_SPECS_4);
gpu->identity.stream_count = etnaviv_field(specs[0],
VIVS_HI_CHIP_SPECS_STREAM_COUNT);
gpu->identity.register_max = etnaviv_field(specs[0],
VIVS_HI_CHIP_SPECS_REGISTER_MAX);
gpu->identity.thread_count = etnaviv_field(specs[0],
VIVS_HI_CHIP_SPECS_THREAD_COUNT);
gpu->identity.vertex_cache_size = etnaviv_field(specs[0],
VIVS_HI_CHIP_SPECS_VERTEX_CACHE_SIZE);
gpu->identity.shader_core_count = etnaviv_field(specs[0],
VIVS_HI_CHIP_SPECS_SHADER_CORE_COUNT);
gpu->identity.pixel_pipes = etnaviv_field(specs[0],
VIVS_HI_CHIP_SPECS_PIXEL_PIPES);
gpu->identity.vertex_output_buffer_size =
etnaviv_field(specs[0],
VIVS_HI_CHIP_SPECS_VERTEX_OUTPUT_BUFFER_SIZE);
gpu->identity.buffer_size = etnaviv_field(specs[1],
VIVS_HI_CHIP_SPECS_2_BUFFER_SIZE);
gpu->identity.instruction_count = etnaviv_field(specs[1],
VIVS_HI_CHIP_SPECS_2_INSTRUCTION_COUNT);
gpu->identity.num_constants = etnaviv_field(specs[1],
VIVS_HI_CHIP_SPECS_2_NUM_CONSTANTS);
gpu->identity.varyings_count = etnaviv_field(specs[2],
VIVS_HI_CHIP_SPECS_3_VARYINGS_COUNT);
/* This overrides the value from older register if non-zero */
streams = etnaviv_field(specs[3],
VIVS_HI_CHIP_SPECS_4_STREAM_COUNT);
if (streams)
gpu->identity.stream_count = streams;
}
/* Fill in the stream count if not specified */
if (gpu->identity.stream_count == 0) {
if (gpu->identity.model >= 0x1000)
gpu->identity.stream_count = 4;
else
gpu->identity.stream_count = 1;
}
/* Convert the register max value */
if (gpu->identity.register_max)
gpu->identity.register_max = 1 << gpu->identity.register_max;
else if (gpu->identity.model == chipModel_GC400)
gpu->identity.register_max = 32;
else
gpu->identity.register_max = 64;
/* Convert thread count */
if (gpu->identity.thread_count)
gpu->identity.thread_count = 1 << gpu->identity.thread_count;
else if (gpu->identity.model == chipModel_GC400)
gpu->identity.thread_count = 64;
else if (gpu->identity.model == chipModel_GC500 ||
gpu->identity.model == chipModel_GC530)
gpu->identity.thread_count = 128;
else
gpu->identity.thread_count = 256;
if (gpu->identity.vertex_cache_size == 0)
gpu->identity.vertex_cache_size = 8;
if (gpu->identity.shader_core_count == 0) {
if (gpu->identity.model >= 0x1000)
gpu->identity.shader_core_count = 2;
else
gpu->identity.shader_core_count = 1;
}
if (gpu->identity.pixel_pipes == 0)
gpu->identity.pixel_pipes = 1;
/* Convert virtex buffer size */
if (gpu->identity.vertex_output_buffer_size) {
gpu->identity.vertex_output_buffer_size =
1 << gpu->identity.vertex_output_buffer_size;
} else if (gpu->identity.model == chipModel_GC400) {
if (gpu->identity.revision < 0x4000)
gpu->identity.vertex_output_buffer_size = 512;
else if (gpu->identity.revision < 0x4200)
gpu->identity.vertex_output_buffer_size = 256;
else
gpu->identity.vertex_output_buffer_size = 128;
} else {
gpu->identity.vertex_output_buffer_size = 512;
}
switch (gpu->identity.instruction_count) {
case 0:
if (etnaviv_is_model_rev(gpu, GC2000, 0x5108) ||
gpu->identity.model == chipModel_GC880)
gpu->identity.instruction_count = 512;
else
gpu->identity.instruction_count = 256;
break;
case 1:
gpu->identity.instruction_count = 1024;
break;
case 2:
gpu->identity.instruction_count = 2048;
break;
default:
gpu->identity.instruction_count = 256;
break;
}
if (gpu->identity.num_constants == 0)
gpu->identity.num_constants = 168;
if (gpu->identity.varyings_count == 0) {
if (gpu->identity.minor_features1 & chipMinorFeatures1_HALTI0)
gpu->identity.varyings_count = 12;
else
gpu->identity.varyings_count = 8;
}
/*
* For some cores, two varyings are consumed for position, so the
* maximum varying count needs to be reduced by one.
*/
if (etnaviv_is_model_rev(gpu, GC5000, 0x5434) ||
etnaviv_is_model_rev(gpu, GC4000, 0x5222) ||
etnaviv_is_model_rev(gpu, GC4000, 0x5245) ||
etnaviv_is_model_rev(gpu, GC4000, 0x5208) ||
etnaviv_is_model_rev(gpu, GC3000, 0x5435) ||
etnaviv_is_model_rev(gpu, GC2200, 0x5244) ||
etnaviv_is_model_rev(gpu, GC2100, 0x5108) ||
etnaviv_is_model_rev(gpu, GC2000, 0x5108) ||
etnaviv_is_model_rev(gpu, GC1500, 0x5246) ||
etnaviv_is_model_rev(gpu, GC880, 0x5107) ||
etnaviv_is_model_rev(gpu, GC880, 0x5106))
gpu->identity.varyings_count -= 1;
}
/*
 * Identify the GPU core: read model/revision/feature registers, apply the
 * many model-specific quirks and fixups, then derive the idle mask and
 * the remaining specs.
 */
static void etnaviv_hw_identify(struct etnaviv_gpu *gpu)
{
	u32 chipIdentity;

	chipIdentity = gpu_read(gpu, VIVS_HI_CHIP_IDENTITY);

	/* Special case for older graphic cores. */
	if (etnaviv_field(chipIdentity, VIVS_HI_CHIP_IDENTITY_FAMILY) == 0x01) {
		gpu->identity.model    = chipModel_GC500;
		gpu->identity.revision = etnaviv_field(chipIdentity,
					 VIVS_HI_CHIP_IDENTITY_REVISION);
	} else {
		u32 chipDate = gpu_read(gpu, VIVS_HI_CHIP_DATE);

		gpu->identity.model = gpu_read(gpu, VIVS_HI_CHIP_MODEL);
		gpu->identity.revision = gpu_read(gpu, VIVS_HI_CHIP_REV);
		gpu->identity.customer_id = gpu_read(gpu, VIVS_HI_CHIP_CUSTOMER_ID);

		/*
		 * Reading these two registers on GC600 rev 0x19 result in a
		 * unhandled fault: external abort on non-linefetch
		 */
		if (!etnaviv_is_model_rev(gpu, GC600, 0x19)) {
			gpu->identity.product_id = gpu_read(gpu, VIVS_HI_CHIP_PRODUCT_ID);
			gpu->identity.eco_id = gpu_read(gpu, VIVS_HI_CHIP_ECO_ID);
		}

		/*
		 * !!!! HACK ALERT !!!!
		 * Because people change device IDs without letting software
		 * know about it - here is the hack to make it all look the
		 * same.  Only for GC400 family.
		 */
		if ((gpu->identity.model & 0xff00) == 0x0400 &&
		    gpu->identity.model != chipModel_GC420) {
			gpu->identity.model = gpu->identity.model & 0x0400;
		}

		/* Another special case */
		if (etnaviv_is_model_rev(gpu, GC300, 0x2201)) {
			u32 chipTime = gpu_read(gpu, VIVS_HI_CHIP_TIME);

			if (chipDate == 0x20080814 && chipTime == 0x12051100) {
				/*
				 * This IP has an ECO; put the correct
				 * revision in it.
				 */
				gpu->identity.revision = 0x1051;
			}
		}

		/*
		 * NXP likes to call the GPU on the i.MX6QP GC2000+, but in
		 * reality it's just a re-branded GC3000. We can identify this
		 * core by the upper half of the revision register being all 1.
		 * Fix model/rev here, so all other places can refer to this
		 * core by its real identity.
		 */
		if (etnaviv_is_model_rev(gpu, GC2000, 0xffff5450)) {
			gpu->identity.model = chipModel_GC3000;
			gpu->identity.revision &= 0xffff;
		}

		if (etnaviv_is_model_rev(gpu, GC1000, 0x5037) && (chipDate == 0x20120617))
			gpu->identity.eco_id = 1;

		if (etnaviv_is_model_rev(gpu, GC320, 0x5303) && (chipDate == 0x20140511))
			gpu->identity.eco_id = 1;
	}

	dev_info(gpu->dev, "model: GC%x, revision: %x\n",
		 gpu->identity.model, gpu->identity.revision);

	/* default: everything except the AXI low-power bit must be idle */
	gpu->idle_mask = ~VIVS_HI_IDLE_STATE_AXI_LP;

	/*
	 * If there is a match in the HWDB, we aren't interested in the
	 * remaining register values, as they might be wrong.
	 */
	if (etnaviv_fill_identity_from_hwdb(gpu))
		return;

	gpu->identity.features = gpu_read(gpu, VIVS_HI_CHIP_FEATURE);

	/* Disable fast clear on GC700. */
	if (gpu->identity.model == chipModel_GC700)
		gpu->identity.features &= ~chipFeatures_FAST_CLEAR;

	/* These models/revisions don't have the 2D pipe bit */
	if ((gpu->identity.model == chipModel_GC500 &&
	     gpu->identity.revision <= 2) ||
	    gpu->identity.model == chipModel_GC300)
		gpu->identity.features |= chipFeatures_PIPE_2D;

	if ((gpu->identity.model == chipModel_GC500 &&
	     gpu->identity.revision < 2) ||
	    (gpu->identity.model == chipModel_GC300 &&
	     gpu->identity.revision < 0x2000)) {

		/*
		 * GC500 rev 1.x and GC300 rev < 2.0 doesn't have these
		 * registers.
		 */
		gpu->identity.minor_features0 = 0;
		gpu->identity.minor_features1 = 0;
		gpu->identity.minor_features2 = 0;
		gpu->identity.minor_features3 = 0;
		gpu->identity.minor_features4 = 0;
		gpu->identity.minor_features5 = 0;
	} else
		gpu->identity.minor_features0 =
				gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_0);

	if (gpu->identity.minor_features0 &
	    chipMinorFeatures0_MORE_MINOR_FEATURES) {
		gpu->identity.minor_features1 =
				gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_1);
		gpu->identity.minor_features2 =
				gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_2);
		gpu->identity.minor_features3 =
				gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_3);
		gpu->identity.minor_features4 =
				gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_4);
		gpu->identity.minor_features5 =
				gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_5);
	}

	/* GC600/300 idle register reports zero bits where modules aren't present */
	if (gpu->identity.model == chipModel_GC600 ||
	    gpu->identity.model == chipModel_GC300)
		gpu->idle_mask = VIVS_HI_IDLE_STATE_TX |
				 VIVS_HI_IDLE_STATE_RA |
				 VIVS_HI_IDLE_STATE_SE |
				 VIVS_HI_IDLE_STATE_PA |
				 VIVS_HI_IDLE_STATE_SH |
				 VIVS_HI_IDLE_STATE_PE |
				 VIVS_HI_IDLE_STATE_DE |
				 VIVS_HI_IDLE_STATE_FE;

	etnaviv_hw_specs(gpu);
}
/*
 * Latch a new clock control value: write it with the FSCALE load bit set,
 * then clear the load bit to commit it.
 */
static void etnaviv_gpu_load_clock(struct etnaviv_gpu *gpu, u32 clock)
{
	gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, clock |
		  VIVS_HI_CLOCK_CONTROL_FSCALE_CMD_LOAD);
	gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, clock);
}
/*
 * Apply the current frequency scaling factor, either via the clock
 * framework (cores with dynamic frequency scaling) or via the internal
 * FSCALE divider, and recompute the FE idle-loop wait cycle count.
 */
static void etnaviv_gpu_update_clock(struct etnaviv_gpu *gpu)
{
	if (gpu->identity.minor_features2 &
	    chipMinorFeatures2_DYNAMIC_FREQUENCY_SCALING) {
		clk_set_rate(gpu->clk_core,
			     gpu->base_rate_core >> gpu->freq_scale);
		clk_set_rate(gpu->clk_shader,
			     gpu->base_rate_shader >> gpu->freq_scale);
	} else {
		unsigned int fscale = 1 << (6 - gpu->freq_scale);
		u32 clock = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);

		clock &= ~VIVS_HI_CLOCK_CONTROL_FSCALE_VAL__MASK;
		clock |= VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(fscale);
		etnaviv_gpu_load_clock(gpu, clock);
	}

	/*
	 * Choose number of wait cycles to target a ~30us (1/32768) max latency
	 * until new work is picked up by the FE when it polls in the idle loop.
	 * If the GPU base frequency is unknown use 200 wait cycles.
	 */
	gpu->fe_waitcycles = clamp(gpu->base_rate_core >> (15 - gpu->freq_scale),
				   200UL, 0xffffUL);
}
/*
 * Soft-reset the GPU core, retrying for up to one second until both the
 * front end and the 3D/2D pipes report idle.  On success the clock is
 * reprogrammed, the MMU context is dropped and the state machine is put
 * into ETNA_GPU_STATE_RESET.  Returns 0 on success or -EBUSY on timeout.
 */
static int etnaviv_hw_reset(struct etnaviv_gpu *gpu)
{
	u32 control, idle;
	unsigned long timeout;
	bool failed = true;

	/* We hope that the GPU resets in under one second */
	timeout = jiffies + msecs_to_jiffies(1000);

	while (time_is_after_jiffies(timeout)) {
		/* enable clock */
		unsigned int fscale = 1 << (6 - gpu->freq_scale);
		control = VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(fscale);
		etnaviv_gpu_load_clock(gpu, control);

		/* isolate the GPU. */
		control |= VIVS_HI_CLOCK_CONTROL_ISOLATE_GPU;
		gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);

		if (gpu->sec_mode == ETNA_SEC_KERNEL) {
			/* secure cores are reset through the MMU AHB block */
			gpu_write(gpu, VIVS_MMUv2_AHB_CONTROL,
			          VIVS_MMUv2_AHB_CONTROL_RESET);
		} else {
			/* set soft reset. */
			control |= VIVS_HI_CLOCK_CONTROL_SOFT_RESET;
			gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);
		}

		/* wait for reset. */
		usleep_range(10, 20);

		/* reset soft reset bit. */
		control &= ~VIVS_HI_CLOCK_CONTROL_SOFT_RESET;
		gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);

		/* reset GPU isolation. */
		control &= ~VIVS_HI_CLOCK_CONTROL_ISOLATE_GPU;
		gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);

		/* read idle register. */
		idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);

		/* try resetting again if FE is not idle */
		if ((idle & VIVS_HI_IDLE_STATE_FE) == 0) {
			dev_dbg(gpu->dev, "FE is not idle\n");
			continue;
		}

		/* read reset register. */
		control = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);

		/* is the GPU idle? */
		if (((control & VIVS_HI_CLOCK_CONTROL_IDLE_3D) == 0) ||
		    ((control & VIVS_HI_CLOCK_CONTROL_IDLE_2D) == 0)) {
			dev_dbg(gpu->dev, "GPU is not idle\n");
			continue;
		}

		/* disable debug registers, as they are not normally needed */
		control |= VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS;
		gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);

		failed = false;
		break;
	}

	if (failed) {
		idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);
		control = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);

		dev_err(gpu->dev, "GPU failed to reset: FE %sidle, 3D %sidle, 2D %sidle\n",
			idle & VIVS_HI_IDLE_STATE_FE ? "" : "not ",
			control & VIVS_HI_CLOCK_CONTROL_IDLE_3D ? "" : "not ",
			control & VIVS_HI_CLOCK_CONTROL_IDLE_2D ? "" : "not ");

		return -EBUSY;
	}

	/* We rely on the GPU running, so program the clock */
	etnaviv_gpu_update_clock(gpu);

	gpu->state = ETNA_GPU_STATE_RESET;
	gpu->exec_state = -1;
	if (gpu->mmu_context)
		etnaviv_iommu_context_put(gpu->mmu_context);
	gpu->mmu_context = NULL;

	return 0;
}
/*
 * Enable module-level clock gating, with the per-module gating disables
 * required as workarounds on the various affected core revisions.
 */
static void etnaviv_gpu_enable_mlcg(struct etnaviv_gpu *gpu)
{
	u32 pmc, ppc;

	/* enable clock gating */
	ppc = gpu_read_power(gpu, VIVS_PM_POWER_CONTROLS);
	ppc |= VIVS_PM_POWER_CONTROLS_ENABLE_MODULE_CLOCK_GATING;

	/* Disable stall module clock gating for 4.3.0.1 and 4.3.0.2 revs */
	if (gpu->identity.revision == 0x4301 ||
	    gpu->identity.revision == 0x4302)
		ppc |= VIVS_PM_POWER_CONTROLS_DISABLE_STALL_MODULE_CLOCK_GATING;

	gpu_write_power(gpu, VIVS_PM_POWER_CONTROLS, ppc);

	pmc = gpu_read_power(gpu, VIVS_PM_MODULE_CONTROLS);

	/* Disable PA clock gating for GC400+ without bugfix except for GC420 */
	if (gpu->identity.model >= chipModel_GC400 &&
	    gpu->identity.model != chipModel_GC420 &&
	    !(gpu->identity.minor_features3 & chipMinorFeatures3_BUG_FIXES12))
		pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_PA;

	/*
	 * Disable PE clock gating on revs < 5.0.0.0 when HZ is
	 * present without a bug fix.
	 */
	if (gpu->identity.revision < 0x5000 &&
	    gpu->identity.minor_features0 & chipMinorFeatures0_HZ &&
	    !(gpu->identity.minor_features1 &
	      chipMinorFeatures1_DISABLE_PE_GATING))
		pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_PE;

	if (gpu->identity.revision < 0x5422)
		pmc |= BIT(15); /* Unknown bit */

	/* Disable TX clock gating on affected core revisions. */
	if (etnaviv_is_model_rev(gpu, GC4000, 0x5222) ||
	    etnaviv_is_model_rev(gpu, GC2000, 0x5108) ||
	    etnaviv_is_model_rev(gpu, GC2000, 0x6202) ||
	    etnaviv_is_model_rev(gpu, GC2000, 0x6203))
		pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_TX;

	/* Disable SE and RA clock gating on affected core revisions. */
	if (etnaviv_is_model_rev(gpu, GC7000, 0x6202))
		pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_SE |
		       VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_RA;

	pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_RA_HZ;
	pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_RA_EZ;

	gpu_write_power(gpu, VIVS_PM_MODULE_CONTROLS, pmc);
}
/*
 * Kick the front end: point it at a command buffer at GPU address
 * @address and start fetching @prefetch 64-bit words.  On kernel-secure
 * cores the secure command control register must be written as well.
 */
void etnaviv_gpu_start_fe(struct etnaviv_gpu *gpu, u32 address, u16 prefetch)
{
	gpu_write(gpu, VIVS_FE_COMMAND_ADDRESS, address);
	gpu_write(gpu, VIVS_FE_COMMAND_CONTROL,
		  VIVS_FE_COMMAND_CONTROL_ENABLE |
		  VIVS_FE_COMMAND_CONTROL_PREFETCH(prefetch));

	if (gpu->sec_mode == ETNA_SEC_KERNEL) {
		gpu_write(gpu, VIVS_MMUv2_SEC_COMMAND_CONTROL,
			  VIVS_MMUv2_SEC_COMMAND_CONTROL_ENABLE |
			  VIVS_MMUv2_SEC_COMMAND_CONTROL_PREFETCH(prefetch));
	}
}
/*
 * Restore the MMU context and start the front end executing the kernel
 * ring buffer's idle loop, moving the GPU to the RUNNING state.
 */
static void etnaviv_gpu_start_fe_idleloop(struct etnaviv_gpu *gpu,
					  struct etnaviv_iommu_context *context)
{
	u16 prefetch;
	u32 address;

	WARN_ON(gpu->state != ETNA_GPU_STATE_INITIALIZED);

	/* setup the MMU */
	etnaviv_iommu_restore(gpu, context);

	/* Start command processor */
	prefetch = etnaviv_buffer_init(gpu);
	address = etnaviv_cmdbuf_get_va(&gpu->buffer,
					&gpu->mmu_context->cmdbuf_mapping);

	etnaviv_gpu_start_fe(gpu, address, prefetch);

	gpu->state = ETNA_GPU_STATE_RUNNING;
}
/*
 * Program VIVS_PM_PULSE_EATER, applying the model/revision specific
 * tweaks taken from the vendor driver.
 */
static void etnaviv_gpu_setup_pulse_eater(struct etnaviv_gpu *gpu)
{
	/*
	 * Base value for VIVS_PM_PULSE_EATER register on models where it
	 * cannot be read, extracted from vivante kernel driver.
	 */
	u32 pulse_eater = 0x01590880;

	if (etnaviv_is_model_rev(gpu, GC4000, 0x5208) ||
	    etnaviv_is_model_rev(gpu, GC4000, 0x5222))
		pulse_eater |= BIT(23);

	if (etnaviv_is_model_rev(gpu, GC1000, 0x5039) ||
	    etnaviv_is_model_rev(gpu, GC1000, 0x5040)) {
		pulse_eater &= ~BIT(16);
		pulse_eater |= BIT(17);
	}

	if ((gpu->identity.revision > 0x5420) &&
	    (gpu->identity.features & chipFeatures_PIPE_3D)) {
		/* Performance fix: disable internal DFS */
		pulse_eater = gpu_read_power(gpu, VIVS_PM_PULSE_EATER);
		pulse_eater |= BIT(18);
	}

	gpu_write_power(gpu, VIVS_PM_PULSE_EATER, pulse_eater);
}
/*
 * Bring the GPU hardware into a known-good state after identification or
 * reset: apply per-model memory/bus workarounds, enable clock gating,
 * configure the AXI cache attributes and pulse eater, and unmask all
 * interrupts. Caller must hold gpu->lock. Leaves the GPU in the
 * INITIALIZED state (FE not yet running).
 */
static void etnaviv_gpu_hw_init(struct etnaviv_gpu *gpu)
{
	WARN_ON(!(gpu->state == ETNA_GPU_STATE_IDENTIFIED ||
		  gpu->state == ETNA_GPU_STATE_RESET));
	/* GC320 memory debug workaround, keyed off the chip time register */
	if ((etnaviv_is_model_rev(gpu, GC320, 0x5007) ||
	     etnaviv_is_model_rev(gpu, GC320, 0x5220)) &&
	    gpu_read(gpu, VIVS_HI_CHIP_TIME) != 0x2062400) {
		u32 mc_memory_debug;
		mc_memory_debug = gpu_read(gpu, VIVS_MC_DEBUG_MEMORY) & ~0xff;
		if (gpu->identity.revision == 0x5007)
			mc_memory_debug |= 0x0c;
		else
			mc_memory_debug |= 0x08;
		gpu_write(gpu, VIVS_MC_DEBUG_MEMORY, mc_memory_debug);
	}
	/* enable module-level clock gating */
	etnaviv_gpu_enable_mlcg(gpu);
	/*
	 * Update GPU AXI cache attribute to "cacheable, no allocate".
	 * This is necessary to prevent the iMX6 SoC locking up.
	 */
	gpu_write(gpu, VIVS_HI_AXI_CONFIG,
		  VIVS_HI_AXI_CONFIG_AWCACHE(2) |
		  VIVS_HI_AXI_CONFIG_ARCACHE(2));
	/* GC2000 rev 5108 needs a special bus config */
	if (etnaviv_is_model_rev(gpu, GC2000, 0x5108)) {
		u32 bus_config = gpu_read(gpu, VIVS_MC_BUS_CONFIG);
		bus_config &= ~(VIVS_MC_BUS_CONFIG_FE_BUS_CONFIG__MASK |
				VIVS_MC_BUS_CONFIG_TX_BUS_CONFIG__MASK);
		bus_config |= VIVS_MC_BUS_CONFIG_FE_BUS_CONFIG(1) |
			      VIVS_MC_BUS_CONFIG_TX_BUS_CONFIG(0);
		gpu_write(gpu, VIVS_MC_BUS_CONFIG, bus_config);
	}
	/* With kernel-owned security state, allow non-secure AHB access */
	if (gpu->sec_mode == ETNA_SEC_KERNEL) {
		u32 val = gpu_read(gpu, VIVS_MMUv2_AHB_CONTROL);
		val |= VIVS_MMUv2_AHB_CONTROL_NONSEC_ACCESS;
		gpu_write(gpu, VIVS_MMUv2_AHB_CONTROL, val);
	}
	/* setup the pulse eater */
	etnaviv_gpu_setup_pulse_eater(gpu);
	/* unmask every interrupt source */
	gpu_write(gpu, VIVS_HI_INTR_ENBL, ~0U);
	gpu->state = ETNA_GPU_STATE_INITIALIZED;
}
/*
 * One-time GPU initialization: identify the core, reset it, set up the
 * global MMU state and kernel command buffer, configure the linear window,
 * initialize event management and finally program the hardware.
 *
 * Returns 0 on success or a negative error code. The runtime PM reference
 * taken at entry is always dropped again before returning.
 */
int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
{
	struct etnaviv_drm_private *priv = gpu->drm->dev_private;
	dma_addr_t cmdbuf_paddr;
	int ret, i;
	ret = pm_runtime_get_sync(gpu->dev);
	if (ret < 0) {
		dev_err(gpu->dev, "Failed to enable GPU power domain\n");
		goto pm_put;
	}
	etnaviv_hw_identify(gpu);
	if (gpu->identity.model == 0) {
		dev_err(gpu->dev, "Unknown GPU model\n");
		ret = -ENXIO;
		goto fail;
	}
	if (gpu->identity.nn_core_count > 0)
		dev_warn(gpu->dev, "etnaviv has been instantiated on a NPU, "
                         "for which the UAPI is still experimental\n");
	/* Exclude VG cores with FE2.0 */
	if (gpu->identity.features & chipFeatures_PIPE_VG &&
	    gpu->identity.features & chipFeatures_FE20) {
		dev_info(gpu->dev, "Ignoring GPU with VG and FE2.0\n");
		ret = -ENXIO;
		goto fail;
	}
	/*
	 * On cores with security features supported, we claim control over the
	 * security states.
	 */
	if ((gpu->identity.minor_features7 & chipMinorFeatures7_BIT_SECURITY) &&
	    (gpu->identity.minor_features10 & chipMinorFeatures10_SECURITY_AHB))
		gpu->sec_mode = ETNA_SEC_KERNEL;
	gpu->state = ETNA_GPU_STATE_IDENTIFIED;
	ret = etnaviv_hw_reset(gpu);
	if (ret) {
		dev_err(gpu->dev, "GPU reset failed\n");
		goto fail;
	}
	ret = etnaviv_iommu_global_init(gpu);
	if (ret)
		goto fail;
	/*
	 * If the GPU is part of a system with DMA addressing limitations,
	 * request pages for our SHM backend buffers from the DMA32 zone to
	 * hopefully avoid performance killing SWIOTLB bounce buffering.
	 */
	if (dma_addressing_limited(gpu->dev))
		priv->shm_gfp_mask |= GFP_DMA32;
	/* Create buffer: */
	ret = etnaviv_cmdbuf_init(priv->cmdbuf_suballoc, &gpu->buffer,
				  PAGE_SIZE);
	if (ret) {
		dev_err(gpu->dev, "could not create command buffer\n");
		goto fail;
	}
	/*
	 * Set the GPU linear window to cover the cmdbuf region, as the GPU
	 * won't be able to start execution otherwise. The alignment to 128M is
	 * chosen arbitrarily but helps in debugging, as the MMU offset
	 * calculations are much more straight forward this way.
	 *
	 * On MC1.0 cores the linear window offset is ignored by the TS engine,
	 * leading to inconsistent memory views. Avoid using the offset on those
	 * cores if possible, otherwise disable the TS feature.
	 */
	cmdbuf_paddr = ALIGN_DOWN(etnaviv_cmdbuf_get_pa(&gpu->buffer), SZ_128M);
	if (!(gpu->identity.features & chipFeatures_PIPE_3D) ||
	    (gpu->identity.minor_features0 & chipMinorFeatures0_MC20)) {
		if (cmdbuf_paddr >= SZ_2G)
			priv->mmu_global->memory_base = SZ_2G;
		else
			priv->mmu_global->memory_base = cmdbuf_paddr;
	} else if (cmdbuf_paddr + SZ_128M >= SZ_2G) {
		dev_info(gpu->dev,
			 "Need to move linear window on MC1.0, disabling TS\n");
		gpu->identity.features &= ~chipFeatures_FAST_CLEAR;
		priv->mmu_global->memory_base = SZ_2G;
	}
	/* Setup event management */
	spin_lock_init(&gpu->event_spinlock);
	init_completion(&gpu->event_free);
	bitmap_zero(gpu->event_bitmap, ETNA_NR_EVENTS);
	/* seed the completion so event_alloc() can claim every event slot */
	for (i = 0; i < ARRAY_SIZE(gpu->event); i++)
		complete(&gpu->event_free);
	/* Now program the hardware */
	mutex_lock(&gpu->lock);
	etnaviv_gpu_hw_init(gpu);
	mutex_unlock(&gpu->lock);
	pm_runtime_mark_last_busy(gpu->dev);
	pm_runtime_put_autosuspend(gpu->dev);
	return 0;
fail:
	pm_runtime_mark_last_busy(gpu->dev);
pm_put:
	pm_runtime_put_autosuspend(gpu->dev);
	return ret;
}
#ifdef CONFIG_DEBUG_FS
/*
 * Two samples of the FE DMA engine state (address and debug state),
 * captured some time apart so a stuck front-end can be told from a
 * running one.
 */
struct dma_debug {
	u32 address[2];
	u32 state[2];
};
/*
 * Sample the FE DMA address/state registers once, then re-sample them up
 * to 500 times, stopping early as soon as either register has changed.
 * If both samples are still identical afterwards the FE is likely stuck.
 */
static void verify_dma(struct etnaviv_gpu *gpu, struct dma_debug *debug)
{
	u32 attempt;

	debug->address[0] = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
	debug->state[0] = gpu_read(gpu, VIVS_FE_DMA_DEBUG_STATE);

	for (attempt = 0; attempt < 500; attempt++) {
		debug->address[1] = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
		debug->state[1] = gpu_read(gpu, VIVS_FE_DMA_DEBUG_STATE);

		if (debug->address[0] != debug->address[1] ||
		    debug->state[0] != debug->state[1])
			break;
	}
}
/*
 * Dump the GPU identity, feature words, specs, idle state and FE DMA
 * diagnostics into the debugfs seq_file. Wakes the GPU via runtime PM for
 * the duration of the register reads.
 */
int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m)
{
	struct dma_debug debug;
	u32 dma_lo, dma_hi, axi, idle;
	int ret;
	seq_printf(m, "%s Status:\n", dev_name(gpu->dev));
	ret = pm_runtime_get_sync(gpu->dev);
	if (ret < 0)
		goto pm_put;
	dma_lo = gpu_read(gpu, VIVS_FE_DMA_LOW);
	dma_hi = gpu_read(gpu, VIVS_FE_DMA_HIGH);
	axi = gpu_read(gpu, VIVS_HI_AXI_STATUS);
	idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);
	verify_dma(gpu, &debug);
	seq_puts(m, "\tidentity\n");
	seq_printf(m, "\t model: 0x%x\n", gpu->identity.model);
	seq_printf(m, "\t revision: 0x%x\n", gpu->identity.revision);
	seq_printf(m, "\t product_id: 0x%x\n", gpu->identity.product_id);
	seq_printf(m, "\t customer_id: 0x%x\n", gpu->identity.customer_id);
	seq_printf(m, "\t eco_id: 0x%x\n", gpu->identity.eco_id);
	seq_puts(m, "\tfeatures\n");
	seq_printf(m, "\t major_features: 0x%08x\n",
		   gpu->identity.features);
	seq_printf(m, "\t minor_features0: 0x%08x\n",
		   gpu->identity.minor_features0);
	seq_printf(m, "\t minor_features1: 0x%08x\n",
		   gpu->identity.minor_features1);
	seq_printf(m, "\t minor_features2: 0x%08x\n",
		   gpu->identity.minor_features2);
	seq_printf(m, "\t minor_features3: 0x%08x\n",
		   gpu->identity.minor_features3);
	seq_printf(m, "\t minor_features4: 0x%08x\n",
		   gpu->identity.minor_features4);
	seq_printf(m, "\t minor_features5: 0x%08x\n",
		   gpu->identity.minor_features5);
	seq_printf(m, "\t minor_features6: 0x%08x\n",
		   gpu->identity.minor_features6);
	seq_printf(m, "\t minor_features7: 0x%08x\n",
		   gpu->identity.minor_features7);
	seq_printf(m, "\t minor_features8: 0x%08x\n",
		   gpu->identity.minor_features8);
	seq_printf(m, "\t minor_features9: 0x%08x\n",
		   gpu->identity.minor_features9);
	seq_printf(m, "\t minor_features10: 0x%08x\n",
		   gpu->identity.minor_features10);
	seq_printf(m, "\t minor_features11: 0x%08x\n",
		   gpu->identity.minor_features11);
	seq_puts(m, "\tspecs\n");
	seq_printf(m, "\t stream_count: %d\n",
			gpu->identity.stream_count);
	seq_printf(m, "\t register_max: %d\n",
			gpu->identity.register_max);
	seq_printf(m, "\t thread_count: %d\n",
			gpu->identity.thread_count);
	seq_printf(m, "\t vertex_cache_size: %d\n",
			gpu->identity.vertex_cache_size);
	seq_printf(m, "\t shader_core_count: %d\n",
			gpu->identity.shader_core_count);
	seq_printf(m, "\t nn_core_count: %d\n",
			gpu->identity.nn_core_count);
	seq_printf(m, "\t pixel_pipes: %d\n",
			gpu->identity.pixel_pipes);
	seq_printf(m, "\t vertex_output_buffer_size: %d\n",
			gpu->identity.vertex_output_buffer_size);
	seq_printf(m, "\t buffer_size: %d\n",
			gpu->identity.buffer_size);
	seq_printf(m, "\t instruction_count: %d\n",
			gpu->identity.instruction_count);
	seq_printf(m, "\t num_constants: %d\n",
			gpu->identity.num_constants);
	seq_printf(m, "\t varyings_count: %d\n",
			gpu->identity.varyings_count);
	seq_printf(m, "\taxi: 0x%08x\n", axi);
	seq_printf(m, "\tidle: 0x%08x\n", idle);
	/*
	 * Force every bit outside the core's idle mask to "idle" (except the
	 * AXI low-power flag) so modules this core doesn't have are not
	 * reported as busy below.
	 */
	idle |= ~gpu->idle_mask & ~VIVS_HI_IDLE_STATE_AXI_LP;
	if ((idle & VIVS_HI_IDLE_STATE_FE) == 0)
		seq_puts(m, "\t FE is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_DE) == 0)
		seq_puts(m, "\t DE is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_PE) == 0)
		seq_puts(m, "\t PE is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_SH) == 0)
		seq_puts(m, "\t SH is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_PA) == 0)
		seq_puts(m, "\t PA is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_SE) == 0)
		seq_puts(m, "\t SE is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_RA) == 0)
		seq_puts(m, "\t RA is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_TX) == 0)
		seq_puts(m, "\t TX is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_VG) == 0)
		seq_puts(m, "\t VG is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_IM) == 0)
		seq_puts(m, "\t IM is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_FP) == 0)
		seq_puts(m, "\t FP is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_TS) == 0)
		seq_puts(m, "\t TS is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_BL) == 0)
		seq_puts(m, "\t BL is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_ASYNCFE) == 0)
		seq_puts(m, "\t ASYNCFE is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_MC) == 0)
		seq_puts(m, "\t MC is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_PPA) == 0)
		seq_puts(m, "\t PPA is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_WD) == 0)
		seq_puts(m, "\t WD is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_NN) == 0)
		seq_puts(m, "\t NN is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_TP) == 0)
		seq_puts(m, "\t TP is not idle\n");
	if (idle & VIVS_HI_IDLE_STATE_AXI_LP)
		seq_puts(m, "\t AXI low power mode\n");
	if (gpu->identity.features & chipFeatures_DEBUG_MODE) {
		u32 read0 = gpu_read(gpu, VIVS_MC_DEBUG_READ0);
		u32 read1 = gpu_read(gpu, VIVS_MC_DEBUG_READ1);
		u32 write = gpu_read(gpu, VIVS_MC_DEBUG_WRITE);
		seq_puts(m, "\tMC\n");
		seq_printf(m, "\t read0: 0x%08x\n", read0);
		seq_printf(m, "\t read1: 0x%08x\n", read1);
		seq_printf(m, "\t write: 0x%08x\n", write);
	}
	seq_puts(m, "\tDMA ");
	if (debug.address[0] == debug.address[1] &&
	    debug.state[0] == debug.state[1]) {
		seq_puts(m, "seems to be stuck\n");
	} else if (debug.address[0] == debug.address[1]) {
		seq_puts(m, "address is constant\n");
	} else {
		seq_puts(m, "is running\n");
	}
	seq_printf(m, "\t address 0: 0x%08x\n", debug.address[0]);
	seq_printf(m, "\t address 1: 0x%08x\n", debug.address[1]);
	seq_printf(m, "\t state 0: 0x%08x\n", debug.state[0]);
	seq_printf(m, "\t state 1: 0x%08x\n", debug.state[1]);
	seq_printf(m, "\t last fetch 64 bit word: 0x%08x 0x%08x\n",
		   dma_lo, dma_hi);
	ret = 0;
	pm_runtime_mark_last_busy(gpu->dev);
pm_put:
	pm_runtime_put_autosuspend(gpu->dev);
	return ret;
}
#endif
/* fence object management */
/* A dma_fence tied to the GPU that signaled it. */
struct etnaviv_fence {
	struct etnaviv_gpu *gpu;
	struct dma_fence base;
};
/* Convert an embedded dma_fence back to its containing etnaviv_fence. */
static inline struct etnaviv_fence *to_etnaviv_fence(struct dma_fence *fence)
{
	return container_of(fence, struct etnaviv_fence, base);
}
/* dma_fence_ops: driver name reported for all etnaviv fences. */
static const char *etnaviv_fence_get_driver_name(struct dma_fence *fence)
{
	return "etnaviv";
}
/* dma_fence_ops: use the GPU device name as the fence timeline name. */
static const char *etnaviv_fence_get_timeline_name(struct dma_fence *fence)
{
	struct etnaviv_fence *f = to_etnaviv_fence(fence);
	return dev_name(f->gpu->dev);
}
/*
 * dma_fence_ops: a fence is signaled once the GPU's completed seqno has
 * caught up with it. The signed subtraction makes the comparison robust
 * against u32 seqno wrap-around.
 */
static bool etnaviv_fence_signaled(struct dma_fence *fence)
{
	struct etnaviv_fence *f = to_etnaviv_fence(fence);
	return (s32)(f->gpu->completed_fence - f->base.seqno) >= 0;
}
/*
 * dma_fence_ops: free the fence after an RCU grace period, as required
 * for fences that may still be accessed under rcu_read_lock().
 */
static void etnaviv_fence_release(struct dma_fence *fence)
{
	struct etnaviv_fence *f = to_etnaviv_fence(fence);
	kfree_rcu(f, base.rcu);
}
/* dma_fence callbacks for GPU-signaled fences. */
static const struct dma_fence_ops etnaviv_fence_ops = {
	.get_driver_name = etnaviv_fence_get_driver_name,
	.get_timeline_name = etnaviv_fence_get_timeline_name,
	.signaled = etnaviv_fence_signaled,
	.release = etnaviv_fence_release,
};
/*
 * Allocate a new GPU fence with the next seqno on this GPU's fence
 * context. Returns the embedded dma_fence, or NULL on allocation failure.
 */
static struct dma_fence *etnaviv_gpu_fence_alloc(struct etnaviv_gpu *gpu)
{
	struct etnaviv_fence *fence;

	/*
	 * GPU lock must already be held, otherwise fence completion order might
	 * not match the seqno order assigned here.
	 */
	lockdep_assert_held(&gpu->lock);

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (!fence)
		return NULL;

	fence->gpu = gpu;
	dma_fence_init(&fence->base, &etnaviv_fence_ops, &gpu->fence_spinlock,
		       gpu->fence_context, ++gpu->next_fence);

	return &fence->base;
}
/*
 * Returns true if fence seqno @a was issued after @b, treating the u32
 * seqnos as a wrapping counter (signed distance comparison).
 */
static inline bool fence_after(u32 a, u32 b)
{
	s32 distance = (s32)(a - b);

	return distance > 0;
}
/*
* event management:
*/
/*
 * Reserve @nr_events hardware event slots, storing their indices in
 * @events, and take one runtime PM reference per event. Blocks on the
 * event_free completion (which was pre-seeded with ETNA_NR_EVENTS counts)
 * until enough slots become available, with an overall timeout.
 *
 * On failure every acquired completion count and PM reference is rolled
 * back. Returns 0 on success, -EBUSY on timeout, or a PM error.
 */
static int event_alloc(struct etnaviv_gpu *gpu, unsigned nr_events,
	unsigned int *events)
{
	/* overall budget for acquiring all slots (100s, shrinks as we go) */
	unsigned long timeout = msecs_to_jiffies(10 * 10000);
	unsigned i, acquired = 0, rpm_count = 0;
	int ret;
	for (i = 0; i < nr_events; i++) {
		unsigned long remaining;
		remaining = wait_for_completion_timeout(&gpu->event_free, timeout);
		if (!remaining) {
			dev_err(gpu->dev, "wait_for_completion_timeout failed");
			ret = -EBUSY;
			goto out;
		}
		acquired++;
		timeout = remaining;
	}
	/* completion counts guarantee nr_events free bits exist here */
	spin_lock(&gpu->event_spinlock);
	for (i = 0; i < nr_events; i++) {
		int event = find_first_zero_bit(gpu->event_bitmap, ETNA_NR_EVENTS);
		events[i] = event;
		memset(&gpu->event[event], 0, sizeof(struct etnaviv_event));
		set_bit(event, gpu->event_bitmap);
	}
	spin_unlock(&gpu->event_spinlock);
	/* one PM reference per event, dropped again in event_free() */
	for (i = 0; i < nr_events; i++) {
		ret = pm_runtime_resume_and_get(gpu->dev);
		if (ret)
			goto out_rpm;
		rpm_count++;
	}
	return 0;
out_rpm:
	for (i = 0; i < rpm_count; i++)
		pm_runtime_put_autosuspend(gpu->dev);
out:
	for (i = 0; i < acquired; i++)
		complete(&gpu->event_free);
	return ret;
}
/*
 * Release one hardware event slot and drop the runtime PM reference that
 * event_alloc() took for it. Warns (but still drops the PM reference) if
 * the slot was already free.
 */
static void event_free(struct etnaviv_gpu *gpu, unsigned int event)
{
	if (test_bit(event, gpu->event_bitmap)) {
		clear_bit(event, gpu->event_bitmap);
		complete(&gpu->event_free);
	} else {
		dev_warn(gpu->dev, "event %u is already marked as free",
			 event);
	}
	pm_runtime_put_autosuspend(gpu->dev);
}
/*
* Cmdstream submission/retirement:
*/
/*
 * Wait (interruptibly) for the user fence with the given @id. With a NULL
 * @timeout this is just a poll. An unknown id is treated as an already
 * retired fence and returns 0. Returns 0 on completion, -EBUSY when
 * polling an unsignaled fence, -ETIMEDOUT, or -ERESTARTSYS.
 */
int etnaviv_gpu_wait_fence_interruptible(struct etnaviv_gpu *gpu,
	u32 id, struct drm_etnaviv_timespec *timeout)
{
	struct dma_fence *fence;
	int ret;
	/*
	 * Look up the fence and take a reference. We might still find a fence
	 * whose refcount has already dropped to zero. dma_fence_get_rcu
	 * pretends we didn't find a fence in that case.
	 */
	rcu_read_lock();
	fence = xa_load(&gpu->user_fences, id);
	if (fence)
		fence = dma_fence_get_rcu(fence);
	rcu_read_unlock();
	if (!fence)
		return 0;
	if (!timeout) {
		/* No timeout was requested: just test for completion */
		ret = dma_fence_is_signaled(fence) ? 0 : -EBUSY;
	} else {
		unsigned long remaining = etnaviv_timeout_to_jiffies(timeout);
		ret = dma_fence_wait_timeout(fence, true, remaining);
		if (ret == 0)
			ret = -ETIMEDOUT;
		else if (ret != -ERESTARTSYS)
			ret = 0;
	}
	dma_fence_put(fence);
	return ret;
}
/*
 * Wait for an object to become inactive. This, on its own, is not race
* free: the object is moved by the scheduler off the active list, and
* then the iova is put. Moreover, the object could be re-submitted just
* after we notice that it's become inactive.
*
* Although the retirement happens under the gpu lock, we don't want to hold
* that lock in this function while waiting.
*/
/*
 * Wait until @etnaviv_obj is no longer on the active list. A NULL
 * @timeout only polls. Returns 0 when inactive, -EBUSY when polling an
 * active object, -ERESTARTSYS on signal, or -ETIMEDOUT.
 */
int etnaviv_gpu_wait_obj_inactive(struct etnaviv_gpu *gpu,
	struct etnaviv_gem_object *etnaviv_obj,
	struct drm_etnaviv_timespec *timeout)
{
	long wait_ret;

	/* no timeout: just report the current state */
	if (!timeout)
		return is_active(etnaviv_obj) ? -EBUSY : 0;

	wait_ret = wait_event_interruptible_timeout(gpu->fence_event,
						    !is_active(etnaviv_obj),
						    etnaviv_timeout_to_jiffies(timeout));
	if (wait_ret > 0)
		return 0;
	if (wait_ret == -ERESTARTSYS)
		return -ERESTARTSYS;
	return -ETIMEDOUT;
}
/*
 * Process all perfmon requests of the event's submit whose flags match
 * @flags (ETNA_PM_PROCESS_PRE or ETNA_PM_PROCESS_POST).
 */
static void sync_point_perfmon_sample(struct etnaviv_gpu *gpu,
	struct etnaviv_event *event, unsigned int flags)
{
	const struct etnaviv_gem_submit *submit = event->submit;
	unsigned int i;
	for (i = 0; i < submit->nr_pmrs; i++) {
		const struct etnaviv_perfmon_request *pmr = submit->pmrs + i;
		if (pmr->flags == flags)
			etnaviv_perfmon_process(gpu, pmr, submit->exec_state);
	}
}
/*
 * Pre-job sync point: turn off module clock gating and enable the debug
 * registers so perfmon counters can be sampled reliably, then process the
 * PRE-flagged requests. Undone by sync_point_perfmon_sample_post().
 */
static void sync_point_perfmon_sample_pre(struct etnaviv_gpu *gpu,
	struct etnaviv_event *event)
{
	u32 val;
	/* disable clock gating */
	val = gpu_read_power(gpu, VIVS_PM_POWER_CONTROLS);
	val &= ~VIVS_PM_POWER_CONTROLS_ENABLE_MODULE_CLOCK_GATING;
	gpu_write_power(gpu, VIVS_PM_POWER_CONTROLS, val);
	/* enable debug register */
	val = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);
	val &= ~VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS;
	gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, val);
	sync_point_perfmon_sample(gpu, event, ETNA_PM_PROCESS_PRE);
}
/*
 * Post-job sync point: process the POST-flagged perfmon requests, publish
 * each request's sequence number to its BO mapping (userspace polls this
 * to know the sample is ready), then restore debug-register and clock
 * gating state changed by sync_point_perfmon_sample_pre().
 */
static void sync_point_perfmon_sample_post(struct etnaviv_gpu *gpu,
	struct etnaviv_event *event)
{
	const struct etnaviv_gem_submit *submit = event->submit;
	unsigned int i;
	u32 val;
	sync_point_perfmon_sample(gpu, event, ETNA_PM_PROCESS_POST);
	for (i = 0; i < submit->nr_pmrs; i++) {
		const struct etnaviv_perfmon_request *pmr = submit->pmrs + i;
		*pmr->bo_vma = pmr->sequence;
	}
	/* disable debug register */
	val = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);
	val |= VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS;
	gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, val);
	/* enable clock gating */
	val = gpu_read_power(gpu, VIVS_PM_POWER_CONTROLS);
	val |= VIVS_PM_POWER_CONTROLS_ENABLE_MODULE_CLOCK_GATING;
	gpu_write_power(gpu, VIVS_PM_POWER_CONTROLS, val);
}
/*
 * Queue a submit on the GPU ring and kick the front-end. Allocates one
 * hardware event for the completion fence (event[0]) and, when perfmon
 * requests are attached, two more for the pre/post sample sync points
 * (event[1]/event[2]). Returns the GPU completion fence, or NULL on
 * event or fence allocation failure.
 */
struct dma_fence *etnaviv_gpu_submit(struct etnaviv_gem_submit *submit)
{
	struct etnaviv_gpu *gpu = submit->gpu;
	struct dma_fence *gpu_fence;
	unsigned int i, nr_events = 1, event[3];
	int ret;
	/*
	 * if there are performance monitor requests we need to have
	 * - a sync point to re-configure gpu and process ETNA_PM_PROCESS_PRE
	 *   requests.
	 * - a sync point to re-configure gpu, process ETNA_PM_PROCESS_POST requests
	 *   and update the sequence number for userspace.
	 */
	if (submit->nr_pmrs)
		nr_events = 3;
	ret = event_alloc(gpu, nr_events, event);
	if (ret) {
		DRM_ERROR("no free events\n");
		pm_runtime_put_noidle(gpu->dev);
		return NULL;
	}
	mutex_lock(&gpu->lock);
	gpu_fence = etnaviv_gpu_fence_alloc(gpu);
	if (!gpu_fence) {
		for (i = 0; i < nr_events; i++)
			event_free(gpu, event[i]);
		goto out_unlock;
	}
	/* first submit after init: start the FE idle loop with our MMU context */
	if (gpu->state == ETNA_GPU_STATE_INITIALIZED)
		etnaviv_gpu_start_fe_idleloop(gpu, submit->mmu_context);
	if (submit->prev_mmu_context)
		etnaviv_iommu_context_put(submit->prev_mmu_context);
	submit->prev_mmu_context = etnaviv_iommu_context_get(gpu->mmu_context);
	/* queue the PRE perfmon sync point before the actual job */
	if (submit->nr_pmrs) {
		gpu->event[event[1]].sync_point = &sync_point_perfmon_sample_pre;
		kref_get(&submit->refcount);
		gpu->event[event[1]].submit = submit;
		etnaviv_sync_point_queue(gpu, event[1]);
	}
	gpu->event[event[0]].fence = gpu_fence;
	submit->cmdbuf.user_size = submit->cmdbuf.size - 8;
	etnaviv_buffer_queue(gpu, submit->exec_state, submit->mmu_context,
			     event[0], &submit->cmdbuf);
	/* and the POST perfmon sync point after it */
	if (submit->nr_pmrs) {
		gpu->event[event[2]].sync_point = &sync_point_perfmon_sample_post;
		kref_get(&submit->refcount);
		gpu->event[event[2]].submit = submit;
		etnaviv_sync_point_queue(gpu, event[2]);
	}
out_unlock:
	mutex_unlock(&gpu->lock);
	return gpu_fence;
}
/*
 * Workqueue handler for sync point events: the FE is parked when this
 * runs, so sample/process the perfmon state, release the submit and the
 * event, then restart the FE right after the sync point stall command.
 */
static void sync_point_worker(struct work_struct *work)
{
	struct etnaviv_gpu *gpu = container_of(work, struct etnaviv_gpu,
					       sync_point_work);
	struct etnaviv_event *event = &gpu->event[gpu->sync_point_event];
	u32 addr = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
	event->sync_point(gpu, event);
	etnaviv_submit_put(event->submit);
	event_free(gpu, gpu->sync_point_event);
	/* restart FE last to avoid GPU and IRQ racing against this worker */
	etnaviv_gpu_start_fe(gpu, addr + 2, 2);
}
/*
 * Recover from a GPU hang caused by @submit: log the offending task,
 * reset the hardware, complete all outstanding events (the GPU will not
 * signal them after the reset) and re-initialize the hardware state.
 */
void etnaviv_gpu_recover_hang(struct etnaviv_gem_submit *submit)
{
	struct etnaviv_gpu *gpu = submit->gpu;
	char *comm = NULL, *cmd = NULL;
	struct task_struct *task;
	unsigned int i;
	dev_err(gpu->dev, "recover hung GPU!\n");
	/* best effort: the submitting task may already have exited */
	task = get_pid_task(submit->pid, PIDTYPE_PID);
	if (task) {
		comm = kstrdup(task->comm, GFP_KERNEL);
		cmd = kstrdup_quotable_cmdline(task, GFP_KERNEL);
		put_task_struct(task);
	}
	if (comm && cmd)
		dev_err(gpu->dev, "offending task: %s (%s)\n", comm, cmd);
	kfree(cmd);
	kfree(comm);
	if (pm_runtime_get_sync(gpu->dev) < 0)
		goto pm_put;
	mutex_lock(&gpu->lock);
	etnaviv_hw_reset(gpu);
	/* complete all events, the GPU won't do it after the reset */
	spin_lock(&gpu->event_spinlock);
	for_each_set_bit(i, gpu->event_bitmap, ETNA_NR_EVENTS)
		event_free(gpu, i);
	spin_unlock(&gpu->event_spinlock);
	etnaviv_gpu_hw_init(gpu);
	mutex_unlock(&gpu->lock);
	pm_runtime_mark_last_busy(gpu->dev);
pm_put:
	pm_runtime_put_autosuspend(gpu->dev);
}
/*
 * Decode and log the MMUv2 fault status. The status register packs one
 * 4-bit exception code per MMU (up to four); a zero code means no fault
 * on that MMU. Uses the secure register variants when the kernel owns
 * the security state.
 */
static void dump_mmu_fault(struct etnaviv_gpu *gpu)
{
	static const char *fault_reasons[] = {
		"slave not present",
		"page not present",
		"write violation",
		"out of bounds",
		"read security violation",
		"write security violation",
	};
	u32 status_reg, status;
	int i;
	if (gpu->sec_mode == ETNA_SEC_NONE)
		status_reg = VIVS_MMUv2_STATUS;
	else
		status_reg = VIVS_MMUv2_SEC_STATUS;
	status = gpu_read(gpu, status_reg);
	dev_err_ratelimited(gpu->dev, "MMU fault status 0x%08x\n", status);
	for (i = 0; i < 4; i++) {
		const char *reason = "unknown";
		u32 address_reg;
		u32 mmu_status;
		mmu_status = (status >> (i * 4)) & VIVS_MMUv2_STATUS_EXCEPTION0__MASK;
		if (!mmu_status)
			continue;
		/* codes start at 1; anything past the table stays "unknown" */
		if ((mmu_status - 1) < ARRAY_SIZE(fault_reasons))
			reason = fault_reasons[mmu_status - 1];
		if (gpu->sec_mode == ETNA_SEC_NONE)
			address_reg = VIVS_MMUv2_EXCEPTION_ADDR(i);
		else
			address_reg = VIVS_MMUv2_SEC_EXCEPTION_ADDR;
		dev_err_ratelimited(gpu->dev,
				    "MMU %d fault (%s) addr 0x%08x\n",
				    i, reason, gpu_read(gpu, address_reg));
	}
}
/*
 * GPU interrupt handler: acknowledge and dispatch bus errors, MMU faults
 * and completion events. Each set low bit in the acknowledge register is
 * a completed hardware event; its fence (if any) is signaled and the
 * event slot released.
 */
static irqreturn_t irq_handler(int irq, void *data)
{
	struct etnaviv_gpu *gpu = data;
	irqreturn_t ret = IRQ_NONE;
	u32 intr = gpu_read(gpu, VIVS_HI_INTR_ACKNOWLEDGE);
	if (intr != 0) {
		int event;
		pm_runtime_mark_last_busy(gpu->dev);
		dev_dbg(gpu->dev, "intr 0x%08x\n", intr);
		if (intr & VIVS_HI_INTR_ACKNOWLEDGE_AXI_BUS_ERROR) {
			dev_err(gpu->dev, "AXI bus error\n");
			intr &= ~VIVS_HI_INTR_ACKNOWLEDGE_AXI_BUS_ERROR;
		}
		if (intr & VIVS_HI_INTR_ACKNOWLEDGE_MMU_EXCEPTION) {
			dump_mmu_fault(gpu);
			gpu->state = ETNA_GPU_STATE_FAULT;
			drm_sched_fault(&gpu->sched);
			intr &= ~VIVS_HI_INTR_ACKNOWLEDGE_MMU_EXCEPTION;
		}
		while ((event = ffs(intr)) != 0) {
			struct dma_fence *fence;
			event -= 1;
			intr &= ~(1 << event);
			dev_dbg(gpu->dev, "event %u\n", event);
			/* sync points are finished on the workqueue, which
			 * also frees the event; don't free it here */
			if (gpu->event[event].sync_point) {
				gpu->sync_point_event = event;
				queue_work(gpu->wq, &gpu->sync_point_work);
			}
			fence = gpu->event[event].fence;
			if (!fence)
				continue;
			gpu->event[event].fence = NULL;
			/*
			 * Events can be processed out of order.  Eg,
			 * - allocate and queue event 0
			 * - allocate event 1
			 * - event 0 completes, we process it
			 * - allocate and queue event 0
			 * - event 1 and event 0 complete
			 * we can end up processing event 0 first, then 1.
			 */
			if (fence_after(fence->seqno, gpu->completed_fence))
				gpu->completed_fence = fence->seqno;
			dma_fence_signal(fence);
			event_free(gpu, event);
		}
		ret = IRQ_HANDLED;
	}
	return ret;
}
/*
 * Enable the GPU clocks in dependency order (reg, bus, core, shader).
 * On any failure the already-enabled clocks are disabled again in
 * reverse order via the goto cleanup chain.
 */
static int etnaviv_gpu_clk_enable(struct etnaviv_gpu *gpu)
{
	int ret;
	ret = clk_prepare_enable(gpu->clk_reg);
	if (ret)
		return ret;
	ret = clk_prepare_enable(gpu->clk_bus);
	if (ret)
		goto disable_clk_reg;
	ret = clk_prepare_enable(gpu->clk_core);
	if (ret)
		goto disable_clk_bus;
	ret = clk_prepare_enable(gpu->clk_shader);
	if (ret)
		goto disable_clk_core;
	return 0;
disable_clk_core:
	clk_disable_unprepare(gpu->clk_core);
disable_clk_bus:
	clk_disable_unprepare(gpu->clk_bus);
disable_clk_reg:
	clk_disable_unprepare(gpu->clk_reg);
	return ret;
}
/* Disable all GPU clocks in the reverse of the enable order. */
static int etnaviv_gpu_clk_disable(struct etnaviv_gpu *gpu)
{
	clk_disable_unprepare(gpu->clk_shader);
	clk_disable_unprepare(gpu->clk_core);
	clk_disable_unprepare(gpu->clk_bus);
	clk_disable_unprepare(gpu->clk_reg);
	return 0;
}
/*
 * Busy-poll the idle state register until every module in the core's
 * idle mask reports idle, or @timeout_ms elapses. Returns 0 when idle,
 * -ETIMEDOUT (with a warning) otherwise.
 */
int etnaviv_gpu_wait_idle(struct etnaviv_gpu *gpu, unsigned int timeout_ms)
{
	unsigned long deadline = jiffies + msecs_to_jiffies(timeout_ms);

	for (;;) {
		u32 idle_state = gpu_read(gpu, VIVS_HI_IDLE_STATE);

		if ((idle_state & gpu->idle_mask) == gpu->idle_mask)
			return 0;

		if (time_is_before_jiffies(deadline)) {
			dev_warn(gpu->dev,
				 "timed out waiting for idle: idle=0x%x\n",
				 idle_state);
			return -ETIMEDOUT;
		}

		udelay(5);
	}
}
/*
 * Park the GPU before a suspend: if it is running, replace the FE's
 * WAIT-LINK loop with an END command and wait briefly for idle, dropping
 * back to the INITIALIZED state. Always invalidates the cached exec
 * state so it is re-programmed on the next submit.
 */
static void etnaviv_gpu_hw_suspend(struct etnaviv_gpu *gpu)
{
	if (gpu->state == ETNA_GPU_STATE_RUNNING) {
		/* Replace the last WAIT with END */
		mutex_lock(&gpu->lock);
		etnaviv_buffer_end(gpu);
		mutex_unlock(&gpu->lock);
		/*
		 * We know that only the FE is busy here, this should
		 * happen quickly (as the WAIT is only 200 cycles).  If
		 * we fail, just warn and continue.
		 */
		etnaviv_gpu_wait_idle(gpu, 100);
		gpu->state = ETNA_GPU_STATE_INITIALIZED;
	}
	gpu->exec_state = -1;
}
/*
 * Re-program clocks and basic hardware state after a runtime resume.
 * Returns 0 on success or -EINTR if the lock acquisition was killed.
 */
static int etnaviv_gpu_hw_resume(struct etnaviv_gpu *gpu)
{
	int ret;
	ret = mutex_lock_killable(&gpu->lock);
	if (ret)
		return ret;
	etnaviv_gpu_update_clock(gpu);
	etnaviv_gpu_hw_init(gpu);
	mutex_unlock(&gpu->lock);
	return 0;
}
/*
 * Thermal cooling op: report the deepest throttle state. The value 6
 * matches the number of frequency-scaling steps the driver supports
 * (presumably mirrored in etnaviv_gpu_update_clock() — confirm there).
 */
static int
etnaviv_gpu_cooling_get_max_state(struct thermal_cooling_device *cdev,
				  unsigned long *state)
{
	*state = 6;
	return 0;
}
/* Thermal cooling op: report the currently applied frequency scale. */
static int
etnaviv_gpu_cooling_get_cur_state(struct thermal_cooling_device *cdev,
				  unsigned long *state)
{
	struct etnaviv_gpu *gpu = cdev->devdata;
	*state = gpu->freq_scale;
	return 0;
}
/*
 * Thermal cooling op: apply a new frequency scale. The clock is only
 * reprogrammed while the GPU is powered; a suspended GPU picks up the
 * new scale on resume via etnaviv_gpu_update_clock().
 */
static int
etnaviv_gpu_cooling_set_cur_state(struct thermal_cooling_device *cdev,
				  unsigned long state)
{
	struct etnaviv_gpu *gpu = cdev->devdata;
	mutex_lock(&gpu->lock);
	gpu->freq_scale = state;
	if (!pm_runtime_suspended(gpu->dev))
		etnaviv_gpu_update_clock(gpu);
	mutex_unlock(&gpu->lock);
	return 0;
}
/* Cooling device callbacks wired into the thermal framework. */
static const struct thermal_cooling_device_ops cooling_ops = {
	.get_max_state = etnaviv_gpu_cooling_get_max_state,
	.get_cur_state = etnaviv_gpu_cooling_get_cur_state,
	.set_cur_state = etnaviv_gpu_cooling_set_cur_state,
};
/*
 * Component bind: attach this GPU core to the DRM device. Registers the
 * thermal cooling device (if enabled), creates the per-GPU ordered
 * workqueue and scheduler, initializes fence bookkeeping and registers
 * the core in the DRM-private GPU array. Without CONFIG_PM the clocks
 * are enabled permanently here instead of via runtime PM.
 */
static int etnaviv_gpu_bind(struct device *dev, struct device *master,
	void *data)
{
	struct drm_device *drm = data;
	struct etnaviv_drm_private *priv = drm->dev_private;
	struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
	int ret;
	if (IS_ENABLED(CONFIG_DRM_ETNAVIV_THERMAL)) {
		gpu->cooling = thermal_of_cooling_device_register(dev->of_node,
				(char *)dev_name(dev), gpu, &cooling_ops);
		if (IS_ERR(gpu->cooling))
			return PTR_ERR(gpu->cooling);
	}
	gpu->wq = alloc_ordered_workqueue(dev_name(dev), 0);
	if (!gpu->wq) {
		ret = -ENOMEM;
		goto out_thermal;
	}
	ret = etnaviv_sched_init(gpu);
	if (ret)
		goto out_workqueue;
	if (!IS_ENABLED(CONFIG_PM)) {
		ret = etnaviv_gpu_clk_enable(gpu);
		if (ret < 0)
			goto out_sched;
	}
	gpu->drm = drm;
	gpu->fence_context = dma_fence_context_alloc(1);
	xa_init_flags(&gpu->user_fences, XA_FLAGS_ALLOC);
	spin_lock_init(&gpu->fence_spinlock);
	INIT_WORK(&gpu->sync_point_work, sync_point_worker);
	init_waitqueue_head(&gpu->fence_event);
	priv->gpu[priv->num_gpus++] = gpu;
	return 0;
out_sched:
	etnaviv_sched_fini(gpu);
out_workqueue:
	destroy_workqueue(gpu->wq);
out_thermal:
	if (IS_ENABLED(CONFIG_DRM_ETNAVIV_THERMAL))
		thermal_cooling_device_unregister(gpu->cooling);
	return ret;
}
/*
 * Component unbind: tear down everything etnaviv_gpu_bind() and
 * etnaviv_gpu_init() set up — scheduler, workqueue, MMU context, kernel
 * command buffer, fence xarray and cooling device. With CONFIG_PM the
 * GPU is forced through a final runtime suspend; otherwise the hardware
 * is suspended and the clocks disabled directly.
 */
static void etnaviv_gpu_unbind(struct device *dev, struct device *master,
	void *data)
{
	struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
	DBG("%s", dev_name(gpu->dev));
	destroy_workqueue(gpu->wq);
	etnaviv_sched_fini(gpu);
	if (IS_ENABLED(CONFIG_PM)) {
		pm_runtime_get_sync(gpu->dev);
		pm_runtime_put_sync_suspend(gpu->dev);
	} else {
		etnaviv_gpu_hw_suspend(gpu);
		etnaviv_gpu_clk_disable(gpu);
	}
	if (gpu->mmu_context)
		etnaviv_iommu_context_put(gpu->mmu_context);
	etnaviv_cmdbuf_free(&gpu->buffer);
	etnaviv_iommu_global_fini(gpu);
	gpu->drm = NULL;
	xa_destroy(&gpu->user_fences);
	if (IS_ENABLED(CONFIG_DRM_ETNAVIV_THERMAL))
		thermal_cooling_device_unregister(gpu->cooling);
	gpu->cooling = NULL;
}
/* Component callbacks used by the master etnaviv DRM device. */
static const struct component_ops gpu_ops = {
	.bind = etnaviv_gpu_bind,
	.unbind = etnaviv_gpu_unbind,
};
/* Devicetree match table: one entry covers all Vivante GC cores. */
static const struct of_device_id etnaviv_gpu_match[] = {
	{
		.compatible = "vivante,gc"
	},
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, etnaviv_gpu_match);
/*
 * Platform probe for one GPU core: map the register space, request the
 * IRQ, look up the (mostly optional) clocks, enable runtime PM with
 * autosuspend, and register with the component framework so the master
 * DRM device can bind us later.
 */
static int etnaviv_gpu_platform_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct etnaviv_gpu *gpu;
	int err;
	gpu = devm_kzalloc(dev, sizeof(*gpu), GFP_KERNEL);
	if (!gpu)
		return -ENOMEM;
	gpu->dev = &pdev->dev;
	mutex_init(&gpu->lock);
	mutex_init(&gpu->sched_lock);
	/* Map registers: */
	gpu->mmio = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(gpu->mmio))
		return PTR_ERR(gpu->mmio);
	/* Get Interrupt: */
	gpu->irq = platform_get_irq(pdev, 0);
	if (gpu->irq < 0)
		return gpu->irq;
	err = devm_request_irq(&pdev->dev, gpu->irq, irq_handler, 0,
			       dev_name(gpu->dev), gpu);
	if (err) {
		dev_err(dev, "failed to request IRQ%u: %d\n", gpu->irq, err);
		return err;
	}
	/* Get Clocks (only "core" is mandatory): */
	gpu->clk_reg = devm_clk_get_optional(&pdev->dev, "reg");
	DBG("clk_reg: %p", gpu->clk_reg);
	if (IS_ERR(gpu->clk_reg))
		return PTR_ERR(gpu->clk_reg);
	gpu->clk_bus = devm_clk_get_optional(&pdev->dev, "bus");
	DBG("clk_bus: %p", gpu->clk_bus);
	if (IS_ERR(gpu->clk_bus))
		return PTR_ERR(gpu->clk_bus);
	gpu->clk_core = devm_clk_get(&pdev->dev, "core");
	DBG("clk_core: %p", gpu->clk_core);
	if (IS_ERR(gpu->clk_core))
		return PTR_ERR(gpu->clk_core);
	gpu->base_rate_core = clk_get_rate(gpu->clk_core);
	gpu->clk_shader = devm_clk_get_optional(&pdev->dev, "shader");
	DBG("clk_shader: %p", gpu->clk_shader);
	if (IS_ERR(gpu->clk_shader))
		return PTR_ERR(gpu->clk_shader);
	gpu->base_rate_shader = clk_get_rate(gpu->clk_shader);
	/* TODO: figure out max mapped size */
	dev_set_drvdata(dev, gpu);
	/*
	 * We treat the device as initially suspended.  The runtime PM
	 * autosuspend delay is rather arbitrary: no measurements have
	 * yet been performed to determine an appropriate value.
	 */
	pm_runtime_use_autosuspend(gpu->dev);
	pm_runtime_set_autosuspend_delay(gpu->dev, 200);
	pm_runtime_enable(gpu->dev);
	err = component_add(&pdev->dev, &gpu_ops);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to register component: %d\n", err);
		return err;
	}
	return 0;
}
/* Platform remove: drop the component registration and runtime PM. */
static int etnaviv_gpu_platform_remove(struct platform_device *pdev)
{
	component_del(&pdev->dev, &gpu_ops);
	pm_runtime_disable(&pdev->dev);
	return 0;
}
/*
 * Runtime suspend: refuse (-EBUSY) while jobs are queued or while any
 * module other than FE/MC is still busy, otherwise park the FE, drop
 * back to the IDENTIFIED state (so resume re-runs hw init) and gate the
 * clocks.
 */
static int etnaviv_gpu_rpm_suspend(struct device *dev)
{
	struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
	u32 idle, mask;
	/* If there are any jobs in the HW queue, we're not idle */
	if (atomic_read(&gpu->sched.hw_rq_count))
		return -EBUSY;
	/* Check whether the hardware (except FE and MC) is idle */
	mask = gpu->idle_mask & ~(VIVS_HI_IDLE_STATE_FE |
				  VIVS_HI_IDLE_STATE_MC);
	idle = gpu_read(gpu, VIVS_HI_IDLE_STATE) & mask;
	if (idle != mask) {
		dev_warn_ratelimited(dev, "GPU not yet idle, mask: 0x%08x\n",
				     idle);
		return -EBUSY;
	}
	etnaviv_gpu_hw_suspend(gpu);
	gpu->state = ETNA_GPU_STATE_IDENTIFIED;
	return etnaviv_gpu_clk_disable(gpu);
}
/*
 * Runtime resume: ungate the clocks and, if the core was suspended after
 * identification, re-program the basic hardware state. Clocks are gated
 * again if the hardware re-initialization fails.
 */
static int etnaviv_gpu_rpm_resume(struct device *dev)
{
	struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
	int ret;
	ret = etnaviv_gpu_clk_enable(gpu);
	if (ret)
		return ret;
	/* Re-initialise the basic hardware state */
	if (gpu->state == ETNA_GPU_STATE_IDENTIFIED) {
		ret = etnaviv_gpu_hw_resume(gpu);
		if (ret) {
			etnaviv_gpu_clk_disable(gpu);
			return ret;
		}
	}
	return 0;
}
/* Runtime PM callbacks; no system sleep ops are provided here. */
static const struct dev_pm_ops etnaviv_gpu_pm_ops = {
	RUNTIME_PM_OPS(etnaviv_gpu_rpm_suspend, etnaviv_gpu_rpm_resume, NULL)
};
/* Per-core platform driver, registered by the master etnaviv driver. */
struct platform_driver etnaviv_gpu_driver = {
	.driver = {
		.name = "etnaviv-gpu",
		.owner = THIS_MODULE,
		.pm = pm_ptr(&etnaviv_gpu_pm_ops),
		.of_match_table = etnaviv_gpu_match,
	},
	.probe = etnaviv_gpu_platform_probe,
	.remove = etnaviv_gpu_platform_remove,
	.id_table = gpu_ids,
};
| linux-master | drivers/gpu/drm/etnaviv/etnaviv_gpu.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2017 Etnaviv Project
*/
#include <linux/moduleparam.h>
#include "etnaviv_drv.h"
#include "etnaviv_dump.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_sched.h"
#include "state.xml.h"
/*
 * Scheduler tunables, read-only via module parameters.
 * job_hang_limit is passed to the DRM scheduler as its hang limit;
 * hw_job_limit bounds the number of jobs queued in hardware.
 * (Kernel style: statics must not be explicitly initialized to 0, and
 * the stray space before the comma in the original module_param_named()
 * invocations is removed.)
 */
static int etnaviv_job_hang_limit;
module_param_named(job_hang_limit, etnaviv_job_hang_limit, int, 0444);
static int etnaviv_hw_jobs_limit = 4;
module_param_named(hw_job_limit, etnaviv_hw_jobs_limit, int, 0444);
static struct dma_fence *etnaviv_sched_run_job(struct drm_sched_job *sched_job)
{
struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
struct dma_fence *fence = NULL;
if (likely(!sched_job->s_fence->finished.error))
fence = etnaviv_gpu_submit(submit);
else
dev_dbg(submit->gpu->dev, "skipping bad job\n");
return fence;
}
/*
 * drm_sched timedout_job callback. Distinguishes three cases:
 *  - the job actually finished (spurious timeout): just restart the
 *    scheduler;
 *  - the FE is still making forward progress: defer the timeout by
 *    restarting the scheduler without recovery;
 *  - a real hang: increase karma, dump state, reset the GPU and resubmit
 *    the remaining jobs.
 *
 * Fixes over the previous revision: kernel coding style for the
 * "if (sched_job)" check (space after "if") and comment typos
 * ("timout" -> "timeout", "this jobs fence" -> "this job's fence").
 */
static enum drm_gpu_sched_stat etnaviv_sched_timedout_job(struct drm_sched_job
							  *sched_job)
{
	struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
	struct etnaviv_gpu *gpu = submit->gpu;
	u32 dma_addr;
	int change;
	/* block scheduler */
	drm_sched_stop(&gpu->sched, sched_job);
	/*
	 * If the GPU managed to complete this job's fence, the timeout is
	 * spurious. Bail out.
	 */
	if (dma_fence_is_signaled(submit->out_fence))
		goto out_no_timeout;
	/*
	 * If the GPU is still making forward progress on the front-end (which
	 * should never loop) we shift out the timeout to give it a chance to
	 * finish the job.
	 */
	dma_addr = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
	change = dma_addr - gpu->hangcheck_dma_addr;
	if (gpu->state == ETNA_GPU_STATE_RUNNING &&
	    (gpu->completed_fence != gpu->hangcheck_fence ||
	     change < 0 || change > 16)) {
		gpu->hangcheck_dma_addr = dma_addr;
		gpu->hangcheck_fence = gpu->completed_fence;
		goto out_no_timeout;
	}
	if (sched_job)
		drm_sched_increase_karma(sched_job);
	/* get the GPU back into the init state */
	etnaviv_core_dump(submit);
	etnaviv_gpu_recover_hang(submit);
	drm_sched_resubmit_jobs(&gpu->sched);
	drm_sched_start(&gpu->sched, true);
	return DRM_GPU_SCHED_STAT_NOMINAL;
out_no_timeout:
	/* restart scheduler after GPU is usable again */
	drm_sched_start(&gpu->sched, true);
	return DRM_GPU_SCHED_STAT_NOMINAL;
}
/*
 * Scheduler free_job hook: detach the job from the scheduler core, then
 * drop the reference the scheduler held on the submit.
 */
static void etnaviv_sched_free_job(struct drm_sched_job *sched_job)
{
	drm_sched_job_cleanup(sched_job);

	etnaviv_submit_put(to_etnaviv_submit(sched_job));
}
/* Callbacks the DRM GPU scheduler uses to run, time out and free etnaviv jobs. */
static const struct drm_sched_backend_ops etnaviv_sched_ops = {
	.run_job = etnaviv_sched_run_job,
	.timedout_job = etnaviv_sched_timedout_job,
	.free_job = etnaviv_sched_free_job,
};
/*
 * Queue a submit to the scheduler.  Arms the scheduler job, publishes its
 * finished fence as the user-visible out-fence (allocating an ID for it in
 * the user_fences xarray) and pushes the job to the scheduler entity.
 *
 * Returns 0 on success or a negative error code; on failure the armed job
 * is cleaned up, while the submit itself remains owned by the caller.
 */
int etnaviv_sched_push_job(struct etnaviv_gem_submit *submit)
{
	struct etnaviv_gpu *gpu = submit->gpu;
	int ret;

	/*
	 * Hold the sched lock across the whole operation to avoid jobs being
	 * pushed out of order with regard to their sched fence seqnos as
	 * allocated in drm_sched_job_arm.
	 */
	mutex_lock(&gpu->sched_lock);

	drm_sched_job_arm(&submit->sched_job);

	submit->out_fence = dma_fence_get(&submit->sched_job.s_fence->finished);
	ret = xa_alloc_cyclic(&gpu->user_fences, &submit->out_fence_id,
			      submit->out_fence, xa_limit_32b,
			      &gpu->next_user_fence, GFP_KERNEL);
	if (ret < 0) {
		drm_sched_job_cleanup(&submit->sched_job);
		goto out_unlock;
	}

	/*
	 * xa_alloc_cyclic() returns 1 when the allocation succeeded after
	 * wrapping around; normalize to 0 so callers can treat any non-zero
	 * return value as an error.
	 */
	ret = 0;

	/* the scheduler holds on to the job now */
	kref_get(&submit->refcount);

	drm_sched_entity_push_job(&submit->sched_job);

out_unlock:
	mutex_unlock(&gpu->sched_lock);

	return ret;
}
/*
 * Create the DRM GPU scheduler instance for this GPU core, using the
 * module-parameter limits for in-flight jobs and hang retries and a
 * 500ms job timeout.  Returns 0 on success or a negative error code.
 */
int etnaviv_sched_init(struct etnaviv_gpu *gpu)
{
	return drm_sched_init(&gpu->sched, &etnaviv_sched_ops,
			      etnaviv_hw_jobs_limit, etnaviv_job_hang_limit,
			      msecs_to_jiffies(500), NULL, NULL,
			      dev_name(gpu->dev), gpu->dev);
}
/* Tear down the scheduler instance created by etnaviv_sched_init(). */
void etnaviv_sched_fini(struct etnaviv_gpu *gpu)
{
	drm_sched_fini(&gpu->sched);
}
| linux-master | drivers/gpu/drm/etnaviv/etnaviv_sched.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2014-2018 Etnaviv Project
*/
#include <drm/drm_prime.h>
#include <linux/dma-buf.h>
#include <linux/module.h>
#include "etnaviv_drv.h"
#include "etnaviv_gem.h"
MODULE_IMPORT_NS(DMA_BUF);
/* Separate lockdep class for imported BOs, set in etnaviv_gem_prime_import_sg_table(). */
static struct lock_class_key etnaviv_prime_lock_class;
/*
 * PRIME export: build an sg_table from the BO's page array.  The pages
 * must already have been pinned by the caller.
 */
struct sg_table *etnaviv_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	if (WARN_ON(!etnaviv_obj->pages)) /* should have already pinned! */
		return ERR_PTR(-EINVAL);

	return drm_prime_pages_to_sg(obj->dev, etnaviv_obj->pages,
				     obj->size >> PAGE_SHIFT);
}
/*
 * PRIME vmap: map the BO into kernel address space and publish the
 * address through @map.  Returns -ENOMEM when the mapping fails.
 */
int etnaviv_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map)
{
	void *ptr = etnaviv_gem_vmap(obj);

	if (!ptr)
		return -ENOMEM;

	iosys_map_set_vaddr(map, ptr);

	return 0;
}
/*
 * PRIME pin: make sure locally-allocated BOs have their backing pages
 * populated before export.  Imported BOs are backed by the exporter and
 * need no action here.
 *
 * The original code ignored a failure of etnaviv_gem_get_pages(); propagate
 * it so the exporter doesn't hand out an unbacked buffer.
 */
int etnaviv_gem_prime_pin(struct drm_gem_object *obj)
{
	if (!obj->import_attach) {
		struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
		struct page **pages;

		mutex_lock(&etnaviv_obj->lock);
		pages = etnaviv_gem_get_pages(etnaviv_obj);
		mutex_unlock(&etnaviv_obj->lock);

		if (IS_ERR(pages))
			return PTR_ERR(pages);
	}

	return 0;
}
/*
 * PRIME unpin: drop the page reference taken in etnaviv_gem_prime_pin()
 * for locally-allocated BOs.  Imported BOs are left alone.
 *
 * Consistency fix: use the already-computed etnaviv_obj instead of
 * re-deriving it with to_etnaviv_bo(obj) (behavior unchanged).
 */
void etnaviv_gem_prime_unpin(struct drm_gem_object *obj)
{
	if (!obj->import_attach) {
		struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

		mutex_lock(&etnaviv_obj->lock);
		etnaviv_gem_put_pages(etnaviv_obj);
		mutex_unlock(&etnaviv_obj->lock);
	}
}
/*
 * Release an imported (dma-buf backed) BO: drop the kernel vmap if one
 * exists, free the page array we built at import time and hand the
 * sg_table back to the PRIME core.
 */
static void etnaviv_gem_prime_release(struct etnaviv_gem_object *etnaviv_obj)
{
	struct iosys_map map = IOSYS_MAP_INIT_VADDR(etnaviv_obj->vaddr);
	if (etnaviv_obj->vaddr)
		dma_buf_vunmap_unlocked(etnaviv_obj->base.import_attach->dmabuf, &map);
	/* Don't drop the pages for imported dmabuf, as they are not
	 * ours, just free the array we allocated:
	 */
	kvfree(etnaviv_obj->pages);
	drm_prime_gem_destroy(&etnaviv_obj->base, etnaviv_obj->sgt);
}
/*
 * vmap op for imported BOs: forward to the exporter via dma_buf_vmap().
 * Must be called with the object lock held; returns NULL on failure.
 */
static void *etnaviv_gem_prime_vmap_impl(struct etnaviv_gem_object *etnaviv_obj)
{
	struct iosys_map map;

	lockdep_assert_held(&etnaviv_obj->lock);

	if (dma_buf_vmap(etnaviv_obj->base.import_attach->dmabuf, &map))
		return NULL;

	return map.vaddr;
}
/*
 * mmap op for imported BOs: forward the mapping to the exporter via
 * dma_buf_mmap() instead of mapping our own pages.
 */
static int etnaviv_gem_prime_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
		struct vm_area_struct *vma)
{
	int ret;
	ret = dma_buf_mmap(etnaviv_obj->base.dma_buf, vma, 0);
	if (!ret) {
		/* Drop the reference acquired by drm_gem_mmap_obj(). */
		drm_gem_object_put(&etnaviv_obj->base);
	}
	return ret;
}
/* Object ops for BOs imported through PRIME (dma-buf). */
static const struct etnaviv_gem_ops etnaviv_gem_prime_ops = {
	/* .get_pages should never be called */
	.release = etnaviv_gem_prime_release,
	.vmap = etnaviv_gem_prime_vmap_impl,
	.mmap = etnaviv_gem_prime_mmap_obj,
};
/*
 * Wrap an imported sg_table in a new etnaviv GEM object.  A page array is
 * rebuilt from the sg_table so the rest of the driver can treat the BO
 * like a locally-backed one.  On failure the partially set up object is
 * released through the normal GEM put path, which invokes
 * etnaviv_gem_prime_release() above.
 */
struct drm_gem_object *etnaviv_gem_prime_import_sg_table(struct drm_device *dev,
	struct dma_buf_attachment *attach, struct sg_table *sgt)
{
	struct etnaviv_gem_object *etnaviv_obj;
	size_t size = PAGE_ALIGN(attach->dmabuf->size);
	int ret, npages;
	ret = etnaviv_gem_new_private(dev, size, ETNA_BO_WC,
				      &etnaviv_gem_prime_ops, &etnaviv_obj);
	if (ret < 0)
		return ERR_PTR(ret);
	/* imported BOs get their own lockdep class, see etnaviv_prime_lock_class */
	lockdep_set_class(&etnaviv_obj->lock, &etnaviv_prime_lock_class);
	npages = size / PAGE_SIZE;
	etnaviv_obj->sgt = sgt;
	etnaviv_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!etnaviv_obj->pages) {
		ret = -ENOMEM;
		goto fail;
	}
	ret = drm_prime_sg_to_page_array(sgt, etnaviv_obj->pages, npages);
	if (ret)
		goto fail;
	etnaviv_gem_obj_add(dev, &etnaviv_obj->base);
	return &etnaviv_obj->base;
fail:
	drm_gem_object_put(&etnaviv_obj->base);
	return ERR_PTR(ret);
}
| linux-master | drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2018 Etnaviv Project
*/
#include "etnaviv_gpu.h"
/*
 * Table of known GPU core identities, used to override the (sometimes
 * incomplete or wrong) values read from the hardware feature registers.
 * A field value of ~0U in product_id/customer_id/eco_id acts as a
 * wildcard when matching, see etnaviv_fill_identity_from_hwdb().
 *
 * Consistency fix: .nn_core_count is now set explicitly in every entry
 * (designated initializers zero-fill omitted members, so behavior is
 * unchanged).
 */
static const struct etnaviv_chip_identity etnaviv_chip_identities[] = {
	{
		.model = 0x400,
		.revision = 0x4652,
		.product_id = 0x70001,
		.customer_id = 0x100,
		.eco_id = 0,
		.stream_count = 4,
		.register_max = 64,
		.thread_count = 128,
		.shader_core_count = 1,
		.nn_core_count = 0,
		.vertex_cache_size = 8,
		.vertex_output_buffer_size = 1024,
		.pixel_pipes = 1,
		.instruction_count = 256,
		.num_constants = 320,
		.buffer_size = 0,
		.varyings_count = 8,
		.features = 0xa0e9e004,
		.minor_features0 = 0xe1299fff,
		.minor_features1 = 0xbe13b219,
		.minor_features2 = 0xce110010,
		.minor_features3 = 0x8000001,
		.minor_features4 = 0x20102,
		.minor_features5 = 0x120000,
		.minor_features6 = 0x0,
		.minor_features7 = 0x0,
		.minor_features8 = 0x0,
		.minor_features9 = 0x0,
		.minor_features10 = 0x0,
		.minor_features11 = 0x0,
	},
	{
		.model = 0x520,
		.revision = 0x5341,
		.product_id = 0x5202,
		.customer_id = 0x204,
		.eco_id = 0,
		.stream_count = 1,
		.register_max = 64,
		.thread_count = 256,
		.shader_core_count = 1,
		.nn_core_count = 0,
		.vertex_cache_size = 8,
		.vertex_output_buffer_size = 512,
		.pixel_pipes = 1,
		.instruction_count = 256,
		.num_constants = 168,
		.buffer_size = 0,
		.varyings_count = 8,
		.features = 0xe02c7eca,
		.minor_features0 = 0xe9399eff,
		.minor_features1 = 0xfe1fb2db,
		.minor_features2 = 0xcedf0080,
		.minor_features3 = 0x10800005,
		.minor_features4 = 0x20000000,
		.minor_features5 = 0x00020880,
		.minor_features6 = 0x00000000,
		.minor_features7 = 0x00001000,
		.minor_features8 = 0x00000000,
		.minor_features9 = 0x00000000,
		.minor_features10 = 0x00000000,
		.minor_features11 = 0x00000000,
	},
	{
		.model = 0x7000,
		.revision = 0x6202,
		.product_id = 0x70003,
		.customer_id = 0,
		.eco_id = 0,
		.stream_count = 8,
		.register_max = 64,
		.thread_count = 512,
		.shader_core_count = 2,
		.nn_core_count = 0,
		.vertex_cache_size = 16,
		.vertex_output_buffer_size = 1024,
		.pixel_pipes = 1,
		.instruction_count = 512,
		.num_constants = 320,
		.buffer_size = 0,
		.varyings_count = 16,
		.features = 0xe0287cad,
		.minor_features0 = 0xc1489eff,
		.minor_features1 = 0xfefbfad9,
		.minor_features2 = 0xeb9d4fbf,
		.minor_features3 = 0xedfffced,
		.minor_features4 = 0xdb0dafc7,
		.minor_features5 = 0x3b5ac333,
		.minor_features6 = 0xfccee201,
		.minor_features7 = 0x03fffa6f,
		.minor_features8 = 0x00e10ef0,
		.minor_features9 = 0x0088003c,
		.minor_features10 = 0x00004040,
		.minor_features11 = 0x00000024,
	},
	{
		.model = 0x7000,
		.revision = 0x6203,
		.product_id = 0x70003,
		.customer_id = 0x4,
		.eco_id = 0,
		.stream_count = 16,
		.register_max = 64,
		.thread_count = 512,
		.shader_core_count = 2,
		.nn_core_count = 0,
		.vertex_cache_size = 16,
		.vertex_output_buffer_size = 1024,
		.pixel_pipes = 1,
		.instruction_count = 512,
		.num_constants = 320,
		.buffer_size = 0,
		.varyings_count = 16,
		.features = 0xe0287c8d,
		.minor_features0 = 0xc1589eff,
		.minor_features1 = 0xfefbfad9,
		.minor_features2 = 0xeb9d4fbf,
		.minor_features3 = 0xedfffced,
		.minor_features4 = 0xdb0dafc7,
		.minor_features5 = 0x3b5ac333,
		.minor_features6 = 0xfcce6000,
		.minor_features7 = 0xfffbfa6f,
		.minor_features8 = 0x00e10ef3,
		.minor_features9 = 0x00c8003c,
		.minor_features10 = 0x00004040,
		.minor_features11 = 0x00000024,
	},
	{
		.model = 0x7000,
		.revision = 0x6204,
		.product_id = ~0U,
		.customer_id = ~0U,
		.eco_id = 0,
		.stream_count = 16,
		.register_max = 64,
		.thread_count = 512,
		.shader_core_count = 2,
		.nn_core_count = 0,
		.vertex_cache_size = 16,
		.vertex_output_buffer_size = 1024,
		.pixel_pipes = 1,
		.instruction_count = 512,
		.num_constants = 320,
		.buffer_size = 0,
		.varyings_count = 16,
		.features = 0xe0287c8d,
		.minor_features0 = 0xc1589eff,
		.minor_features1 = 0xfefbfad9,
		.minor_features2 = 0xeb9d4fbf,
		.minor_features3 = 0xedfffced,
		.minor_features4 = 0xdb0dafc7,
		.minor_features5 = 0x3b5ac333,
		.minor_features6 = 0xfcce6000,
		.minor_features7 = 0xfffbfa6f,
		.minor_features8 = 0x00e10ef3,
		.minor_features9 = 0x04c8003c,
		.minor_features10 = 0x00004060,
		.minor_features11 = 0x00000024,
	},
	{
		.model = 0x7000,
		.revision = 0x6214,
		.product_id = ~0U,
		.customer_id = ~0U,
		.eco_id = ~0U,
		.stream_count = 16,
		.register_max = 64,
		.thread_count = 1024,
		.shader_core_count = 4,
		.nn_core_count = 0,
		.vertex_cache_size = 16,
		.vertex_output_buffer_size = 1024,
		.pixel_pipes = 2,
		.instruction_count = 512,
		.num_constants = 320,
		.buffer_size = 0,
		.varyings_count = 16,
		.features = 0xe0287cad,
		.minor_features0 = 0xc1799eff,
		.minor_features1 = 0xfefbfad9,
		.minor_features2 = 0xeb9d4fbf,
		.minor_features3 = 0xedfffced,
		.minor_features4 = 0xdb0dafc7,
		.minor_features5 = 0xbb5ac333,
		.minor_features6 = 0xfc8ee200,
		.minor_features7 = 0x03fbfa6f,
		.minor_features8 = 0x00ef0ef0,
		.minor_features9 = 0x0edbf03c,
		.minor_features10 = 0x90044250,
		.minor_features11 = 0x00000024,
	},
	{
		.model = 0x8000,
		.revision = 0x7120,
		.product_id = 0x45080009,
		.customer_id = 0x88,
		.eco_id = 0,
		.stream_count = 8,
		.register_max = 64,
		.thread_count = 256,
		.shader_core_count = 1,
		.nn_core_count = 8,
		.vertex_cache_size = 16,
		.vertex_output_buffer_size = 1024,
		.pixel_pipes = 1,
		.instruction_count = 512,
		.num_constants = 320,
		.buffer_size = 0,
		.varyings_count = 16,
		.features = 0xe0287cac,
		.minor_features0 = 0xc1799eff,
		.minor_features1 = 0xfefbfadb,
		.minor_features2 = 0xeb9d6fbf,
		.minor_features3 = 0xedfffced,
		.minor_features4 = 0xd30dafc7,
		.minor_features5 = 0x7b5ac333,
		.minor_features6 = 0xfc8ee200,
		.minor_features7 = 0x03fffa6f,
		.minor_features8 = 0x00fe0ef0,
		.minor_features9 = 0x0088003c,
		.minor_features10 = 0x108048c0,
		.minor_features11 = 0x00000010,
	},
	{
		.model = 0x8000,
		.revision = 0x8002,
		.product_id = 0x5080009,
		.customer_id = 0x9f,
		.eco_id = 0x6000000,
		.stream_count = 8,
		.register_max = 64,
		.thread_count = 256,
		.shader_core_count = 1,
		.nn_core_count = 6,
		.vertex_cache_size = 16,
		.vertex_output_buffer_size = 1024,
		.pixel_pipes = 1,
		.instruction_count = 512,
		.num_constants = 320,
		.buffer_size = 0,
		.varyings_count = 16,
		.features = 0xe0287cac,
		.minor_features0 = 0xc1799eff,
		.minor_features1 = 0xfefbfadb,
		.minor_features2 = 0xeb9d6fbf,
		.minor_features3 = 0xedfffced,
		.minor_features4 = 0xd30dafc7,
		.minor_features5 = 0x7b5ac333,
		.minor_features6 = 0xfc8ee200,
		.minor_features7 = 0x03fffa6f,
		.minor_features8 = 0x00fe0ef0,
		.minor_features9 = 0x0088003c,
		.minor_features10 = 0x108048c0,
		.minor_features11 = 0x00000010,
	},
};
bool etnaviv_fill_identity_from_hwdb(struct etnaviv_gpu *gpu)
{
struct etnaviv_chip_identity *ident = &gpu->identity;
int i;
for (i = 0; i < ARRAY_SIZE(etnaviv_chip_identities); i++) {
if (etnaviv_chip_identities[i].model == ident->model &&
etnaviv_chip_identities[i].revision == ident->revision &&
(etnaviv_chip_identities[i].product_id == ident->product_id ||
etnaviv_chip_identities[i].product_id == ~0U) &&
(etnaviv_chip_identities[i].customer_id == ident->customer_id ||
etnaviv_chip_identities[i].customer_id == ~0U) &&
(etnaviv_chip_identities[i].eco_id == ident->eco_id ||
etnaviv_chip_identities[i].eco_id == ~0U)) {
memcpy(ident, &etnaviv_chip_identities[i],
sizeof(*ident));
return true;
}
}
return false;
}
| linux-master | drivers/gpu/drm/etnaviv/etnaviv_hwdb.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2017-2018 Etnaviv Project
*/
#include <linux/dma-mapping.h>
#include <drm/drm_mm.h>
#include "etnaviv_cmdbuf.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"
#include "etnaviv_perfmon.h"
/* One contiguous DMA area all command buffers are carved out of. */
#define SUBALLOC_SIZE SZ_512K
/* Allocation granularity within that area. */
#define SUBALLOC_GRANULE SZ_4K
#define SUBALLOC_GRANULES (SUBALLOC_SIZE / SUBALLOC_GRANULE)
struct etnaviv_cmdbuf_suballoc {
	/* suballocated dma buffer properties */
	struct device *dev;
	void *vaddr;
	dma_addr_t paddr;
	/* allocation management */
	struct mutex lock; /* protects granule_map and free_space */
	DECLARE_BITMAP(granule_map, SUBALLOC_GRANULES)
	int free_space; /* wake condition for waiters in etnaviv_cmdbuf_init() */
	wait_queue_head_t free_event;
};
/*
 * Allocate and initialize a command buffer suballocator, backed by a single
 * write-combined DMA area of SUBALLOC_SIZE bytes.  Returns an ERR_PTR on
 * failure.
 */
struct etnaviv_cmdbuf_suballoc *
etnaviv_cmdbuf_suballoc_new(struct device *dev)
{
	struct etnaviv_cmdbuf_suballoc *suballoc;

	suballoc = kzalloc(sizeof(*suballoc), GFP_KERNEL);
	if (!suballoc)
		return ERR_PTR(-ENOMEM);

	suballoc->dev = dev;
	mutex_init(&suballoc->lock);
	init_waitqueue_head(&suballoc->free_event);

	BUILD_BUG_ON(ETNAVIV_SOFTPIN_START_ADDRESS < SUBALLOC_SIZE);

	suballoc->vaddr = dma_alloc_wc(dev, SUBALLOC_SIZE,
				       &suballoc->paddr, GFP_KERNEL);
	if (!suballoc->vaddr) {
		kfree(suballoc);
		return ERR_PTR(-ENOMEM);
	}

	return suballoc;
}
/* Map the whole suballocator area into the given MMU context. */
int etnaviv_cmdbuf_suballoc_map(struct etnaviv_cmdbuf_suballoc *suballoc,
				struct etnaviv_iommu_context *context,
				struct etnaviv_vram_mapping *mapping,
				u32 memory_base)
{
	return etnaviv_iommu_get_suballoc_va(context, mapping, memory_base,
					     suballoc->paddr, SUBALLOC_SIZE);
}
/* Undo etnaviv_cmdbuf_suballoc_map() for the given MMU context. */
void etnaviv_cmdbuf_suballoc_unmap(struct etnaviv_iommu_context *context,
				   struct etnaviv_vram_mapping *mapping)
{
	etnaviv_iommu_put_suballoc_va(context, mapping);
}
/* Free the DMA area and the suballocator itself; all cmdbufs must be gone. */
void etnaviv_cmdbuf_suballoc_destroy(struct etnaviv_cmdbuf_suballoc *suballoc)
{
	dma_free_wc(suballoc->dev, SUBALLOC_SIZE, suballoc->vaddr,
		    suballoc->paddr);
	kfree(suballoc);
}
/*
 * Carve a command buffer of @size bytes out of the suballocator.  If no
 * space is free, wait (interruptibly, up to 10s) for another cmdbuf to be
 * released and retry.
 *
 * Robustness fix: wait_event_interruptible_timeout() returns a negative
 * value (-ERESTARTSYS) when interrupted by a signal; the old code jumped
 * back to retry in that case, busy-looping for as long as the signal was
 * pending.  Propagate the error instead.
 *
 * Returns 0 on success, -ETIMEDOUT when no space became available, or a
 * negative error from the interrupted wait.
 */
int etnaviv_cmdbuf_init(struct etnaviv_cmdbuf_suballoc *suballoc,
			struct etnaviv_cmdbuf *cmdbuf, u32 size)
{
	int granule_offs, order, ret;

	cmdbuf->suballoc = suballoc;
	cmdbuf->size = size;

	/* allocation is done in power-of-two numbers of granules */
	order = order_base_2(ALIGN(size, SUBALLOC_GRANULE) / SUBALLOC_GRANULE);
retry:
	mutex_lock(&suballoc->lock);
	granule_offs = bitmap_find_free_region(suballoc->granule_map,
					       SUBALLOC_GRANULES, order);
	if (granule_offs < 0) {
		suballoc->free_space = 0;
		mutex_unlock(&suballoc->lock);
		ret = wait_event_interruptible_timeout(suballoc->free_event,
						       suballoc->free_space,
						       msecs_to_jiffies(10 * 1000));
		if (!ret) {
			dev_err(suballoc->dev,
				"Timeout waiting for cmdbuf space\n");
			return -ETIMEDOUT;
		} else if (ret < 0) {
			/* interrupted by a signal; don't busy-loop */
			return ret;
		}
		goto retry;
	}
	mutex_unlock(&suballoc->lock);
	cmdbuf->suballoc_offset = granule_offs * SUBALLOC_GRANULE;
	cmdbuf->vaddr = suballoc->vaddr + cmdbuf->suballoc_offset;

	return 0;
}
/*
 * Return a command buffer's granules to the suballocator and wake any
 * allocator waiting for space in etnaviv_cmdbuf_init().
 */
void etnaviv_cmdbuf_free(struct etnaviv_cmdbuf *cmdbuf)
{
	struct etnaviv_cmdbuf_suballoc *suballoc = cmdbuf->suballoc;
	int order = order_base_2(ALIGN(cmdbuf->size, SUBALLOC_GRANULE) /
				 SUBALLOC_GRANULE);
	/* nothing to release if this cmdbuf was never attached to a suballocator */
	if (!suballoc)
		return;
	mutex_lock(&suballoc->lock);
	bitmap_release_region(suballoc->granule_map,
			      cmdbuf->suballoc_offset / SUBALLOC_GRANULE,
			      order);
	suballoc->free_space = 1;
	mutex_unlock(&suballoc->lock);
	wake_up_all(&suballoc->free_event);
}
/* GPU virtual address of the cmdbuf within the given suballoc mapping. */
u32 etnaviv_cmdbuf_get_va(struct etnaviv_cmdbuf *buf,
			  struct etnaviv_vram_mapping *mapping)
{
	return mapping->iova + buf->suballoc_offset;
}
/* Bus/DMA address of the cmdbuf within the suballocator's DMA area. */
dma_addr_t etnaviv_cmdbuf_get_pa(struct etnaviv_cmdbuf *buf)
{
	return buf->suballoc->paddr + buf->suballoc_offset;
}
| linux-master | drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2014-2018 Etnaviv Project
*/
#include <drm/drm_drv.h>
#include "etnaviv_cmdbuf.h"
#include "etnaviv_gpu.h"
#include "etnaviv_gem.h"
#include "etnaviv_mmu.h"
#include "common.xml.h"
#include "state.xml.h"
#include "state_blt.xml.h"
#include "state_hi.xml.h"
#include "state_3d.xml.h"
#include "cmdstream.xml.h"
/*
* Command Buffer helper:
*/
/* Append one 32-bit word at the cmdbuf's current write position. */
static inline void OUT(struct etnaviv_cmdbuf *buffer, u32 data)
{
	u32 *vaddr = (u32 *)buffer->vaddr;
	/* writes must stay strictly inside the buffer */
	BUG_ON(buffer->user_size >= buffer->size);
	vaddr[buffer->user_size / 4] = data;
	buffer->user_size += 4;
}
/* Emit a LOAD_STATE command writing @value to state register @reg. */
static inline void CMD_LOAD_STATE(struct etnaviv_cmdbuf *buffer,
	u32 reg, u32 value)
{
	u32 index = reg >> VIV_FE_LOAD_STATE_HEADER_OFFSET__SHR;
	/* commands start on 64-bit boundaries */
	buffer->user_size = ALIGN(buffer->user_size, 8);
	/* write a register via cmd stream */
	OUT(buffer, VIV_FE_LOAD_STATE_HEADER_OP_LOAD_STATE |
		    VIV_FE_LOAD_STATE_HEADER_COUNT(1) |
		    VIV_FE_LOAD_STATE_HEADER_OFFSET(index));
	OUT(buffer, value);
}
/* Emit an END command (stops the front-end). */
static inline void CMD_END(struct etnaviv_cmdbuf *buffer)
{
	buffer->user_size = ALIGN(buffer->user_size, 8);
	OUT(buffer, VIV_FE_END_HEADER_OP_END);
}
/* Emit a WAIT command for the given number of cycles. */
static inline void CMD_WAIT(struct etnaviv_cmdbuf *buffer,
			    unsigned int waitcycles)
{
	buffer->user_size = ALIGN(buffer->user_size, 8);
	OUT(buffer, VIV_FE_WAIT_HEADER_OP_WAIT | waitcycles);
}
/* Emit a LINK jump to @address; @prefetch appears to be counted in 64-bit words (see callers) */
static inline void CMD_LINK(struct etnaviv_cmdbuf *buffer,
			    u16 prefetch, u32 address)
{
	buffer->user_size = ALIGN(buffer->user_size, 8);
	OUT(buffer, VIV_FE_LINK_HEADER_OP_LINK |
		    VIV_FE_LINK_HEADER_PREFETCH(prefetch));
	OUT(buffer, address);
}
/* Emit a STALL command on the @from/@to semaphore pair (usually paired with CMD_SEM). */
static inline void CMD_STALL(struct etnaviv_cmdbuf *buffer,
			     u32 from, u32 to)
{
	buffer->user_size = ALIGN(buffer->user_size, 8);
	OUT(buffer, VIV_FE_STALL_HEADER_OP_STALL);
	OUT(buffer, VIV_FE_STALL_TOKEN_FROM(from) | VIV_FE_STALL_TOKEN_TO(to));
}
/* Emit a semaphore token from @from to @to via the GL_SEMAPHORE_TOKEN state. */
static inline void CMD_SEM(struct etnaviv_cmdbuf *buffer, u32 from, u32 to)
{
	CMD_LOAD_STATE(buffer, VIVS_GL_SEMAPHORE_TOKEN,
		       VIVS_GL_SEMAPHORE_TOKEN_FROM(from) |
		       VIVS_GL_SEMAPHORE_TOKEN_TO(to));
}
/*
 * Emit commands to switch the front-end to @pipe: flush the caches of the
 * pipe we are leaving, wait for the flush to land (semaphore + stall),
 * then select the new pipe.
 */
static void etnaviv_cmd_select_pipe(struct etnaviv_gpu *gpu,
	struct etnaviv_cmdbuf *buffer, u8 pipe)
{
	u32 flush = 0;
	lockdep_assert_held(&gpu->lock);
	/*
	 * This assumes that if we're switching to 2D, we're switching
	 * away from 3D, and vice versa. Hence, if we're switching to
	 * the 2D core, we need to flush the 3D depth and color caches,
	 * otherwise we need to flush the 2D pixel engine cache.
	 */
	if (gpu->exec_state == ETNA_PIPE_2D)
		flush = VIVS_GL_FLUSH_CACHE_PE2D;
	else if (gpu->exec_state == ETNA_PIPE_3D)
		flush = VIVS_GL_FLUSH_CACHE_DEPTH | VIVS_GL_FLUSH_CACHE_COLOR;
	CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE, flush);
	CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
	CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
	CMD_LOAD_STATE(buffer, VIVS_GL_PIPE_SELECT,
		       VIVS_GL_PIPE_SELECT_PIPE(pipe));
}
/* Debug helper: hex-dump @len words of a cmdbuf starting at byte offset @off. */
static void etnaviv_buffer_dump(struct etnaviv_gpu *gpu,
	struct etnaviv_cmdbuf *buf, u32 off, u32 len)
{
	u32 size = buf->size;
	u32 *ptr = buf->vaddr + off;
	dev_info(gpu->dev, "virt %p phys 0x%08x free 0x%08x\n",
			ptr, etnaviv_cmdbuf_get_va(buf,
			&gpu->mmu_context->cmdbuf_mapping) +
			off, size - len * 4 - off);
	print_hex_dump(KERN_INFO, "cmd ", DUMP_PREFIX_OFFSET, 16, 4,
			ptr, len * 4, 0);
}
/*
* Safely replace the WAIT of a waitlink with a new command and argument.
* The GPU may be executing this WAIT while we're modifying it, so we have
* to write it in a specific order to avoid the GPU branching to somewhere
* else. 'wl_offset' is the offset to the first byte of the WAIT command.
*/
static void etnaviv_buffer_replace_wait(struct etnaviv_cmdbuf *buffer,
	unsigned int wl_offset, u32 cmd, u32 arg)
{
	u32 *lw = buffer->vaddr + wl_offset;
	/* argument first, then command, so the GPU never sees the new
	 * command paired with a stale argument */
	lw[1] = arg;
	mb();
	lw[0] = cmd;
	mb();
}
/*
* Ensure that there is space in the command buffer to contiguously write
* 'cmd_dwords' 64-bit words into the buffer, wrapping if necessary.
*/
static u32 etnaviv_buffer_reserve(struct etnaviv_gpu *gpu,
	struct etnaviv_cmdbuf *buffer, unsigned int cmd_dwords)
{
	/* wrap to the buffer start if the tail cannot hold cmd_dwords 64-bit words */
	if (buffer->user_size + cmd_dwords * sizeof(u64) > buffer->size)
		buffer->user_size = 0;
	/* GPU address of the reserved region */
	return etnaviv_cmdbuf_get_va(buffer,
				     &gpu->mmu_context->cmdbuf_mapping) +
	       buffer->user_size;
}
/*
 * Initialize the kernel ring buffer with a WAIT/LINK loop (the LINK jumps
 * back to the WAIT).  Returns the prefetch count in 64-bit words for the
 * front-end start.
 */
u16 etnaviv_buffer_init(struct etnaviv_gpu *gpu)
{
	struct etnaviv_cmdbuf *buffer = &gpu->buffer;
	lockdep_assert_held(&gpu->lock);
	/* initialize buffer */
	buffer->user_size = 0;
	CMD_WAIT(buffer, gpu->fe_waitcycles);
	CMD_LINK(buffer, 2,
		 etnaviv_cmdbuf_get_va(buffer, &gpu->mmu_context->cmdbuf_mapping)
		 + buffer->user_size - 4);
	return buffer->user_size / 8;
}
/*
 * Build a one-shot command sequence programming the MMUv2 master TLB and
 * safe address on each present pipe, ending with an END command.  Returns
 * the prefetch count in 64-bit words.
 */
u16 etnaviv_buffer_config_mmuv2(struct etnaviv_gpu *gpu, u32 mtlb_addr, u32 safe_addr)
{
	struct etnaviv_cmdbuf *buffer = &gpu->buffer;
	lockdep_assert_held(&gpu->lock);
	buffer->user_size = 0;
	if (gpu->identity.features & chipFeatures_PIPE_3D) {
		CMD_LOAD_STATE(buffer, VIVS_GL_PIPE_SELECT,
			       VIVS_GL_PIPE_SELECT_PIPE(ETNA_PIPE_3D));
		CMD_LOAD_STATE(buffer, VIVS_MMUv2_CONFIGURATION,
			mtlb_addr | VIVS_MMUv2_CONFIGURATION_MODE_MODE4_K);
		CMD_LOAD_STATE(buffer, VIVS_MMUv2_SAFE_ADDRESS, safe_addr);
		CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
		CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
	}
	if (gpu->identity.features & chipFeatures_PIPE_2D) {
		CMD_LOAD_STATE(buffer, VIVS_GL_PIPE_SELECT,
			       VIVS_GL_PIPE_SELECT_PIPE(ETNA_PIPE_2D));
		CMD_LOAD_STATE(buffer, VIVS_MMUv2_CONFIGURATION,
			mtlb_addr | VIVS_MMUv2_CONFIGURATION_MODE_MODE4_K);
		CMD_LOAD_STATE(buffer, VIVS_MMUv2_SAFE_ADDRESS, safe_addr);
		CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
		CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
	}
	CMD_END(buffer);
	buffer->user_size = ALIGN(buffer->user_size, 8);
	return buffer->user_size / 8;
}
/*
 * Build a one-shot command sequence selecting page table array entry @id,
 * ending with an END command.  Returns the prefetch count in 64-bit words.
 */
u16 etnaviv_buffer_config_pta(struct etnaviv_gpu *gpu, unsigned short id)
{
	struct etnaviv_cmdbuf *buffer = &gpu->buffer;
	lockdep_assert_held(&gpu->lock);
	buffer->user_size = 0;
	CMD_LOAD_STATE(buffer, VIVS_MMUv2_PTA_CONFIG,
		       VIVS_MMUv2_PTA_CONFIG_INDEX(id));
	CMD_END(buffer);
	buffer->user_size = ALIGN(buffer->user_size, 8);
	return buffer->user_size / 8;
}
/*
 * Stop the ring buffer: if any caches need flushing for the active pipe,
 * append a flush sequence ending in an END command and patch the previous
 * WAIT/LINK to jump to it; otherwise patch the WAIT directly into an END.
 */
void etnaviv_buffer_end(struct etnaviv_gpu *gpu)
{
	struct etnaviv_cmdbuf *buffer = &gpu->buffer;
	/* the WAIT/LINK pair occupies the last 16 bytes written */
	unsigned int waitlink_offset = buffer->user_size - 16;
	u32 link_target, flush = 0;
	bool has_blt = !!(gpu->identity.minor_features5 &
			  chipMinorFeatures5_BLT_ENGINE);
	lockdep_assert_held(&gpu->lock);
	if (gpu->exec_state == ETNA_PIPE_2D)
		flush = VIVS_GL_FLUSH_CACHE_PE2D;
	else if (gpu->exec_state == ETNA_PIPE_3D)
		flush = VIVS_GL_FLUSH_CACHE_DEPTH |
			VIVS_GL_FLUSH_CACHE_COLOR |
			VIVS_GL_FLUSH_CACHE_TEXTURE |
			VIVS_GL_FLUSH_CACHE_TEXTUREVS |
			VIVS_GL_FLUSH_CACHE_SHADER_L2;
	if (flush) {
		unsigned int dwords = 7;
		if (has_blt)
			dwords += 10;
		link_target = etnaviv_buffer_reserve(gpu, buffer, dwords);
		CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
		CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
		if (has_blt) {
			CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x1);
			CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_BLT);
			CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_BLT);
			CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x0);
		}
		CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE, flush);
		if (gpu->exec_state == ETNA_PIPE_3D) {
			/* TS flush goes through the BLT engine when present */
			if (has_blt) {
				CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x1);
				CMD_LOAD_STATE(buffer, VIVS_BLT_SET_COMMAND, 0x1);
				CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x0);
			} else {
				CMD_LOAD_STATE(buffer, VIVS_TS_FLUSH_CACHE,
					       VIVS_TS_FLUSH_CACHE_FLUSH);
			}
		}
		CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
		CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
		if (has_blt) {
			CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x1);
			CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_BLT);
			CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_BLT);
			CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x0);
		}
		CMD_END(buffer);
		/* kick the FE into the flush sequence just written */
		etnaviv_buffer_replace_wait(buffer, waitlink_offset,
					    VIV_FE_LINK_HEADER_OP_LINK |
					    VIV_FE_LINK_HEADER_PREFETCH(dwords),
					    link_target);
	} else {
		/* Replace the last link-wait with an "END" command */
		etnaviv_buffer_replace_wait(buffer, waitlink_offset,
					    VIV_FE_END_HEADER_OP_END, 0);
	}
}
/* Append a 'sync point' to the ring buffer. */
void etnaviv_sync_point_queue(struct etnaviv_gpu *gpu, unsigned int event)
{
struct etnaviv_cmdbuf *buffer = &gpu->buffer;
unsigned int waitlink_offset = buffer->user_size - 16;
u32 dwords, target;
lockdep_assert_held(&gpu->lock);
/*
* We need at most 3 dwords in the return target:
* 1 event + 1 end + 1 wait + 1 link.
*/
dwords = 4;
target = etnaviv_buffer_reserve(gpu, buffer, dwords);
/* Signal sync point event */
CMD_LOAD_STATE(buffer, VIVS_GL_EVENT, VIVS_GL_EVENT_EVENT_ID(event) |
VIVS_GL_EVENT_FROM_PE);
/* Stop the FE to 'pause' the GPU */
CMD_END(buffer);
/* Append waitlink */
CMD_WAIT(buffer, gpu->fe_waitcycles);
CMD_LINK(buffer, 2,
etnaviv_cmdbuf_get_va(buffer, &gpu->mmu_context->cmdbuf_mapping)
+ buffer->user_size - 4);
/*
* Kick off the 'sync point' command by replacing the previous
* WAIT with a link to the address in the ring buffer.
*/
etnaviv_buffer_replace_wait(buffer, waitlink_offset,
VIV_FE_LINK_HEADER_OP_LINK |
VIV_FE_LINK_HEADER_PREFETCH(dwords),
target);
}
/* Append a command buffer to the ring buffer. */
void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state,
struct etnaviv_iommu_context *mmu_context, unsigned int event,
struct etnaviv_cmdbuf *cmdbuf)
{
struct etnaviv_cmdbuf *buffer = &gpu->buffer;
unsigned int waitlink_offset = buffer->user_size - 16;
u32 return_target, return_dwords;
u32 link_target, link_dwords;
bool switch_context = gpu->exec_state != exec_state;
bool switch_mmu_context = gpu->mmu_context != mmu_context;
unsigned int new_flush_seq = READ_ONCE(gpu->mmu_context->flush_seq);
bool need_flush = switch_mmu_context || gpu->flush_seq != new_flush_seq;
bool has_blt = !!(gpu->identity.minor_features5 &
chipMinorFeatures5_BLT_ENGINE);
lockdep_assert_held(&gpu->lock);
if (drm_debug_enabled(DRM_UT_DRIVER))
etnaviv_buffer_dump(gpu, buffer, 0, 0x50);
link_target = etnaviv_cmdbuf_get_va(cmdbuf,
&gpu->mmu_context->cmdbuf_mapping);
link_dwords = cmdbuf->size / 8;
/*
* If we need maintenance prior to submitting this buffer, we will
* need to append a mmu flush load state, followed by a new
* link to this buffer - a total of four additional words.
*/
if (need_flush || switch_context) {
u32 target, extra_dwords;
/* link command */
extra_dwords = 1;
/* flush command */
if (need_flush) {
if (gpu->mmu_context->global->version == ETNAVIV_IOMMU_V1)
extra_dwords += 1;
else
extra_dwords += 3;
}
/* pipe switch commands */
if (switch_context)
extra_dwords += 4;
/* PTA load command */
if (switch_mmu_context && gpu->sec_mode == ETNA_SEC_KERNEL)
extra_dwords += 1;
target = etnaviv_buffer_reserve(gpu, buffer, extra_dwords);
/*
* Switch MMU context if necessary. Must be done after the
* link target has been calculated, as the jump forward in the
* kernel ring still uses the last active MMU context before
* the switch.
*/
if (switch_mmu_context) {
struct etnaviv_iommu_context *old_context = gpu->mmu_context;
gpu->mmu_context = etnaviv_iommu_context_get(mmu_context);
etnaviv_iommu_context_put(old_context);
}
if (need_flush) {
/* Add the MMU flush */
if (gpu->mmu_context->global->version == ETNAVIV_IOMMU_V1) {
CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_MMU,
VIVS_GL_FLUSH_MMU_FLUSH_FEMMU |
VIVS_GL_FLUSH_MMU_FLUSH_UNK1 |
VIVS_GL_FLUSH_MMU_FLUSH_UNK2 |
VIVS_GL_FLUSH_MMU_FLUSH_PEMMU |
VIVS_GL_FLUSH_MMU_FLUSH_UNK4);
} else {
u32 flush = VIVS_MMUv2_CONFIGURATION_MODE_MASK |
VIVS_MMUv2_CONFIGURATION_FLUSH_FLUSH;
if (switch_mmu_context &&
gpu->sec_mode == ETNA_SEC_KERNEL) {
unsigned short id =
etnaviv_iommuv2_get_pta_id(gpu->mmu_context);
CMD_LOAD_STATE(buffer,
VIVS_MMUv2_PTA_CONFIG,
VIVS_MMUv2_PTA_CONFIG_INDEX(id));
}
if (gpu->sec_mode == ETNA_SEC_NONE)
flush |= etnaviv_iommuv2_get_mtlb_addr(gpu->mmu_context);
CMD_LOAD_STATE(buffer, VIVS_MMUv2_CONFIGURATION,
flush);
CMD_SEM(buffer, SYNC_RECIPIENT_FE,
SYNC_RECIPIENT_PE);
CMD_STALL(buffer, SYNC_RECIPIENT_FE,
SYNC_RECIPIENT_PE);
}
gpu->flush_seq = new_flush_seq;
}
if (switch_context) {
etnaviv_cmd_select_pipe(gpu, buffer, exec_state);
gpu->exec_state = exec_state;
}
/* And the link to the submitted buffer */
link_target = etnaviv_cmdbuf_get_va(cmdbuf,
&gpu->mmu_context->cmdbuf_mapping);
CMD_LINK(buffer, link_dwords, link_target);
/* Update the link target to point to above instructions */
link_target = target;
link_dwords = extra_dwords;
}
/*
* Append a LINK to the submitted command buffer to return to
* the ring buffer. return_target is the ring target address.
* We need at most 7 dwords in the return target: 2 cache flush +
* 2 semaphore stall + 1 event + 1 wait + 1 link.
*/
return_dwords = 7;
/*
* When the BLT engine is present we need 6 more dwords in the return
* target: 3 enable/flush/disable + 4 enable/semaphore stall/disable,
* but we don't need the normal TS flush state.
*/
if (has_blt)
return_dwords += 6;
return_target = etnaviv_buffer_reserve(gpu, buffer, return_dwords);
CMD_LINK(cmdbuf, return_dwords, return_target);
/*
* Append a cache flush, stall, event, wait and link pointing back to
* the wait command to the ring buffer.
*/
if (gpu->exec_state == ETNA_PIPE_2D) {
CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE,
VIVS_GL_FLUSH_CACHE_PE2D);
} else {
CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE,
VIVS_GL_FLUSH_CACHE_DEPTH |
VIVS_GL_FLUSH_CACHE_COLOR);
if (has_blt) {
CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x1);
CMD_LOAD_STATE(buffer, VIVS_BLT_SET_COMMAND, 0x1);
CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x0);
} else {
CMD_LOAD_STATE(buffer, VIVS_TS_FLUSH_CACHE,
VIVS_TS_FLUSH_CACHE_FLUSH);
}
}
CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
if (has_blt) {
CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x1);
CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_BLT);
CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_BLT);
CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x0);
}
CMD_LOAD_STATE(buffer, VIVS_GL_EVENT, VIVS_GL_EVENT_EVENT_ID(event) |
VIVS_GL_EVENT_FROM_PE);
CMD_WAIT(buffer, gpu->fe_waitcycles);
CMD_LINK(buffer, 2,
etnaviv_cmdbuf_get_va(buffer, &gpu->mmu_context->cmdbuf_mapping)
+ buffer->user_size - 4);
if (drm_debug_enabled(DRM_UT_DRIVER))
pr_info("stream link to 0x%08x @ 0x%08x %p\n",
return_target,
etnaviv_cmdbuf_get_va(cmdbuf, &gpu->mmu_context->cmdbuf_mapping),
cmdbuf->vaddr);
if (drm_debug_enabled(DRM_UT_DRIVER)) {
print_hex_dump(KERN_INFO, "cmd ", DUMP_PREFIX_OFFSET, 16, 4,
cmdbuf->vaddr, cmdbuf->size, 0);
pr_info("link op: %p\n", buffer->vaddr + waitlink_offset);
pr_info("addr: 0x%08x\n", link_target);
pr_info("back: 0x%08x\n", return_target);
pr_info("event: %d\n", event);
}
/*
* Kick off the submitted command by replacing the previous
* WAIT with a link to the address in the ring buffer.
*/
etnaviv_buffer_replace_wait(buffer, waitlink_offset,
VIV_FE_LINK_HEADER_OP_LINK |
VIV_FE_LINK_HEADER_PREFETCH(link_dwords),
link_target);
if (drm_debug_enabled(DRM_UT_DRIVER))
etnaviv_buffer_dump(gpu, buffer, 0, 0x50);
}
| linux-master | drivers/gpu/drm/etnaviv/etnaviv_buffer.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2015 Etnaviv Project
*/
#include <drm/drm_file.h>
#include <linux/dma-fence-array.h>
#include <linux/file.h>
#include <linux/pm_runtime.h>
#include <linux/dma-resv.h>
#include <linux/sync_file.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include "etnaviv_cmdbuf.h"
#include "etnaviv_drv.h"
#include "etnaviv_gpu.h"
#include "etnaviv_gem.h"
#include "etnaviv_perfmon.h"
#include "etnaviv_sched.h"
/*
* Cmdstream submission:
*/
#define BO_INVALID_FLAGS ~(ETNA_SUBMIT_BO_READ | ETNA_SUBMIT_BO_WRITE)
/* make sure these don't conflict w/ ETNAVIV_SUBMIT_BO_x */
#define BO_LOCKED 0x4000
#define BO_PINNED 0x2000
/*
 * Allocate a submit object with tail space for @nr_bos buffer entries and
 * a separate array for @nr_pmrs perfmon requests. The caller owns the
 * initial reference; it is dropped through etnaviv_submit_put().
 * Returns NULL on allocation failure.
 */
static struct etnaviv_gem_submit *submit_create(struct drm_device *dev,
		struct etnaviv_gpu *gpu, size_t nr_bos, size_t nr_pmrs)
{
	struct etnaviv_gem_submit *submit;
	size_t total;

	/* struct + variable-length bos[] tail in a single allocation */
	total = size_vstruct(nr_bos, sizeof(submit->bos[0]), sizeof(*submit));
	submit = kzalloc(total, GFP_KERNEL);
	if (!submit)
		return NULL;

	submit->pmrs = kcalloc(nr_pmrs, sizeof(struct etnaviv_perfmon_request),
			       GFP_KERNEL);
	if (!submit->pmrs) {
		kfree(submit);
		return NULL;
	}

	submit->gpu = gpu;
	submit->nr_pmrs = nr_pmrs;
	kref_init(&submit->refcount);

	return submit;
}
/*
 * Resolve the userspace handle array into referenced GEM objects.
 *
 * Validates per-BO flags (and, for softpin submits, the presumed VA) and
 * takes one reference per object. submit->nr_bos is always set to the
 * number of entries successfully referenced, so submit_cleanup() can drop
 * exactly those references even when we bail out part way through.
 */
static int submit_lookup_objects(struct etnaviv_gem_submit *submit,
	struct drm_file *file, struct drm_etnaviv_gem_submit_bo *submit_bos,
	unsigned nr_bos)
{
	struct drm_etnaviv_gem_submit_bo *bo;
	unsigned i;
	int ret = 0;

	spin_lock(&file->table_lock);

	for (i = 0, bo = submit_bos; i < nr_bos; i++, bo++) {
		struct drm_gem_object *obj;

		if (bo->flags & BO_INVALID_FLAGS) {
			DRM_ERROR("invalid flags: %x\n", bo->flags);
			ret = -EINVAL;
			goto out_unlock;
		}

		submit->bos[i].flags = bo->flags;
		if (submit->flags & ETNA_SUBMIT_SOFTPIN) {
			/* softpin VAs below the reserved window are rejected */
			if (bo->presumed < ETNAVIV_SOFTPIN_START_ADDRESS) {
				DRM_ERROR("invalid softpin address\n");
				ret = -EINVAL;
				goto out_unlock;
			}
			submit->bos[i].va = bo->presumed;
		}

		/* normally use drm_gem_object_lookup(), but for bulk lookup
		 * all under single table_lock just hit object_idr directly:
		 */
		obj = idr_find(&file->object_idr, bo->handle);
		if (!obj) {
			DRM_ERROR("invalid handle %u at index %u\n",
				  bo->handle, i);
			ret = -EINVAL;
			goto out_unlock;
		}

		/*
		 * Take a refcount on the object. The file table lock
		 * prevents the object_idr's refcount on this being dropped.
		 */
		drm_gem_object_get(obj);

		submit->bos[i].obj = to_etnaviv_bo(obj);
	}

out_unlock:
	/* record how many references were actually taken (for cleanup) */
	submit->nr_bos = i;
	spin_unlock(&file->table_lock);

	return ret;
}
static void submit_unlock_object(struct etnaviv_gem_submit *submit, int i)
{
if (submit->bos[i].flags & BO_LOCKED) {
struct drm_gem_object *obj = &submit->bos[i].obj->base;
dma_resv_unlock(obj->resv);
submit->bos[i].flags &= ~BO_LOCKED;
}
}
/*
 * Lock the dma_resv of every BO in the submit using the wound/wait
 * acquire context @ticket.
 *
 * On -EDEADLK we lost a lock race: back off (unlock everything taken so
 * far), slow-lock the contended object, and retry the whole loop. The
 * slow_locked index remembers the object locked outside the loop so the
 * failure path can release it if the retry fails before reaching it.
 */
static int submit_lock_objects(struct etnaviv_gem_submit *submit,
		struct ww_acquire_ctx *ticket)
{
	int contended, slow_locked = -1, i, ret = 0;

retry:
	for (i = 0; i < submit->nr_bos; i++) {
		struct drm_gem_object *obj = &submit->bos[i].obj->base;

		/* reached the object we slow-locked; it no longer needs
		 * special-casing in the failure path */
		if (slow_locked == i)
			slow_locked = -1;

		contended = i;

		if (!(submit->bos[i].flags & BO_LOCKED)) {
			ret = dma_resv_lock_interruptible(obj->resv, ticket);
			/* -EALREADY means the same BO appears twice in the list */
			if (ret == -EALREADY)
				DRM_ERROR("BO at index %u already on submit list\n",
					  i);
			if (ret)
				goto fail;
			submit->bos[i].flags |= BO_LOCKED;
		}
	}

	ww_acquire_done(ticket);

	return 0;

fail:
	/* back off: release every lock taken so far */
	for (; i >= 0; i--)
		submit_unlock_object(submit, i);

	if (slow_locked > 0)
		submit_unlock_object(submit, slow_locked);

	if (ret == -EDEADLK) {
		struct drm_gem_object *obj;

		obj = &submit->bos[contended].obj->base;

		/* we lost out in a seqno race, lock and retry.. */
		ret = dma_resv_lock_slow_interruptible(obj->resv, ticket);
		if (!ret) {
			submit->bos[contended].flags |= BO_LOCKED;
			slow_locked = contended;
			goto retry;
		}
	}

	return ret;
}
static int submit_fence_sync(struct etnaviv_gem_submit *submit)
{
int i, ret = 0;
for (i = 0; i < submit->nr_bos; i++) {
struct etnaviv_gem_submit_bo *bo = &submit->bos[i];
struct dma_resv *robj = bo->obj->base.resv;
ret = dma_resv_reserve_fences(robj, 1);
if (ret)
return ret;
if (submit->flags & ETNA_SUBMIT_NO_IMPLICIT)
continue;
ret = drm_sched_job_add_implicit_dependencies(&submit->sched_job,
&bo->obj->base,
bo->flags & ETNA_SUBMIT_BO_WRITE);
if (ret)
return ret;
}
return ret;
}
static void submit_attach_object_fences(struct etnaviv_gem_submit *submit)
{
int i;
for (i = 0; i < submit->nr_bos; i++) {
struct drm_gem_object *obj = &submit->bos[i].obj->base;
bool write = submit->bos[i].flags & ETNA_SUBMIT_BO_WRITE;
dma_resv_add_fence(obj->resv, submit->out_fence, write ?
DMA_RESV_USAGE_WRITE : DMA_RESV_USAGE_READ);
submit_unlock_object(submit, i);
}
}
/*
 * Pin each BO into the submit's MMU context and record the mapping.
 *
 * On failure we simply stop; objects pinned so far keep BO_PINNED set and
 * are unpinned by submit_cleanup(). For softpin submits the IOMMU must
 * hand back exactly the requested VA, otherwise the submit is invalid.
 */
static int submit_pin_objects(struct etnaviv_gem_submit *submit)
{
	int i, ret = 0;

	for (i = 0; i < submit->nr_bos; i++) {
		struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
		struct etnaviv_vram_mapping *mapping;

		mapping = etnaviv_gem_mapping_get(&etnaviv_obj->base,
						  submit->mmu_context,
						  submit->bos[i].va);
		if (IS_ERR(mapping)) {
			ret = PTR_ERR(mapping);
			break;
		}

		/* softpin demands the exact requested address */
		if ((submit->flags & ETNA_SUBMIT_SOFTPIN) &&
		     submit->bos[i].va != mapping->iova) {
			etnaviv_gem_mapping_unreference(mapping);
			return -EINVAL;
		}

		atomic_inc(&etnaviv_obj->gpu_active);

		submit->bos[i].flags |= BO_PINNED;
		submit->bos[i].mapping = mapping;
	}

	return ret;
}
/*
 * Look up BO @idx in the submit's BO table, range-checking the index.
 * On success *@bo points at the entry and 0 is returned; otherwise -EINVAL.
 */
static int submit_bo(struct etnaviv_gem_submit *submit, u32 idx,
	struct etnaviv_gem_submit_bo **bo)
{
	if (idx < submit->nr_bos) {
		*bo = &submit->bos[idx];
		return 0;
	}

	DRM_ERROR("invalid buffer index: %u (out of %u)\n",
		  idx, submit->nr_bos);
	return -EINVAL;
}
/* process the reloc's and patch up the cmdstream as needed: */
/*
 * For each relocation entry, write the pinned IOVA of the referenced BO
 * (plus reloc_offset) into the command stream at submit_offset.
 * Relocations must be 4-byte aligned, strictly increasing in offset, stay
 * within the stream, and point inside the target object.
 */
static int submit_reloc(struct etnaviv_gem_submit *submit, void *stream,
		u32 size, const struct drm_etnaviv_gem_submit_reloc *relocs,
		u32 nr_relocs)
{
	u32 i, last_offset = 0;
	u32 *ptr = stream;
	int ret;

	/* Submits using softpin don't blend with relocs */
	if ((submit->flags & ETNA_SUBMIT_SOFTPIN) && nr_relocs != 0)
		return -EINVAL;

	for (i = 0; i < nr_relocs; i++) {
		const struct drm_etnaviv_gem_submit_reloc *r = relocs + i;
		struct etnaviv_gem_submit_bo *bo;
		u32 off;

		if (unlikely(r->flags)) {
			DRM_ERROR("invalid reloc flags\n");
			return -EINVAL;
		}

		if (r->submit_offset % 4) {
			DRM_ERROR("non-aligned reloc offset: %u\n",
				  r->submit_offset);
			return -EINVAL;
		}

		/* offset in dwords: */
		off = r->submit_offset / 4;

		/* offsets must be inside the stream and monotonically
		 * increasing (each stream dword is patched at most once) */
		if ((off >= size ) ||
				(off < last_offset)) {
			DRM_ERROR("invalid offset %u at reloc %u\n", off, i);
			return -EINVAL;
		}

		ret = submit_bo(submit, r->reloc_idx, &bo);
		if (ret)
			return ret;

		if (r->reloc_offset > bo->obj->base.size - sizeof(*ptr)) {
			DRM_ERROR("relocation %u outside object\n", i);
			return -EINVAL;
		}

		ptr[off] = bo->mapping->iova + r->reloc_offset;

		last_offset = off;
	}

	return 0;
}
/*
 * Validate the perfmon request array against the submit's BO table and the
 * requested exec_state, and copy each request into submit->pmrs. The read
 * BO is vmapped so the sampled counter values can be written back later.
 */
static int submit_perfmon_validate(struct etnaviv_gem_submit *submit,
		u32 exec_state, const struct drm_etnaviv_gem_submit_pmr *pmrs)
{
	u32 i;

	for (i = 0; i < submit->nr_pmrs; i++) {
		const struct drm_etnaviv_gem_submit_pmr *r = pmrs + i;
		struct etnaviv_gem_submit_bo *bo;
		int ret;

		ret = submit_bo(submit, r->read_idx, &bo);
		if (ret)
			return ret;

		/* at offset 0 a sequence number gets stored used for userspace sync */
		if (r->read_offset == 0) {
			DRM_ERROR("perfmon request: offset is 0");
			return -EINVAL;
		}

		if (r->read_offset >= bo->obj->base.size - sizeof(u32)) {
			DRM_ERROR("perfmon request: offset %u outside object", i);
			return -EINVAL;
		}

		if (r->flags & ~(ETNA_PM_PROCESS_PRE | ETNA_PM_PROCESS_POST)) {
			DRM_ERROR("perfmon request: flags are not valid");
			return -EINVAL;
		}

		if (etnaviv_pm_req_validate(r, exec_state)) {
			DRM_ERROR("perfmon request: domain or signal not valid");
			return -EINVAL;
		}

		submit->pmrs[i].flags = r->flags;
		submit->pmrs[i].domain = r->domain;
		submit->pmrs[i].signal = r->signal;
		submit->pmrs[i].sequence = r->sequence;
		submit->pmrs[i].offset = r->read_offset;
		/* kernel mapping used to store counter results at read_offset */
		submit->pmrs[i].bo_vma = etnaviv_gem_vmap(&bo->obj->base);
	}

	return 0;
}
/*
 * Final kref release: tear down everything a submit may own, in a form
 * safe for both fully-submitted jobs and partially-constructed ones that
 * failed mid-ioctl (unpin, unlock, drop BO refs, release contexts, and
 * remove the out-fence from the user fence lookup before dropping it).
 */
static void submit_cleanup(struct kref *kref)
{
	struct etnaviv_gem_submit *submit =
			container_of(kref, struct etnaviv_gem_submit, refcount);
	unsigned i;

	if (submit->cmdbuf.suballoc)
		etnaviv_cmdbuf_free(&submit->cmdbuf);

	if (submit->mmu_context)
		etnaviv_iommu_context_put(submit->mmu_context);

	if (submit->prev_mmu_context)
		etnaviv_iommu_context_put(submit->prev_mmu_context);

	for (i = 0; i < submit->nr_bos; i++) {
		struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;

		/* unpin all objects */
		if (submit->bos[i].flags & BO_PINNED) {
			etnaviv_gem_mapping_unreference(submit->bos[i].mapping);
			atomic_dec(&etnaviv_obj->gpu_active);
			submit->bos[i].mapping = NULL;
			submit->bos[i].flags &= ~BO_PINNED;
		}

		/* if the GPU submit failed, objects might still be locked */
		submit_unlock_object(submit, i);
		drm_gem_object_put(&etnaviv_obj->base);
	}

	/* wake anyone waiting for a free fence/event slot */
	wake_up_all(&submit->gpu->fence_event);

	if (submit->out_fence) {
		/*
		 * Remove from user fence array before dropping the reference,
		 * so fence can not be found in lookup anymore.
		 */
		xa_erase(&submit->gpu->user_fences, submit->out_fence_id);
		dma_fence_put(submit->out_fence);
	}

	put_pid(submit->pid);

	kfree(submit->pmrs);
	kfree(submit);
}
/* Drop a submit reference; the last put frees it via submit_cleanup(). */
void etnaviv_submit_put(struct etnaviv_gem_submit *submit)
{
	kref_put(&submit->refcount, submit_cleanup);
}
/*
 * ETNAVIV_GEM_SUBMIT ioctl: validate a userspace command submission, copy
 * all user arrays into the kernel, build an etnaviv_gem_submit and hand it
 * to the scheduler.
 *
 * Note the error-label scheme: the success path intentionally falls
 * through the err_submit_* labels with ret == 0, so the common teardown
 * (dropping the ioctl's submit reference, ww ticket fini, freeing the
 * copied arrays) runs on both success and failure.
 */
int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct etnaviv_file_private *ctx = file->driver_priv;
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct drm_etnaviv_gem_submit *args = data;
	struct drm_etnaviv_gem_submit_reloc *relocs;
	struct drm_etnaviv_gem_submit_pmr *pmrs;
	struct drm_etnaviv_gem_submit_bo *bos;
	struct etnaviv_gem_submit *submit;
	struct etnaviv_gpu *gpu;
	struct sync_file *sync_file = NULL;
	struct ww_acquire_ctx ticket;
	int out_fence_fd = -1;
	struct pid *pid = get_pid(task_pid(current));
	void *stream;
	int ret;

	/* --- argument validation, no resources held yet --- */

	if (args->pipe >= ETNA_MAX_PIPES)
		return -EINVAL;

	gpu = priv->gpu[args->pipe];
	if (!gpu)
		return -ENXIO;

	if (args->stream_size % 4) {
		DRM_ERROR("non-aligned cmdstream buffer size: %u\n",
			  args->stream_size);
		return -EINVAL;
	}

	if (args->exec_state != ETNA_PIPE_3D &&
	    args->exec_state != ETNA_PIPE_2D &&
	    args->exec_state != ETNA_PIPE_VG) {
		DRM_ERROR("invalid exec_state: 0x%x\n", args->exec_state);
		return -EINVAL;
	}

	if (args->flags & ~ETNA_SUBMIT_FLAGS) {
		DRM_ERROR("invalid flags: 0x%x\n", args->flags);
		return -EINVAL;
	}

	if ((args->flags & ETNA_SUBMIT_SOFTPIN) &&
	    priv->mmu_global->version != ETNAVIV_IOMMU_V2) {
		DRM_ERROR("softpin requested on incompatible MMU\n");
		return -EINVAL;
	}

	/* cap the array sizes so a hostile caller can't force huge allocations */
	if (args->stream_size > SZ_128K || args->nr_relocs > SZ_128K ||
	    args->nr_bos > SZ_128K || args->nr_pmrs > 128) {
		DRM_ERROR("submit arguments out of size limits\n");
		return -EINVAL;
	}

	/*
	 * Copy the command submission and bo array to kernel space in
	 * one go, and do this outside of any locks.
	 */
	bos = kvmalloc_array(args->nr_bos, sizeof(*bos), GFP_KERNEL);
	relocs = kvmalloc_array(args->nr_relocs, sizeof(*relocs), GFP_KERNEL);
	pmrs = kvmalloc_array(args->nr_pmrs, sizeof(*pmrs), GFP_KERNEL);
	stream = kvmalloc_array(1, args->stream_size, GFP_KERNEL);
	if (!bos || !relocs || !pmrs || !stream) {
		ret = -ENOMEM;
		goto err_submit_cmds;
	}

	ret = copy_from_user(bos, u64_to_user_ptr(args->bos),
			     args->nr_bos * sizeof(*bos));
	if (ret) {
		ret = -EFAULT;
		goto err_submit_cmds;
	}

	ret = copy_from_user(relocs, u64_to_user_ptr(args->relocs),
			     args->nr_relocs * sizeof(*relocs));
	if (ret) {
		ret = -EFAULT;
		goto err_submit_cmds;
	}

	ret = copy_from_user(pmrs, u64_to_user_ptr(args->pmrs),
			     args->nr_pmrs * sizeof(*pmrs));
	if (ret) {
		ret = -EFAULT;
		goto err_submit_cmds;
	}

	ret = copy_from_user(stream, u64_to_user_ptr(args->stream),
			     args->stream_size);
	if (ret) {
		ret = -EFAULT;
		goto err_submit_cmds;
	}

	/* reserve the out-fence fd before committing to anything else */
	if (args->flags & ETNA_SUBMIT_FENCE_FD_OUT) {
		out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
		if (out_fence_fd < 0) {
			ret = out_fence_fd;
			goto err_submit_cmds;
		}
	}

	ww_acquire_init(&ticket, &reservation_ww_class);

	submit = submit_create(dev, gpu, args->nr_bos, args->nr_pmrs);
	if (!submit) {
		ret = -ENOMEM;
		goto err_submit_ww_acquire;
	}

	submit->pid = pid;

	ret = etnaviv_cmdbuf_init(priv->cmdbuf_suballoc, &submit->cmdbuf,
				  ALIGN(args->stream_size, 8) + 8);
	if (ret)
		goto err_submit_put;

	submit->ctx = file->driver_priv;
	submit->mmu_context = etnaviv_iommu_context_get(submit->ctx->mmu);
	submit->exec_state = args->exec_state;
	submit->flags = args->flags;

	ret = drm_sched_job_init(&submit->sched_job,
				 &ctx->sched_entity[args->pipe],
				 submit->ctx);
	if (ret)
		goto err_submit_put;

	ret = submit_lookup_objects(submit, file, bos, args->nr_bos);
	if (ret)
		goto err_submit_job;

	/* without a secure MMU the command stream must pass validation */
	if ((priv->mmu_global->version != ETNAVIV_IOMMU_V2) &&
	    !etnaviv_cmd_validate_one(gpu, stream, args->stream_size / 4,
				      relocs, args->nr_relocs)) {
		ret = -EINVAL;
		goto err_submit_job;
	}

	if (args->flags & ETNA_SUBMIT_FENCE_FD_IN) {
		struct dma_fence *in_fence = sync_file_get_fence(args->fence_fd);
		if (!in_fence) {
			ret = -EINVAL;
			goto err_submit_job;
		}

		ret = drm_sched_job_add_dependency(&submit->sched_job,
						   in_fence);
		if (ret)
			goto err_submit_job;
	}

	ret = submit_pin_objects(submit);
	if (ret)
		goto err_submit_job;

	ret = submit_reloc(submit, stream, args->stream_size / 4,
			   relocs, args->nr_relocs);
	if (ret)
		goto err_submit_job;

	ret = submit_perfmon_validate(submit, args->exec_state, pmrs);
	if (ret)
		goto err_submit_job;

	/* stream is fully patched; copy it into the GPU-visible cmdbuf */
	memcpy(submit->cmdbuf.vaddr, stream, args->stream_size);

	ret = submit_lock_objects(submit, &ticket);
	if (ret)
		goto err_submit_job;

	ret = submit_fence_sync(submit);
	if (ret)
		goto err_submit_job;

	ret = etnaviv_sched_push_job(submit);
	if (ret)
		goto err_submit_job;

	submit_attach_object_fences(submit);

	if (args->flags & ETNA_SUBMIT_FENCE_FD_OUT) {
		/*
		 * This can be improved: ideally we want to allocate the sync
		 * file before kicking off the GPU job and just attach the
		 * fence to the sync file here, eliminating the ENOMEM
		 * possibility at this stage.
		 */
		sync_file = sync_file_create(submit->out_fence);
		if (!sync_file) {
			ret = -ENOMEM;
			/*
			 * When this late error is hit, the submit has already
			 * been handed over to the scheduler. At this point
			 * the sched_job must not be cleaned up.
			 */
			goto err_submit_put;
		}
		fd_install(out_fence_fd, sync_file->file);
	}

	args->fence_fd = out_fence_fd;
	args->fence = submit->out_fence_id;

err_submit_job:
	if (ret)
		drm_sched_job_cleanup(&submit->sched_job);
err_submit_put:
	etnaviv_submit_put(submit);

err_submit_ww_acquire:
	ww_acquire_fini(&ticket);

err_submit_cmds:
	if (ret && (out_fence_fd >= 0))
		put_unused_fd(out_fence_fd);
	kvfree(stream);
	kvfree(bos);
	kvfree(relocs);
	kvfree(pmrs);

	return ret;
}
| linux-master | drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2012 Red Hat
*/
#include <linux/module.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_generic.h>
#include <drm/drm_file.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_modeset_helper.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_print.h>
#include "udl_drv.h"
/*
 * USB suspend callback: suspend the modesetting pipeline, then wait for
 * all in-flight URBs to complete before the device is put to sleep.
 */
static int udl_usb_suspend(struct usb_interface *interface,
			   pm_message_t message)
{
	struct drm_device *dev = usb_get_intfdata(interface);
	int ret = drm_mode_config_helper_suspend(dev);

	if (!ret)
		udl_sync_pending_urbs(dev);

	return ret;
}
/* USB resume callback: restore the modesetting state saved at suspend. */
static int udl_usb_resume(struct usb_interface *interface)
{
	struct drm_device *dev = usb_get_intfdata(interface);

	return drm_mode_config_helper_resume(dev);
}

/*
 * Resume after a USB reset: the device lost its channel selection, so
 * re-select the standard channel before restoring the mode config.
 */
static int udl_usb_reset_resume(struct usb_interface *interface)
{
	struct drm_device *dev = usb_get_intfdata(interface);
	struct udl_device *udl = to_udl(dev);

	udl_select_std_channel(udl);

	return drm_mode_config_helper_resume(dev);
}
/*
* FIXME: Dma-buf sharing requires DMA support by the importing device.
* This function is a workaround to make USB devices work as well.
* See todo.rst for how to fix the issue in the dma-buf framework.
*/
/*
 * PRIME import hook: import via the USB interface's DMA-capable parent
 * device (see the FIXME above); fails if no such device was found at init.
 */
static struct drm_gem_object *udl_driver_gem_prime_import(struct drm_device *dev,
							  struct dma_buf *dma_buf)
{
	struct udl_device *udl = to_udl(dev);

	if (!udl->dmadev)
		return ERR_PTR(-ENODEV);

	return drm_gem_prime_import_dev(dev, dma_buf, udl->dmadev);
}
/* Standard file operations for a GEM-based DRM driver. */
DEFINE_DRM_GEM_FOPS(udl_driver_fops);

/* DRM driver description: atomic modesetting with shmem-backed GEM objects. */
static const struct drm_driver driver = {
	.driver_features = DRIVER_ATOMIC | DRIVER_GEM | DRIVER_MODESET,

	/* GEM hooks */
	.fops = &udl_driver_fops,
	DRM_GEM_SHMEM_DRIVER_OPS,
	.gem_prime_import = udl_driver_gem_prime_import,

	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
};
/*
 * Allocate a device-managed udl_device for @interface, initialise the
 * hardware, and stash the device as interface driver data.
 * Returns the device or an ERR_PTR on failure.
 */
static struct udl_device *udl_driver_create(struct usb_interface *interface)
{
	struct udl_device *udl;
	int ret;

	udl = devm_drm_dev_alloc(&interface->dev, &driver,
				 struct udl_device, drm);
	if (IS_ERR(udl))
		return udl;

	ret = udl_init(udl);
	if (ret)
		return ERR_PTR(ret);

	usb_set_intfdata(interface, udl);

	return udl;
}
/* USB probe: create the udl device, register the DRM device, set up fbdev. */
static int udl_usb_probe(struct usb_interface *interface,
			 const struct usb_device_id *id)
{
	struct udl_device *udl;
	int ret;

	udl = udl_driver_create(interface);
	if (IS_ERR(udl))
		return PTR_ERR(udl);

	ret = drm_dev_register(&udl->drm, 0);
	if (ret)
		return ret;

	DRM_INFO("Initialized udl on minor %d\n", udl->drm.primary->index);

	drm_fbdev_generic_setup(&udl->drm, 0);

	return 0;
}
/*
 * USB disconnect: stop output polling and release the URB pool before
 * marking the DRM device unplugged (in-flight ioctls then start failing).
 */
static void udl_usb_disconnect(struct usb_interface *interface)
{
	struct drm_device *dev = usb_get_intfdata(interface);

	drm_kms_helper_poll_fini(dev);
	udl_drop_usb(dev);
	drm_dev_unplug(dev);
}
/*
* There are many DisplayLink-based graphics products, all with unique PIDs.
* So we match on DisplayLink's VID + Vendor-Defined Interface Class (0xff)
* We also require a match on SubClass (0x00) and Protocol (0x00),
* which is compatible with all known USB 2.0 era graphics chips and firmware,
* but allows DisplayLink to increment those for any future incompatible chips
*/
/* Match DisplayLink VID 0x17e9 with vendor class/subclass/protocol ff/00/00. */
static const struct usb_device_id id_table[] = {
	{.idVendor = 0x17e9, .bInterfaceClass = 0xff,
	 .bInterfaceSubClass = 0x00,
	 .bInterfaceProtocol = 0x00,
	 .match_flags = USB_DEVICE_ID_MATCH_VENDOR |
			USB_DEVICE_ID_MATCH_INT_CLASS |
			USB_DEVICE_ID_MATCH_INT_SUBCLASS |
			USB_DEVICE_ID_MATCH_INT_PROTOCOL,},
	{},
};
MODULE_DEVICE_TABLE(usb, id_table);

/* USB driver glue tying the probe/disconnect/PM callbacks to the ID table. */
static struct usb_driver udl_driver = {
	.name = "udl",
	.probe = udl_usb_probe,
	.disconnect = udl_usb_disconnect,
	.suspend = udl_usb_suspend,
	.resume = udl_usb_resume,
	.reset_resume = udl_usb_reset_resume,
	.id_table = id_table,
};
MODULE_LICENSE("GPL");
| linux-master | drivers/gpu/drm/udl/udl_drv.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2012 Red Hat
*
* based in parts on udlfb.c:
* Copyright (C) 2009 Roberto De Ioris <[email protected]>
* Copyright (C) 2009 Jaya Kumar <[email protected]>
* Copyright (C) 2009 Bernie Thompson <[email protected]>
*/
#include <drm/drm.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include "udl_drv.h"
/* -BULK_SIZE as per usb-skeleton. Can we get full page and avoid overhead? */
#define BULK_SIZE 512
#define NR_USB_REQUEST_CHANNEL 0x12
#define MAX_TRANSFER (PAGE_SIZE*16 - BULK_SIZE)
#define WRITES_IN_FLIGHT (20)
#define MAX_VENDOR_DESCRIPTOR_SIZE 256
static struct urb *udl_get_urb_locked(struct udl_device *udl, long timeout);
/*
 * Read and parse the DisplayLink vendor-specific descriptor to extract the
 * chip's pixel-mode limit (sku_pixel_limit).
 *
 * Returns true in all cases except buffer-allocation failure: an
 * unrecognized (or unreadable) descriptor is logged but does not prevent
 * the driver from loading.
 *
 * Fixes over the previous version:
 *  - total_len was a u8, so a negative errno from usb_get_descriptor()
 *    (e.g. -EPIPE = -32 -> 224) looked like a large valid length and the
 *    zeroed buffer was "parsed"; use int and bail out on error.
 *  - the key/length walk could read up to 3 bytes past desc_end (and the
 *    4-byte max_area payload even further); bound both reads.
 */
static int udl_parse_vendor_descriptor(struct udl_device *udl)
{
	struct usb_device *udev = udl_to_usb_device(udl);
	char *desc;
	char *buf;
	char *desc_end;
	int total_len;

	buf = kzalloc(MAX_VENDOR_DESCRIPTOR_SIZE, GFP_KERNEL);
	if (!buf)
		return false;
	desc = buf;

	total_len = usb_get_descriptor(udev, 0x5f, /* vendor specific */
				       0, desc, MAX_VENDOR_DESCRIPTOR_SIZE);

	/* negative errno means the descriptor could not be read at all */
	if (total_len < 0)
		goto unrecognized;

	if (total_len > 5) {
		DRM_INFO("vendor descriptor length:%x data:%11ph\n",
			 total_len, desc);

		if ((desc[0] != total_len) || /* descriptor length */
		    (desc[1] != 0x5f) ||   /* vendor descriptor type */
		    (desc[2] != 0x01) ||   /* version (2 bytes) */
		    (desc[3] != 0x00) ||
		    (desc[4] != total_len - 2)) /* length after type */
			goto unrecognized;

		desc_end = desc + total_len;
		desc += 5; /* the fixed header we've already parsed */

		/* each record is a 2-byte key plus a 1-byte payload length */
		while (desc_end - desc >= 3) {
			u8 length;
			u16 key;

			key = le16_to_cpu(*((u16 *) desc));
			desc += sizeof(u16);

			length = *desc;
			desc++;

			switch (key) {
			case 0x0200: /* max_area */
				if (length >= 4 && desc_end - desc >= 4) {
					u32 max_area;

					max_area = le32_to_cpu(*((u32 *)desc));
					DRM_DEBUG("DL chip limited to %d pixel modes\n",
						  max_area);
					udl->sku_pixel_limit = max_area;
				}
				break;
			default:
				break;
			}
			desc += length;
		}
	}

	goto success;

unrecognized:
	/* allow udlfb to load for now even if firmware unrecognized */
	DRM_ERROR("Unrecognized vendor firmware descriptor\n");

success:
	kfree(buf);
	return true;
}
/*
* Need to ensure a channel is selected before submitting URBs
*/
/*
 * Select the standard channel on the device by sending the fixed key via
 * a vendor control request. Returns 0 on success or a negative errno.
 */
int udl_select_std_channel(struct udl_device *udl)
{
	static const u8 set_def_chn[] = {0x57, 0xCD, 0xDC, 0xA7,
					 0x1C, 0x88, 0x5E, 0x15,
					 0x60, 0xFE, 0xC6, 0x97,
					 0x16, 0x3D, 0x47, 0xF2};
	struct usb_device *udev = udl_to_usb_device(udl);
	void *sendbuf;
	int ret;

	/* usb_control_msg() needs a heap buffer, not the static table */
	sendbuf = kmemdup(set_def_chn, sizeof(set_def_chn), GFP_KERNEL);
	if (!sendbuf)
		return -ENOMEM;

	ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
			      NR_USB_REQUEST_CHANNEL,
			      (USB_DIR_OUT | USB_TYPE_VENDOR), 0, 0,
			      sendbuf, sizeof(set_def_chn),
			      USB_CTRL_SET_TIMEOUT);

	kfree(sendbuf);

	/* a non-negative return is the transferred length, i.e. success */
	if (ret < 0)
		return ret;
	return 0;
}
/*
 * URB completion handler (may run in interrupt context): log unexpected
 * errors, reset the transfer length, return the URB to the free list and
 * wake any waiter in udl_get_urb_locked()/udl_sync_pending_urbs().
 */
void udl_urb_completion(struct urb *urb)
{
	struct urb_node *unode = urb->context;
	struct udl_device *udl = unode->dev;
	unsigned long flags;

	/* sync/async unlink faults aren't errors */
	if (urb->status) {
		if (!(urb->status == -ENOENT ||
		    urb->status == -ECONNRESET ||
		    urb->status == -EPROTO ||
		    urb->status == -ESHUTDOWN)) {
			DRM_ERROR("%s - nonzero write bulk status received: %d\n",
				__func__, urb->status);
		}
	}

	urb->transfer_buffer_length = udl->urbs.size; /* reset to actual */

	spin_lock_irqsave(&udl->urbs.lock, flags);
	list_add_tail(&unode->entry, &udl->urbs.list);
	udl->urbs.available++;
	spin_unlock_irqrestore(&udl->urbs.lock, flags);

	wake_up(&udl->urbs.sleep);
}
/*
 * Tear down the URB pool: reclaim each URB as it completes (waiting
 * indefinitely if needed) and free its coherent buffer, the URB itself
 * and its list node. Finally wake anyone still sleeping on the pool.
 */
static void udl_free_urb_list(struct drm_device *dev)
{
	struct udl_device *udl = to_udl(dev);
	struct urb_node *unode;
	struct urb *urb;

	DRM_DEBUG("Waiting for completes and freeing all render urbs\n");

	/* keep waiting and freeing, until we've got 'em all */
	while (udl->urbs.count) {
		spin_lock_irq(&udl->urbs.lock);
		urb = udl_get_urb_locked(udl, MAX_SCHEDULE_TIMEOUT);
		udl->urbs.count--;
		spin_unlock_irq(&udl->urbs.lock);
		if (WARN_ON(!urb))
			break;
		unode = urb->context;
		/* Free each separately allocated piece */
		usb_free_coherent(urb->dev, udl->urbs.size,
				  urb->transfer_buffer, urb->transfer_dma);
		usb_free_urb(urb);
		kfree(unode);
	}

	/* unblock waiters so they can observe the now-empty pool */
	wake_up_all(&udl->urbs.sleep);
}
/*
 * Build the pool of bulk-out URBs used for rendering: @count URBs of
 * @size bytes each, with USB-coherent transfer buffers.
 *
 * If a coherent buffer allocation fails and the per-URB size is still
 * larger than a page, the pool is freed and rebuilt with half the size
 * (keeping the same total wanted_size), trading URB size for count.
 * Returns the number of URBs actually allocated (0 on total failure).
 */
static int udl_alloc_urb_list(struct drm_device *dev, int count, size_t size)
{
	struct udl_device *udl = to_udl(dev);
	struct urb *urb;
	struct urb_node *unode;
	char *buf;
	size_t wanted_size = count * size;
	struct usb_device *udev = udl_to_usb_device(udl);

	spin_lock_init(&udl->urbs.lock);
	INIT_LIST_HEAD(&udl->urbs.list);
	init_waitqueue_head(&udl->urbs.sleep);
	udl->urbs.count = 0;
	udl->urbs.available = 0;

retry:
	udl->urbs.size = size;

	while (udl->urbs.count * size < wanted_size) {
		unode = kzalloc(sizeof(struct urb_node), GFP_KERNEL);
		if (!unode)
			break;
		unode->dev = udl;

		urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!urb) {
			kfree(unode);
			break;
		}
		unode->urb = urb;

		buf = usb_alloc_coherent(udev, size, GFP_KERNEL,
					 &urb->transfer_dma);
		if (!buf) {
			kfree(unode);
			usb_free_urb(urb);
			/* retry with smaller URBs if memory is fragmented */
			if (size > PAGE_SIZE) {
				size /= 2;
				udl_free_urb_list(dev);
				goto retry;
			}
			break;
		}

		/* urb->transfer_buffer_length set to actual before submit */
		usb_fill_bulk_urb(urb, udev, usb_sndbulkpipe(udev, 1),
				  buf, size, udl_urb_completion, unode);
		urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;

		list_add_tail(&unode->entry, &udl->urbs.list);

		udl->urbs.count++;
		udl->urbs.available++;
	}

	DRM_DEBUG("allocated %d %d byte urbs\n", udl->urbs.count, (int) size);

	return udl->urbs.count;
}
/*
 * Take a free URB from the pool, waiting up to @timeout for one to be
 * returned by a completion. Must be called with urbs.lock held; the lock
 * is dropped and re-taken while sleeping (wait_event_lock_irq_timeout).
 * Returns NULL on timeout or if the pool has been torn down (count == 0).
 */
static struct urb *udl_get_urb_locked(struct udl_device *udl, long timeout)
{
	struct urb_node *unode;

	assert_spin_locked(&udl->urbs.lock);

	/* Wait for an in-flight buffer to complete and get re-queued */
	if (!wait_event_lock_irq_timeout(udl->urbs.sleep,
					 !udl->urbs.count ||
					 !list_empty(&udl->urbs.list),
					 udl->urbs.lock, timeout)) {
		DRM_INFO("wait for urb interrupted: available: %d\n",
			 udl->urbs.available);
		return NULL;
	}

	/* pool is being destroyed */
	if (!udl->urbs.count)
		return NULL;

	unode = list_first_entry(&udl->urbs.list, struct urb_node, entry);
	list_del_init(&unode->entry);
	udl->urbs.available--;

	return unode->urb;
}
#define GET_URB_TIMEOUT HZ
/*
 * Grab a free URB from the pool, waiting up to GET_URB_TIMEOUT.
 * Returns NULL if none became available in time.
 */
struct urb *udl_get_urb(struct drm_device *dev)
{
	struct udl_device *udl = to_udl(dev);
	struct urb *urb;

	spin_lock_irq(&udl->urbs.lock);
	urb = udl_get_urb_locked(udl, GET_URB_TIMEOUT);
	spin_unlock_irq(&udl->urbs.lock);
	return urb;
}
/*
 * Submit @urb with @len bytes of payload. On any failure the URB is
 * recycled back into the pool via udl_urb_completion(), so the caller
 * never owns it after this call.
 */
int udl_submit_urb(struct drm_device *dev, struct urb *urb, size_t len)
{
	struct udl_device *udl = to_udl(dev);
	int ret;

	/* payload must fit in the buffer allocated for this pool */
	if (WARN_ON(len > udl->urbs.size)) {
		ret = -EINVAL;
		goto error;
	}
	urb->transfer_buffer_length = len; /* set to actual payload len */
	ret = usb_submit_urb(urb, GFP_ATOMIC);
 error:
	if (ret) {
		udl_urb_completion(urb); /* because no one else will */
		DRM_ERROR("usb_submit_urb error %x\n", ret);
	}

	return ret;
}
/* wait until all pending URBs have been processed */
void udl_sync_pending_urbs(struct drm_device *dev)
{
	struct udl_device *udl = to_udl(dev);

	spin_lock_irq(&udl->urbs.lock);
	/* 2 seconds as a sane timeout */
	if (!wait_event_lock_irq_timeout(udl->urbs.sleep,
					 udl->urbs.available == udl->urbs.count,
					 udl->urbs.lock,
					 msecs_to_jiffies(2000)))
		drm_err(dev, "Timeout for syncing pending URBs\n");
	spin_unlock_irq(&udl->urbs.lock);
}
/*
 * One-time device initialisation: acquire the DMA-capable parent device
 * (optional, only needed for PRIME), read the vendor descriptor, select
 * the standard channel, build the URB pool and set up modesetting.
 * On failure the URB pool (if built) and the dmadev reference are released.
 */
int udl_init(struct udl_device *udl)
{
	struct drm_device *dev = &udl->drm;
	int ret = -ENOMEM;

	DRM_DEBUG("\n");

	udl->dmadev = usb_intf_get_dma_device(to_usb_interface(dev->dev));
	if (!udl->dmadev)
		drm_warn(dev, "buffer sharing not supported"); /* not an error */

	mutex_init(&udl->gem_lock);

	if (!udl_parse_vendor_descriptor(udl)) {
		ret = -ENODEV;
		DRM_ERROR("firmware not recognized. Assume incompatible device\n");
		goto err;
	}

	if (udl_select_std_channel(udl))
		DRM_ERROR("Selecting channel failed\n");

	if (!udl_alloc_urb_list(dev, WRITES_IN_FLIGHT, MAX_TRANSFER)) {
		DRM_ERROR("udl_alloc_urb_list failed\n");
		goto err;
	}

	DRM_DEBUG("\n");
	ret = udl_modeset_init(dev);
	if (ret)
		goto err;

	drm_kms_helper_poll_init(dev);

	return 0;

err:
	if (udl->urbs.count)
		udl_free_urb_list(dev);
	put_device(udl->dmadev); /* put_device(NULL) is a no-op */
	DRM_ERROR("%d\n", ret);
	return ret;
}
/* Release all USB resources (URB pool and DMA device ref) on disconnect. */
int udl_drop_usb(struct drm_device *dev)
{
	struct udl_device *udl = to_udl(dev);

	udl_free_urb_list(dev);
	put_device(udl->dmadev);
	udl->dmadev = NULL;

	return 0;
}
| linux-master | drivers/gpu/drm/udl/udl_main.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2012 Red Hat
*
* based in parts on udlfb.c:
* Copyright (C) 2009 Roberto De Ioris <[email protected]>
* Copyright (C) 2009 Jaya Kumar <[email protected]>
* Copyright (C) 2009 Bernie Thompson <[email protected]>
*/
#include <linux/bitfield.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_edid.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
#include "udl_drv.h"
#include "udl_proto.h"
/*
* All DisplayLink bulk operations start with 0xaf (UDL_MSG_BULK), followed by
* a specific command code. All operations are written to a command buffer, which
* the driver sends to the device.
*/
/*
 * Append a 4-byte "write register" bulk command (0xaf 0x20 reg val) to the
 * command buffer and return the advanced write pointer.
 */
static char *udl_set_register(char *buf, u8 reg, u8 val)
{
	buf[0] = UDL_MSG_BULK;
	buf[1] = UDL_CMD_WRITEREG;
	buf[2] = reg;
	buf[3] = val;

	return buf + 4;
}
/* Write UDL_VIDREG_LOCK to the video-register control register. */
static char *udl_vidreg_lock(char *buf)
{
	return udl_set_register(buf, UDL_REG_VIDREG, UDL_VIDREG_LOCK);
}

/* Write UDL_VIDREG_UNLOCK to the video-register control register. */
static char *udl_vidreg_unlock(char *buf)
{
	return udl_set_register(buf, UDL_REG_VIDREG, UDL_VIDREG_UNLOCK);
}

/* Set the blanking mode register (on/blank/powerdown selection). */
static char *udl_set_blank_mode(char *buf, u8 mode)
{
	return udl_set_register(buf, UDL_REG_BLANKMODE, mode);
}

/* Select the framebuffer color depth register value. */
static char *udl_set_color_depth(char *buf, u8 selection)
{
	return udl_set_register(buf, UDL_REG_COLORDEPTH, selection);
}
/*
 * Program the 24-bit base address of the 16bpp framebuffer, high byte
 * first (register 0x20 holds the most significant byte).
 */
static char *udl_set_base16bpp(char *buf, u32 base)
{
	buf = udl_set_register(buf, UDL_REG_BASE16BPP_ADDR2,
			       FIELD_GET(UDL_BASE_ADDR2_MASK, base));
	buf = udl_set_register(buf, UDL_REG_BASE16BPP_ADDR1,
			       FIELD_GET(UDL_BASE_ADDR1_MASK, base));
	return udl_set_register(buf, UDL_REG_BASE16BPP_ADDR0,
				FIELD_GET(UDL_BASE_ADDR0_MASK, base));
}
/*
 * DisplayLink HW has separate 16bpp and 8bpp framebuffers. In 24bpp modes,
 * the low-order 3-2-3 bits of each RGB pixel (the residual bits not held
 * by the RGB565 surface) go in the 8bpp framebuffer.
 */
/*
 * Program the 24-bit base address of the 8bpp framebuffer, high byte
 * first (register 0x26 holds the most significant byte).
 */
static char *udl_set_base8bpp(char *buf, u32 base)
{
	buf = udl_set_register(buf, UDL_REG_BASE8BPP_ADDR2,
			       FIELD_GET(UDL_BASE_ADDR2_MASK, base));
	buf = udl_set_register(buf, UDL_REG_BASE8BPP_ADDR1,
			       FIELD_GET(UDL_BASE_ADDR1_MASK, base));
	return udl_set_register(buf, UDL_REG_BASE8BPP_ADDR0,
				FIELD_GET(UDL_BASE_ADDR0_MASK, base));
}
/* Write a 16-bit value into two consecutive registers, high byte first. */
static char *udl_set_register_16(char *wrptr, u8 reg, u16 value)
{
	wrptr = udl_set_register(wrptr, reg, (value >> 8) & 0xff);
	wrptr = udl_set_register(wrptr, reg + 1, value & 0xff);
	return wrptr;
}
/*
* This is kind of weird because the controller takes some
* register values in a different byte order than other registers.
*/
static char *udl_set_register_16be(char *wrptr, u8 reg, u16 value)
{
	/* low byte goes into @reg, high byte into @reg + 1 */
	wrptr = udl_set_register(wrptr, reg, value);
	return udl_set_register(wrptr, reg+1, value >> 8);
}
/*
* LFSR is linear feedback shift register. The reason we have this is
* because the display controller needs to minimize the clock depth of
* various counters used in the display path. So this code reverses the
* provided value into the lfsr16 value by counting backwards to get
* the value that needs to be set in the hardware comparator to get the
* same actual count. This makes sense once you read above a couple of
* times and think about it from a hardware perspective.
*/
/*
 * Convert a plain counter value into the equivalent LFSR16 comparator
 * value by stepping the hardware's 16-bit LFSR @actual_count times from
 * its 0xFFFF seed (see the explanation above).
 */
static u16 udl_lfsr16(u16 actual_count)
{
	u32 lv = 0xFFFF; /* This is the lfsr value that the hw starts with */

	while (actual_count) {
		u32 feedback = ((lv >> 15) ^ (lv >> 4) ^ (lv >> 2) ^ (lv >> 1)) & 1;

		lv = ((lv << 1) | feedback) & 0xFFFF;
		actual_count--;
	}

	return (u16)lv;
}
/*
* This does LFSR conversion on the value that is to be written.
* See LFSR explanation above for more detail.
*/
static char *udl_set_register_lfsr16(char *wrptr, u8 reg, u16 value)
{
	/* convert the counter to its LFSR16 equivalent before writing */
	return udl_set_register_16(wrptr, reg, udl_lfsr16(value));
}
/*
* Takes a DRM display mode and converts it into the DisplayLink
* equivalent register commands.
*/
/*
 * Takes a DRM display mode and converts it into the DisplayLink
 * equivalent register commands. Horizontal/vertical counters are written
 * in LFSR16 form; hsync start / vsync start use fixed values matching
 * libdlo. Pixel clock is programmed in units of 5 kHz.
 */
static char *udl_set_display_mode(char *buf, struct drm_display_mode *mode)
{
	u16 reg01 = mode->crtc_htotal - mode->crtc_hsync_start;
	u16 reg03 = reg01 + mode->crtc_hdisplay;
	u16 reg05 = mode->crtc_vtotal - mode->crtc_vsync_start;
	u16 reg07 = reg05 + mode->crtc_vdisplay;
	u16 reg09 = mode->crtc_htotal - 1;
	u16 reg0b = 1; /* libdlo hardcodes hsync start to 1 */
	u16 reg0d = mode->crtc_hsync_end - mode->crtc_hsync_start + 1;
	u16 reg0f = mode->hdisplay;
	u16 reg11 = mode->crtc_vtotal;
	u16 reg13 = 0; /* libdlo hardcodes vsync start to 0 */
	u16 reg15 = mode->crtc_vsync_end - mode->crtc_vsync_start;
	u16 reg17 = mode->crtc_vdisplay;
	u16 reg1b = mode->clock / 5; /* pixel clock in 5 kHz units */

	buf = udl_set_register_lfsr16(buf, UDL_REG_XDISPLAYSTART, reg01);
	buf = udl_set_register_lfsr16(buf, UDL_REG_XDISPLAYEND, reg03);
	buf = udl_set_register_lfsr16(buf, UDL_REG_YDISPLAYSTART, reg05);
	buf = udl_set_register_lfsr16(buf, UDL_REG_YDISPLAYEND, reg07);
	buf = udl_set_register_lfsr16(buf, UDL_REG_XENDCOUNT, reg09);
	buf = udl_set_register_lfsr16(buf, UDL_REG_HSYNCSTART, reg0b);
	buf = udl_set_register_lfsr16(buf, UDL_REG_HSYNCEND, reg0d);
	buf = udl_set_register_16(buf, UDL_REG_HPIXELS, reg0f);
	buf = udl_set_register_lfsr16(buf, UDL_REG_YENDCOUNT, reg11);
	buf = udl_set_register_lfsr16(buf, UDL_REG_VSYNCSTART, reg13);
	buf = udl_set_register_lfsr16(buf, UDL_REG_VSYNCEND, reg15);
	buf = udl_set_register_16(buf, UDL_REG_VPIXELS, reg17);
	buf = udl_set_register_16be(buf, UDL_REG_PIXELCLOCK5KHZ, reg1b);

	return buf;
}
/*
 * Append a one-pixel WRITECOPY16 command (copy from address 0 to address 0)
 * to the command buffer, used as a harmless no-op render.
 */
static char *udl_dummy_render(char *wrptr)
{
	wrptr[0] = UDL_MSG_BULK;
	wrptr[1] = UDL_CMD_WRITECOPY16;
	wrptr[2] = 0x00; /* from addr */
	wrptr[3] = 0x00;
	wrptr[4] = 0x00;
	wrptr[5] = 0x01; /* one pixel */
	wrptr[6] = 0x00; /* to address */
	wrptr[7] = 0x00;
	wrptr[8] = 0x00;

	return wrptr + 9;
}
/* Return log2 of the bytes-per-pixel, or -EINVAL if not a power of two. */
static long udl_log_cpp(unsigned int cpp)
{
	if (WARN_ON(!is_power_of_2(cpp)))
		return -EINVAL;

	return __ffs(cpp);
}
/*
 * Transfer the damaged rectangle @clip of @fb to the device, line by line,
 * via udl_render_hline(). URB submission errors from the final flush are
 * not propagated: udl_submit_urb() already logs and recycles the URB.
 */
static int udl_handle_damage(struct drm_framebuffer *fb,
			     const struct iosys_map *map,
			     const struct drm_rect *clip)
{
	struct drm_device *dev = fb->dev;
	void *vaddr = map->vaddr; /* TODO: Use mapping abstraction properly */
	int i, ret;
	char *cmd;
	struct urb *urb;
	int log_bpp;

	ret = udl_log_cpp(fb->format->cpp[0]);
	if (ret < 0)
		return ret;
	log_bpp = ret;

	urb = udl_get_urb(dev);
	if (!urb)
		return -ENOMEM;
	cmd = urb->transfer_buffer;

	for (i = clip->y1; i < clip->y2; i++) {
		const int line_offset = fb->pitches[0] * i;
		const int byte_offset = line_offset + (clip->x1 << log_bpp);
		/* device-side framebuffer is tightly packed at fb->width */
		const int dev_byte_offset = (fb->width * i + clip->x1) << log_bpp;
		const int byte_width = drm_rect_width(clip) << log_bpp;
		ret = udl_render_hline(dev, log_bpp, &urb, (char *)vaddr,
				       &cmd, byte_offset, dev_byte_offset,
				       byte_width);
		if (ret)
			return ret;
	}

	if (cmd > (char *)urb->transfer_buffer) {
		/* Send partial buffer remaining before exiting */
		int len;
		if (cmd < (char *)urb->transfer_buffer + urb->transfer_buffer_length)
			*cmd++ = UDL_MSG_BULK;
		len = cmd - (char *)urb->transfer_buffer;
		ret = udl_submit_urb(dev, urb, len);
	} else {
		/* nothing queued; hand the unused URB back to the pool */
		udl_urb_completion(urb);
	}

	return 0;
}
/*
 * Primary plane
 */

/* Pixel formats the primary plane accepts from userspace. */
static const uint32_t udl_primary_plane_formats[] = {
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
};

/* Only linear (untiled) buffers are supported. */
static const uint64_t udl_primary_plane_fmtmods[] = {
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID
};
/*
 * Flush the changed parts of the shadow-buffered primary plane to the
 * device.  Atomic-update hooks return void, so transfer errors cannot
 * be propagated and are dropped.
 */
static void udl_primary_plane_helper_atomic_update(struct drm_plane *plane,
						   struct drm_atomic_state *state)
{
	struct drm_device *dev = plane->dev;
	struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, plane);
	struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
	struct drm_framebuffer *fb = plane_state->fb;
	struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state, plane);
	struct drm_atomic_helper_damage_iter iter;
	struct drm_rect damage;
	int ret, idx;

	if (!fb)
		return; /* no framebuffer; plane is disabled */

	/* Make the framebuffer contents coherent for CPU reads. */
	ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE);
	if (ret)
		return;

	/* Bail out (but still end CPU access) if the device was unplugged. */
	if (!drm_dev_enter(dev, &idx))
		goto out_drm_gem_fb_end_cpu_access;

	/* Only transfer the regions that changed since the old state. */
	drm_atomic_helper_damage_iter_init(&iter, old_plane_state, plane_state);
	drm_atomic_for_each_plane_damage(&iter, &damage) {
		udl_handle_damage(fb, &shadow_plane_state->data[0], &damage);
	}

	drm_dev_exit(idx);

out_drm_gem_fb_end_cpu_access:
	drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);
}
/* Shadow-buffered plane helpers plus our damage-handling update hook. */
static const struct drm_plane_helper_funcs udl_primary_plane_helper_funcs = {
	DRM_GEM_SHADOW_PLANE_HELPER_FUNCS,
	.atomic_check = drm_plane_helper_atomic_check,
	.atomic_update = udl_primary_plane_helper_atomic_update,
};

/* Stock atomic plane ops; state handling comes from the shadow-plane macro. */
static const struct drm_plane_funcs udl_primary_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = drm_plane_cleanup,
	DRM_GEM_SHADOW_PLANE_FUNCS,
};
/*
 * CRTC
 */

/*
 * Program the display mode and unblank the screen when the CRTC is
 * enabled.  All register writes are batched into a single URB.
 */
static void udl_crtc_helper_atomic_enable(struct drm_crtc *crtc, struct drm_atomic_state *state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
	struct drm_display_mode *mode = &crtc_state->mode;
	struct urb *urb;
	char *buf;
	int idx;

	if (!drm_dev_enter(dev, &idx))
		return;

	urb = udl_get_urb(dev);
	if (!urb)
		goto out;

	buf = (char *)urb->transfer_buffer;

	/* Video registers are written between vidreg lock/unlock markers. */
	buf = udl_vidreg_lock(buf);
	buf = udl_set_color_depth(buf, UDL_COLORDEPTH_16BPP);
	/* set base for 16bpp segment to 0 */
	buf = udl_set_base16bpp(buf, 0);
	/* set base for 8bpp segment to end of fb */
	buf = udl_set_base8bpp(buf, 2 * mode->vdisplay * mode->hdisplay);
	buf = udl_set_display_mode(buf, mode);
	buf = udl_set_blank_mode(buf, UDL_BLANKMODE_ON);
	buf = udl_vidreg_unlock(buf);
	buf = udl_dummy_render(buf);

	/* Errors cannot be propagated from this void hook. */
	udl_submit_urb(dev, urb, buf - (char *)urb->transfer_buffer);

out:
	drm_dev_exit(idx);
}
/* Blank the screen (power-down blanking) when the CRTC is disabled. */
static void udl_crtc_helper_atomic_disable(struct drm_crtc *crtc, struct drm_atomic_state *state)
{
	struct drm_device *dev = crtc->dev;
	struct urb *urb;
	char *buf;
	int idx;

	if (!drm_dev_enter(dev, &idx))
		return;

	urb = udl_get_urb(dev);
	if (!urb)
		goto out;

	buf = (char *)urb->transfer_buffer;
	buf = udl_vidreg_lock(buf);
	buf = udl_set_blank_mode(buf, UDL_BLANKMODE_POWERDOWN);
	buf = udl_vidreg_unlock(buf);
	buf = udl_dummy_render(buf);

	/* Errors cannot be propagated from this void hook. */
	udl_submit_urb(dev, urb, buf - (char *)urb->transfer_buffer);

out:
	drm_dev_exit(idx);
}
/* CRTC enable/disable drive the device's blanking and mode registers. */
static const struct drm_crtc_helper_funcs udl_crtc_helper_funcs = {
	.atomic_check = drm_crtc_helper_atomic_check,
	.atomic_enable = udl_crtc_helper_atomic_enable,
	.atomic_disable = udl_crtc_helper_atomic_disable,
};

/* Stock atomic CRTC ops; no driver-private CRTC state. */
static const struct drm_crtc_funcs udl_crtc_funcs = {
	.reset = drm_atomic_helper_crtc_reset,
	.destroy = drm_crtc_cleanup,
	.set_config = drm_atomic_helper_set_config,
	.page_flip = drm_atomic_helper_page_flip,
	.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
};

/*
 * Encoder
 */

/* The encoder is a pass-through; only cleanup is needed. */
static const struct drm_encoder_funcs udl_encoder_funcs = {
	.destroy = drm_encoder_cleanup,
};

/*
 * Connector
 */
static int udl_connector_helper_get_modes(struct drm_connector *connector)
{
struct udl_connector *udl_connector = to_udl_connector(connector);
drm_connector_update_edid_property(connector, udl_connector->edid);
if (udl_connector->edid)
return drm_add_edid_modes(connector, udl_connector->edid);
return 0;
}
/* Mode enumeration comes straight from the cached EDID. */
static const struct drm_connector_helper_funcs udl_connector_helper_funcs = {
	.get_modes = udl_connector_helper_get_modes,
};
/*
 * EDID block-read callback for drm_do_get_edid().  The device delivers
 * one EDID byte per vendor control transfer: a two-byte response whose
 * second byte is the requested EDID byte.  Returns 0 on success or a
 * negative errno.
 */
static int udl_get_edid_block(void *data, u8 *buf, unsigned int block, size_t len)
{
	struct udl_device *udl = data;
	struct drm_device *dev = &udl->drm;
	struct usb_device *udev = udl_to_usb_device(udl);
	u8 *read_buff;
	int ret;
	size_t i;

	read_buff = kmalloc(2, GFP_KERNEL);
	if (!read_buff)
		return -ENOMEM;

	for (i = 0; i < len; i++) {
		/* EDID byte offset is carried in the high byte of wValue. */
		int bval = (i + block * EDID_LENGTH) << 8;

		ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
				      0x02, (0x80 | (0x02 << 5)), bval,
				      0xA1, read_buff, 2, USB_CTRL_GET_TIMEOUT);
		if (ret < 0) {
			/* Fix: print the errno with %d, not %x. */
			drm_err(dev, "Read EDID byte %zu failed err %d\n", i, ret);
			goto err_kfree;
		} else if (ret < 2) {
			/*
			 * Fix: the EDID byte lives in read_buff[1], so a
			 * short transfer of fewer than two bytes (the old
			 * check allowed ret == 1) would consume an
			 * uninitialized byte.  Treat it as an I/O error.
			 */
			ret = -EIO;
			drm_err(dev, "Read EDID byte %zu failed\n", i);
			goto err_kfree;
		}

		buf[i] = read_buff[1];
	}

	kfree(read_buff);

	return 0;

err_kfree:
	kfree(read_buff);
	return ret;
}
/*
 * Detect a connected monitor by attempting an EDID read; a successful
 * read means "connected".  Any previously cached EDID is discarded
 * first so the cache always reflects the most recent probe.
 */
static enum drm_connector_status udl_connector_detect(struct drm_connector *connector, bool force)
{
	struct drm_device *dev = connector->dev;
	struct udl_device *udl = to_udl(dev);
	struct udl_connector *udl_connector = to_udl_connector(connector);
	enum drm_connector_status status = connector_status_disconnected;
	int idx;

	/* cleanup previous EDID */
	kfree(udl_connector->edid);
	udl_connector->edid = NULL;

	/* Device may have been unplugged. */
	if (!drm_dev_enter(dev, &idx))
		return connector_status_disconnected;

	udl_connector->edid = drm_do_get_edid(connector, udl_get_edid_block, udl);
	if (udl_connector->edid)
		status = connector_status_connected;

	drm_dev_exit(idx);

	return status;
}
/*
 * Destroy the connector: release DRM core state first, then the cached
 * EDID and the connector allocation itself.
 */
static void udl_connector_destroy(struct drm_connector *connector)
{
	struct udl_connector *udl_connector = to_udl_connector(connector);

	drm_connector_cleanup(connector);
	kfree(udl_connector->edid);
	kfree(udl_connector);
}
/* Connector ops: custom detect (EDID probe) and teardown, rest stock. */
static const struct drm_connector_funcs udl_connector_funcs = {
	.reset = drm_atomic_helper_connector_reset,
	.detect = udl_connector_detect,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.destroy = udl_connector_destroy,
	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
/*
 * Allocate and register the (VGA-type) connector.  Returns the
 * connector on success or an ERR_PTR; the allocation is freed by
 * udl_connector_destroy() via the funcs table.
 */
struct drm_connector *udl_connector_init(struct drm_device *dev)
{
	struct udl_connector *udl_connector;
	struct drm_connector *connector;
	int ret;

	udl_connector = kzalloc(sizeof(*udl_connector), GFP_KERNEL);
	if (!udl_connector)
		return ERR_PTR(-ENOMEM);

	connector = &udl_connector->connector;
	ret = drm_connector_init(dev, connector, &udl_connector_funcs, DRM_MODE_CONNECTOR_VGA);
	if (ret)
		goto err_kfree;

	drm_connector_helper_add(connector, &udl_connector_helper_funcs);

	/* No hardware hotplug interrupt; rely on periodic polling. */
	connector->polled = DRM_CONNECTOR_POLL_HPD |
			    DRM_CONNECTOR_POLL_CONNECT |
			    DRM_CONNECTOR_POLL_DISCONNECT;

	return connector;

err_kfree:
	kfree(udl_connector);
	return ERR_PTR(ret);
}
/*
 * Modesetting
 */

/*
 * Reject modes whose pixel count exceeds the device's SKU limit.
 * A limit of zero means "no limit configured".
 */
static enum drm_mode_status udl_mode_config_mode_valid(struct drm_device *dev,
						       const struct drm_display_mode *mode)
{
	struct udl_device *udl = to_udl(dev);

	if (udl->sku_pixel_limit &&
	    mode->vdisplay * mode->hdisplay > udl->sku_pixel_limit)
		return MODE_MEM;

	return MODE_OK;
}
/* Atomic mode-config with dirty-fb support and SKU pixel-limit check. */
static const struct drm_mode_config_funcs udl_mode_config_funcs = {
	.fb_create = drm_gem_fb_create_with_dirty,
	.mode_valid = udl_mode_config_mode_valid,
	.atomic_check = drm_atomic_helper_check,
	.atomic_commit = drm_atomic_helper_commit,
};
/*
 * Set up the single plane -> CRTC -> encoder -> connector pipeline.
 * All KMS objects are embedded in struct udl_device except the
 * connector, which is allocated by udl_connector_init().
 * Returns 0 on success or a negative errno.
 */
int udl_modeset_init(struct drm_device *dev)
{
	struct udl_device *udl = to_udl(dev);
	struct drm_plane *primary_plane;
	struct drm_crtc *crtc;
	struct drm_encoder *encoder;
	struct drm_connector *connector;
	int ret;

	ret = drmm_mode_config_init(dev);
	if (ret)
		return ret;

	dev->mode_config.min_width = 640;
	dev->mode_config.min_height = 480;
	dev->mode_config.max_width = 2048;
	dev->mode_config.max_height = 2048;
	dev->mode_config.preferred_depth = 16; /* device scans out 16 bpp */
	dev->mode_config.funcs = &udl_mode_config_funcs;

	primary_plane = &udl->primary_plane;
	ret = drm_universal_plane_init(dev, primary_plane, 0,
				       &udl_primary_plane_funcs,
				       udl_primary_plane_formats,
				       ARRAY_SIZE(udl_primary_plane_formats),
				       udl_primary_plane_fmtmods,
				       DRM_PLANE_TYPE_PRIMARY, NULL);
	if (ret)
		return ret;
	drm_plane_helper_add(primary_plane, &udl_primary_plane_helper_funcs);
	/* Let userspace pass damage clips so we only transfer what changed. */
	drm_plane_enable_fb_damage_clips(primary_plane);

	crtc = &udl->crtc;
	ret = drm_crtc_init_with_planes(dev, crtc, primary_plane, NULL,
					&udl_crtc_funcs, NULL);
	if (ret)
		return ret;
	drm_crtc_helper_add(crtc, &udl_crtc_helper_funcs);

	encoder = &udl->encoder;
	ret = drm_encoder_init(dev, encoder, &udl_encoder_funcs, DRM_MODE_ENCODER_DAC, NULL);
	if (ret)
		return ret;
	encoder->possible_crtcs = drm_crtc_mask(crtc);

	connector = udl_connector_init(dev);
	if (IS_ERR(connector))
		return PTR_ERR(connector);

	ret = drm_connector_attach_encoder(connector, encoder);
	if (ret)
		return ret;

	drm_mode_config_reset(dev);

	return 0;
}
| linux-master | drivers/gpu/drm/udl/udl_modeset.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2012 Red Hat
* based in parts on udlfb.c:
* Copyright (C) 2009 Roberto De Ioris <[email protected]>
* Copyright (C) 2009 Jaya Kumar <[email protected]>
* Copyright (C) 2009 Bernie Thompson <[email protected]>
*/
#include <asm/unaligned.h>
#include "udl_drv.h"
#include "udl_proto.h"
/* Maximum pixel count per command (a size byte of 0 means 256). */
#define MAX_CMD_PIXELS 255

/* Worst-case header and minimum payload sizes per command variant. */
#define RLX_HEADER_BYTES 7
#define MIN_RLX_PIX_BYTES 4
#define MIN_RLX_CMD_BYTES (RLX_HEADER_BYTES + MIN_RLX_PIX_BYTES)

#define RLE_HEADER_BYTES 6
#define MIN_RLE_PIX_BYTES 3
#define MIN_RLE_CMD_BYTES (RLE_HEADER_BYTES + MIN_RLE_PIX_BYTES)

#define RAW_HEADER_BYTES 6
#define MIN_RAW_PIX_BYTES 2
#define MIN_RAW_CMD_BYTES (RAW_HEADER_BYTES + MIN_RAW_PIX_BYTES)
/*
 * Down-convert a 32-bit XRGB8888 pixel to RGB565 by dropping the low
 * bits of each channel.  Despite the name, the value is in host order
 * here; the big-endian swap happens later, when the pixel is written
 * into the command stream with put_unaligned_be16().
 */
static inline u16 pixel32_to_be16(const uint32_t pixel)
{
	return (((pixel >> 3) & 0x001f) |
		((pixel >> 5) & 0x07e0) |
		((pixel >> 8) & 0xf800));
}
/*
 * Load one framebuffer pixel as a 16-bit RGB565 value.  log_bpp == 1
 * means the source is already 2 bytes/pixel and is used as-is;
 * otherwise the source is 4 bytes/pixel (XRGB8888) and down-converted.
 */
static inline u16 get_pixel_val16(const uint8_t *pixel, int log_bpp)
{
	u16 pixel_val16;

	if (log_bpp == 1)
		pixel_val16 = *(const uint16_t *)pixel;
	else
		pixel_val16 = pixel32_to_be16(*(const uint32_t *)pixel);

	return pixel_val16;
}
/*
 * Render a command stream for an encoded horizontal line segment of pixels.
 *
 * A command buffer holds several commands.
 * It always begins with a fresh command header
 * (the protocol doesn't require this, but we enforce it to allow
 * multiple buffers to be potentially encoded and sent in parallel).
 * A single command encodes one contiguous horizontal line of pixels
 *
 * The function relies on the client to do all allocation, so that
 * rendering can be done directly to output buffers (e.g. USB URBs).
 * The function fills the supplied command buffer, providing information
 * on where it left off, so the client may call in again with additional
 * buffers if the line will take several buffers to complete.
 *
 * A single command can transmit a maximum of 256 pixels,
 * regardless of the compression ratio (protocol design limit).
 * To the hardware, 0 for a size byte means 256
 *
 * Rather than 256 pixel commands which are either rl or raw encoded,
 * the rlx command simply assumes alternating raw and rl spans within one cmd.
 * This has a slightly larger header overhead, but produces more even results.
 * It also processes all data (read and write) in a single pass.
 * Performance benchmarks of common cases show it having just slightly better
 * compression than 256 pixel raw or rle commands, with similar CPU consumpion.
 * But for very rl friendly data, will compress not quite as well.
 */
static void udl_compress_hline16(
	const u8 **pixel_start_ptr,
	const u8 *const pixel_end,
	uint32_t *device_address_ptr,
	uint8_t **command_buffer_ptr,
	const uint8_t *const cmd_buffer_end, int log_bpp)
{
	const int bpp = 1 << log_bpp;
	const u8 *pixel = *pixel_start_ptr;
	uint32_t dev_addr = *device_address_ptr;
	uint8_t *cmd = *command_buffer_ptr;

	/* Emit one rlx command per iteration while input and room remain. */
	while ((pixel_end > pixel) &&
	       (cmd_buffer_end - MIN_RLX_CMD_BYTES > cmd)) {
		uint8_t *raw_pixels_count_byte = NULL;
		uint8_t *cmd_pixels_count_byte = NULL;
		const u8 *raw_pixel_start = NULL;
		const u8 *cmd_pixel_start, *cmd_pixel_end = NULL;
		uint16_t pixel_val16;

		/* Command header: opcode plus 24-bit device address. */
		*cmd++ = UDL_MSG_BULK;
		*cmd++ = UDL_CMD_WRITERLX16;
		*cmd++ = (uint8_t) ((dev_addr >> 16) & 0xFF);
		*cmd++ = (uint8_t) ((dev_addr >> 8) & 0xFF);
		*cmd++ = (uint8_t) ((dev_addr) & 0xFF);

		/* Count bytes are back-patched once the spans are known. */
		cmd_pixels_count_byte = cmd++; /* we'll know this later */
		cmd_pixel_start = pixel;

		raw_pixels_count_byte = cmd++; /* we'll know this later */
		raw_pixel_start = pixel;

		/*
		 * Cap this command at the protocol pixel limit, the
		 * remaining input, and the remaining output space (2 output
		 * bytes per worst-case raw pixel).
		 */
		cmd_pixel_end = pixel + (min3(MAX_CMD_PIXELS + 1UL,
					(unsigned long)(pixel_end - pixel) >> log_bpp,
					(unsigned long)(cmd_buffer_end - 1 - cmd) / 2) << log_bpp);

		pixel_val16 = get_pixel_val16(pixel, log_bpp);

		while (pixel < cmd_pixel_end) {
			const u8 *const start = pixel;
			const uint16_t repeating_pixel_val16 = pixel_val16;

			/* Write the pixel once, then scan for repeats. */
			put_unaligned_be16(pixel_val16, cmd);

			cmd += 2;
			pixel += bpp;

			while (pixel < cmd_pixel_end) {
				pixel_val16 = get_pixel_val16(pixel, log_bpp);
				if (pixel_val16 != repeating_pixel_val16)
					break;
				pixel += bpp;
			}

			if (unlikely(pixel > start + bpp)) {
				/* go back and fill in raw pixel count */
				*raw_pixels_count_byte = (((start -
						raw_pixel_start) >> log_bpp) + 1) & 0xFF;

				/* immediately after raw data is repeat byte */
				*cmd++ = (((pixel - start) >> log_bpp) - 1) & 0xFF;

				/* Then start another raw pixel span */
				raw_pixel_start = pixel;
				raw_pixels_count_byte = cmd++;
			}
		}

		if (pixel > raw_pixel_start) {
			/* finalize last RAW span */
			*raw_pixels_count_byte = ((pixel - raw_pixel_start) >> log_bpp) & 0xFF;
		} else {
			/* undo unused byte */
			cmd--;
		}

		*cmd_pixels_count_byte = ((pixel - cmd_pixel_start) >> log_bpp) & 0xFF;
		/* Device address advances 2 bytes per pixel (16 bpp). */
		dev_addr += ((pixel - cmd_pixel_start) >> log_bpp) * 2;
	}

	if (cmd_buffer_end <= MIN_RLX_CMD_BYTES + cmd) {
		/* Fill leftover bytes with no-ops */

		if (cmd_buffer_end > cmd)
			memset(cmd, UDL_MSG_BULK, cmd_buffer_end - cmd);
		cmd = (uint8_t *) cmd_buffer_end;
	}

	*command_buffer_ptr = cmd;
	*pixel_start_ptr = pixel;
	*device_address_ptr = dev_addr;

	return;
}
/*
 * There are 3 copies of every pixel: The front buffer that the fbdev
 * client renders to, the actual framebuffer across the USB bus in hardware
 * (that we can only write to, slowly, and can never read), and (optionally)
 * our shadow copy that tracks what's been sent to that hardware buffer.
 */

/*
 * Encode one horizontal line into URB command buffers, submitting full
 * URBs and fetching fresh ones as needed.  *urb_ptr and *urb_buf_ptr
 * are updated to reflect the URB/write position left for the caller.
 * Returns 0 on success or a negative errno.
 */
int udl_render_hline(struct drm_device *dev, int log_bpp, struct urb **urb_ptr,
		     const char *front, char **urb_buf_ptr,
		     u32 byte_offset, u32 device_byte_offset,
		     u32 byte_width)
{
	const u8 *line_start, *line_end, *next_pixel;
	/* Device addresses count 2 bytes per (16 bpp) pixel. */
	u32 base16 = 0 + (device_byte_offset >> log_bpp) * 2;
	struct urb *urb = *urb_ptr;
	u8 *cmd = *urb_buf_ptr;
	u8 *cmd_end = (u8 *) urb->transfer_buffer + urb->transfer_buffer_length;

	/* Only 16 bpp (log 1) and 32 bpp (log 2) sources are supported. */
	if (WARN_ON(!(log_bpp == 1 || log_bpp == 2))) {
		/* need to finish URB at error from this function */
		udl_urb_completion(urb);
		return -EINVAL;
	}

	line_start = (u8 *) (front + byte_offset);
	next_pixel = line_start;
	line_end = next_pixel + byte_width;

	while (next_pixel < line_end) {

		udl_compress_hline16(&next_pixel,
				     line_end, &base16,
				     (u8 **) &cmd, (u8 *) cmd_end, log_bpp);

		/* Buffer full: submit it and continue into a fresh URB. */
		if (cmd >= cmd_end) {
			int len = cmd - (u8 *) urb->transfer_buffer;
			int ret = udl_submit_urb(dev, urb, len);

			if (ret)
				return ret;
			urb = udl_get_urb(dev);
			if (!urb)
				return -EAGAIN;
			*urb_ptr = urb;
			cmd = urb->transfer_buffer;
			cmd_end = &cmd[urb->transfer_buffer_length];
		}
	}

	*urb_buf_ptr = cmd;

	return 0;
}
| linux-master | drivers/gpu/drm/udl/udl_transfer.c |
/*
* Copyright (C) 2015 Red Hat, Inc.
* All Rights Reserved.
*
* Authors:
* Dave Airlie
* Alon Levy
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <linux/file.h>
#include <linux/sync_file.h>
#include <linux/uaccess.h>
#include <drm/drm_file.h>
#include <drm/virtgpu_drm.h>
#include "virtgpu_drv.h"
/* All blob-resource usage flags userspace may legitimately set. */
#define VIRTGPU_BLOB_FLAG_USE_MASK (VIRTGPU_BLOB_FLAG_USE_MAPPABLE | \
				    VIRTGPU_BLOB_FLAG_USE_SHAREABLE | \
				    VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE)
/* Must be called with &virtio_gpu_fpriv.struct_mutex held. */
static void virtio_gpu_create_context_locked(struct virtio_gpu_device *vgdev,
					     struct virtio_gpu_fpriv *vfpriv)
{
	/* Use the current task's comm as the host-side debug name. */
	char dbgname[TASK_COMM_LEN];

	get_task_comm(dbgname, current);
	virtio_gpu_cmd_context_create(vgdev, vfpriv->ctx_id,
				      vfpriv->context_init, strlen(dbgname),
				      dbgname);

	vfpriv->context_created = true;
}
/*
 * Lazily create the per-file host rendering context, exactly once,
 * under the file's context lock.
 */
void virtio_gpu_create_context(struct drm_device *dev, struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;

	mutex_lock(&vfpriv->context_lock);
	if (!vfpriv->context_created)
		virtio_gpu_create_context_locked(vgdev, vfpriv);
	mutex_unlock(&vfpriv->context_lock);
}
/* Resolve the mmap offset for a dumb buffer identified by handle. */
static int virtio_gpu_map_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_map *virtio_gpu_map = data;

	return virtio_gpu_mode_dumb_mmap(file, vgdev->ddev,
					 virtio_gpu_map->handle,
					 &virtio_gpu_map->offset);
}
/*
 * Report device capabilities to userspace.  The answer is written as
 * an int through the user pointer in param->value.
 */
static int virtio_gpu_getparam_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_getparam *param = data;
	int value;

	switch (param->param) {
	case VIRTGPU_PARAM_3D_FEATURES:
		value = vgdev->has_virgl_3d ? 1 : 0;
		break;
	case VIRTGPU_PARAM_CAPSET_QUERY_FIX:
		value = 1;
		break;
	case VIRTGPU_PARAM_RESOURCE_BLOB:
		value = vgdev->has_resource_blob ? 1 : 0;
		break;
	case VIRTGPU_PARAM_HOST_VISIBLE:
		value = vgdev->has_host_visible ? 1 : 0;
		break;
	case VIRTGPU_PARAM_CROSS_DEVICE:
		value = vgdev->has_resource_assign_uuid ? 1 : 0;
		break;
	case VIRTGPU_PARAM_CONTEXT_INIT:
		value = vgdev->has_context_init ? 1 : 0;
		break;
	case VIRTGPU_PARAM_SUPPORTED_CAPSET_IDs:
		/*
		 * NOTE(review): 'value' is an int; if capset_id_mask is
		 * wider than 32 bits the high bits are truncated here —
		 * confirm the mask cannot exceed 32 bits.
		 */
		value = vgdev->capset_id_mask;
		break;
	default:
		return -EINVAL;
	}
	if (copy_to_user(u64_to_user_ptr(param->value), &value, sizeof(int)))
		return -EFAULT;

	return 0;
}
/*
 * Create a (non-blob) GPU resource and return both the host resource
 * handle and a GEM handle for it.  3D parameters are only honored when
 * the device has virgl; otherwise they must describe a plain 2D
 * resource.
 */
static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
					    struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_resource_create *rc = data;
	struct virtio_gpu_fence *fence;
	int ret;
	struct virtio_gpu_object *qobj;
	struct drm_gem_object *obj;
	uint32_t handle = 0;
	struct virtio_gpu_object_params params = { 0 };

	if (vgdev->has_virgl_3d) {
		virtio_gpu_create_context(dev, file);
		params.virgl = true;
		params.target = rc->target;
		params.bind = rc->bind;
		params.depth = rc->depth;
		params.array_size = rc->array_size;
		params.last_level = rc->last_level;
		params.nr_samples = rc->nr_samples;
		params.flags = rc->flags;
	} else {
		/* Without 3D support only simple 2D textures are valid. */
		if (rc->depth > 1)
			return -EINVAL;
		if (rc->nr_samples > 1)
			return -EINVAL;
		if (rc->last_level > 1)
			return -EINVAL;
		if (rc->target != 2)
			return -EINVAL;
		if (rc->array_size > 1)
			return -EINVAL;
	}

	params.format = rc->format;
	params.width = rc->width;
	params.height = rc->height;
	params.size = rc->size;
	/* allocate a single page size object */
	if (params.size == 0)
		params.size = PAGE_SIZE;

	fence = virtio_gpu_fence_alloc(vgdev, vgdev->fence_drv.context, 0);
	if (!fence)
		return -ENOMEM;
	ret = virtio_gpu_object_create(vgdev, &params, &qobj, fence);
	dma_fence_put(&fence->f);
	if (ret < 0)
		return ret;
	obj = &qobj->base.base;

	ret = drm_gem_handle_create(file, obj, &handle);
	if (ret) {
		drm_gem_object_release(obj);
		return ret;
	}

	rc->res_handle = qobj->hw_res_handle; /* similiar to a VM address */
	rc->bo_handle = handle;

	/*
	 * The handle owns the reference now. But we must drop our
	 * remaining reference *after* we no longer need to dereference
	 * the obj.  Otherwise userspace could guess the handle and
	 * race closing it from another thread.
	 */
	drm_gem_object_put(obj);

	return 0;
}
/*
 * Return size, host resource handle and (for blob resources) the blob
 * memory type of the buffer object identified by ri->bo_handle.
 */
static int virtio_gpu_resource_info_ioctl(struct drm_device *dev, void *data,
					  struct drm_file *file)
{
	struct drm_virtgpu_resource_info *ri = data;
	struct drm_gem_object *gobj;
	struct virtio_gpu_object *qobj;

	gobj = drm_gem_object_lookup(file, ri->bo_handle);
	if (!gobj)
		return -ENOENT;

	qobj = gem_to_virtio_gpu_obj(gobj);

	ri->size = qobj->base.base.size;
	ri->res_handle = qobj->hw_res_handle;
	if (qobj->host3d_blob || qobj->guest_blob)
		ri->blob_mem = qobj->blob_mem;

	drm_gem_object_put(gobj);

	return 0;
}
/*
 * Queue a fenced host->guest transfer for a 3D resource.  Guest-only
 * blobs have nothing to transfer, and stride arguments are only valid
 * for host3d blobs.
 */
static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
					       void *data,
					       struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct drm_virtgpu_3d_transfer_from_host *args = data;
	struct virtio_gpu_object *bo;
	struct virtio_gpu_object_array *objs;
	struct virtio_gpu_fence *fence;
	int ret;
	u32 offset = args->offset;

	/* Host-side transfers require 3D support. */
	if (vgdev->has_virgl_3d == false)
		return -ENOSYS;

	virtio_gpu_create_context(dev, file);
	objs = virtio_gpu_array_from_handles(file, &args->bo_handle, 1);
	if (objs == NULL)
		return -ENOENT;

	bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	if (bo->guest_blob && !bo->host3d_blob) {
		ret = -EINVAL;
		goto err_put_free;
	}

	if (!bo->host3d_blob && (args->stride || args->layer_stride)) {
		ret = -EINVAL;
		goto err_put_free;
	}

	/* Reserve the object so the fence can be attached to it. */
	ret = virtio_gpu_array_lock_resv(objs);
	if (ret != 0)
		goto err_put_free;

	fence = virtio_gpu_fence_alloc(vgdev, vgdev->fence_drv.context, 0);
	if (!fence) {
		ret = -ENOMEM;
		goto err_unlock;
	}

	/* The command takes ownership of objs; only drop our fence ref. */
	virtio_gpu_cmd_transfer_from_host_3d
		(vgdev, vfpriv->ctx_id, offset, args->level, args->stride,
		 args->layer_stride, &args->box, objs, fence);
	dma_fence_put(&fence->f);
	virtio_gpu_notify(vgdev);
	return 0;

err_unlock:
	virtio_gpu_array_unlock_resv(objs);
err_put_free:
	virtio_gpu_array_put_free(objs);
	return ret;
}
/*
 * Queue a guest->host transfer.  On non-virgl devices this is a plain
 * unfenced 2D upload; with 3D support a fenced 3D transfer is issued
 * in the file's rendering context.
 */
static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
					     struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct drm_virtgpu_3d_transfer_to_host *args = data;
	struct virtio_gpu_object *bo;
	struct virtio_gpu_object_array *objs;
	struct virtio_gpu_fence *fence;
	int ret;
	u32 offset = args->offset;

	objs = virtio_gpu_array_from_handles(file, &args->bo_handle, 1);
	if (objs == NULL)
		return -ENOENT;

	bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	/* Guest-only blobs are already visible to the host; nothing to do. */
	if (bo->guest_blob && !bo->host3d_blob) {
		ret = -EINVAL;
		goto err_put_free;
	}

	if (!vgdev->has_virgl_3d) {
		virtio_gpu_cmd_transfer_to_host_2d
			(vgdev, offset,
			 args->box.w, args->box.h, args->box.x, args->box.y,
			 objs, NULL);
	} else {
		virtio_gpu_create_context(dev, file);

		/* Stride arguments are only meaningful for host3d blobs. */
		if (!bo->host3d_blob && (args->stride || args->layer_stride)) {
			ret = -EINVAL;
			goto err_put_free;
		}

		ret = virtio_gpu_array_lock_resv(objs);
		if (ret != 0)
			goto err_put_free;

		ret = -ENOMEM;
		fence = virtio_gpu_fence_alloc(vgdev, vgdev->fence_drv.context,
					       0);
		if (!fence)
			goto err_unlock;

		virtio_gpu_cmd_transfer_to_host_3d
			(vgdev,
			 vfpriv ? vfpriv->ctx_id : 0, offset, args->level,
			 args->stride, args->layer_stride, &args->box, objs,
			 fence);
		dma_fence_put(&fence->f);
	}
	virtio_gpu_notify(vgdev);
	return 0;

err_unlock:
	virtio_gpu_array_unlock_resv(objs);
err_put_free:
	virtio_gpu_array_put_free(objs);
	return ret;
}
/*
 * Wait for (or, with VIRTGPU_WAIT_NOWAIT, poll) completion of work on
 * a buffer object.  Returns 0 when idle, -EBUSY while busy or on
 * timeout, or another negative errno.
 */
static int virtio_gpu_wait_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file)
{
	struct drm_virtgpu_3d_wait *args = data;
	struct drm_gem_object *obj;
	long timeout = 15 * HZ;
	int ret;

	obj = drm_gem_object_lookup(file, args->handle);
	if (obj == NULL)
		return -ENOENT;

	if (args->flags & VIRTGPU_WAIT_NOWAIT) {
		/* Non-blocking: 1 if signaled, 0 if still busy. */
		ret = dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_READ);
	} else {
		/* Blocking: >0 remaining jiffies, 0 timeout, <0 error. */
		ret = dma_resv_wait_timeout(obj->resv, DMA_RESV_USAGE_READ,
					    true, timeout);
	}
	if (ret == 0)
		ret = -EBUSY;
	else if (ret > 0)
		ret = 0;

	drm_gem_object_put(obj);
	return ret;
}
/*
 * Copy a capability set to userspace.  Cached capsets are served
 * directly; otherwise the host is queried and the entry awaited
 * (5 second timeout).
 */
static int virtio_gpu_get_caps_ioctl(struct drm_device *dev,
				     void *data, struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_get_caps *args = data;
	unsigned size, host_caps_size;
	int i;
	int found_valid = -1;
	int ret;
	struct virtio_gpu_drv_cap_cache *cache_ent;
	void *ptr;

	if (vgdev->num_capsets == 0)
		return -ENOSYS;

	/* don't allow userspace to pass 0 */
	if (args->size == 0)
		return -EINVAL;

	/* The lock protects both the capset table and the cache list. */
	spin_lock(&vgdev->display_info_lock);
	for (i = 0; i < vgdev->num_capsets; i++) {
		if (vgdev->capsets[i].id == args->cap_set_id) {
			if (vgdev->capsets[i].max_version >= args->cap_set_ver) {
				found_valid = i;
				break;
			}
		}
	}

	if (found_valid == -1) {
		spin_unlock(&vgdev->display_info_lock);
		return -EINVAL;
	}

	host_caps_size = vgdev->capsets[found_valid].max_size;
	/* only copy to user the minimum of the host caps size or the guest caps size */
	size = min(args->size, host_caps_size);

	list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
		if (cache_ent->id == args->cap_set_id &&
		    cache_ent->version == args->cap_set_ver) {
			spin_unlock(&vgdev->display_info_lock);
			goto copy_exit;
		}
	}
	spin_unlock(&vgdev->display_info_lock);

	/* not in cache - need to talk to hw */
	ret = virtio_gpu_cmd_get_capset(vgdev, found_valid, args->cap_set_ver,
					&cache_ent);
	if (ret)
		return ret;
	virtio_gpu_notify(vgdev);

copy_exit:
	ret = wait_event_timeout(vgdev->resp_wq,
				 atomic_read(&cache_ent->is_valid), 5 * HZ);
	if (!ret)
		return -EBUSY;

	/* is_valid check must precede copy of the cache entry. */
	smp_rmb();

	ptr = cache_ent->caps_cache;

	if (copy_to_user(u64_to_user_ptr(args->addr), ptr, size))
		return -EFAULT;

	return 0;
}
/*
 * Validate the userspace blob-creation request and translate it into
 * object-creation params.  Sets *guest_blob / *host3d_blob according
 * to rc_blob->blob_mem.  Returns 0 if valid, -EINVAL otherwise.
 */
static int verify_blob(struct virtio_gpu_device *vgdev,
		       struct virtio_gpu_fpriv *vfpriv,
		       struct virtio_gpu_object_params *params,
		       struct drm_virtgpu_resource_create_blob *rc_blob,
		       bool *guest_blob, bool *host3d_blob)
{
	if (!vgdev->has_resource_blob)
		return -EINVAL;

	/* Reject unknown usage flags. */
	if (rc_blob->blob_flags & ~VIRTGPU_BLOB_FLAG_USE_MASK)
		return -EINVAL;

	if (rc_blob->blob_flags & VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE) {
		if (!vgdev->has_resource_assign_uuid)
			return -EINVAL;
	}

	switch (rc_blob->blob_mem) {
	case VIRTGPU_BLOB_MEM_GUEST:
		*guest_blob = true;
		break;
	case VIRTGPU_BLOB_MEM_HOST3D_GUEST:
		/* Backed by guest pages but also visible to the host 3D side. */
		*guest_blob = true;
		fallthrough;
	case VIRTGPU_BLOB_MEM_HOST3D:
		*host3d_blob = true;
		break;
	default:
		return -EINVAL;
	}

	if (*host3d_blob) {
		if (!vgdev->has_virgl_3d)
			return -EINVAL;

		/* Must be dword aligned. */
		if (rc_blob->cmd_size % 4 != 0)
			return -EINVAL;

		params->ctx_id = vfpriv->ctx_id;
		params->blob_id = rc_blob->blob_id;
	} else {
		/* blob_id/cmd only make sense for host3d blobs. */
		if (rc_blob->blob_id != 0)
			return -EINVAL;

		if (rc_blob->cmd_size != 0)
			return -EINVAL;
	}

	params->blob_mem = rc_blob->blob_mem;
	params->size = rc_blob->size;
	params->blob = true;
	params->blob_flags = rc_blob->blob_flags;
	return 0;
}
/*
 * Create a blob resource: optionally submit an accompanying command
 * buffer, allocate the object (guest pages or host VRAM), optionally
 * assign a cross-device UUID, and hand a GEM handle back to userspace.
 */
static int virtio_gpu_resource_create_blob_ioctl(struct drm_device *dev,
						 void *data,
						 struct drm_file *file)
{
	int ret = 0;
	uint32_t handle = 0;
	bool guest_blob = false;
	bool host3d_blob = false;
	struct drm_gem_object *obj;
	struct virtio_gpu_object *bo;
	struct virtio_gpu_object_params params = { 0 };
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct drm_virtgpu_resource_create_blob *rc_blob = data;

	if (verify_blob(vgdev, vfpriv, &params, rc_blob,
			&guest_blob, &host3d_blob))
		return -EINVAL;

	if (vgdev->has_virgl_3d)
		virtio_gpu_create_context(dev, file);

	if (rc_blob->cmd_size) {
		void *buf;

		buf = memdup_user(u64_to_user_ptr(rc_blob->cmd),
				  rc_blob->cmd_size);
		if (IS_ERR(buf))
			return PTR_ERR(buf);

		/* NOTE(review): presumably cmd_submit takes ownership of buf
		 * and frees it on completion — confirm in virtgpu_vq.c. */
		virtio_gpu_cmd_submit(vgdev, buf, rc_blob->cmd_size,
				      vfpriv->ctx_id, NULL, NULL);
	}

	if (guest_blob)
		ret = virtio_gpu_object_create(vgdev, &params, &bo, NULL);
	else if (!guest_blob && host3d_blob)
		ret = virtio_gpu_vram_create(vgdev, &params, &bo);
	else
		return -EINVAL;

	if (ret < 0)
		return ret;

	bo->guest_blob = guest_blob;
	bo->host3d_blob = host3d_blob;
	bo->blob_mem = rc_blob->blob_mem;
	bo->blob_flags = rc_blob->blob_flags;

	obj = &bo->base.base;
	if (params.blob_flags & VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE) {
		ret = virtio_gpu_resource_assign_uuid(vgdev, bo);
		if (ret) {
			drm_gem_object_release(obj);
			return ret;
		}
	}

	ret = drm_gem_handle_create(file, obj, &handle);
	if (ret) {
		drm_gem_object_release(obj);
		return ret;
	}

	rc_blob->res_handle = bo->hw_res_handle;
	rc_blob->bo_handle = handle;

	/*
	 * The handle owns the reference now. But we must drop our
	 * remaining reference *after* we no longer need to dereference
	 * the obj.  Otherwise userspace could guess the handle and
	 * race closing it from another thread.
	 */
	drm_gem_object_put(obj);

	return 0;
}
/*
 * Explicitly create the file's host context with user-supplied
 * parameters (capset id, ring count, poll-ring mask).  Fails with
 * -EEXIST if the context was already created (e.g. implicitly).
 */
static int virtio_gpu_context_init_ioctl(struct drm_device *dev,
					 void *data, struct drm_file *file)
{
	int ret = 0;
	uint32_t num_params, i, param, value;
	uint64_t valid_ring_mask;
	size_t len;
	struct drm_virtgpu_context_set_param *ctx_set_params = NULL;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct drm_virtgpu_context_init *args = data;

	num_params = args->num_params;
	len = num_params * sizeof(struct drm_virtgpu_context_set_param);

	if (!vgdev->has_context_init || !vgdev->has_virgl_3d)
		return -EINVAL;

	/* Number of unique parameters supported at this time. */
	if (num_params > 3)
		return -EINVAL;

	ctx_set_params = memdup_user(u64_to_user_ptr(args->ctx_set_params),
				     len);

	if (IS_ERR(ctx_set_params))
		return PTR_ERR(ctx_set_params);

	mutex_lock(&vfpriv->context_lock);
	if (vfpriv->context_created) {
		ret = -EEXIST;
		goto out_unlock;
	}

	for (i = 0; i < num_params; i++) {
		param = ctx_set_params[i].param;
		value = ctx_set_params[i].value;

		switch (param) {
		case VIRTGPU_CONTEXT_PARAM_CAPSET_ID:
			if (value > MAX_CAPSET_ID) {
				ret = -EINVAL;
				goto out_unlock;
			}

			/* The device must advertise the requested capset. */
			if ((vgdev->capset_id_mask & (1ULL << value)) == 0) {
				ret = -EINVAL;
				goto out_unlock;
			}

			/* Context capset ID already set */
			if (vfpriv->context_init &
			    VIRTIO_GPU_CONTEXT_INIT_CAPSET_ID_MASK) {
				ret = -EINVAL;
				goto out_unlock;
			}

			vfpriv->context_init |= value;
			break;
		case VIRTGPU_CONTEXT_PARAM_NUM_RINGS:
			/* Ring count may only be set once per context. */
			if (vfpriv->base_fence_ctx) {
				ret = -EINVAL;
				goto out_unlock;
			}

			if (value > MAX_RINGS) {
				ret = -EINVAL;
				goto out_unlock;
			}

			vfpriv->base_fence_ctx = dma_fence_context_alloc(value);
			vfpriv->num_rings = value;
			break;
		case VIRTGPU_CONTEXT_PARAM_POLL_RINGS_MASK:
			if (vfpriv->ring_idx_mask) {
				ret = -EINVAL;
				goto out_unlock;
			}

			vfpriv->ring_idx_mask = value;
			break;
		default:
			ret = -EINVAL;
			goto out_unlock;
		}
	}

	/* Every polled ring index must actually exist. */
	if (vfpriv->ring_idx_mask) {
		valid_ring_mask = 0;
		for (i = 0; i < vfpriv->num_rings; i++)
			valid_ring_mask |= 1ULL << i;

		if (~valid_ring_mask & vfpriv->ring_idx_mask) {
			ret = -EINVAL;
			goto out_unlock;
		}
	}

	virtio_gpu_create_context_locked(vgdev, vfpriv);
	virtio_gpu_notify(vgdev);

out_unlock:
	mutex_unlock(&vfpriv->context_lock);
	kfree(ctx_set_params);
	return ret;
}
/* Ioctl dispatch table; slot order must match the DRM_VIRTGPU_* numbers
 * in uapi/drm/virtgpu_drm.h.  All entries are render-node capable.
 */
struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS] = {
	DRM_IOCTL_DEF_DRV(VIRTGPU_MAP, virtio_gpu_map_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VIRTGPU_EXECBUFFER, virtio_gpu_execbuffer_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VIRTGPU_GETPARAM, virtio_gpu_getparam_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_CREATE,
			  virtio_gpu_resource_create_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_INFO, virtio_gpu_resource_info_ioctl,
			  DRM_RENDER_ALLOW),
	/* make transfer async to the main ring? - no sure, can we
	 * thread these in the underlying GL
	 */
	DRM_IOCTL_DEF_DRV(VIRTGPU_TRANSFER_FROM_HOST,
			  virtio_gpu_transfer_from_host_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VIRTGPU_TRANSFER_TO_HOST,
			  virtio_gpu_transfer_to_host_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VIRTGPU_WAIT, virtio_gpu_wait_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VIRTGPU_GET_CAPS, virtio_gpu_get_caps_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_CREATE_BLOB,
			  virtio_gpu_resource_create_blob_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VIRTGPU_CONTEXT_INIT, virtio_gpu_context_init_ioctl,
			  DRM_RENDER_ALLOW),
};
| linux-master | drivers/gpu/drm/virtio/virtgpu_ioctl.c |
// SPDX-License-Identifier: MIT
/*
* Copyright (C) 2015 Red Hat, Inc.
* All Rights Reserved.
*
* Authors:
* Dave Airlie
* Alon Levy
*/
#include <linux/dma-fence-unwrap.h>
#include <linux/file.h>
#include <linux/sync_file.h>
#include <linux/uaccess.h>
#include <drm/drm_file.h>
#include <drm/drm_syncobj.h>
#include <drm/virtgpu_drm.h>
#include "virtgpu_drv.h"
/* Deferred out-syncobj signalling: after submission the job's out-fence
 * is attached to each syncobj, optionally at a timeline point via the
 * pre-allocated chain node.
 */
struct virtio_gpu_submit_post_dep {
	struct drm_syncobj *syncobj;
	struct dma_fence_chain *chain;	/* non-NULL for timeline points */
	u64 point;
};

/* All per-submission state gathered by virtio_gpu_execbuffer_ioctl(). */
struct virtio_gpu_submit {
	struct virtio_gpu_submit_post_dep *post_deps;	/* out-syncobjs */
	unsigned int num_out_syncobjs;
	struct drm_syncobj **in_syncobjs;	/* in-syncobjs flagged for reset */
	unsigned int num_in_syncobjs;
	struct virtio_gpu_object_array *buflist;	/* BOs referenced by the job */
	struct drm_virtgpu_execbuffer *exbuf;	/* the userspace request */
	struct virtio_gpu_fence *out_fence;	/* signalled on host completion */
	struct virtio_gpu_fpriv *vfpriv;
	struct virtio_gpu_device *vgdev;
	struct sync_file *sync_file;	/* wraps out_fence for FENCE_FD_OUT */
	struct drm_file *file;
	int out_fence_fd;	/* reserved fd for sync_file, -1 if unused */
	u64 fence_ctx;
	u32 ring_idx;
	void *buf;	/* kernel copy of the command stream */
};
/* Wait for a single fence unless it belongs to this submission's own
 * fence context; same-context fences are ordered by the host and need
 * no CPU-side wait.
 */
static int virtio_gpu_do_fence_wait(struct virtio_gpu_submit *submit,
				    struct dma_fence *in_fence)
{
	u32 context = submit->fence_ctx + submit->ring_idx;

	if (!dma_fence_match_context(in_fence, context))
		return dma_fence_wait(in_fence, true);

	return 0;
}
/* Unwrap @fence (which may be a fence array or chain) and wait on every
 * component that comes from a foreign fence context.
 */
static int virtio_gpu_dma_fence_wait(struct virtio_gpu_submit *submit,
				     struct dma_fence *fence)
{
	struct dma_fence_unwrap itr;
	struct dma_fence *f;
	int err;

	dma_fence_unwrap_for_each(f, &itr, fence) {
		err = virtio_gpu_do_fence_wait(submit, f);
		if (err)
			return err;
	}

	return 0;
}
/* Drop the references held on the first @nr_syncobjs entries (NULL
 * entries are allowed) and free the array itself.
 */
static void virtio_gpu_free_syncobjs(struct drm_syncobj **syncobjs,
				     u32 nr_syncobjs)
{
	u32 i;

	/* Walk backwards, mirroring the original acquisition order. */
	for (i = nr_syncobjs; i > 0; i--) {
		if (syncobjs[i - 1])
			drm_syncobj_put(syncobjs[i - 1]);
	}

	kvfree(syncobjs);
}
/*
 * Resolve the execbuffer's in-syncobjs: copy each descriptor from
 * userspace, look up its fence and wait on any foreign-context part of
 * it.  Syncobjs flagged VIRTGPU_EXECBUF_SYNCOBJ_RESET are retained
 * (with a reference) in submit->in_syncobjs so they can be reset after
 * submission.  Returns 0 or a negative errno; on failure every
 * reference taken so far is dropped.
 */
static int
virtio_gpu_parse_deps(struct virtio_gpu_submit *submit)
{
	struct drm_virtgpu_execbuffer *exbuf = submit->exbuf;
	struct drm_virtgpu_execbuffer_syncobj syncobj_desc;
	size_t syncobj_stride = exbuf->syncobj_stride;
	u32 num_in_syncobjs = exbuf->num_in_syncobjs;
	struct drm_syncobj **syncobjs;
	int ret = 0, i;

	if (!num_in_syncobjs)
		return 0;

	/*
	 * kvalloc at first tries to allocate memory using kmalloc and
	 * falls back to vmalloc only on failure. It also uses __GFP_NOWARN
	 * internally for allocations larger than a page size, preventing
	 * storm of KMSG warnings.
	 */
	syncobjs = kvcalloc(num_in_syncobjs, sizeof(*syncobjs), GFP_KERNEL);
	if (!syncobjs)
		return -ENOMEM;

	for (i = 0; i < num_in_syncobjs; i++) {
		u64 address = exbuf->in_syncobjs + i * syncobj_stride;
		struct dma_fence *fence;

		/* Zero-fill first so a short (older-ABI) descriptor copy
		 * leaves the remaining fields at their defaults. */
		memset(&syncobj_desc, 0, sizeof(syncobj_desc));

		if (copy_from_user(&syncobj_desc,
				   u64_to_user_ptr(address),
				   min(syncobj_stride, sizeof(syncobj_desc)))) {
			ret = -EFAULT;
			break;
		}

		if (syncobj_desc.flags & ~VIRTGPU_EXECBUF_SYNCOBJ_FLAGS) {
			ret = -EINVAL;
			break;
		}

		ret = drm_syncobj_find_fence(submit->file, syncobj_desc.handle,
					     syncobj_desc.point, 0, &fence);
		if (ret)
			break;

		/* Only waits on fences from foreign fence contexts. */
		ret = virtio_gpu_dma_fence_wait(submit, fence);

		dma_fence_put(fence);
		if (ret)
			break;

		if (syncobj_desc.flags & VIRTGPU_EXECBUF_SYNCOBJ_RESET) {
			syncobjs[i] = drm_syncobj_find(submit->file,
						       syncobj_desc.handle);
			if (!syncobjs[i]) {
				ret = -EINVAL;
				break;
			}
		}
	}

	if (ret) {
		/* Drops references for entries [0, i) and frees the array. */
		virtio_gpu_free_syncobjs(syncobjs, i);
		return ret;
	}

	submit->num_in_syncobjs = num_in_syncobjs;
	submit->in_syncobjs = syncobjs;

	return ret;
}
/* Clear the current fence of every syncobj that was flagged with
 * VIRTGPU_EXECBUF_SYNCOBJ_RESET during dependency parsing.
 */
static void virtio_gpu_reset_syncobjs(struct drm_syncobj **syncobjs,
				      u32 nr_syncobjs)
{
	u32 i = 0;

	while (i < nr_syncobjs) {
		struct drm_syncobj *sobj = syncobjs[i++];

		if (sobj)
			drm_syncobj_replace_fence(sobj, NULL);
	}
}
/* Release the first @nr_syncobjs post-dep entries (unused chain nodes
 * and syncobj references) and free the array.
 */
static void
virtio_gpu_free_post_deps(struct virtio_gpu_submit_post_dep *post_deps,
			  u32 nr_syncobjs)
{
	u32 i;

	for (i = nr_syncobjs; i > 0; i--) {
		/* chain is NULL once ownership moved to the syncobj. */
		kfree(post_deps[i - 1].chain);
		drm_syncobj_put(post_deps[i - 1].syncobj);
	}

	kvfree(post_deps);
}
/*
 * Resolve the execbuffer's out-syncobjs into submit->post_deps.  For
 * timeline points a dma_fence_chain node is pre-allocated here; the
 * out-fence itself is attached after submission by
 * virtio_gpu_process_post_deps().  Returns 0 or a negative errno; on
 * failure everything acquired so far is released.
 */
static int virtio_gpu_parse_post_deps(struct virtio_gpu_submit *submit)
{
	struct drm_virtgpu_execbuffer *exbuf = submit->exbuf;
	struct drm_virtgpu_execbuffer_syncobj syncobj_desc;
	struct virtio_gpu_submit_post_dep *post_deps;
	u32 num_out_syncobjs = exbuf->num_out_syncobjs;
	size_t syncobj_stride = exbuf->syncobj_stride;
	int ret = 0, i;

	if (!num_out_syncobjs)
		return 0;

	post_deps = kvcalloc(num_out_syncobjs, sizeof(*post_deps), GFP_KERNEL);
	if (!post_deps)
		return -ENOMEM;

	for (i = 0; i < num_out_syncobjs; i++) {
		u64 address = exbuf->out_syncobjs + i * syncobj_stride;

		/* Zero-fill so a short descriptor copy leaves defaults. */
		memset(&syncobj_desc, 0, sizeof(syncobj_desc));

		if (copy_from_user(&syncobj_desc,
				   u64_to_user_ptr(address),
				   min(syncobj_stride, sizeof(syncobj_desc)))) {
			ret = -EFAULT;
			break;
		}

		post_deps[i].point = syncobj_desc.point;

		/* No flags are defined for out-syncobjs. */
		if (syncobj_desc.flags) {
			ret = -EINVAL;
			break;
		}

		/* A non-zero point selects a timeline syncobj and needs a
		 * chain node allocated up front. */
		if (syncobj_desc.point) {
			post_deps[i].chain = dma_fence_chain_alloc();
			if (!post_deps[i].chain) {
				ret = -ENOMEM;
				break;
			}
		}

		post_deps[i].syncobj = drm_syncobj_find(submit->file,
							syncobj_desc.handle);
		if (!post_deps[i].syncobj) {
			kfree(post_deps[i].chain);
			ret = -EINVAL;
			break;
		}
	}

	if (ret) {
		/* Frees entries [0, i); entry i was unwound above. */
		virtio_gpu_free_post_deps(post_deps, i);
		return ret;
	}

	submit->num_out_syncobjs = num_out_syncobjs;
	submit->post_deps = post_deps;

	return 0;
}
/* Attach the job's out-fence to every parsed out-syncobj: at the
 * requested timeline point when a chain node was pre-allocated,
 * otherwise as the syncobj's new binary fence.
 */
static void
virtio_gpu_process_post_deps(struct virtio_gpu_submit *submit)
{
	struct virtio_gpu_submit_post_dep *post_deps = submit->post_deps;
	struct dma_fence *fence;
	u32 i;

	if (!post_deps)
		return;

	fence = &submit->out_fence->f;

	for (i = 0; i < submit->num_out_syncobjs; i++) {
		struct virtio_gpu_submit_post_dep *dep = &post_deps[i];

		if (dep->chain) {
			drm_syncobj_add_point(dep->syncobj, dep->chain,
					      fence, dep->point);
			/* Ownership of the chain node moved to the syncobj. */
			dep->chain = NULL;
		} else {
			drm_syncobj_replace_fence(dep->syncobj, fence);
		}
	}
}
/*
 * Allocate and reserve a DRM event that will be delivered to @file when
 * @fence signals (see virtio_gpu_fence_event_process()).  On success
 * ownership of the event is stored in fence->e.  Returns 0 or a
 * negative errno.
 */
static int virtio_gpu_fence_event_create(struct drm_device *dev,
					 struct drm_file *file,
					 struct virtio_gpu_fence *fence,
					 u32 ring_idx)
{
	struct virtio_gpu_fence_event *e = NULL;
	int ret;

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (!e)
		return -ENOMEM;

	e->event.type = VIRTGPU_EVENT_FENCE_SIGNALED;
	e->event.length = sizeof(e->event);

	/* Reserves space in the file's event queue; fails if the client
	 * would exceed its event budget. */
	ret = drm_event_reserve_init(dev, file, &e->base, &e->event);
	if (ret) {
		kfree(e);
		return ret;
	}

	fence->e = e;

	return 0;
}
/*
 * Look up the GEM objects referenced by the execbuffer and populate
 * submit->buflist with them (a reference is taken on each object).
 *
 * Returns 0 on success (also when no BO handles were supplied),
 * -ENOMEM on allocation failure, -EFAULT if the userspace handle array
 * cannot be copied, or -ENOENT if a handle does not resolve.
 */
static int virtio_gpu_init_submit_buflist(struct virtio_gpu_submit *submit)
{
	struct drm_virtgpu_execbuffer *exbuf = submit->exbuf;
	u32 *bo_handles;
	int ret = 0;

	if (!exbuf->num_bo_handles)
		return 0;

	bo_handles = kvmalloc_array(exbuf->num_bo_handles, sizeof(*bo_handles),
				    GFP_KERNEL);
	if (!bo_handles)
		return -ENOMEM;

	if (copy_from_user(bo_handles, u64_to_user_ptr(exbuf->bo_handles),
			   exbuf->num_bo_handles * sizeof(*bo_handles))) {
		ret = -EFAULT;
		goto out_free;
	}

	submit->buflist = virtio_gpu_array_from_handles(submit->file, bo_handles,
							exbuf->num_bo_handles);
	if (!submit->buflist)
		ret = -ENOENT;

out_free:
	/* The handle array is only needed for the lookup above. */
	kvfree(bo_handles);
	return ret;
}
/*
 * Release every resource still owned by @submit.  After a successful
 * submission virtio_gpu_complete_submit() has transferred ownership of
 * buf/buflist/sync_file/fd, so those branches become no-ops.  Safe to
 * call on a partially initialized (zeroed) submit.
 */
static void virtio_gpu_cleanup_submit(struct virtio_gpu_submit *submit)
{
	virtio_gpu_reset_syncobjs(submit->in_syncobjs, submit->num_in_syncobjs);
	virtio_gpu_free_syncobjs(submit->in_syncobjs, submit->num_in_syncobjs);
	virtio_gpu_free_post_deps(submit->post_deps, submit->num_out_syncobjs);

	/* buf is NULL, a vmemdup_user() result, or an ERR_PTR. */
	if (!IS_ERR(submit->buf))
		kvfree(submit->buf);

	if (submit->buflist)
		virtio_gpu_array_put_free(submit->buflist);

	if (submit->out_fence_fd >= 0)
		put_unused_fd(submit->out_fence_fd);

	if (submit->out_fence)
		dma_fence_put(&submit->out_fence->f);

	if (submit->sync_file)
		fput(submit->sync_file->file);
}
/* Queue the copied command stream to the host and kick the virtqueue. */
static void virtio_gpu_submit(struct virtio_gpu_submit *submit)
{
	virtio_gpu_cmd_submit(submit->vgdev, submit->buf, submit->exbuf->size,
			      submit->vfpriv->ctx_id, submit->buflist,
			      submit->out_fence);
	virtio_gpu_notify(submit->vgdev);
}

/*
 * Mark the resources whose ownership moved to the queued job so that
 * virtio_gpu_cleanup_submit() does not release them.
 */
static void virtio_gpu_complete_submit(struct virtio_gpu_submit *submit)
{
	submit->buf = NULL;
	submit->buflist = NULL;
	submit->sync_file = NULL;
	submit->out_fence_fd = -1;
}
/*
 * Gather all per-submission state: optionally allocate the out-fence
 * and its DRM poll event, look up the referenced BOs, copy the command
 * stream, and reserve a sync-file fd when requested.
 *
 * On error the caller is expected to invoke virtio_gpu_cleanup_submit()
 * to unwind whatever was acquired.  Returns 0 or a negative errno.
 *
 * Fixes over the previous version:
 *  - virtio_gpu_fence_alloc() returns NULL on allocation failure (it is
 *    a plain kzalloc); the result was used unchecked, leading to a NULL
 *    dereference in virtio_gpu_fence_event_create() / sync_file_create().
 *  - out_fence_fd is now set to -1 right after the memset; previously an
 *    early error return left it at 0, and virtio_gpu_cleanup_submit()
 *    would wrongly call put_unused_fd(0).
 */
static int virtio_gpu_init_submit(struct virtio_gpu_submit *submit,
				  struct drm_virtgpu_execbuffer *exbuf,
				  struct drm_device *dev,
				  struct drm_file *file,
				  u64 fence_ctx, u32 ring_idx)
{
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fence *out_fence;
	bool drm_fence_event;
	int err;

	memset(submit, 0, sizeof(*submit));
	submit->out_fence_fd = -1;

	/* A fence-signaled DRM event is only delivered for rings the file
	 * opted into via VIRTGPU_CONTEXT_PARAM_POLL_RINGS_MASK. */
	if ((exbuf->flags & VIRTGPU_EXECBUF_RING_IDX) &&
	    (vfpriv->ring_idx_mask & BIT_ULL(ring_idx)))
		drm_fence_event = true;
	else
		drm_fence_event = false;

	/* Allocate an out-fence only if something will consume it. */
	if ((exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_OUT) ||
	    exbuf->num_out_syncobjs ||
	    exbuf->num_bo_handles ||
	    drm_fence_event) {
		out_fence = virtio_gpu_fence_alloc(vgdev, fence_ctx, ring_idx);
		if (!out_fence)
			return -ENOMEM;
	} else {
		out_fence = NULL;
	}

	if (drm_fence_event) {
		err = virtio_gpu_fence_event_create(dev, file, out_fence, ring_idx);
		if (err) {
			dma_fence_put(&out_fence->f);
			return err;
		}
	}

	submit->out_fence = out_fence;
	submit->fence_ctx = fence_ctx;
	submit->ring_idx = ring_idx;
	submit->vfpriv = vfpriv;
	submit->vgdev = vgdev;
	submit->exbuf = exbuf;
	submit->file = file;

	err = virtio_gpu_init_submit_buflist(submit);
	if (err)
		return err;

	submit->buf = vmemdup_user(u64_to_user_ptr(exbuf->command), exbuf->size);
	if (IS_ERR(submit->buf))
		return PTR_ERR(submit->buf);

	if (exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_OUT) {
		err = get_unused_fd_flags(O_CLOEXEC);
		if (err < 0)
			return err;

		submit->out_fence_fd = err;

		submit->sync_file = sync_file_create(&out_fence->f);
		if (!submit->sync_file)
			return -ENOMEM;
	}

	return 0;
}
/* Resolve and await the optional FENCE_FD_IN dependency. */
static int virtio_gpu_wait_in_fence(struct virtio_gpu_submit *submit)
{
	struct dma_fence *in_fence;
	int ret;

	if (!(submit->exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_IN))
		return 0;

	in_fence = sync_file_get_fence(submit->exbuf->fence_fd);
	if (!in_fence)
		return -EINVAL;

	/*
	 * Wait if the fence is from a foreign context, or if the fence
	 * array contains any fence from a foreign context.
	 */
	ret = virtio_gpu_dma_fence_wait(submit, in_fence);

	dma_fence_put(in_fence);

	return ret;
}
/* Publish the out-fence fd to userspace once the job is queued. */
static void virtio_gpu_install_out_fence_fd(struct virtio_gpu_submit *submit)
{
	if (!submit->sync_file)
		return;

	submit->exbuf->fence_fd = submit->out_fence_fd;
	fd_install(submit->out_fence_fd, submit->sync_file->file);
}

/* Lock the reservation objects of the submission's BOs, if any. */
static int virtio_gpu_lock_buflist(struct virtio_gpu_submit *submit)
{
	if (!submit->buflist)
		return 0;

	return virtio_gpu_array_lock_resv(submit->buflist);
}
/*
 * VIRTGPU_EXECBUFFER ioctl: validate the request, gather submission
 * state, resolve sync dependencies, and queue the command buffer to
 * the host.  Returns 0 or a negative errno.
 */
int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	u64 fence_ctx = vgdev->fence_drv.context;
	struct drm_virtgpu_execbuffer *exbuf = data;
	struct virtio_gpu_submit submit;
	u32 ring_idx = 0;
	int ret = -EINVAL;

	if (!vgdev->has_virgl_3d)
		return -ENOSYS;

	if (exbuf->flags & ~VIRTGPU_EXECBUF_FLAGS)
		return ret;

	if (exbuf->flags & VIRTGPU_EXECBUF_RING_IDX) {
		/* Per-ring submission requires a context created with
		 * NUM_RINGS; validate the index against it. */
		if (exbuf->ring_idx >= vfpriv->num_rings)
			return ret;

		if (!vfpriv->base_fence_ctx)
			return ret;

		fence_ctx = vfpriv->base_fence_ctx;
		ring_idx = exbuf->ring_idx;
	}

	/* Create the host context lazily in case no ioctl did so yet. */
	virtio_gpu_create_context(dev, file);

	ret = virtio_gpu_init_submit(&submit, exbuf, dev, file,
				     fence_ctx, ring_idx);
	if (ret)
		goto cleanup;

	ret = virtio_gpu_parse_post_deps(&submit);
	if (ret)
		goto cleanup;

	ret = virtio_gpu_parse_deps(&submit);
	if (ret)
		goto cleanup;

	/*
	 * Await in-fences in the end of the job submission path to
	 * optimize the path by proceeding directly to the submission
	 * to virtio after the waits.
	 */
	ret = virtio_gpu_wait_in_fence(&submit);
	if (ret)
		goto cleanup;

	ret = virtio_gpu_lock_buflist(&submit);
	if (ret)
		goto cleanup;

	virtio_gpu_submit(&submit);

	/*
	 * Set up usr-out data after submitting the job to optimize
	 * the job submission path.
	 */
	virtio_gpu_install_out_fence_fd(&submit);
	virtio_gpu_process_post_deps(&submit);
	/* Transfers ownership of buf/buflist/fd to the queued job. */
	virtio_gpu_complete_submit(&submit);
cleanup:
	virtio_gpu_cleanup_submit(&submit);

	return ret;
}
| linux-master | drivers/gpu/drm/virtio/virtgpu_submit.c |
/*
* Copyright (C) 2015 Red Hat, Inc.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#include <trace/events/dma_fence.h>
#include "virtgpu_drv.h"
#define to_virtio_gpu_fence(x) \
	container_of(x, struct virtio_gpu_fence, f)

/* dma_fence_ops::get_driver_name */
static const char *virtio_gpu_get_driver_name(struct dma_fence *f)
{
	return "virtio_gpu";
}

/* dma_fence_ops::get_timeline_name — all fences use the control queue. */
static const char *virtio_gpu_get_timeline_name(struct dma_fence *f)
{
	return "controlq";
}

/* Never reports signaled from polling; fences are signalled explicitly
 * in virtio_gpu_fence_event_process() when the host completes them.
 */
static bool virtio_gpu_fence_signaled(struct dma_fence *f)
{
	/* leaked fence outside driver before completing
	 * initialization with virtio_gpu_fence_emit.
	 */
	WARN_ON_ONCE(f->seqno == 0);
	return false;
}

/* Debug string: this fence's context and sequence number. */
static void virtio_gpu_fence_value_str(struct dma_fence *f, char *str, int size)
{
	snprintf(str, size, "[%llu, %llu]", f->context, f->seqno);
}

/* Debug string: the last fence id the host reported as completed. */
static void virtio_gpu_timeline_value_str(struct dma_fence *f, char *str,
					  int size)
{
	struct virtio_gpu_fence *fence = to_virtio_gpu_fence(f);

	snprintf(str, size, "%llu",
		 (u64)atomic64_read(&fence->drv->last_fence_id));
}

static const struct dma_fence_ops virtio_gpu_fence_ops = {
	.get_driver_name = virtio_gpu_get_driver_name,
	.get_timeline_name = virtio_gpu_get_timeline_name,
	.signaled = virtio_gpu_fence_signaled,
	.fence_value_str = virtio_gpu_fence_value_str,
	.timeline_value_str = virtio_gpu_timeline_value_str,
};
/*
 * Allocate a virtio-gpu fence on the context @base_fence_ctx + @ring_idx.
 * Returns NULL on allocation failure.
 */
struct virtio_gpu_fence *virtio_gpu_fence_alloc(struct virtio_gpu_device *vgdev,
						uint64_t base_fence_ctx,
						uint32_t ring_idx)
{
	struct virtio_gpu_fence_driver *drv = &vgdev->fence_drv;
	uint64_t fence_context = base_fence_ctx + ring_idx;
	struct virtio_gpu_fence *fence;

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (!fence)
		return NULL;

	fence->drv = drv;
	fence->ring_idx = ring_idx;
	/* Ring info is only emitted for contexts other than the
	 * driver-global one. */
	fence->emit_fence_info = (base_fence_ctx != drv->context);

	/* This only partially initializes the fence because the seqno is
	 * unknown yet. The fence must not be used outside of the driver
	 * until virtio_gpu_fence_emit is called.
	 */
	dma_fence_init(&fence->f, &virtio_gpu_fence_ops, &drv->lock,
		       fence_context, 0);

	return fence;
}
/*
 * Finish fence initialization: assign the next sequence number/fence
 * id, queue the fence on the driver's pending list, and stamp the
 * command header so the host signals the fence on completion.
 */
void virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
			   struct virtio_gpu_ctrl_hdr *cmd_hdr,
			   struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_fence_driver *drv = &vgdev->fence_drv;
	unsigned long irq_flags;

	spin_lock_irqsave(&drv->lock, irq_flags);
	fence->fence_id = fence->f.seqno = ++drv->current_fence_id;
	/* List reference; dropped in virtio_gpu_fence_event_process(). */
	dma_fence_get(&fence->f);
	list_add_tail(&fence->node, &drv->fences);
	spin_unlock_irqrestore(&drv->lock, irq_flags);

	trace_dma_fence_emit(&fence->f);

	cmd_hdr->flags |= cpu_to_le32(VIRTIO_GPU_FLAG_FENCE);
	cmd_hdr->fence_id = cpu_to_le64(fence->fence_id);

	/* Only currently defined fence param. */
	if (fence->emit_fence_info) {
		cmd_hdr->flags |=
			cpu_to_le32(VIRTIO_GPU_FLAG_INFO_RING_IDX);
		cmd_hdr->ring_idx = (u8)fence->ring_idx;
	}
}
/*
 * Host completion handler: the host reported @fence_id as finished.
 * Finds the matching fence on the pending list, signals it, and —
 * relying on in-order completion within a dma-fence context — also
 * signals every earlier pending fence of the same context.  Delivers
 * the optional DRM poll event attached to each signalled fence.
 */
void virtio_gpu_fence_event_process(struct virtio_gpu_device *vgdev,
				    u64 fence_id)
{
	struct virtio_gpu_fence_driver *drv = &vgdev->fence_drv;
	struct virtio_gpu_fence *signaled, *curr, *tmp;
	unsigned long irq_flags;

	spin_lock_irqsave(&drv->lock, irq_flags);
	atomic64_set(&vgdev->fence_drv.last_fence_id, fence_id);
	list_for_each_entry_safe(curr, tmp, &drv->fences, node) {
		if (fence_id != curr->fence_id)
			continue;

		signaled = curr;

		/*
		 * Signal any fences with a strictly smaller sequence number
		 * than the current signaled fence.
		 */
		list_for_each_entry_safe(curr, tmp, &drv->fences, node) {
			/* dma-fence contexts must match */
			if (signaled->f.context != curr->f.context)
				continue;

			if (!dma_fence_is_later(&signaled->f, &curr->f))
				continue;

			dma_fence_signal_locked(&curr->f);
			if (curr->e) {
				drm_send_event(vgdev->ddev, &curr->e->base);
				curr->e = NULL;
			}
			/* Drop the list reference taken at emit time. */
			list_del(&curr->node);
			dma_fence_put(&curr->f);
		}

		dma_fence_signal_locked(&signaled->f);
		if (signaled->e) {
			drm_send_event(vgdev->ddev, &signaled->e->base);
			signaled->e = NULL;
		}
		list_del(&signaled->node);
		dma_fence_put(&signaled->f);
		break;
	}
	spin_unlock_irqrestore(&drv->lock, irq_flags);
}
| linux-master | drivers/gpu/drm/virtio/virtgpu_fence.c |
/*
* Copyright 2014 Canonical
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Andreas Pokorny
*/
#include <drm/drm_prime.h>
#include <linux/virtio_dma_buf.h>
#include "virtgpu_drv.h"
/* virtio_dma_buf get_uuid hook: wait for the host's ASSIGN_UUID reply,
 * then hand out the buffer's UUID (or -ENODEV if assignment failed).
 */
static int virtgpu_virtio_get_uuid(struct dma_buf *buf,
				   uuid_t *uuid)
{
	struct drm_gem_object *obj = buf->priv;
	struct virtio_gpu_device *vgdev = obj->dev->dev_private;
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);

	wait_event(vgdev->resp_wq, bo->uuid_state != STATE_INITIALIZING);
	if (bo->uuid_state == STATE_OK) {
		uuid_copy(uuid, &bo->uuid);
		return 0;
	}

	return -ENODEV;
}
/* dma-buf map hook: VRAM objects use their dedicated mapping path,
 * everything else goes through the generic GEM helper.
 */
static struct sg_table *
virtgpu_gem_map_dma_buf(struct dma_buf_attachment *attach,
			enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);

	if (!virtio_gpu_is_vram(bo))
		return drm_gem_map_dma_buf(attach, dir);

	return virtio_gpu_vram_map_dma_buf(bo, attach->dev, dir);
}

/* dma-buf unmap hook, mirror of virtgpu_gem_map_dma_buf(). */
static void virtgpu_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
				      struct sg_table *sgt,
				      enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);

	if (!virtio_gpu_is_vram(bo))
		drm_gem_unmap_dma_buf(attach, sgt, dir);
	else
		virtio_gpu_vram_unmap_dma_buf(attach->dev, sgt, dir);
}
/*
 * dma-buf ops for exported virtio-gpu buffers, extended with the
 * virtio_dma_buf get_uuid hook so other virtio devices can identify
 * the backing host resource.
 */
static const struct virtio_dma_buf_ops virtgpu_dmabuf_ops =  {
	.ops = {
		.cache_sgt_mapping = true,
		.attach = virtio_dma_buf_attach,
		.detach = drm_gem_map_detach,
		.map_dma_buf = virtgpu_gem_map_dma_buf,
		.unmap_dma_buf = virtgpu_gem_unmap_dma_buf,
		.release = drm_gem_dmabuf_release,
		.mmap = drm_gem_dmabuf_mmap,
		.vmap = drm_gem_dmabuf_vmap,
		.vunmap = drm_gem_dmabuf_vunmap,
	},
	.device_attach = drm_gem_map_attach,
	.get_uuid = virtgpu_virtio_get_uuid,
};
/*
 * Ask the host to assign a UUID to @bo so the exported dma-buf can be
 * identified across virtio devices.  The reply is awaited later in
 * virtgpu_virtio_get_uuid().
 */
int virtio_gpu_resource_assign_uuid(struct virtio_gpu_device *vgdev,
				    struct virtio_gpu_object *bo)
{
	struct virtio_gpu_object_array *objs;

	objs = virtio_gpu_array_alloc(1);
	if (!objs)
		return -ENOMEM;

	virtio_gpu_array_add_obj(objs, &bo->base.base);

	return virtio_gpu_cmd_resource_assign_uuid(vgdev, objs);
}
/*
 * Export a virtio-gpu GEM object as a dma-buf.  Non-blob resources get
 * a host-assigned UUID when the host supports it; otherwise (and for
 * blobs without the CROSS_DEVICE flag) later uuid lookups fail with
 * -ENODEV.
 */
struct dma_buf *virtgpu_gem_prime_export(struct drm_gem_object *obj,
					 int flags)
{
	struct dma_buf *buf;
	struct drm_device *dev = obj->dev;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
	int ret = 0;
	bool blob = bo->host3d_blob || bo->guest_blob;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	if (!blob) {
		if (vgdev->has_resource_assign_uuid) {
			ret = virtio_gpu_resource_assign_uuid(vgdev, bo);
			if (ret)
				return ERR_PTR(ret);

			virtio_gpu_notify(vgdev);
		} else {
			bo->uuid_state = STATE_ERR;
		}
	} else if (!(bo->blob_flags & VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE)) {
		bo->uuid_state = STATE_ERR;
	}

	exp_info.ops = &virtgpu_dmabuf_ops.ops;
	exp_info.size = obj->size;
	exp_info.flags = flags;
	exp_info.priv = obj;
	exp_info.resv = obj->resv;

	buf = virtio_dma_buf_export(&exp_info);
	if (IS_ERR(buf))
		return buf;

	/* The dma-buf pins both the device and the GEM object. */
	drm_dev_get(dev);
	drm_gem_object_get(obj);

	return buf;
}
/* Import a dma-buf; self-imports short-circuit to the original GEM
 * object instead of creating a new one.
 */
struct drm_gem_object *virtgpu_gem_prime_import(struct drm_device *dev,
						struct dma_buf *buf)
{
	if (buf->ops == &virtgpu_dmabuf_ops.ops) {
		struct drm_gem_object *obj = buf->priv;

		/*
		 * Importing dmabuf exported from our own gem increases
		 * refcount on gem itself instead of f_count of dmabuf.
		 */
		if (obj->dev == dev) {
			drm_gem_object_get(obj);
			return obj;
		}
	}

	return drm_gem_prime_import(dev, buf);
}

/* Importing foreign sg-tables is not supported by this driver. */
struct drm_gem_object *virtgpu_gem_prime_import_sg_table(
	struct drm_device *dev, struct dma_buf_attachment *attach,
	struct sg_table *table)
{
	return ERR_PTR(-ENODEV);
}
| linux-master | drivers/gpu/drm/virtio/virtgpu_prime.c |
/*
* Copyright (C) 2015 Red Hat, Inc.
* All Rights Reserved.
*
* Authors:
* Dave Airlie <[email protected]>
* Gerd Hoffmann <[email protected]>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/poll.h>
#include <linux/wait.h>
#include <drm/drm.h>
#include <drm/drm_aperture.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_generic.h>
#include <drm/drm_file.h>
#include "virtgpu_drv.h"
/* Forward declaration; the full driver struct is at the bottom of the
 * file but virtio_gpu_pci_quirk()/probe() need it earlier. */
static const struct drm_driver driver;

/* modeset module parameter: 1 = force on, 0 = disable, -1 = auto. */
static int virtio_gpu_modeset = -1;

MODULE_PARM_DESC(modeset, "Disable/Enable modesetting");
module_param_named(modeset, virtio_gpu_modeset, int, 0400);
/* PCI-specific fixup: virtio-vga devices expose a legacy VGA
 * framebuffer that firmware drivers may have claimed; evict them
 * before taking over the display.
 */
static int virtio_gpu_pci_quirk(struct drm_device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev->dev);
	const char *pname = dev_name(&pdev->dev);
	bool vga = (pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA;

	DRM_INFO("pci: %s detected at %s\n",
		 vga ? "virtio-vga" : "virtio-gpu-pci",
		 pname);

	if (!vga)
		return 0;

	return drm_aperture_remove_conflicting_pci_framebuffers(pdev, &driver);
}
/*
 * Bind a virtio GPU device: allocate the DRM device on the transport's
 * parent, run PCI quirks, initialize the driver, and register with DRM.
 */
static int virtio_gpu_probe(struct virtio_device *vdev)
{
	struct drm_device *dev;
	int ret;

	/* Honor nomodeset unless the user explicitly enabled us. */
	if (drm_firmware_drivers_only() && virtio_gpu_modeset == -1)
		return -EINVAL;

	if (virtio_gpu_modeset == 0)
		return -EINVAL;

	/*
	 * The virtio-gpu device is a virtual device that doesn't have DMA
	 * ops assigned to it, nor DMA mask set and etc. Its parent device
	 * is actual GPU device we want to use it for the DRM's device in
	 * order to benefit from using generic DRM APIs.
	 */
	dev = drm_dev_alloc(&driver, vdev->dev.parent);
	if (IS_ERR(dev))
		return PTR_ERR(dev);

	vdev->priv = dev;

	if (dev_is_pci(vdev->dev.parent)) {
		ret = virtio_gpu_pci_quirk(dev);
		if (ret)
			goto err_free;
	}

	ret = virtio_gpu_init(vdev, dev);
	if (ret)
		goto err_free;

	ret = drm_dev_register(dev, 0);
	if (ret)
		goto err_deinit;

	drm_fbdev_generic_setup(vdev->priv, 32);
	return 0;

err_deinit:
	virtio_gpu_deinit(dev);
err_free:
	drm_dev_put(dev);
	return ret;
}
/* Unbind: unplug first so userspace accesses fail gracefully, then
 * shut down KMS state and drop the device.
 */
static void virtio_gpu_remove(struct virtio_device *vdev)
{
	struct drm_device *dev = vdev->priv;

	drm_dev_unplug(dev);
	drm_atomic_helper_shutdown(dev);
	virtio_gpu_deinit(dev);
	drm_dev_put(dev);
}

/* Host changed the device config (e.g. display hotplug); defer the
 * handling to process context via config_changed_work.
 */
static void virtio_gpu_config_changed(struct virtio_device *vdev)
{
	struct drm_device *dev = vdev->priv;
	struct virtio_gpu_device *vgdev = dev->dev_private;

	schedule_work(&vgdev->config_changed_work);
}
/* Virtio device IDs this driver binds to (any GPU revision). */
static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_GPU, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

/* Optional virtio features we negotiate with the host. */
static unsigned int features[] = {
#ifdef __LITTLE_ENDIAN
	/*
	 * Gallium command stream send by virgl is native endian.
	 * Because of that we only support little endian guests on
	 * little endian hosts.
	 */
	VIRTIO_GPU_F_VIRGL,
#endif
	VIRTIO_GPU_F_EDID,
	VIRTIO_GPU_F_RESOURCE_UUID,
	VIRTIO_GPU_F_RESOURCE_BLOB,
	VIRTIO_GPU_F_CONTEXT_INIT,
};

/* Virtio bus glue. */
static struct virtio_driver virtio_gpu_driver = {
	.feature_table = features,
	.feature_table_size = ARRAY_SIZE(features),
	.driver.name = KBUILD_MODNAME,
	.driver.owner = THIS_MODULE,
	.id_table = id_table,
	.probe = virtio_gpu_probe,
	.remove = virtio_gpu_remove,
	.config_changed = virtio_gpu_config_changed
};
module_virtio_driver(virtio_gpu_driver);
MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio GPU driver");
MODULE_LICENSE("GPL and additional rights");
MODULE_AUTHOR("Dave Airlie <[email protected]>");
MODULE_AUTHOR("Gerd Hoffmann <[email protected]>");
MODULE_AUTHOR("Alon Levy");
DEFINE_DRM_GEM_FOPS(virtio_gpu_driver_fops);

/* DRM driver description; referenced via the forward declaration at
 * the top of the file. */
static const struct drm_driver driver = {
	/*
	 * If KMS is disabled DRIVER_MODESET and DRIVER_ATOMIC are masked
	 * out via drm_device::driver_features:
	 */
	.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_RENDER | DRIVER_ATOMIC |
			   DRIVER_SYNCOBJ | DRIVER_SYNCOBJ_TIMELINE,
	.open = virtio_gpu_driver_open,
	.postclose = virtio_gpu_driver_postclose,

	.dumb_create = virtio_gpu_mode_dumb_create,
	.dumb_map_offset = virtio_gpu_mode_dumb_mmap,

#if defined(CONFIG_DEBUG_FS)
	.debugfs_init = virtio_gpu_debugfs_init,
#endif
	.gem_prime_import = virtgpu_gem_prime_import,
	.gem_prime_import_sg_table = virtgpu_gem_prime_import_sg_table,

	.gem_create_object = virtio_gpu_create_object,
	.fops = &virtio_gpu_driver_fops,

	.ioctls = virtio_gpu_ioctls,
	.num_ioctls = DRM_VIRTIO_NUM_IOCTLS,

	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,

	.release = virtio_gpu_release,
};
| linux-master | drivers/gpu/drm/virtio/virtgpu_drv.c |
// SPDX-License-Identifier: GPL-2.0
#include "virtgpu_drv.h"
#define CREATE_TRACE_POINTS
#include "virtgpu_trace.h"
| linux-master | drivers/gpu/drm/virtio/virtgpu_trace_points.c |
/*
* Copyright (C) 2015 Red Hat, Inc.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#include <drm/drm_file.h>
#include <drm/drm_fourcc.h>
#include "virtgpu_drv.h"
/*
 * Allocate a virtio-gpu object and create a GEM handle for it.  On
 * success the handle owns the only reference; *obj_p is valid for the
 * caller's immediate use and *handle_p receives the handle.
 */
static int virtio_gpu_gem_create(struct drm_file *file,
				 struct drm_device *dev,
				 struct virtio_gpu_object_params *params,
				 struct drm_gem_object **obj_p,
				 uint32_t *handle_p)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_object *obj;
	int ret;
	u32 handle;

	ret = virtio_gpu_object_create(vgdev, params, &obj, NULL);
	if (ret < 0)
		return ret;

	ret = drm_gem_handle_create(file, &obj->base.base, &handle);
	if (ret) {
		drm_gem_object_release(&obj->base.base);
		return ret;
	}

	*obj_p = &obj->base.base;

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(&obj->base.base);

	*handle_p = handle;
	return 0;
}
/*
 * DRM dumb-buffer hook: create a 32bpp XRGB8888 object sized for
 * width x height and return a handle plus the row pitch.  Uses a guest
 * blob resource when the device supports blobs but not 3D.
 * Returns 0 or a negative errno.
 *
 * (The previous `goto fail` label only did `return ret` — replaced by
 * direct returns.)
 */
int virtio_gpu_mode_dumb_create(struct drm_file *file_priv,
				struct drm_device *dev,
				struct drm_mode_create_dumb *args)
{
	struct drm_gem_object *gobj;
	struct virtio_gpu_object_params params = { 0 };
	struct virtio_gpu_device *vgdev = dev->dev_private;
	int ret;
	uint32_t pitch;

	if (args->bpp != 32)
		return -EINVAL;

	pitch = args->width * 4;
	args->size = pitch * args->height;
	args->size = ALIGN(args->size, PAGE_SIZE);

	params.format = virtio_gpu_translate_format(DRM_FORMAT_HOST_XRGB8888);
	params.width = args->width;
	params.height = args->height;
	params.size = args->size;
	params.dumb = true;

	if (vgdev->has_resource_blob && !vgdev->has_virgl_3d) {
		params.blob_mem = VIRTGPU_BLOB_MEM_GUEST;
		params.blob_flags = VIRTGPU_BLOB_FLAG_USE_SHAREABLE;
		params.blob = true;
	}

	ret = virtio_gpu_gem_create(file_priv, dev, &params, &gobj,
				    &args->handle);
	if (ret)
		return ret;

	args->pitch = pitch;
	return 0;
}
/* DRM dumb-buffer mmap hook: report the fake mmap offset for @handle. */
int virtio_gpu_mode_dumb_mmap(struct drm_file *file_priv,
			      struct drm_device *dev,
			      uint32_t handle, uint64_t *offset_p)
{
	struct drm_gem_object *gobj;

	BUG_ON(!offset_p);

	gobj = drm_gem_object_lookup(file_priv, handle);
	if (!gobj)
		return -ENOENT;

	*offset_p = drm_vma_node_offset_addr(&gobj->vma_node);
	drm_gem_object_put(gobj);

	return 0;
}
/*
 * GEM open hook: attach the resource to the file's host render context
 * (3D only) so later commands referencing it are valid.
 */
int virtio_gpu_gem_object_open(struct drm_gem_object *obj,
			       struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = obj->dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct virtio_gpu_object_array *objs;

	if (!vgdev->has_virgl_3d)
		goto out_notify;

	/* the context might still be missing when the first ioctl is
	 * DRM_IOCTL_MODE_CREATE_DUMB or DRM_IOCTL_PRIME_FD_TO_HANDLE
	 */
	virtio_gpu_create_context(obj->dev, file);

	objs = virtio_gpu_array_alloc(1);
	if (!objs)
		return -ENOMEM;
	virtio_gpu_array_add_obj(objs, obj);

	virtio_gpu_cmd_context_attach_resource(vgdev, vfpriv->ctx_id,
					       objs);
out_notify:
	virtio_gpu_notify(vgdev);

	return 0;
}

/* GEM close hook: detach the resource from the file's host context.
 * Best effort — on OOM the detach command is silently skipped.
 */
void virtio_gpu_gem_object_close(struct drm_gem_object *obj,
				 struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = obj->dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct virtio_gpu_object_array *objs;

	if (!vgdev->has_virgl_3d)
		return;

	objs = virtio_gpu_array_alloc(1);
	if (!objs)
		return;
	virtio_gpu_array_add_obj(objs, obj);

	virtio_gpu_cmd_context_detach_resource(vgdev, vfpriv->ctx_id,
					       objs);
	virtio_gpu_notify(vgdev);
}
/*
 * Allocate an object array with room for @nents GEM object pointers.
 * The array starts empty (nents == 0).  Returns NULL on allocation
 * failure.
 */
struct virtio_gpu_object_array *virtio_gpu_array_alloc(u32 nents)
{
	struct virtio_gpu_object_array *arr;

	arr = kmalloc(struct_size(arr, objs, nents), GFP_KERNEL);
	if (arr) {
		arr->nents = 0;
		arr->total = nents;
	}
	return arr;
}
/* Release the array container itself; callers must have dropped the
 * object references first (see virtio_gpu_array_put_free()). */
static void virtio_gpu_array_free(struct virtio_gpu_object_array *objs)
{
	kfree(objs);
}
/*
 * Build an object array from an array of userspace GEM handles, taking
 * a reference on each object.  On any lookup failure all references
 * taken so far are dropped and NULL is returned.
 */
struct virtio_gpu_object_array*
virtio_gpu_array_from_handles(struct drm_file *drm_file, u32 *handles, u32 nents)
{
	struct virtio_gpu_object_array *objs;
	u32 i;

	objs = virtio_gpu_array_alloc(nents);
	if (!objs)
		return NULL;

	for (i = 0; i < nents; i++) {
		objs->objs[i] = drm_gem_object_lookup(drm_file, handles[i]);
		if (!objs->objs[i]) {
			/* Record how many entries hold valid references so
			 * put_free() only drops those. */
			objs->nents = i;
			virtio_gpu_array_put_free(objs);
			return NULL;
		}
	}
	objs->nents = i;
	return objs;
}
/*
 * Append @obj to the array, taking a reference on it.  Silently drops
 * the object (with a one-shot warning) if the array is already full.
 */
void virtio_gpu_array_add_obj(struct virtio_gpu_object_array *objs,
			      struct drm_gem_object *obj)
{
	if (WARN_ON_ONCE(objs->nents == objs->total))
		return;

	drm_gem_object_get(obj);
	objs->objs[objs->nents] = obj;
	objs->nents++;
}
/*
 * Lock the reservation objects of all buffers in the array and reserve
 * one fence slot on each.  The single-object case avoids the ww-mutex
 * machinery.  On failure everything locked so far is unlocked again.
 * Returns 0 on success or a negative errno.
 */
int virtio_gpu_array_lock_resv(struct virtio_gpu_object_array *objs)
{
	unsigned int i;
	int ret;

	if (objs->nents == 1) {
		/* Fast path: a single resv needs no acquire context. */
		ret = dma_resv_lock_interruptible(objs->objs[0]->resv, NULL);
	} else {
		ret = drm_gem_lock_reservations(objs->objs, objs->nents,
						&objs->ticket);
	}
	if (ret)
		return ret;

	for (i = 0; i < objs->nents; ++i) {
		ret = dma_resv_reserve_fences(objs->objs[i]->resv, 1);
		if (ret) {
			virtio_gpu_array_unlock_resv(objs);
			return ret;
		}
	}
	return ret;
}
/*
 * Drop the reservation locks taken by virtio_gpu_array_lock_resv(),
 * mirroring its single-object fast path.
 */
void virtio_gpu_array_unlock_resv(struct virtio_gpu_object_array *objs)
{
	if (objs->nents != 1)
		drm_gem_unlock_reservations(objs->objs, objs->nents,
					    &objs->ticket);
	else
		dma_resv_unlock(objs->objs[0]->resv);
}
void virtio_gpu_array_add_fence(struct virtio_gpu_object_array *objs,
struct dma_fence *fence)
{
int i;
for (i = 0; i < objs->nents; i++)
dma_resv_add_fence(objs->objs[i]->resv, fence,
DMA_RESV_USAGE_WRITE);
}
/*
 * Drop the reference held on each object in the array, then free the
 * array itself.  NULL-safe.
 */
void virtio_gpu_array_put_free(struct virtio_gpu_object_array *objs)
{
	u32 i;

	if (!objs)
		return;

	for (i = 0; i < objs->nents; i++)
		drm_gem_object_put(objs->objs[i]);
	virtio_gpu_array_free(objs);
}
/*
 * Queue the array for deferred release via obj_free_work.  Used from
 * contexts that must not take the locks involved in dropping the final
 * GEM reference directly.
 */
void virtio_gpu_array_put_free_delayed(struct virtio_gpu_device *vgdev,
				       struct virtio_gpu_object_array *objs)
{
	spin_lock(&vgdev->obj_free_lock);
	list_add_tail(&objs->next, &vgdev->obj_free_list);
	spin_unlock(&vgdev->obj_free_lock);
	schedule_work(&vgdev->obj_free_work);
}
/*
 * Work handler draining obj_free_list: releases each queued array.
 * The spinlock is dropped around virtio_gpu_array_put_free() because
 * dropping GEM references can sleep.
 */
void virtio_gpu_array_put_free_work(struct work_struct *work)
{
	struct virtio_gpu_device *vgdev =
		container_of(work, struct virtio_gpu_device, obj_free_work);
	struct virtio_gpu_object_array *objs;

	spin_lock(&vgdev->obj_free_lock);
	while (!list_empty(&vgdev->obj_free_list)) {
		objs = list_first_entry(&vgdev->obj_free_list,
					struct virtio_gpu_object_array, next);
		list_del(&objs->next);
		/* Drop the lock while freeing; the list stays consistent
		 * because the entry was already unlinked. */
		spin_unlock(&vgdev->obj_free_lock);
		virtio_gpu_array_put_free(objs);
		spin_lock(&vgdev->obj_free_lock);
	}
	spin_unlock(&vgdev->obj_free_lock);
}
| linux-master | drivers/gpu/drm/virtio/virtgpu_gem.c |
/*
* Copyright (C) 2015 Red Hat, Inc.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#include <drm/drm_atomic_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_fourcc.h>
#include "virtgpu_drv.h"
/* Pixel formats exposed on primary planes. */
static const uint32_t virtio_gpu_formats[] = {
	DRM_FORMAT_HOST_XRGB8888,
};

/* Pixel formats exposed on cursor planes (alpha variant). */
static const uint32_t virtio_gpu_cursor_formats[] = {
	DRM_FORMAT_HOST_ARGB8888,
};
/*
 * Translate a DRM fourcc code into the matching virtio-gpu format enum.
 * Returns 0 (and warns) for codes outside the formats this driver
 * advertises.
 */
uint32_t virtio_gpu_translate_format(uint32_t drm_fourcc)
{
	static const struct {
		uint32_t fourcc;
		uint32_t virtio;
	} map[] = {
		{ DRM_FORMAT_XRGB8888, VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM },
		{ DRM_FORMAT_ARGB8888, VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM },
		{ DRM_FORMAT_BGRX8888, VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM },
		{ DRM_FORMAT_BGRA8888, VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM },
	};
	size_t i;

	for (i = 0; i < ARRAY_SIZE(map); i++) {
		if (map[i].fourcc == drm_fourcc)
			return map[i].virtio;
	}

	/*
	 * This should not happen, we handle everything listed
	 * in virtio_gpu_formats[].
	 */
	WARN_ON(1);
	return 0;
}
/* Plane vtable: the stock atomic helpers cover all operations. */
static const struct drm_plane_funcs virtio_gpu_plane_funcs = {
	.update_plane		= drm_atomic_helper_update_plane,
	.disable_plane		= drm_atomic_helper_disable_plane,
	.reset			= drm_atomic_helper_plane_reset,
	.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
	.atomic_destroy_state	= drm_atomic_helper_plane_destroy_state,
};
/*
 * Validate a plane update: scaling is never allowed, and only cursor
 * planes may be positioned partially off screen.  Returns 0 on success
 * or a negative errno.
 */
static int virtio_gpu_plane_atomic_check(struct drm_plane *plane,
					 struct drm_atomic_state *state)
{
	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
										 plane);
	bool is_cursor = plane->type == DRM_PLANE_TYPE_CURSOR;
	struct drm_crtc_state *crtc_state;
	int ret;

	/* Nothing to validate when the plane is being disabled. */
	if (!new_plane_state->fb || WARN_ON(!new_plane_state->crtc))
		return 0;

	crtc_state = drm_atomic_get_crtc_state(state,
					       new_plane_state->crtc);
	if (IS_ERR(crtc_state))
		return PTR_ERR(crtc_state);

	ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state,
						  DRM_PLANE_NO_SCALING,
						  DRM_PLANE_NO_SCALING,
						  is_cursor, true);
	return ret;
}
/*
 * Copy the damaged rectangle of a dumb (guest-backed) BO to the host
 * via a TRANSFER_TO_HOST_2D command.  Allocation failure is silently
 * dropped; the next flush will retransmit.
 */
static void virtio_gpu_update_dumb_bo(struct virtio_gpu_device *vgdev,
				      struct drm_plane_state *state,
				      struct drm_rect *rect)
{
	struct virtio_gpu_object *bo =
		gem_to_virtio_gpu_obj(state->fb->obj[0]);
	struct virtio_gpu_object_array *objs;
	uint32_t w = rect->x2 - rect->x1;
	uint32_t h = rect->y2 - rect->y1;
	uint32_t x = rect->x1;
	uint32_t y = rect->y1;
	/* Byte offset of the damage origin inside the framebuffer. */
	uint32_t off = x * state->fb->format->cpp[0] +
		y * state->fb->pitches[0];

	objs = virtio_gpu_array_alloc(1);
	if (!objs)
		return;
	virtio_gpu_array_add_obj(objs, &bo->base.base);

	virtio_gpu_cmd_transfer_to_host_2d(vgdev, off, w, h, x, y,
					   objs, NULL);
}
/*
 * Send a RESOURCE_FLUSH for the given rectangle of the plane's current
 * framebuffer.  When prepare_fb allocated a fence, the flush is fenced
 * and we wait (bounded to 50ms, result deliberately ignored) so the
 * host has consumed the frame before the fence is dropped.
 */
static void virtio_gpu_resource_flush(struct drm_plane *plane,
				      uint32_t x, uint32_t y,
				      uint32_t width, uint32_t height)
{
	struct drm_device *dev = plane->dev;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_framebuffer *vgfb;
	struct virtio_gpu_object *bo;

	vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
	bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
	if (vgfb->fence) {
		struct virtio_gpu_object_array *objs;

		objs = virtio_gpu_array_alloc(1);
		if (!objs)
			return;
		virtio_gpu_array_add_obj(objs, vgfb->base.obj[0]);
		virtio_gpu_array_lock_resv(objs);
		virtio_gpu_cmd_resource_flush(vgdev, bo->hw_res_handle, x, y,
					      width, height, objs, vgfb->fence);
		virtio_gpu_notify(vgdev);

		/* Bounded wait; the fence reference taken at allocation
		 * time is released here in either case. */
		dma_fence_wait_timeout(&vgfb->fence->f, true,
				       msecs_to_jiffies(50));
		dma_fence_put(&vgfb->fence->f);
		vgfb->fence = NULL;
	} else {
		virtio_gpu_cmd_resource_flush(vgdev, bo->hw_res_handle, x, y,
					      width, height, NULL, NULL);
		virtio_gpu_notify(vgdev);
	}
}
/*
 * Atomic update handler for the primary plane: uploads dumb-BO damage,
 * (re)programs the scanout when the framebuffer or source rectangle
 * changed, and flushes the damaged region to the host.
 */
static void virtio_gpu_primary_plane_update(struct drm_plane *plane,
					    struct drm_atomic_state *state)
{
	struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
									   plane);
	struct drm_device *dev = plane->dev;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_output *output = NULL;
	struct virtio_gpu_object *bo;
	struct drm_rect rect;

	/* Resolve the output from whichever state still has a crtc
	 * (the plane may be in the middle of being disabled). */
	if (plane->state->crtc)
		output = drm_crtc_to_virtio_gpu_output(plane->state->crtc);
	if (old_state->crtc)
		output = drm_crtc_to_virtio_gpu_output(old_state->crtc);
	if (WARN_ON(!output))
		return;

	/* No framebuffer or inactive crtc: detach the scanout. */
	if (!plane->state->fb || !output->crtc.state->active) {
		DRM_DEBUG("nofb\n");
		virtio_gpu_cmd_set_scanout(vgdev, output->index, 0,
					   plane->state->src_w >> 16,
					   plane->state->src_h >> 16,
					   0, 0);
		virtio_gpu_notify(vgdev);
		return;
	}

	/* Nothing damaged, nothing to do. */
	if (!drm_atomic_helper_damage_merged(old_state, plane->state, &rect))
		return;

	bo = gem_to_virtio_gpu_obj(plane->state->fb->obj[0]);
	if (bo->dumb)
		virtio_gpu_update_dumb_bo(vgdev, plane->state, &rect);

	/* Reprogram the scanout only when the fb or source rect changed,
	 * or a modeset was flagged by the crtc. */
	if (plane->state->fb != old_state->fb ||
	    plane->state->src_w != old_state->src_w ||
	    plane->state->src_h != old_state->src_h ||
	    plane->state->src_x != old_state->src_x ||
	    plane->state->src_y != old_state->src_y ||
	    output->needs_modeset) {
		output->needs_modeset = false;
		DRM_DEBUG("handle 0x%x, crtc %dx%d+%d+%d, src %dx%d+%d+%d\n",
			  bo->hw_res_handle,
			  plane->state->crtc_w, plane->state->crtc_h,
			  plane->state->crtc_x, plane->state->crtc_y,
			  plane->state->src_w >> 16,
			  plane->state->src_h >> 16,
			  plane->state->src_x >> 16,
			  plane->state->src_y >> 16);

		/* Blob resources use the dedicated blob scanout command. */
		if (bo->host3d_blob || bo->guest_blob) {
			virtio_gpu_cmd_set_scanout_blob
						(vgdev, output->index, bo,
						 plane->state->fb,
						 plane->state->src_w >> 16,
						 plane->state->src_h >> 16,
						 plane->state->src_x >> 16,
						 plane->state->src_y >> 16);
		} else {
			virtio_gpu_cmd_set_scanout(vgdev, output->index,
						   bo->hw_res_handle,
						   plane->state->src_w >> 16,
						   plane->state->src_h >> 16,
						   plane->state->src_x >> 16,
						   plane->state->src_y >> 16);
		}
	}

	virtio_gpu_resource_flush(plane,
				  rect.x1,
				  rect.y1,
				  rect.x2 - rect.x1,
				  rect.y2 - rect.y1);
}
/*
 * prepare_fb hook: allocate a fence for dumb BOs whose framebuffer is
 * changing, so the later transfer/flush can be synchronized with the
 * host.  Guest-blob-less primary planes need no fence.
 * Returns 0 or -ENOMEM.
 */
static int virtio_gpu_plane_prepare_fb(struct drm_plane *plane,
				       struct drm_plane_state *new_state)
{
	struct drm_device *dev = plane->dev;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_framebuffer *vgfb;
	struct virtio_gpu_object *bo;

	if (!new_state->fb)
		return 0;

	vgfb = to_virtio_gpu_framebuffer(new_state->fb);
	bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
	if (!bo || (plane->type == DRM_PLANE_TYPE_PRIMARY && !bo->guest_blob))
		return 0;

	/* Only fence actual framebuffer changes on dumb BOs. */
	if (bo->dumb && (plane->state->fb != new_state->fb)) {
		vgfb->fence = virtio_gpu_fence_alloc(vgdev, vgdev->fence_drv.context,
						     0);
		if (!vgfb->fence)
			return -ENOMEM;
	}

	return 0;
}
/*
 * cleanup_fb hook: drop a fence left over from prepare_fb when the
 * update path did not consume it.
 */
static void virtio_gpu_plane_cleanup_fb(struct drm_plane *plane,
					struct drm_plane_state *state)
{
	struct virtio_gpu_framebuffer *vgfb;

	if (!state->fb)
		return;

	vgfb = to_virtio_gpu_framebuffer(state->fb);
	if (!vgfb->fence)
		return;

	dma_fence_put(&vgfb->fence->f);
	vgfb->fence = NULL;
}
/*
 * Atomic update handler for the cursor plane.  A framebuffer change
 * uploads the new cursor image (fenced, synchronous) and sends an
 * UPDATE_CURSOR; a pure position change sends the cheaper MOVE_CURSOR.
 */
static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
					   struct drm_atomic_state *state)
{
	struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
									   plane);
	struct drm_device *dev = plane->dev;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_output *output = NULL;
	struct virtio_gpu_framebuffer *vgfb;
	struct virtio_gpu_object *bo = NULL;
	uint32_t handle;

	if (plane->state->crtc)
		output = drm_crtc_to_virtio_gpu_output(plane->state->crtc);
	if (old_state->crtc)
		output = drm_crtc_to_virtio_gpu_output(old_state->crtc);
	if (WARN_ON(!output))
		return;

	/* handle == 0 means "hide the cursor" on the host side. */
	if (plane->state->fb) {
		vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
		bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
		handle = bo->hw_res_handle;
	} else {
		handle = 0;
	}

	if (bo && bo->dumb && (plane->state->fb != old_state->fb)) {
		/* new cursor -- update & wait */
		struct virtio_gpu_object_array *objs;

		objs = virtio_gpu_array_alloc(1);
		if (!objs)
			return;
		virtio_gpu_array_add_obj(objs, vgfb->base.obj[0]);
		virtio_gpu_array_lock_resv(objs);
		virtio_gpu_cmd_transfer_to_host_2d
			(vgdev, 0,
			 plane->state->crtc_w,
			 plane->state->crtc_h,
			 0, 0, objs, vgfb->fence);
		virtio_gpu_notify(vgdev);
		/* Wait for the upload so the host never scans out a
		 * half-written cursor image (fence from prepare_fb). */
		dma_fence_wait(&vgfb->fence->f, true);
		dma_fence_put(&vgfb->fence->f);
		vgfb->fence = NULL;
	}

	if (plane->state->fb != old_state->fb) {
		DRM_DEBUG("update, handle %d, pos +%d+%d, hot %d,%d\n", handle,
			  plane->state->crtc_x,
			  plane->state->crtc_y,
			  plane->state->fb ? plane->state->fb->hot_x : 0,
			  plane->state->fb ? plane->state->fb->hot_y : 0);
		output->cursor.hdr.type =
			cpu_to_le32(VIRTIO_GPU_CMD_UPDATE_CURSOR);
		output->cursor.resource_id = cpu_to_le32(handle);
		if (plane->state->fb) {
			output->cursor.hot_x =
				cpu_to_le32(plane->state->fb->hot_x);
			output->cursor.hot_y =
				cpu_to_le32(plane->state->fb->hot_y);
		} else {
			output->cursor.hot_x = cpu_to_le32(0);
			output->cursor.hot_y = cpu_to_le32(0);
		}
	} else {
		DRM_DEBUG("move +%d+%d\n",
			  plane->state->crtc_x,
			  plane->state->crtc_y);
		output->cursor.hdr.type =
			cpu_to_le32(VIRTIO_GPU_CMD_MOVE_CURSOR);
	}
	output->cursor.pos.x = cpu_to_le32(plane->state->crtc_x);
	output->cursor.pos.y = cpu_to_le32(plane->state->crtc_y);
	virtio_gpu_cursor_ping(vgdev, output);
}
/* Helper vtables: primary and cursor planes share prepare/cleanup and
 * atomic_check, differing only in the atomic_update implementation. */
static const struct drm_plane_helper_funcs virtio_gpu_primary_helper_funcs = {
	.prepare_fb		= virtio_gpu_plane_prepare_fb,
	.cleanup_fb		= virtio_gpu_plane_cleanup_fb,
	.atomic_check		= virtio_gpu_plane_atomic_check,
	.atomic_update		= virtio_gpu_primary_plane_update,
};

static const struct drm_plane_helper_funcs virtio_gpu_cursor_helper_funcs = {
	.prepare_fb		= virtio_gpu_plane_prepare_fb,
	.cleanup_fb		= virtio_gpu_plane_cleanup_fb,
	.atomic_check		= virtio_gpu_plane_atomic_check,
	.atomic_update		= virtio_gpu_cursor_plane_update,
};
/*
 * Allocate and register a plane of the given @type for scanout @index,
 * selecting the matching format list and helper vtable.  Returns the
 * plane or an ERR_PTR from the drm-managed allocation.
 */
struct drm_plane *virtio_gpu_plane_init(struct virtio_gpu_device *vgdev,
					enum drm_plane_type type,
					int index)
{
	struct drm_device *dev = vgdev->ddev;
	const struct drm_plane_helper_funcs *funcs;
	struct drm_plane *plane;
	const uint32_t *formats;
	int nformats;

	if (type == DRM_PLANE_TYPE_CURSOR) {
		formats = virtio_gpu_cursor_formats;
		nformats = ARRAY_SIZE(virtio_gpu_cursor_formats);
		funcs = &virtio_gpu_cursor_helper_funcs;
	} else {
		formats = virtio_gpu_formats;
		nformats = ARRAY_SIZE(virtio_gpu_formats);
		funcs = &virtio_gpu_primary_helper_funcs;
	}

	/* possible_crtcs is 1:1 with the scanout index. */
	plane = drmm_universal_plane_alloc(dev, struct drm_plane, dev,
					   1 << index, &virtio_gpu_plane_funcs,
					   formats, nformats, NULL, type, NULL);
	if (IS_ERR(plane))
		return plane;

	drm_plane_helper_add(plane, funcs);

	/* Damage clips let userspace flush partial updates on primaries. */
	if (type == DRM_PLANE_TYPE_PRIMARY)
		drm_plane_enable_fb_damage_clips(plane);

	return plane;
}
| linux-master | drivers/gpu/drm/virtio/virtgpu_plane.c |
// SPDX-License-Identifier: GPL-2.0
#include "virtgpu_drv.h"
#include <linux/dma-mapping.h>
/*
 * GEM free hook for VRAM (host-visible blob) objects.  For created
 * resources the host mapping (if any) is torn down and an unref is
 * queued; the completion handler performs the final cleanup.  Objects
 * that never made it to the host need no host-side teardown here.
 */
static void virtio_gpu_vram_free(struct drm_gem_object *obj)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
	struct virtio_gpu_device *vgdev = obj->dev->dev_private;
	struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo);
	bool unmap;

	if (!bo->created)
		return;

	spin_lock(&vgdev->host_visible_lock);
	unmap = drm_mm_node_allocated(&vram->vram_node);
	spin_unlock(&vgdev->host_visible_lock);

	if (unmap)
		virtio_gpu_cmd_unmap(vgdev, bo);

	virtio_gpu_cmd_unref_resource(vgdev, bo);
	virtio_gpu_notify(vgdev);
}
/* VMA ops for VRAM mappings: only refcounting, no fault handler, since
 * the whole range is populated up front by io_remap_pfn_range(). */
static const struct vm_operations_struct virtio_gpu_vram_vm_ops = {
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};
/*
 * mmap a host-visible VRAM object into userspace.  Waits for the host
 * map operation to settle, applies the caching mode advertised by the
 * host, and maps the full region (partial mappings are rejected).
 * Returns 0 or a negative errno.
 */
static int virtio_gpu_vram_mmap(struct drm_gem_object *obj,
				struct vm_area_struct *vma)
{
	int ret;
	struct virtio_gpu_device *vgdev = obj->dev->dev_private;
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
	struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo);
	unsigned long vm_size = vma->vm_end - vma->vm_start;

	if (!(bo->blob_flags & VIRTGPU_BLOB_FLAG_USE_MAPPABLE))
		return -EINVAL;

	/* Block until the asynchronous MAP_BLOB command has completed. */
	wait_event(vgdev->resp_wq, vram->map_state != STATE_INITIALIZING);
	if (vram->map_state != STATE_OK)
		return -EINVAL;

	vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node);
	vm_flags_set(vma, VM_MIXEDMAP | VM_DONTEXPAND);
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
	vma->vm_ops = &virtio_gpu_vram_vm_ops;

	/* Honour the caching attributes reported by the host. */
	if (vram->map_info == VIRTIO_GPU_MAP_CACHE_WC)
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	else if (vram->map_info == VIRTIO_GPU_MAP_CACHE_UNCACHED)
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	/* Partial mappings of GEM buffers don't happen much in practice. */
	if (vm_size != vram->vram_node.size)
		return -EINVAL;

	ret = io_remap_pfn_range(vma, vma->vm_start,
				 vram->vram_node.start >> PAGE_SHIFT,
				 vm_size, vma->vm_page_prot);
	return ret;
}
/*
 * dma-buf map for VRAM objects.  Mappable blobs get a single-entry
 * sg_table DMA-mapped onto the host-visible region; non-mappable blobs
 * return an empty stub table (virtio peers use the resource UUID
 * instead).  Returns the table or an ERR_PTR.
 */
struct sg_table *virtio_gpu_vram_map_dma_buf(struct virtio_gpu_object *bo,
					     struct device *dev,
					     enum dma_data_direction dir)
{
	struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;
	struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo);
	struct sg_table *sgt;
	dma_addr_t addr;
	int ret;

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return ERR_PTR(-ENOMEM);

	if (!(bo->blob_flags & VIRTGPU_BLOB_FLAG_USE_MAPPABLE)) {
		// Virtio devices can access the dma-buf via its UUID. Return a stub
		// sg_table so the dma-buf API still works.
		if (!is_virtio_device(dev) || !vgdev->has_resource_assign_uuid) {
			ret = -EIO;
			goto out;
		}
		return sgt;
	}

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (ret)
		goto out;

	addr = dma_map_resource(dev, vram->vram_node.start,
				vram->vram_node.size, dir,
				DMA_ATTR_SKIP_CPU_SYNC);
	ret = dma_mapping_error(dev, addr);
	if (ret)
		goto out;

	/* MMIO region: no struct page backing, so only the dma fields of
	 * the single entry are populated. */
	sg_set_page(sgt->sgl, NULL, vram->vram_node.size, 0);
	sg_dma_address(sgt->sgl) = addr;
	sg_dma_len(sgt->sgl) = vram->vram_node.size;

	return sgt;
out:
	sg_free_table(sgt);
	kfree(sgt);
	return ERR_PTR(ret);
}
/*
 * Undo virtio_gpu_vram_map_dma_buf(): unmap the DMA resource when the
 * table has a real entry (stub tables have nents == 0), then free it.
 */
void virtio_gpu_vram_unmap_dma_buf(struct device *dev,
				   struct sg_table *sgt,
				   enum dma_data_direction dir)
{
	if (sgt->nents) {
		dma_unmap_resource(dev, sg_dma_address(sgt->sgl),
				   sg_dma_len(sgt->sgl), dir,
				   DMA_ATTR_SKIP_CPU_SYNC);
	}

	sg_free_table(sgt);
	kfree(sgt);
}
/* GEM vtable for VRAM objects; also serves as the type tag checked by
 * virtio_gpu_is_vram(). */
static const struct drm_gem_object_funcs virtio_gpu_vram_funcs = {
	.open = virtio_gpu_gem_object_open,
	.close = virtio_gpu_gem_object_close,
	.free = virtio_gpu_vram_free,
	.mmap = virtio_gpu_vram_mmap,
	.export = virtgpu_gem_prime_export,
};
/* True if @bo is a VRAM object, identified by its GEM vtable pointer. */
bool virtio_gpu_is_vram(struct virtio_gpu_object *bo)
{
	return bo->base.base.funcs == &virtio_gpu_vram_funcs;
}
/*
 * Carve a slot for @bo out of the host-visible memory window and ask
 * the host to map the blob there.  On failure the drm_mm node is
 * released again.  Returns 0 or a negative errno.
 */
static int virtio_gpu_vram_map(struct virtio_gpu_object *bo)
{
	int ret;
	uint64_t offset;
	struct virtio_gpu_object_array *objs;
	struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;
	struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo);

	if (!vgdev->has_host_visible)
		return -EINVAL;

	spin_lock(&vgdev->host_visible_lock);
	ret = drm_mm_insert_node(&vgdev->host_visible_mm, &vram->vram_node,
				 bo->base.base.size);
	spin_unlock(&vgdev->host_visible_lock);

	if (ret)
		return ret;

	objs = virtio_gpu_array_alloc(1);
	if (!objs) {
		ret = -ENOMEM;
		goto err_remove_node;
	}

	virtio_gpu_array_add_obj(objs, &bo->base.base);
	/*TODO: Add an error checking helper function in drm_mm.h */
	/* Offset is relative to the start of the shared memory window. */
	offset = vram->vram_node.start - vgdev->host_visible_region.addr;

	ret = virtio_gpu_cmd_map(vgdev, objs, offset);
	if (ret) {
		virtio_gpu_array_put_free(objs);
		goto err_remove_node;
	}

	return 0;

err_remove_node:
	spin_lock(&vgdev->host_visible_lock);
	drm_mm_remove_node(&vram->vram_node);
	spin_unlock(&vgdev->host_visible_lock);
	return ret;
}
/*
 * Create a VRAM (blob) object: allocate the wrapper, register a fake
 * mmap offset and resource id, issue the host-side blob creation, and
 * map it into the host-visible window when requested.  Returns 0 and
 * stores the object in *bo_ptr, or a negative errno.
 */
int virtio_gpu_vram_create(struct virtio_gpu_device *vgdev,
			   struct virtio_gpu_object_params *params,
			   struct virtio_gpu_object **bo_ptr)
{
	struct drm_gem_object *obj;
	struct virtio_gpu_object_vram *vram;
	int ret;

	vram = kzalloc(sizeof(*vram), GFP_KERNEL);
	if (!vram)
		return -ENOMEM;

	obj = &vram->base.base.base;
	obj->funcs = &virtio_gpu_vram_funcs;

	params->size = PAGE_ALIGN(params->size);
	drm_gem_private_object_init(vgdev->ddev, obj, params->size);

	/* Create fake offset */
	ret = drm_gem_create_mmap_offset(obj);
	if (ret) {
		kfree(vram);
		return ret;
	}

	ret = virtio_gpu_resource_id_get(vgdev, &vram->base.hw_res_handle);
	if (ret) {
		kfree(vram);
		return ret;
	}

	virtio_gpu_cmd_resource_create_blob(vgdev, &vram->base, params, NULL,
					    0);
	if (params->blob_flags & VIRTGPU_BLOB_FLAG_USE_MAPPABLE) {
		ret = virtio_gpu_vram_map(&vram->base);
		if (ret) {
			/* Queues host-side unref; completion frees vram. */
			virtio_gpu_vram_free(obj);
			return ret;
		}
	}

	*bo_ptr = &vram->base;
	return 0;
}
| linux-master | drivers/gpu/drm/virtio/virtgpu_vram.c |
/*
* Copyright (C) 2015 Red Hat, Inc.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>
#include <drm/drm_file.h>
#include <drm/drm_managed.h>
#include "virtgpu_drv.h"
/*
 * Work handler for virtio config-change interrupts: on a DISPLAY event,
 * refetch EDID/display info and raise a hotplug event, then acknowledge
 * the event bits back through config space.
 */
static void virtio_gpu_config_changed_work_func(struct work_struct *work)
{
	struct virtio_gpu_device *vgdev =
		container_of(work, struct virtio_gpu_device,
			     config_changed_work);
	u32 events_read, events_clear = 0;

	/* read the config space */
	virtio_cread_le(vgdev->vdev, struct virtio_gpu_config,
			events_read, &events_read);
	if (events_read & VIRTIO_GPU_EVENT_DISPLAY) {
		if (vgdev->num_scanouts) {
			if (vgdev->has_edid)
				virtio_gpu_cmd_get_edids(vgdev);
			virtio_gpu_cmd_get_display_info(vgdev);
			virtio_gpu_notify(vgdev);
			drm_helper_hpd_irq_event(vgdev->ddev);
		}
		events_clear |= VIRTIO_GPU_EVENT_DISPLAY;
	}
	/* Writing the bits back acknowledges the event to the device. */
	virtio_cwrite_le(vgdev->vdev, struct virtio_gpu_config,
			 events_clear, &events_clear);
}
/*
 * Initialize the bookkeeping of one virtqueue wrapper: its lock, the
 * ack waitqueue, and the dequeue work bound to @work_func.
 */
static void virtio_gpu_init_vq(struct virtio_gpu_queue *vgvq,
			       void (*work_func)(struct work_struct *work))
{
	spin_lock_init(&vgvq->qlock);
	init_waitqueue_head(&vgvq->ack_queue);
	INIT_WORK(&vgvq->dequeue_work, work_func);
}
/*
 * Query all capability sets from the host and record their ids in
 * vgdev->capsets / capset_id_mask.  On timeout or an out-of-range id
 * the whole capset array is discarded (under display_info_lock) and
 * the device continues without capsets.
 */
static void virtio_gpu_get_capsets(struct virtio_gpu_device *vgdev,
				   int num_capsets)
{
	int i, ret;
	bool invalid_capset_id = false;
	struct drm_device *drm = vgdev->ddev;

	vgdev->capsets = drmm_kcalloc(drm, num_capsets,
				      sizeof(struct virtio_gpu_drv_capset),
				      GFP_KERNEL);
	if (!vgdev->capsets) {
		DRM_ERROR("failed to allocate cap sets\n");
		return;
	}

	for (i = 0; i < num_capsets; i++) {
		virtio_gpu_cmd_get_capset_info(vgdev, i);
		virtio_gpu_notify(vgdev);
		/* The response handler fills capsets[i].id; wait for it. */
		ret = wait_event_timeout(vgdev->resp_wq,
					 vgdev->capsets[i].id > 0, 5 * HZ);

		/*
		 * Capability ids are defined in the virtio-gpu spec and are
		 * between 1 to 63, inclusive.
		 */
		if (!vgdev->capsets[i].id ||
		    vgdev->capsets[i].id > MAX_CAPSET_ID)
			invalid_capset_id = true;

		if (ret == 0)
			DRM_ERROR("timed out waiting for cap set %d\n", i);
		else if (invalid_capset_id)
			DRM_ERROR("invalid capset id %u", vgdev->capsets[i].id);

		if (ret == 0 || invalid_capset_id) {
			spin_lock(&vgdev->display_info_lock);
			drmm_kfree(drm, vgdev->capsets);
			vgdev->capsets = NULL;
			spin_unlock(&vgdev->display_info_lock);
			return;
		}

		vgdev->capset_id_mask |= 1 << vgdev->capsets[i].id;
		DRM_INFO("cap set %d: id %d, max-version %d, max-size %d\n",
			 i, vgdev->capsets[i].id,
			 vgdev->capsets[i].max_version,
			 vgdev->capsets[i].max_size);
	}

	vgdev->num_capsets = num_capsets;
}
/*
 * Device bring-up: allocate the per-device state, probe feature bits,
 * set up the control/cursor virtqueues, read the scanout and capset
 * counts from config space, initialize modesetting, and fetch the
 * initial display info.  Returns 0 or a negative errno; on error all
 * partially acquired resources are unwound via the goto chain.
 */
int virtio_gpu_init(struct virtio_device *vdev, struct drm_device *dev)
{
	static vq_callback_t *callbacks[] = {
		virtio_gpu_ctrl_ack, virtio_gpu_cursor_ack
	};
	static const char * const names[] = { "control", "cursor" };
	struct virtio_gpu_device *vgdev;
	/* this will expand later */
	struct virtqueue *vqs[2];
	u32 num_scanouts, num_capsets;
	int ret = 0;

	if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
		return -ENODEV;

	/* drm-managed: freed automatically with the drm_device. */
	vgdev = drmm_kzalloc(dev, sizeof(struct virtio_gpu_device), GFP_KERNEL);
	if (!vgdev)
		return -ENOMEM;

	vgdev->ddev = dev;
	dev->dev_private = vgdev;
	vgdev->vdev = vdev;

	spin_lock_init(&vgdev->display_info_lock);
	spin_lock_init(&vgdev->resource_export_lock);
	spin_lock_init(&vgdev->host_visible_lock);
	ida_init(&vgdev->ctx_id_ida);
	ida_init(&vgdev->resource_ida);
	init_waitqueue_head(&vgdev->resp_wq);
	virtio_gpu_init_vq(&vgdev->ctrlq, virtio_gpu_dequeue_ctrl_func);
	virtio_gpu_init_vq(&vgdev->cursorq, virtio_gpu_dequeue_cursor_func);

	vgdev->fence_drv.context = dma_fence_context_alloc(1);
	spin_lock_init(&vgdev->fence_drv.lock);
	INIT_LIST_HEAD(&vgdev->fence_drv.fences);
	INIT_LIST_HEAD(&vgdev->cap_cache);
	INIT_WORK(&vgdev->config_changed_work,
		  virtio_gpu_config_changed_work_func);

	INIT_WORK(&vgdev->obj_free_work,
		  virtio_gpu_array_put_free_work);
	INIT_LIST_HEAD(&vgdev->obj_free_list);
	spin_lock_init(&vgdev->obj_free_lock);

	/* 3D (virgl) is only offered on little-endian builds here. */
#ifdef __LITTLE_ENDIAN
	if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_VIRGL))
		vgdev->has_virgl_3d = true;
#endif
	if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_EDID)) {
		vgdev->has_edid = true;
	}
	if (virtio_has_feature(vgdev->vdev, VIRTIO_RING_F_INDIRECT_DESC)) {
		vgdev->has_indirect = true;
	}
	if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_RESOURCE_UUID)) {
		vgdev->has_resource_assign_uuid = true;
	}
	if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_RESOURCE_BLOB)) {
		vgdev->has_resource_blob = true;
	}
	if (virtio_get_shm_region(vgdev->vdev, &vgdev->host_visible_region,
				  VIRTIO_GPU_SHM_ID_HOST_VISIBLE)) {
		if (!devm_request_mem_region(&vgdev->vdev->dev,
					     vgdev->host_visible_region.addr,
					     vgdev->host_visible_region.len,
					     dev_name(&vgdev->vdev->dev))) {
			DRM_ERROR("Could not reserve host visible region\n");
			ret = -EBUSY;
			goto err_vqs;
		}

		DRM_INFO("Host memory window: 0x%lx +0x%lx\n",
			 (unsigned long)vgdev->host_visible_region.addr,
			 (unsigned long)vgdev->host_visible_region.len);
		vgdev->has_host_visible = true;
		drm_mm_init(&vgdev->host_visible_mm,
			    (unsigned long)vgdev->host_visible_region.addr,
			    (unsigned long)vgdev->host_visible_region.len);
	}
	if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_CONTEXT_INIT)) {
		vgdev->has_context_init = true;
	}

	DRM_INFO("features: %cvirgl %cedid %cresource_blob %chost_visible",
		 vgdev->has_virgl_3d    ? '+' : '-',
		 vgdev->has_edid        ? '+' : '-',
		 vgdev->has_resource_blob ? '+' : '-',
		 vgdev->has_host_visible ? '+' : '-');

	DRM_INFO("features: %ccontext_init\n",
		 vgdev->has_context_init ? '+' : '-');

	ret = virtio_find_vqs(vgdev->vdev, 2, vqs, callbacks, names, NULL);
	if (ret) {
		DRM_ERROR("failed to find virt queues\n");
		goto err_vqs;
	}
	vgdev->ctrlq.vq = vqs[0];
	vgdev->cursorq.vq = vqs[1];
	ret = virtio_gpu_alloc_vbufs(vgdev);
	if (ret) {
		DRM_ERROR("failed to alloc vbufs\n");
		goto err_vbufs;
	}

	/* get display info */
	virtio_cread_le(vgdev->vdev, struct virtio_gpu_config,
			num_scanouts, &num_scanouts);
	vgdev->num_scanouts = min_t(uint32_t, num_scanouts,
				    VIRTIO_GPU_MAX_SCANOUTS);

	if (!IS_ENABLED(CONFIG_DRM_VIRTIO_GPU_KMS) || !vgdev->num_scanouts) {
		DRM_INFO("KMS disabled\n");
		vgdev->num_scanouts = 0;
		vgdev->has_edid = false;
		dev->driver_features &= ~(DRIVER_MODESET | DRIVER_ATOMIC);
	} else {
		DRM_INFO("number of scanouts: %d\n", num_scanouts);
	}

	virtio_cread_le(vgdev->vdev, struct virtio_gpu_config,
			num_capsets, &num_capsets);
	DRM_INFO("number of cap sets: %d\n", num_capsets);

	ret = virtio_gpu_modeset_init(vgdev);
	if (ret) {
		DRM_ERROR("modeset init failed\n");
		goto err_scanouts;
	}

	/* From this point the device may raise interrupts. */
	virtio_device_ready(vgdev->vdev);

	if (num_capsets)
		virtio_gpu_get_capsets(vgdev, num_capsets);
	if (vgdev->num_scanouts) {
		if (vgdev->has_edid)
			virtio_gpu_cmd_get_edids(vgdev);
		virtio_gpu_cmd_get_display_info(vgdev);
		virtio_gpu_notify(vgdev);
		wait_event_timeout(vgdev->resp_wq, !vgdev->display_info_pending,
				   5 * HZ);
	}
	return 0;

err_scanouts:
	virtio_gpu_free_vbufs(vgdev);
err_vbufs:
	vgdev->vdev->config->del_vqs(vgdev->vdev);
err_vqs:
	dev->dev_private = NULL;
	return ret;
}
/* Free every cached capability-set response; called during release,
 * after all users of the cache are gone. */
static void virtio_gpu_cleanup_cap_cache(struct virtio_gpu_device *vgdev)
{
	struct virtio_gpu_drv_cap_cache *cache_ent, *tmp;

	list_for_each_entry_safe(cache_ent, tmp, &vgdev->cap_cache, head) {
		kfree(cache_ent->caps_cache);
		kfree(cache_ent);
	}
}
/*
 * Device teardown, first stage: flush all pending work, reset the
 * virtio device, and delete its virtqueues.  Paired with
 * virtio_gpu_init(); the drm-level cleanup follows in
 * virtio_gpu_release().
 */
void virtio_gpu_deinit(struct drm_device *dev)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;

	flush_work(&vgdev->obj_free_work);
	flush_work(&vgdev->ctrlq.dequeue_work);
	flush_work(&vgdev->cursorq.dequeue_work);
	flush_work(&vgdev->config_changed_work);
	virtio_reset_device(vgdev->vdev);
	vgdev->vdev->config->del_vqs(vgdev->vdev);
}
/*
 * Device teardown, final stage: release modesetting state, vbufs, the
 * capset cache, and the host-visible allocator.  dev_private may be
 * NULL when init failed early, hence the guard.
 */
void virtio_gpu_release(struct drm_device *dev)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;

	if (!vgdev)
		return;

	virtio_gpu_modeset_fini(vgdev);
	virtio_gpu_free_vbufs(vgdev);
	virtio_gpu_cleanup_cap_cache(vgdev);

	if (vgdev->has_host_visible)
		drm_mm_takedown(&vgdev->host_visible_mm);
}
/*
 * DRM open hook: on 3D-capable devices, allocate the per-file private
 * data and reserve a context id (the host context itself is created
 * lazily).  Returns 0 or a negative errno.
 */
int virtio_gpu_driver_open(struct drm_device *dev, struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv;
	int handle;

	/* can't create contexts without 3d renderer */
	if (!vgdev->has_virgl_3d)
		return 0;

	/* allocate a virt GPU context for this opener */
	vfpriv = kzalloc(sizeof(*vfpriv), GFP_KERNEL);
	if (!vfpriv)
		return -ENOMEM;

	mutex_init(&vfpriv->context_lock);

	handle = ida_alloc(&vgdev->ctx_id_ida, GFP_KERNEL);
	if (handle < 0) {
		kfree(vfpriv);
		return handle;
	}

	/* ctx_id 0 is reserved; shift the ida value up by one. */
	vfpriv->ctx_id = handle + 1;
	file->driver_priv = vfpriv;
	return 0;
}
/*
 * DRM postclose hook: destroy the host context (if one was actually
 * created), return the context id, and free the per-file private data.
 */
void virtio_gpu_driver_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;

	if (!vgdev->has_virgl_3d)
		return;

	if (vfpriv->context_created) {
		virtio_gpu_cmd_context_destroy(vgdev, vfpriv->ctx_id);
		virtio_gpu_notify(vgdev);
	}

	ida_free(&vgdev->ctx_id_ida, vfpriv->ctx_id - 1);
	mutex_destroy(&vfpriv->context_lock);
	kfree(vfpriv);
	file->driver_priv = NULL;
}
| linux-master | drivers/gpu/drm/virtio/virtgpu_kms.c |
/*
* Copyright (C) 2015 Red Hat, Inc.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#include <linux/dma-mapping.h>
#include <linux/moduleparam.h>
#include "virtgpu_drv.h"
/* Module option "virglhack" (default on): never reuse resource ids, to
 * work around a virglrenderer <= 0.7.0 bug; see
 * virtio_gpu_resource_id_get() below. */
static int virtio_gpu_virglrenderer_workaround = 1;
module_param_named(virglhack, virtio_gpu_virglrenderer_workaround, int, 0400);
/*
 * Allocate a host resource id into *resid.  With the virglrenderer
 * workaround enabled, ids come from a monotonically increasing counter
 * and are never reused; otherwise an ida is used.  Returns 0 or a
 * negative errno from the ida.
 */
int virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev, uint32_t *resid)
{
	if (virtio_gpu_virglrenderer_workaround) {
		/*
		 * Hack to avoid re-using resource IDs.
		 *
		 * virglrenderer versions up to (and including) 0.7.0
		 * can't deal with that.  virglrenderer commit
		 * "f91a9dd35715 Fix unlinking resources from hash
		 * table." (Feb 2019) fixes the bug.
		 */
		static atomic_t seqno = ATOMIC_INIT(0);
		int handle = atomic_inc_return(&seqno);
		*resid = handle + 1;
	} else {
		int handle = ida_alloc(&vgdev->resource_ida, GFP_KERNEL);

		if (handle < 0)
			return handle;
		/* id 0 is reserved; shift the ida value up by one. */
		*resid = handle + 1;
	}
	return 0;
}
/*
 * Return a resource id to the ida.  A no-op while the virglrenderer
 * workaround is active, since ids are then never recycled.
 */
static void virtio_gpu_resource_id_put(struct virtio_gpu_device *vgdev, uint32_t id)
{
	if (virtio_gpu_virglrenderer_workaround)
		return;

	ida_free(&vgdev->resource_ida, id - 1);
}
/*
 * Final teardown of a virtio-gpu BO: return its resource ID and free the
 * backing object (shmem GEM or host-visible VRAM node).
 */
void virtio_gpu_cleanup_object(struct virtio_gpu_object *bo)
{
	struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;
	virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
	if (virtio_gpu_is_shmem(bo)) {
		drm_gem_shmem_free(&bo->base);
	} else if (virtio_gpu_is_vram(bo)) {
		struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo);
		/* Drop the object's carve-out from the host-visible region. */
		spin_lock(&vgdev->host_visible_lock);
		if (drm_mm_node_allocated(&vram->vram_node))
			drm_mm_remove_node(&vram->vram_node);
		spin_unlock(&vgdev->host_visible_lock);
		drm_gem_free_mmap_offset(&vram->base.base.base);
		drm_gem_object_release(&vram->base.base.base);
		kfree(vram);
	}
}
/* GEM ->free callback: destroy the BO, asking the host to unref first if
 * the resource was ever created on the host side. */
static void virtio_gpu_free_object(struct drm_gem_object *obj)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
	struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;

	if (!bo->created) {
		virtio_gpu_cleanup_object(bo);
		return;
	}

	/* Asynchronous: the unref completion handler calls
	 * virtio_gpu_cleanup_object() once the host is done. */
	virtio_gpu_cmd_unref_resource(vgdev, bo);
	virtio_gpu_notify(vgdev);
}
/* GEM object vtable for shmem-backed BOs; mostly the generic shmem helpers
 * plus virtio-gpu specific open/close/free/export hooks. Also used as the
 * discriminator by virtio_gpu_is_shmem(). */
static const struct drm_gem_object_funcs virtio_gpu_shmem_funcs = {
	.free = virtio_gpu_free_object,
	.open = virtio_gpu_gem_object_open,
	.close = virtio_gpu_gem_object_close,
	.print_info = drm_gem_shmem_object_print_info,
	.export = virtgpu_gem_prime_export,
	.pin = drm_gem_shmem_object_pin,
	.unpin = drm_gem_shmem_object_unpin,
	.get_sg_table = drm_gem_shmem_object_get_sg_table,
	.vmap = drm_gem_shmem_object_vmap,
	.vunmap = drm_gem_shmem_object_vunmap,
	.mmap = drm_gem_shmem_object_mmap,
	.vm_ops = &drm_gem_shmem_vm_ops,
};
/* True iff @bo is shmem-backed, identified by its GEM funcs pointer. */
bool virtio_gpu_is_shmem(struct virtio_gpu_object *bo)
{
	const struct drm_gem_object_funcs *funcs = bo->base.base.funcs;

	return funcs == &virtio_gpu_shmem_funcs;
}
/* DRM ->gem_create_object hook: allocate the driver-private shmem wrapper
 * and install the virtio-gpu GEM vtable. @size is handled by the caller. */
struct drm_gem_object *virtio_gpu_create_object(struct drm_device *dev,
						size_t size)
{
	struct virtio_gpu_object_shmem *shmem;

	shmem = kzalloc(sizeof(*shmem), GFP_KERNEL);
	if (!shmem)
		return ERR_PTR(-ENOMEM);

	shmem->base.base.base.funcs = &virtio_gpu_shmem_funcs;
	return &shmem->base.base.base;
}
/*
 * Build the mem-entry table describing @bo's backing pages for the host.
 * On success *ents/*nents hold a kvmalloc'ed array the caller must free.
 * Uses DMA addresses when the transport has no DMA quirk, physical
 * addresses otherwise.
 */
static int virtio_gpu_object_shmem_init(struct virtio_gpu_device *vgdev,
					struct virtio_gpu_object *bo,
					struct virtio_gpu_mem_entry **ents,
					unsigned int *nents)
{
	bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
	struct scatterlist *sg;
	struct sg_table *pages;
	int si;
	pages = drm_gem_shmem_get_pages_sgt(&bo->base);
	if (IS_ERR(pages))
		return PTR_ERR(pages);
	/* DMA mapping may have coalesced entries; pick the right count. */
	if (use_dma_api)
		*nents = pages->nents;
	else
		*nents = pages->orig_nents;
	*ents = kvmalloc_array(*nents,
			       sizeof(struct virtio_gpu_mem_entry),
			       GFP_KERNEL);
	if (!(*ents)) {
		DRM_ERROR("failed to allocate ent list\n");
		return -ENOMEM;
	}
	if (use_dma_api) {
		for_each_sgtable_dma_sg(pages, sg, si) {
			(*ents)[si].addr = cpu_to_le64(sg_dma_address(sg));
			(*ents)[si].length = cpu_to_le32(sg_dma_len(sg));
			(*ents)[si].padding = 0;
		}
	} else {
		for_each_sgtable_sg(pages, sg, si) {
			(*ents)[si].addr = cpu_to_le64(sg_phys(sg));
			(*ents)[si].length = cpu_to_le32(sg->length);
			(*ents)[si].padding = 0;
		}
	}
	return 0;
}
/*
 * Create a virtio-gpu BO: allocate the shmem GEM object, reserve a resource
 * ID, describe the backing pages, and issue the matching host create command
 * (blob, 3D, or 2D). When @fence is given the BO is added to an object array
 * so the fence can be attached under the reservation lock.
 * Returns 0 and sets *bo_ptr on success; unwinds via the goto ladder on error.
 */
int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
			     struct virtio_gpu_object_params *params,
			     struct virtio_gpu_object **bo_ptr,
			     struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_object_array *objs = NULL;
	struct drm_gem_shmem_object *shmem_obj;
	struct virtio_gpu_object *bo;
	struct virtio_gpu_mem_entry *ents = NULL;
	unsigned int nents;
	int ret;
	*bo_ptr = NULL;
	/* Host resources are page granular. */
	params->size = roundup(params->size, PAGE_SIZE);
	shmem_obj = drm_gem_shmem_create(vgdev->ddev, params->size);
	if (IS_ERR(shmem_obj))
		return PTR_ERR(shmem_obj);
	bo = gem_to_virtio_gpu_obj(&shmem_obj->base);
	ret = virtio_gpu_resource_id_get(vgdev, &bo->hw_res_handle);
	if (ret < 0)
		goto err_free_gem;
	bo->dumb = params->dumb;
	ret = virtio_gpu_object_shmem_init(vgdev, bo, &ents, &nents);
	if (ret != 0)
		goto err_put_id;
	if (fence) {
		ret = -ENOMEM;
		objs = virtio_gpu_array_alloc(1);
		if (!objs)
			goto err_free_entry;
		virtio_gpu_array_add_obj(objs, &bo->base.base);
		/* Hold the resv lock so the fence can be attached atomically
		 * with command submission. */
		ret = virtio_gpu_array_lock_resv(objs);
		if (ret != 0)
			goto err_put_objs;
	}
	if (params->blob) {
		if (params->blob_mem == VIRTGPU_BLOB_MEM_GUEST)
			bo->guest_blob = true;
		virtio_gpu_cmd_resource_create_blob(vgdev, bo, params,
						    ents, nents);
	} else if (params->virgl) {
		virtio_gpu_cmd_resource_create_3d(vgdev, bo, params,
						  objs, fence);
		virtio_gpu_object_attach(vgdev, bo, ents, nents);
	} else {
		virtio_gpu_cmd_create_resource(vgdev, bo, params,
					       objs, fence);
		virtio_gpu_object_attach(vgdev, bo, ents, nents);
	}
	*bo_ptr = bo;
	return 0;
err_put_objs:
	virtio_gpu_array_put_free(objs);
err_free_entry:
	kvfree(ents);
err_put_id:
	virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
err_free_gem:
	drm_gem_shmem_free(shmem_obj);
	return ret;
}
/* linux-master: drivers/gpu/drm/virtio/virtgpu_object.c */
/*
* Copyright (C) 2015 Red Hat, Inc.
* All Rights Reserved.
*
* Authors:
* Dave Airlie <[email protected]>
* Gerd Hoffmann <[email protected]>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <linux/dma-mapping.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>
#include <drm/drm_edid.h>
#include "virtgpu_drv.h"
#include "virtgpu_trace.h"
/* Commands/responses up to these sizes are stored inline in the vbuffer
 * slab object; larger responses use a separately allocated buffer. */
#define MAX_INLINE_CMD_SIZE 96
#define MAX_INLINE_RESP_SIZE 24
#define VBUFFER_SIZE (sizeof(struct virtio_gpu_vbuffer) \
+ MAX_INLINE_CMD_SIZE \
+ MAX_INLINE_RESP_SIZE)
/* Translate a UAPI 3D box into the little-endian wire representation. */
static void convert_to_hw_box(struct virtio_gpu_box *dst,
			      const struct drm_virtgpu_3d_box *src)
{
	dst->x = cpu_to_le32(src->x);
	dst->w = cpu_to_le32(src->w);
	dst->y = cpu_to_le32(src->y);
	dst->h = cpu_to_le32(src->h);
	dst->z = cpu_to_le32(src->z);
	dst->d = cpu_to_le32(src->d);
}
/* Control virtqueue interrupt callback: defer reclaim to the workqueue. */
void virtio_gpu_ctrl_ack(struct virtqueue *vq)
{
	struct drm_device *dev = vq->vdev->priv;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	schedule_work(&vgdev->ctrlq.dequeue_work);
}
/* Cursor virtqueue interrupt callback: defer reclaim to the workqueue. */
void virtio_gpu_cursor_ack(struct virtqueue *vq)
{
	struct drm_device *dev = vq->vdev->priv;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	schedule_work(&vgdev->cursorq.dequeue_work);
}
/* Create the slab cache backing command vbuffers (header + inline cmd +
 * inline response). Returns 0 or -ENOMEM. */
int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev)
{
	struct kmem_cache *cache;

	cache = kmem_cache_create("virtio-gpu-vbufs", VBUFFER_SIZE,
				  __alignof__(struct virtio_gpu_vbuffer),
				  0, NULL);
	if (!cache)
		return -ENOMEM;

	vgdev->vbufs = cache;
	return 0;
}
/* Destroy the vbuffer slab cache created by virtio_gpu_alloc_vbufs(). */
void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev)
{
	kmem_cache_destroy(vgdev->vbufs);
	vgdev->vbufs = NULL;
}
/*
 * Allocate a vbuffer from the slab (never fails: __GFP_NOFAIL) and lay out
 * its inline command area; small responses share the inline area, larger
 * ones use the caller-supplied @resp_buf.
 */
static struct virtio_gpu_vbuffer*
virtio_gpu_get_vbuf(struct virtio_gpu_device *vgdev,
		    int size, int resp_size, void *resp_buf,
		    virtio_gpu_resp_cb resp_cb)
{
	struct virtio_gpu_vbuffer *vbuf;
	vbuf = kmem_cache_zalloc(vgdev->vbufs, GFP_KERNEL | __GFP_NOFAIL);
	BUG_ON(size > MAX_INLINE_CMD_SIZE ||
	       size < sizeof(struct virtio_gpu_ctrl_hdr));
	/* Command payload lives immediately after the vbuffer header. */
	vbuf->buf = (void *)vbuf + sizeof(*vbuf);
	vbuf->size = size;
	vbuf->resp_cb = resp_cb;
	vbuf->resp_size = resp_size;
	if (resp_size <= MAX_INLINE_RESP_SIZE)
		vbuf->resp_buf = (void *)vbuf->buf + size;
	else
		vbuf->resp_buf = resp_buf;
	BUG_ON(!vbuf->resp_buf);
	return vbuf;
}
/* Return the control header at the start of a vbuffer's command area. */
static struct virtio_gpu_ctrl_hdr *
virtio_gpu_vbuf_ctrl_hdr(struct virtio_gpu_vbuffer *vbuf)
{
	/* this assumes a vbuf contains a command that starts with a
	 * virtio_gpu_ctrl_hdr, which is true for both ctrl and cursor
	 * virtqueues.
	 */
	return (struct virtio_gpu_ctrl_hdr *)vbuf->buf;
}
/*
 * Allocate a vbuffer holding an update_cursor command; *vbuffer_p receives
 * the vbuffer, the return value points at the inline command.
 * NOTE(review): virtio_gpu_get_vbuf() uses __GFP_NOFAIL and never returns
 * an ERR_PTR, so the IS_ERR branch below appears to be dead — confirm.
 */
static struct virtio_gpu_update_cursor*
virtio_gpu_alloc_cursor(struct virtio_gpu_device *vgdev,
			struct virtio_gpu_vbuffer **vbuffer_p)
{
	struct virtio_gpu_vbuffer *vbuf;
	vbuf = virtio_gpu_get_vbuf
		(vgdev, sizeof(struct virtio_gpu_update_cursor),
		 0, NULL, NULL);
	if (IS_ERR(vbuf)) {
		*vbuffer_p = NULL;
		return ERR_CAST(vbuf);
	}
	*vbuffer_p = vbuf;
	return (struct virtio_gpu_update_cursor *)vbuf->buf;
}
/*
 * Allocate a vbuffer for a command expecting a response; @cb runs when the
 * response arrives. Returns a pointer to the inline command area.
 */
static void *virtio_gpu_alloc_cmd_resp(struct virtio_gpu_device *vgdev,
				       virtio_gpu_resp_cb cb,
				       struct virtio_gpu_vbuffer **vbuffer_p,
				       int cmd_size, int resp_size,
				       void *resp_buf)
{
	struct virtio_gpu_vbuffer *vbuf;
	vbuf = virtio_gpu_get_vbuf(vgdev, cmd_size,
				   resp_size, resp_buf, cb);
	*vbuffer_p = vbuf;
	return (struct virtio_gpu_command *)vbuf->buf;
}
/* Allocate a command vbuffer with only a plain ctrl_hdr ack response. */
static void *virtio_gpu_alloc_cmd(struct virtio_gpu_device *vgdev,
				  struct virtio_gpu_vbuffer **vbuffer_p,
				  int size)
{
	return virtio_gpu_alloc_cmd_resp(vgdev, NULL, vbuffer_p, size,
					 sizeof(struct virtio_gpu_ctrl_hdr),
					 NULL);
}
/* Like virtio_gpu_alloc_cmd() but with a completion callback @cb. */
static void *virtio_gpu_alloc_cmd_cb(struct virtio_gpu_device *vgdev,
				     struct virtio_gpu_vbuffer **vbuffer_p,
				     int size,
				     virtio_gpu_resp_cb cb)
{
	return virtio_gpu_alloc_cmd_resp(vgdev, cb, vbuffer_p, size,
					 sizeof(struct virtio_gpu_ctrl_hdr),
					 NULL);
}
/* Free a vbuffer plus its out-of-line response and data buffers. */
static void free_vbuf(struct virtio_gpu_device *vgdev,
		      struct virtio_gpu_vbuffer *vbuf)
{
	/* Inline responses live inside the slab object; only free large ones. */
	if (vbuf->resp_size > MAX_INLINE_RESP_SIZE)
		kfree(vbuf->resp_buf);
	kvfree(vbuf->data_buf);
	kmem_cache_free(vgdev->vbufs, vbuf);
}
/* Pull all completed vbuffers off @vq and collect them on @reclaim_list. */
static void reclaim_vbufs(struct virtqueue *vq, struct list_head *reclaim_list)
{
	struct virtio_gpu_vbuffer *vbuf;
	unsigned int len;
	int freed = 0;
	while ((vbuf = virtqueue_get_buf(vq, &len))) {
		list_add_tail(&vbuf->list, reclaim_list);
		freed++;
	}
	if (freed == 0)
		DRM_DEBUG("Huh? zero vbufs reclaimed");
}
/*
 * Workqueue handler for the control queue: reclaim completed buffers,
 * process responses (errors, fences, per-command callbacks), wake waiters,
 * then free the vbuffers.
 */
void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
{
	struct virtio_gpu_device *vgdev =
		container_of(work, struct virtio_gpu_device,
			     ctrlq.dequeue_work);
	struct list_head reclaim_list;
	struct virtio_gpu_vbuffer *entry, *tmp;
	struct virtio_gpu_ctrl_hdr *resp;
	u64 fence_id;
	INIT_LIST_HEAD(&reclaim_list);
	spin_lock(&vgdev->ctrlq.qlock);
	/* Re-check after re-enabling callbacks to avoid missing completions. */
	do {
		virtqueue_disable_cb(vgdev->ctrlq.vq);
		reclaim_vbufs(vgdev->ctrlq.vq, &reclaim_list);
	} while (!virtqueue_enable_cb(vgdev->ctrlq.vq));
	spin_unlock(&vgdev->ctrlq.qlock);
	list_for_each_entry(entry, &reclaim_list, list) {
		resp = (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;
		trace_virtio_gpu_cmd_response(vgdev->ctrlq.vq, resp, entry->seqno);
		if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA)) {
			if (le32_to_cpu(resp->type) >= VIRTIO_GPU_RESP_ERR_UNSPEC) {
				struct virtio_gpu_ctrl_hdr *cmd;
				cmd = virtio_gpu_vbuf_ctrl_hdr(entry);
				DRM_ERROR_RATELIMITED("response 0x%x (command 0x%x)\n",
						      le32_to_cpu(resp->type),
						      le32_to_cpu(cmd->type));
			} else
				DRM_DEBUG("response 0x%x\n", le32_to_cpu(resp->type));
		}
		if (resp->flags & cpu_to_le32(VIRTIO_GPU_FLAG_FENCE)) {
			fence_id = le64_to_cpu(resp->fence_id);
			virtio_gpu_fence_event_process(vgdev, fence_id);
		}
		if (entry->resp_cb)
			entry->resp_cb(vgdev, entry);
	}
	/* Wake submitters that were blocked on queue space. */
	wake_up(&vgdev->ctrlq.ack_queue);
	list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
		if (entry->objs)
			virtio_gpu_array_put_free_delayed(vgdev, entry->objs);
		list_del(&entry->list);
		free_vbuf(vgdev, entry);
	}
}
/*
 * Workqueue handler for the cursor queue: reclaim and free completed
 * vbuffers (cursor commands carry no payload to process), then wake
 * submitters waiting for queue space.
 */
void virtio_gpu_dequeue_cursor_func(struct work_struct *work)
{
	struct virtio_gpu_device *vgdev =
		container_of(work, struct virtio_gpu_device,
			     cursorq.dequeue_work);
	struct list_head reclaim_list;
	struct virtio_gpu_vbuffer *entry, *tmp;
	INIT_LIST_HEAD(&reclaim_list);
	spin_lock(&vgdev->cursorq.qlock);
	do {
		virtqueue_disable_cb(vgdev->cursorq.vq);
		reclaim_vbufs(vgdev->cursorq.vq, &reclaim_list);
	} while (!virtqueue_enable_cb(vgdev->cursorq.vq));
	spin_unlock(&vgdev->cursorq.qlock);
	list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
		struct virtio_gpu_ctrl_hdr *resp =
			(struct virtio_gpu_ctrl_hdr *)entry->resp_buf;
		trace_virtio_gpu_cmd_response(vgdev->cursorq.vq, resp, entry->seqno);
		list_del(&entry->list);
		free_vbuf(vgdev, entry);
	}
	wake_up(&vgdev->cursorq.ack_queue);
}
/* Create sg_table from a vmalloc'd buffer.
 * @data must be page-aligned; *sg_ents receives the entry count.
 * Returns NULL on allocation failure or unmapped page; caller frees the
 * table with sg_free_table() + kfree(). */
static struct sg_table *vmalloc_to_sgt(char *data, uint32_t size, int *sg_ents)
{
	int ret, s, i;
	struct sg_table *sgt;
	struct scatterlist *sg;
	struct page *pg;
	if (WARN_ON(!PAGE_ALIGNED(data)))
		return NULL;
	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return NULL;
	*sg_ents = DIV_ROUND_UP(size, PAGE_SIZE);
	ret = sg_alloc_table(sgt, *sg_ents, GFP_KERNEL);
	if (ret) {
		kfree(sgt);
		return NULL;
	}
	/* One sg entry per vmalloc page; the last may be partial. */
	for_each_sgtable_sg(sgt, sg, i) {
		pg = vmalloc_to_page(data);
		if (!pg) {
			sg_free_table(sgt);
			kfree(sgt);
			return NULL;
		}
		s = min_t(int, PAGE_SIZE, size);
		sg_set_page(sg, pg, s, 0);
		size -= s;
		data += s;
	}
	return sgt;
}
/*
 * Add a prepared sg list for @vbuf to the control virtqueue, waiting for
 * ring space if necessary. Emits @fence (if any) under the queue lock so
 * fence IDs match submission order. Returns 0 or -ENODEV if the DRM device
 * is going away.
 */
static int virtio_gpu_queue_ctrl_sgs(struct virtio_gpu_device *vgdev,
				     struct virtio_gpu_vbuffer *vbuf,
				     struct virtio_gpu_fence *fence,
				     int elemcnt,
				     struct scatterlist **sgs,
				     int outcnt,
				     int incnt)
{
	struct virtqueue *vq = vgdev->ctrlq.vq;
	int ret, idx;
	if (!drm_dev_enter(vgdev->ddev, &idx)) {
		if (fence && vbuf->objs)
			virtio_gpu_array_unlock_resv(vbuf->objs);
		free_vbuf(vgdev, vbuf);
		return -ENODEV;
	}
	/* Indirect descriptors collapse the request to a single ring slot. */
	if (vgdev->has_indirect)
		elemcnt = 1;
again:
	spin_lock(&vgdev->ctrlq.qlock);
	if (vq->num_free < elemcnt) {
		spin_unlock(&vgdev->ctrlq.qlock);
		virtio_gpu_notify(vgdev);
		wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= elemcnt);
		goto again;
	}
	/* now that the position of the vbuf in the virtqueue is known, we can
	 * finally set the fence id
	 */
	if (fence) {
		virtio_gpu_fence_emit(vgdev, virtio_gpu_vbuf_ctrl_hdr(vbuf),
				      fence);
		if (vbuf->objs) {
			virtio_gpu_array_add_fence(vbuf->objs, &fence->f);
			virtio_gpu_array_unlock_resv(vbuf->objs);
		}
	}
	ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
	WARN_ON(ret);
	vbuf->seqno = ++vgdev->ctrlq.seqno;
	trace_virtio_gpu_cmd_queue(vq, virtio_gpu_vbuf_ctrl_hdr(vbuf), vbuf->seqno);
	/* Kick is batched; virtio_gpu_notify() performs it later. */
	atomic_inc(&vgdev->pending_commands);
	spin_unlock(&vgdev->ctrlq.qlock);
	drm_dev_exit(idx);
	return 0;
}
/*
 * Build the scatterlists for a control command (inline command, optional
 * out-of-line data — linear or vmalloc'ed — and optional response) and
 * hand them to virtio_gpu_queue_ctrl_sgs().
 */
static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
					       struct virtio_gpu_vbuffer *vbuf,
					       struct virtio_gpu_fence *fence)
{
	struct scatterlist *sgs[3], vcmd, vout, vresp;
	struct sg_table *sgt = NULL;
	int elemcnt = 0, outcnt = 0, incnt = 0, ret;
	/* set up vcmd */
	sg_init_one(&vcmd, vbuf->buf, vbuf->size);
	elemcnt++;
	sgs[outcnt] = &vcmd;
	outcnt++;
	/* set up vout */
	if (vbuf->data_size) {
		if (is_vmalloc_addr(vbuf->data_buf)) {
			/* vmalloc memory is not physically contiguous:
			 * build a per-page sg table. */
			int sg_ents;
			sgt = vmalloc_to_sgt(vbuf->data_buf, vbuf->data_size,
					     &sg_ents);
			if (!sgt) {
				if (fence && vbuf->objs)
					virtio_gpu_array_unlock_resv(vbuf->objs);
				return -ENOMEM;
			}
			elemcnt += sg_ents;
			sgs[outcnt] = sgt->sgl;
		} else {
			sg_init_one(&vout, vbuf->data_buf, vbuf->data_size);
			elemcnt++;
			sgs[outcnt] = &vout;
		}
		outcnt++;
	}
	/* set up vresp */
	if (vbuf->resp_size) {
		sg_init_one(&vresp, vbuf->resp_buf, vbuf->resp_size);
		elemcnt++;
		sgs[outcnt + incnt] = &vresp;
		incnt++;
	}
	ret = virtio_gpu_queue_ctrl_sgs(vgdev, vbuf, fence, elemcnt, sgs, outcnt,
					incnt);
	if (sgt) {
		sg_free_table(sgt);
		kfree(sgt);
	}
	return ret;
}
/* Kick the control virtqueue if any commands were queued since the last
 * notify; batching kicks this way reduces VM exits. */
void virtio_gpu_notify(struct virtio_gpu_device *vgdev)
{
	bool kick;

	if (!atomic_read(&vgdev->pending_commands))
		return;

	spin_lock(&vgdev->ctrlq.qlock);
	atomic_set(&vgdev->pending_commands, 0);
	kick = virtqueue_kick_prepare(vgdev->ctrlq.vq);
	spin_unlock(&vgdev->ctrlq.qlock);

	if (kick)
		virtqueue_notify(vgdev->ctrlq.vq);
}
/* Queue a control command without a fence. */
static int virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
					struct virtio_gpu_vbuffer *vbuf)
{
	return virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, NULL);
}
/*
 * Queue a command on the cursor virtqueue, retrying (with the lock dropped)
 * until ring space is available, then kick the host.
 */
static void virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
				    struct virtio_gpu_vbuffer *vbuf)
{
	struct virtqueue *vq = vgdev->cursorq.vq;
	struct scatterlist *sgs[1], ccmd;
	int idx, ret, outcnt;
	bool notify;
	if (!drm_dev_enter(vgdev->ddev, &idx)) {
		free_vbuf(vgdev, vbuf);
		return;
	}
	sg_init_one(&ccmd, vbuf->buf, vbuf->size);
	sgs[0] = &ccmd;
	outcnt = 1;
	spin_lock(&vgdev->cursorq.qlock);
retry:
	ret = virtqueue_add_sgs(vq, sgs, outcnt, 0, vbuf, GFP_ATOMIC);
	if (ret == -ENOSPC) {
		/* Ring full: sleep (unlocked) until the dequeue worker frees
		 * descriptors, then try again. */
		spin_unlock(&vgdev->cursorq.qlock);
		wait_event(vgdev->cursorq.ack_queue, vq->num_free >= outcnt);
		spin_lock(&vgdev->cursorq.qlock);
		goto retry;
	} else {
		vbuf->seqno = ++vgdev->cursorq.seqno;
		trace_virtio_gpu_cmd_queue(vq,
					   virtio_gpu_vbuf_ctrl_hdr(vbuf),
					   vbuf->seqno);
		notify = virtqueue_kick_prepare(vq);
	}
	spin_unlock(&vgdev->cursorq.qlock);
	if (notify)
		virtqueue_notify(vq);
	drm_dev_exit(idx);
}
/* GEM objects are created only for userspace-visible, long-lived resources;
 * queue-internal objects could instead use dma_alloc'ed pages (open question).
 */
/* Create a basic 2D resource on the host. */
void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
				    struct virtio_gpu_object *bo,
				    struct virtio_gpu_object_params *params,
				    struct virtio_gpu_object_array *objs,
				    struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_create_2d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	/* objs ownership passes to the vbuf; released on completion. */
	vbuf->objs = objs;
	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->format = cpu_to_le32(params->format);
	cmd_p->width = cpu_to_le32(params->width);
	cmd_p->height = cpu_to_le32(params->height);
	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
	bo->created = true;
}
/* Completion callback for RESOURCE_UNREF: finish tearing down the BO now
 * that the host no longer references it. */
static void virtio_gpu_cmd_unref_cb(struct virtio_gpu_device *vgdev,
				    struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_object *bo;
	bo = vbuf->resp_cb_data;
	vbuf->resp_cb_data = NULL;
	virtio_gpu_cleanup_object(bo);
}
/*
 * Ask the host to drop its reference on @bo. Cleanup normally happens in
 * the completion callback; if queueing fails, clean up immediately.
 */
void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
				   struct virtio_gpu_object *bo)
{
	struct virtio_gpu_resource_unref *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	int ret;
	cmd_p = virtio_gpu_alloc_cmd_cb(vgdev, &vbuf, sizeof(*cmd_p),
					virtio_gpu_cmd_unref_cb);
	memset(cmd_p, 0, sizeof(*cmd_p));
	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNREF);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	vbuf->resp_cb_data = bo;
	ret = virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	if (ret < 0)
		virtio_gpu_cleanup_object(bo);
}
/* Point scanout @scanout_id at @resource_id with the given rectangle. */
void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
				uint32_t scanout_id, uint32_t resource_id,
				uint32_t width, uint32_t height,
				uint32_t x, uint32_t y)
{
	struct virtio_gpu_vbuffer *vbuf;
	struct virtio_gpu_set_scanout *cmd;

	cmd = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd));
	memset(cmd, 0, sizeof(*cmd));

	cmd->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SET_SCANOUT);
	cmd->scanout_id = cpu_to_le32(scanout_id);
	cmd->resource_id = cpu_to_le32(resource_id);
	cmd->r.x = cpu_to_le32(x);
	cmd->r.y = cpu_to_le32(y);
	cmd->r.width = cpu_to_le32(width);
	cmd->r.height = cpu_to_le32(height);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}
/* Flush a dirty rectangle of @resource_id to the host display. */
void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
				   uint32_t resource_id,
				   uint32_t x, uint32_t y,
				   uint32_t width, uint32_t height,
				   struct virtio_gpu_object_array *objs,
				   struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_flush *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;
	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_FLUSH);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);
	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}
/*
 * Copy a 2D rectangle from guest backing pages (objs->objs[0]) into the
 * host resource. Syncs the sg table for the device first when the DMA API
 * is in use.
 */
void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
					uint64_t offset,
					uint32_t width, uint32_t height,
					uint32_t x, uint32_t y,
					struct virtio_gpu_object_array *objs,
					struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_transfer_to_host_2d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
	if (virtio_gpu_is_shmem(bo) && use_dma_api)
		dma_sync_sgtable_for_device(vgdev->vdev->dev.parent,
					    bo->base.sgt, DMA_TO_DEVICE);
	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;
	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);
	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}
/*
 * Attach guest backing pages (mem-entry array @ents) to a host resource.
 * @ents ownership transfers to the vbuf (freed via data_buf on completion).
 */
static void
virtio_gpu_cmd_resource_attach_backing(struct virtio_gpu_device *vgdev,
				       uint32_t resource_id,
				       struct virtio_gpu_mem_entry *ents,
				       uint32_t nents,
				       struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_attach_backing *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->nr_entries = cpu_to_le32(nents);
	vbuf->data_buf = ents;
	vbuf->data_size = sizeof(*ents) * nents;
	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}
/*
 * Response handler for GET_DISPLAY_INFO: cache per-scanout mode info,
 * clear the pending flag, wake waiters, and raise a hotplug event.
 */
static void virtio_gpu_cmd_get_display_info_cb(struct virtio_gpu_device *vgdev,
					       struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_resp_display_info *resp =
		(struct virtio_gpu_resp_display_info *)vbuf->resp_buf;
	int i;
	spin_lock(&vgdev->display_info_lock);
	for (i = 0; i < vgdev->num_scanouts; i++) {
		vgdev->outputs[i].info = resp->pmodes[i];
		if (resp->pmodes[i].enabled) {
			DRM_DEBUG("output %d: %dx%d+%d+%d", i,
				  le32_to_cpu(resp->pmodes[i].r.width),
				  le32_to_cpu(resp->pmodes[i].r.height),
				  le32_to_cpu(resp->pmodes[i].r.x),
				  le32_to_cpu(resp->pmodes[i].r.y));
		} else {
			DRM_DEBUG("output %d: disabled", i);
		}
	}
	vgdev->display_info_pending = false;
	spin_unlock(&vgdev->display_info_lock);
	wake_up(&vgdev->resp_wq);
	/* Fall back to a direct hotplug event if polling isn't enabled. */
	if (!drm_helper_hpd_irq_event(vgdev->ddev))
		drm_kms_helper_hotplug_event(vgdev->ddev);
}
/* Response handler for GET_CAPSET_INFO: record the capset's id, maximum
 * version, and maximum size at the queried index, then wake waiters. */
static void virtio_gpu_cmd_get_capset_info_cb(struct virtio_gpu_device *vgdev,
					      struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_get_capset_info *cmd =
		(struct virtio_gpu_get_capset_info *)vbuf->buf;
	struct virtio_gpu_resp_capset_info *resp =
		(struct virtio_gpu_resp_capset_info *)vbuf->resp_buf;
	int i = le32_to_cpu(cmd->capset_index);
	spin_lock(&vgdev->display_info_lock);
	if (vgdev->capsets) {
		vgdev->capsets[i].id = le32_to_cpu(resp->capset_id);
		vgdev->capsets[i].max_version = le32_to_cpu(resp->capset_max_version);
		vgdev->capsets[i].max_size = le32_to_cpu(resp->capset_max_size);
	} else {
		DRM_ERROR("invalid capset memory.");
	}
	spin_unlock(&vgdev->display_info_lock);
	wake_up(&vgdev->resp_wq);
}
/*
 * Response handler for GET_CAPSET: copy the capset data into the matching
 * cache entry and mark it valid (with a write barrier so readers observing
 * is_valid see the completed copy).
 */
static void virtio_gpu_cmd_capset_cb(struct virtio_gpu_device *vgdev,
				     struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_get_capset *cmd =
		(struct virtio_gpu_get_capset *)vbuf->buf;
	struct virtio_gpu_resp_capset *resp =
		(struct virtio_gpu_resp_capset *)vbuf->resp_buf;
	struct virtio_gpu_drv_cap_cache *cache_ent;
	spin_lock(&vgdev->display_info_lock);
	list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
		if (cache_ent->version == le32_to_cpu(cmd->capset_version) &&
		    cache_ent->id == le32_to_cpu(cmd->capset_id)) {
			memcpy(cache_ent->caps_cache, resp->capset_data,
			       cache_ent->size);
			/* Copy must occur before is_valid is signalled. */
			smp_wmb();
			atomic_set(&cache_ent->is_valid, 1);
			break;
		}
	}
	spin_unlock(&vgdev->display_info_lock);
	wake_up_all(&vgdev->resp_wq);
}
/* drm_do_get_edid() block-fetch callback reading out of the host's EDID
 * response buffer. Returns 0, or -EINVAL on an out-of-range request. */
static int virtio_get_edid_block(void *data, u8 *buf,
				 unsigned int block, size_t len)
{
	struct virtio_gpu_resp_edid *resp = data;
	size_t offset = block * EDID_LENGTH;

	if (offset + len > le32_to_cpu(resp->size))
		return -EINVAL;

	memcpy(buf, resp->edid + offset, len);
	return 0;
}
/*
 * Response handler for GET_EDID: parse the EDID for the queried scanout,
 * update the connector property, and swap it into the output under the
 * display-info lock before freeing the old one.
 */
static void virtio_gpu_cmd_get_edid_cb(struct virtio_gpu_device *vgdev,
				       struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_cmd_get_edid *cmd =
		(struct virtio_gpu_cmd_get_edid *)vbuf->buf;
	struct virtio_gpu_resp_edid *resp =
		(struct virtio_gpu_resp_edid *)vbuf->resp_buf;
	uint32_t scanout = le32_to_cpu(cmd->scanout);
	struct virtio_gpu_output *output;
	struct edid *new_edid, *old_edid;
	if (scanout >= vgdev->num_scanouts)
		return;
	output = vgdev->outputs + scanout;
	new_edid = drm_do_get_edid(&output->conn, virtio_get_edid_block, resp);
	drm_connector_update_edid_property(&output->conn, new_edid);
	spin_lock(&vgdev->display_info_lock);
	old_edid = output->edid;
	output->edid = new_edid;
	spin_unlock(&vgdev->display_info_lock);
	kfree(old_edid);
	wake_up(&vgdev->resp_wq);
}
/*
 * Query the host for display/scanout configuration; the answer arrives in
 * virtio_gpu_cmd_get_display_info_cb(). Returns 0 or -ENOMEM.
 */
int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev)
{
	struct virtio_gpu_ctrl_hdr *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	void *resp_buf;
	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_display_info),
			   GFP_KERNEL);
	if (!resp_buf)
		return -ENOMEM;
	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_get_display_info_cb, &vbuf,
		 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_display_info),
		 resp_buf);
	memset(cmd_p, 0, sizeof(*cmd_p));
	vgdev->display_info_pending = true;
	cmd_p->type = cpu_to_le32(VIRTIO_GPU_CMD_GET_DISPLAY_INFO);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	return 0;
}
/*
 * Query metadata for the capset at index @idx; answered in
 * virtio_gpu_cmd_get_capset_info_cb(). Returns 0 or -ENOMEM.
 */
int virtio_gpu_cmd_get_capset_info(struct virtio_gpu_device *vgdev, int idx)
{
	struct virtio_gpu_get_capset_info *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	void *resp_buf;
	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset_info),
			   GFP_KERNEL);
	if (!resp_buf)
		return -ENOMEM;
	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_get_capset_info_cb, &vbuf,
		 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_capset_info),
		 resp_buf);
	memset(cmd_p, 0, sizeof(*cmd_p));
	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET_INFO);
	cmd_p->capset_index = cpu_to_le32(idx);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	return 0;
}
/*
 * Fetch capset data for (@idx, @version), caching the result. *cache_p is
 * set to either an existing cache entry (possibly still being filled — the
 * caller must wait on is_valid) or a newly inserted one whose data arrives
 * via virtio_gpu_cmd_capset_cb(). Returns 0 or a negative errno.
 */
int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
			      int idx, int version,
			      struct virtio_gpu_drv_cap_cache **cache_p)
{
	struct virtio_gpu_get_capset *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	int max_size;
	struct virtio_gpu_drv_cap_cache *cache_ent;
	struct virtio_gpu_drv_cap_cache *search_ent;
	void *resp_buf;
	*cache_p = NULL;
	if (idx >= vgdev->num_capsets)
		return -EINVAL;
	if (version > vgdev->capsets[idx].max_version)
		return -EINVAL;
	/* Allocate the candidate cache entry before taking the lock. */
	cache_ent = kzalloc(sizeof(*cache_ent), GFP_KERNEL);
	if (!cache_ent)
		return -ENOMEM;
	max_size = vgdev->capsets[idx].max_size;
	cache_ent->caps_cache = kmalloc(max_size, GFP_KERNEL);
	if (!cache_ent->caps_cache) {
		kfree(cache_ent);
		return -ENOMEM;
	}
	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset) + max_size,
			   GFP_KERNEL);
	if (!resp_buf) {
		kfree(cache_ent->caps_cache);
		kfree(cache_ent);
		return -ENOMEM;
	}
	cache_ent->version = version;
	cache_ent->id = vgdev->capsets[idx].id;
	atomic_set(&cache_ent->is_valid, 0);
	cache_ent->size = max_size;
	spin_lock(&vgdev->display_info_lock);
	/* Search while under lock in case it was added by another task. */
	list_for_each_entry(search_ent, &vgdev->cap_cache, head) {
		if (search_ent->id == vgdev->capsets[idx].id &&
		    search_ent->version == version) {
			*cache_p = search_ent;
			break;
		}
	}
	if (!*cache_p)
		list_add_tail(&cache_ent->head, &vgdev->cap_cache);
	spin_unlock(&vgdev->display_info_lock);
	if (*cache_p) {
		/* Entry was found, so free everything that was just created. */
		kfree(resp_buf);
		kfree(cache_ent->caps_cache);
		kfree(cache_ent);
		return 0;
	}
	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_capset_cb, &vbuf, sizeof(*cmd_p),
		 sizeof(struct virtio_gpu_resp_capset) + max_size,
		 resp_buf);
	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET);
	cmd_p->capset_id = cpu_to_le32(vgdev->capsets[idx].id);
	cmd_p->capset_version = cpu_to_le32(version);
	*cache_p = cache_ent;
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	return 0;
}
/*
 * Request EDID data for every scanout (requires the host EDID feature);
 * answers arrive in virtio_gpu_cmd_get_edid_cb(). Returns 0, -EINVAL if
 * EDID is unsupported, or -ENOMEM.
 */
int virtio_gpu_cmd_get_edids(struct virtio_gpu_device *vgdev)
{
	struct virtio_gpu_cmd_get_edid *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	void *resp_buf;
	int scanout;
	if (WARN_ON(!vgdev->has_edid))
		return -EINVAL;
	for (scanout = 0; scanout < vgdev->num_scanouts; scanout++) {
		resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_edid),
				   GFP_KERNEL);
		if (!resp_buf)
			return -ENOMEM;
		cmd_p = virtio_gpu_alloc_cmd_resp
			(vgdev, &virtio_gpu_cmd_get_edid_cb, &vbuf,
			 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_edid),
			 resp_buf);
		cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_EDID);
		cmd_p->scanout = cpu_to_le32(scanout);
		virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	}
	return 0;
}
/* Create a host rendering context @id with the given debug name. */
void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
				   uint32_t context_init, uint32_t nlen,
				   const char *name)
{
	struct virtio_gpu_ctx_create *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_CREATE);
	cmd_p->hdr.ctx_id = cpu_to_le32(id);
	cmd_p->nlen = cpu_to_le32(nlen);
	cmd_p->context_init = cpu_to_le32(context_init);
	strscpy(cmd_p->debug_name, name, sizeof(cmd_p->debug_name));
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}
/* Destroy the host rendering context @id. */
void virtio_gpu_cmd_context_destroy(struct virtio_gpu_device *vgdev,
				    uint32_t id)
{
	struct virtio_gpu_ctx_destroy *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DESTROY);
	cmd_p->hdr.ctx_id = cpu_to_le32(id);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}
/* Attach the resource in objs->objs[0] to host context @ctx_id. */
void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev,
					    uint32_t ctx_id,
					    struct virtio_gpu_object_array *objs)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_ctx_resource *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;
	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}
/* Detach the resource in objs->objs[0] from host context @ctx_id. */
void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
					    uint32_t ctx_id,
					    struct virtio_gpu_object_array *objs)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_ctx_resource *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;
	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}
/* Create a 3D (virgl) resource on the host from @params. */
void
virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
				  struct virtio_gpu_object *bo,
				  struct virtio_gpu_object_params *params,
				  struct virtio_gpu_object_array *objs,
				  struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_create_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;
	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_3D);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->format = cpu_to_le32(params->format);
	cmd_p->width = cpu_to_le32(params->width);
	cmd_p->height = cpu_to_le32(params->height);
	cmd_p->target = cpu_to_le32(params->target);
	cmd_p->bind = cpu_to_le32(params->bind);
	cmd_p->depth = cpu_to_le32(params->depth);
	cmd_p->array_size = cpu_to_le32(params->array_size);
	cmd_p->last_level = cpu_to_le32(params->last_level);
	cmd_p->nr_samples = cpu_to_le32(params->nr_samples);
	cmd_p->flags = cpu_to_le32(params->flags);
	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
	bo->created = true;
}
/*
 * Copy a 3D box from guest backing pages into the host resource; syncs for
 * the device first when the DMA API is in use.
 */
void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
					uint32_t ctx_id,
					uint64_t offset, uint32_t level,
					uint32_t stride,
					uint32_t layer_stride,
					struct drm_virtgpu_3d_box *box,
					struct virtio_gpu_object_array *objs,
					struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_transfer_host_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
	if (virtio_gpu_is_shmem(bo) && use_dma_api)
		dma_sync_sgtable_for_device(vgdev->vdev->dev.parent,
					    bo->base.sgt, DMA_TO_DEVICE);
	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;
	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	convert_to_hw_box(&cmd_p->box, box);
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->level = cpu_to_le32(level);
	cmd_p->stride = cpu_to_le32(stride);
	cmd_p->layer_stride = cpu_to_le32(layer_stride);
	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}
/*
 * Copy a 3D region from the host resource back into guest memory.
 * Mirrors virtio_gpu_cmd_transfer_to_host_3d() minus the DMA sync.
 */
void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
					  uint32_t ctx_id,
					  uint64_t offset, uint32_t level,
					  uint32_t stride,
					  uint32_t layer_stride,
					  struct drm_virtgpu_3d_box *box,
					  struct virtio_gpu_object_array *objs,
					  struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_vbuffer *vbuf;
	struct virtio_gpu_transfer_host_3d *cmd;
	struct virtio_gpu_object *obj = gem_to_virtio_gpu_obj(objs->objs[0]);

	cmd = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd));
	memset(cmd, 0, sizeof(*cmd));
	vbuf->objs = objs;

	cmd->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D);
	cmd->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd->resource_id = cpu_to_le32(obj->hw_res_handle);
	convert_to_hw_box(&cmd->box, box);
	cmd->offset = cpu_to_le64(offset);
	cmd->level = cpu_to_le32(level);
	cmd->stride = cpu_to_le32(stride);
	cmd->layer_stride = cpu_to_le32(layer_stride);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}
/*
 * Submit a 3D command stream (@data, @data_size bytes) for context @ctx_id.
 * Ownership of @data passes to the vbuffer; @objs pins referenced BOs and
 * @fence signals completion.
 */
void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
			   void *data, uint32_t data_size,
			   uint32_t ctx_id,
			   struct virtio_gpu_object_array *objs,
			   struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_vbuffer *vbuf;
	struct virtio_gpu_cmd_submit *cmd;

	cmd = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd));
	memset(cmd, 0, sizeof(*cmd));

	/* Payload rides along with the command buffer. */
	vbuf->data_buf = data;
	vbuf->data_size = data_size;
	vbuf->objs = objs;

	cmd->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SUBMIT_3D);
	cmd->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd->size = cpu_to_le32(data_size);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}
/*
 * Attach guest memory pages (@ents, @nents entries) to the host resource
 * backing @obj.  Thin wrapper around the ATTACH_BACKING command; passes a
 * NULL fence, so completion is not tracked here.
 */
void virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
			      struct virtio_gpu_object *obj,
			      struct virtio_gpu_mem_entry *ents,
			      unsigned int nents)
{
	virtio_gpu_cmd_resource_attach_backing(vgdev, obj->hw_res_handle,
					       ents, nents, NULL);
}
/*
 * Push the cached cursor state of @output to the host via the cursor queue.
 */
void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
			    struct virtio_gpu_output *output)
{
	struct virtio_gpu_update_cursor *cursor;
	struct virtio_gpu_vbuffer *vbuf;

	/* Stamp the scanout id before snapshotting the cursor state. */
	output->cursor.pos.scanout_id = cpu_to_le32(output->index);

	cursor = virtio_gpu_alloc_cursor(vgdev, &vbuf);
	memcpy(cursor, &output->cursor, sizeof(output->cursor));

	virtio_gpu_queue_cursor(vgdev, vbuf);
}
/*
 * Completion callback for RESOURCE_ASSIGN_UUID: record the UUID the host
 * handed back (or flag an error) and wake anyone waiting on resp_wq.
 */
static void virtio_gpu_cmd_resource_uuid_cb(struct virtio_gpu_device *vgdev,
					    struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_object *obj =
		gem_to_virtio_gpu_obj(vbuf->objs->objs[0]);
	struct virtio_gpu_resp_resource_uuid *resp =
		(struct virtio_gpu_resp_resource_uuid *)vbuf->resp_buf;
	uint32_t resp_type = le32_to_cpu(resp->hdr.type);

	spin_lock(&vgdev->resource_export_lock);
	/* The submit path left the object in STATE_INITIALIZING. */
	WARN_ON(obj->uuid_state != STATE_INITIALIZING);

	if (resp_type == VIRTIO_GPU_RESP_OK_RESOURCE_UUID &&
	    obj->uuid_state == STATE_INITIALIZING) {
		import_uuid(&obj->uuid, resp->uuid);
		obj->uuid_state = STATE_OK;
	} else {
		/* Wrong response type or unexpected state: mark failed. */
		obj->uuid_state = STATE_ERR;
	}
	spin_unlock(&vgdev->resource_export_lock);

	wake_up_all(&vgdev->resp_wq);
}
/*
 * Ask the host to assign a UUID to the first object in @objs.
 *
 * On success, ownership of @objs passes to the vbuffer and the result is
 * delivered asynchronously via virtio_gpu_cmd_resource_uuid_cb().  On
 * allocation failure the object is marked STATE_ERR and @objs is released
 * here.  Returns 0 or -ENOMEM.
 */
int
virtio_gpu_cmd_resource_assign_uuid(struct virtio_gpu_device *vgdev,
				    struct virtio_gpu_object_array *objs)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_resource_assign_uuid *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	struct virtio_gpu_resp_resource_uuid *resp_buf;

	resp_buf = kzalloc(sizeof(*resp_buf), GFP_KERNEL);
	if (!resp_buf) {
		/* No response buffer: fail the export and drop the refs. */
		spin_lock(&vgdev->resource_export_lock);
		bo->uuid_state = STATE_ERR;
		spin_unlock(&vgdev->resource_export_lock);
		virtio_gpu_array_put_free(objs);
		return -ENOMEM;
	}

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, virtio_gpu_cmd_resource_uuid_cb, &vbuf, sizeof(*cmd_p),
		 sizeof(struct virtio_gpu_resp_resource_uuid), resp_buf);
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_ASSIGN_UUID);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);

	vbuf->objs = objs;
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	return 0;
}
/*
 * Completion callback for RESOURCE_MAP_BLOB: stash the host's map_info in
 * the VRAM object, update its map state, and wake waiters on resp_wq.
 */
static void virtio_gpu_cmd_resource_map_cb(struct virtio_gpu_device *vgdev,
					   struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_object *bo =
		gem_to_virtio_gpu_obj(vbuf->objs->objs[0]);
	struct virtio_gpu_resp_map_info *resp =
		(struct virtio_gpu_resp_map_info *)vbuf->resp_buf;
	struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo);
	uint32_t resp_type = le32_to_cpu(resp->hdr.type);

	spin_lock(&vgdev->host_visible_lock);

	if (resp_type == VIRTIO_GPU_RESP_OK_MAP_INFO) {
		vram->map_info = resp->map_info;
		vram->map_state = STATE_OK;
	} else {
		vram->map_state = STATE_ERR;
	}

	spin_unlock(&vgdev->host_visible_lock);
	wake_up_all(&vgdev->resp_wq);
}
/*
 * Request a host mapping for the blob resource in @objs at @offset inside
 * the host-visible region.  The result arrives asynchronously through
 * virtio_gpu_cmd_resource_map_cb().  Returns 0 or -ENOMEM.
 */
int virtio_gpu_cmd_map(struct virtio_gpu_device *vgdev,
		       struct virtio_gpu_object_array *objs, uint64_t offset)
{
	struct virtio_gpu_object *obj = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_resp_map_info *resp;
	struct virtio_gpu_resource_map_blob *cmd;
	struct virtio_gpu_vbuffer *vbuf;

	resp = kzalloc(sizeof(*resp), GFP_KERNEL);
	if (!resp)
		return -ENOMEM;

	cmd = virtio_gpu_alloc_cmd_resp
		(vgdev, virtio_gpu_cmd_resource_map_cb, &vbuf, sizeof(*cmd),
		 sizeof(struct virtio_gpu_resp_map_info), resp);
	memset(cmd, 0, sizeof(*cmd));

	cmd->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_MAP_BLOB);
	cmd->resource_id = cpu_to_le32(obj->hw_res_handle);
	cmd->offset = cpu_to_le64(offset);
	vbuf->objs = objs;

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	return 0;
}
/*
 * Tell the host to tear down its mapping of the blob resource behind @bo.
 */
void virtio_gpu_cmd_unmap(struct virtio_gpu_device *vgdev,
			  struct virtio_gpu_object *bo)
{
	struct virtio_gpu_vbuffer *vbuf;
	struct virtio_gpu_resource_unmap_blob *cmd;

	cmd = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd));
	memset(cmd, 0, sizeof(*cmd));

	cmd->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNMAP_BLOB);
	cmd->resource_id = cpu_to_le32(bo->hw_res_handle);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}
/*
 * Create a blob resource for @bo on the host.
 *
 * @ents/@nents describe the guest backing pages; the array becomes the
 * vbuffer's data payload (presumably freed by the vq core once the command
 * completes — not visible here, TODO confirm).  Marks the BO created.
 */
void
virtio_gpu_cmd_resource_create_blob(struct virtio_gpu_device *vgdev,
				    struct virtio_gpu_object *bo,
				    struct virtio_gpu_object_params *params,
				    struct virtio_gpu_mem_entry *ents,
				    uint32_t nents)
{
	struct virtio_gpu_resource_create_blob *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB);
	cmd_p->hdr.ctx_id = cpu_to_le32(params->ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->blob_mem = cpu_to_le32(params->blob_mem);
	cmd_p->blob_flags = cpu_to_le32(params->blob_flags);
	cmd_p->blob_id = cpu_to_le64(params->blob_id);
	cmd_p->size = cpu_to_le64(params->size);
	cmd_p->nr_entries = cpu_to_le32(nents);

	/* Entry list rides along as the command's data payload. */
	vbuf->data_buf = ents;
	vbuf->data_size = sizeof(*ents) * nents;

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);

	bo->created = true;
}
/*
 * Program scanout @scanout_id from a blob resource: pixel format, full
 * framebuffer geometry, per-plane strides/offsets, and the visible
 * rectangle (@x, @y, @width, @height).
 */
void virtio_gpu_cmd_set_scanout_blob(struct virtio_gpu_device *vgdev,
				     uint32_t scanout_id,
				     struct virtio_gpu_object *bo,
				     struct drm_framebuffer *fb,
				     uint32_t width, uint32_t height,
				     uint32_t x, uint32_t y)
{
	struct virtio_gpu_vbuffer *vbuf;
	struct virtio_gpu_set_scanout_blob *cmd;
	uint32_t fmt = virtio_gpu_translate_format(fb->format->format);
	uint32_t plane;

	cmd = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd));
	memset(cmd, 0, sizeof(*cmd));

	cmd->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SET_SCANOUT_BLOB);
	cmd->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd->scanout_id = cpu_to_le32(scanout_id);

	cmd->format = cpu_to_le32(fmt);
	cmd->width = cpu_to_le32(fb->width);
	cmd->height = cpu_to_le32(fb->height);

	/* The wire format always carries four plane slots. */
	for (plane = 0; plane < 4; plane++) {
		cmd->strides[plane] = cpu_to_le32(fb->pitches[plane]);
		cmd->offsets[plane] = cpu_to_le32(fb->offsets[plane]);
	}

	cmd->r.x = cpu_to_le32(x);
	cmd->r.y = cpu_to_le32(y);
	cmd->r.width = cpu_to_le32(width);
	cmd->r.height = cpu_to_le32(height);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}
| linux-master | drivers/gpu/drm/virtio/virtgpu_vq.c |
/*
* Copyright (C) 2015 Red Hat, Inc.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#include <linux/string_helpers.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_file.h>
#include "virtgpu_drv.h"
/* Print one "name : yes/no" line into the debugfs seq_file. */
static void virtio_gpu_add_bool(struct seq_file *m, const char *name,
				bool value)
{
	seq_printf(m, "%-16s : %s\n", name, str_yes_no(value));
}
static void virtio_gpu_add_int(struct seq_file *m, const char *name, int value)
{
seq_printf(m, "%-16s : %d\n", name, value);
}
/*
 * debugfs show: dump the negotiated device features and, if present, the
 * host-visible memory region.  Always returns 0 (seq_file convention).
 */
static int virtio_gpu_features(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct virtio_gpu_device *vgdev = node->minor->dev->dev_private;

	virtio_gpu_add_bool(m, "virgl", vgdev->has_virgl_3d);
	virtio_gpu_add_bool(m, "edid", vgdev->has_edid);
	virtio_gpu_add_bool(m, "indirect", vgdev->has_indirect);
	virtio_gpu_add_bool(m, "resource uuid",
			    vgdev->has_resource_assign_uuid);
	virtio_gpu_add_bool(m, "blob resources", vgdev->has_resource_blob);
	virtio_gpu_add_bool(m, "context init", vgdev->has_context_init);
	virtio_gpu_add_int(m, "cap sets", vgdev->num_capsets);
	virtio_gpu_add_int(m, "scanouts", vgdev->num_scanouts);

	/* Only shown when a host-visible region was negotiated. */
	if (vgdev->host_visible_region.len) {
		seq_printf(m, "%-16s : 0x%lx +0x%lx\n", "host visible region",
			   (unsigned long)vgdev->host_visible_region.addr,
			   (unsigned long)vgdev->host_visible_region.len);
	}

	return 0;
}
/*
 * debugfs show: print the last completed fence id vs. the most recently
 * issued one, to spot stalled fences.
 */
static int
virtio_gpu_debugfs_irq_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct virtio_gpu_device *vgdev = node->minor->dev->dev_private;

	seq_printf(m, "fence %llu %lld\n",
		   (u64)atomic64_read(&vgdev->fence_drv.last_fence_id),
		   vgdev->fence_drv.current_fence_id);
	return 0;
}
/*
 * debugfs show: dump the drm_mm allocator that manages the host-visible
 * memory window, or a notice if the feature is absent.
 */
static int
virtio_gpu_debugfs_host_visible_mm(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct virtio_gpu_device *vgdev = node->minor->dev->dev_private;
	struct drm_printer p;

	if (!vgdev->has_host_visible) {
		seq_puts(m, "Host allocations not visible to guest\n");
		return 0;
	}

	p = drm_seq_file_printer(m);
	drm_mm_print(&vgdev->host_visible_mm, &p);
	return 0;
}
/*
 * Debugfs entries registered in virtio_gpu_debugfs_init().
 *
 * All entries now spell out the full 4-field drm_info_list initializer
 * (name, show, driver_features, data); the original mixed 2- and 4-field
 * forms, which relied on implicit zero-initialization of the tail fields.
 * Behavior is identical — this is a consistency cleanup only.
 */
static struct drm_info_list virtio_gpu_debugfs_list[] = {
	{ "virtio-gpu-features", virtio_gpu_features, 0, NULL },
	{ "virtio-gpu-irq-fence", virtio_gpu_debugfs_irq_info, 0, NULL },
	{ "virtio-gpu-host-visible-mm", virtio_gpu_debugfs_host_visible_mm, 0, NULL },
};
#define VIRTIO_GPU_DEBUGFS_ENTRIES ARRAY_SIZE(virtio_gpu_debugfs_list)

/* Register all virtio-gpu debugfs files under the DRM minor's directory. */
void
virtio_gpu_debugfs_init(struct drm_minor *minor)
{
	drm_debugfs_create_files(virtio_gpu_debugfs_list,
				 VIRTIO_GPU_DEBUGFS_ENTRIES,
				 minor->debugfs_root, minor);
}
| linux-master | drivers/gpu/drm/virtio/virtgpu_debugfs.c |
/*
* Copyright (C) 2015 Red Hat, Inc.
* All Rights Reserved.
*
* Authors:
* Dave Airlie
* Alon Levy
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <drm/drm_atomic_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
#include "virtgpu_drv.h"
/* Mode limits: minimum sane size, default when the host reports none,
 * and the hard maximum accepted by the mode_config. */
#define XRES_MIN    32
#define YRES_MIN    32

#define XRES_DEF  1024
#define YRES_DEF   768

#define XRES_MAX  8192
#define YRES_MAX  8192

#define drm_connector_to_virtio_gpu_output(x) \
	container_of(x, struct virtio_gpu_output, conn)

/* CRTC ops: fully delegated to the atomic helpers. */
static const struct drm_crtc_funcs virtio_gpu_crtc_funcs = {
	.set_config             = drm_atomic_helper_set_config,
	.destroy                = drm_crtc_cleanup,

	.page_flip              = drm_atomic_helper_page_flip,
	.reset                  = drm_atomic_helper_crtc_reset,
	.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
	.atomic_destroy_state   = drm_atomic_helper_crtc_destroy_state,
};

/* Framebuffer ops: GEM-backed fb helpers plus atomic dirtyfb. */
static const struct drm_framebuffer_funcs virtio_gpu_fb_funcs = {
	.create_handle = drm_gem_fb_create_handle,
	.destroy       = drm_gem_fb_destroy,
	.dirty         = drm_atomic_helper_dirtyfb,
};
/*
 * Initialize and register a virtio-gpu framebuffer wrapping GEM object
 * @obj.  On failure the stored object pointer is cleared (the reference
 * itself is not dropped here) and the error is returned.
 */
static int
virtio_gpu_framebuffer_init(struct drm_device *dev,
			    struct virtio_gpu_framebuffer *vgfb,
			    const struct drm_mode_fb_cmd2 *mode_cmd,
			    struct drm_gem_object *obj)
{
	int err;

	vgfb->base.obj[0] = obj;
	drm_helper_mode_fill_fb_struct(dev, &vgfb->base, mode_cmd);

	err = drm_framebuffer_init(dev, &vgfb->base, &virtio_gpu_fb_funcs);
	if (!err)
		return 0;

	vgfb->base.obj[0] = NULL;
	return err;
}
/*
 * Program the host scanout with the new mode's dimensions (resource id 0:
 * no framebuffer attached yet; the plane update supplies it later).
 */
static void virtio_gpu_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_output *output = drm_crtc_to_virtio_gpu_output(crtc);

	virtio_gpu_cmd_set_scanout(vgdev, output->index, 0,
				   crtc->mode.hdisplay,
				   crtc->mode.vdisplay, 0, 0);
	virtio_gpu_notify(vgdev);
}
/* Nothing to do on enable: scanout setup happens in the plane update. */
static void virtio_gpu_crtc_atomic_enable(struct drm_crtc *crtc,
					  struct drm_atomic_state *state)
{
}
/* Disable the scanout on the host (resource 0, zero-sized rect). */
static void virtio_gpu_crtc_atomic_disable(struct drm_crtc *crtc,
					   struct drm_atomic_state *state)
{
	struct drm_device *dev = crtc->dev;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_output *output = drm_crtc_to_virtio_gpu_output(crtc);

	virtio_gpu_cmd_set_scanout(vgdev, output->index, 0, 0, 0, 0, 0);
	virtio_gpu_notify(vgdev);
}
/* No CRTC-level constraints to validate; accept every state. */
static int virtio_gpu_crtc_atomic_check(struct drm_crtc *crtc,
					struct drm_atomic_state *state)
{
	return 0;
}
static void virtio_gpu_crtc_atomic_flush(struct drm_crtc *crtc,
					 struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
									  crtc);
	struct virtio_gpu_output *output = drm_crtc_to_virtio_gpu_output(crtc);

	/*
	 * virtio-gpu can't do modeset and plane update operations
	 * independent from each other.  So the actual modeset happens
	 * in the plane update callback, and here we just check
	 * whenever we must force the modeset.
	 */
	if (drm_atomic_crtc_needs_modeset(crtc_state)) {
		/* Flag consumed by the plane update path. */
		output->needs_modeset = true;
	}
}
/* Atomic CRTC helper hooks wired to the handlers above. */
static const struct drm_crtc_helper_funcs virtio_gpu_crtc_helper_funcs = {
	.mode_set_nofb  = virtio_gpu_crtc_mode_set_nofb,
	.atomic_check   = virtio_gpu_crtc_atomic_check,
	.atomic_flush   = virtio_gpu_crtc_atomic_flush,
	.atomic_enable  = virtio_gpu_crtc_atomic_enable,
	.atomic_disable = virtio_gpu_crtc_atomic_disable,
};
/* The virtual encoder has no hardware state: all three hooks are no-ops,
 * provided only because the helper vtable expects them. */
static void virtio_gpu_enc_mode_set(struct drm_encoder *encoder,
				    struct drm_display_mode *mode,
				    struct drm_display_mode *adjusted_mode)
{
}

static void virtio_gpu_enc_enable(struct drm_encoder *encoder)
{
}

static void virtio_gpu_enc_disable(struct drm_encoder *encoder)
{
}
/*
 * Build the connector's mode list: prefer host-provided EDID modes; else
 * add the standard no-EDID modes up to XRES_MAX/YRES_MAX and mark either
 * the host-reported size (via a CVT mode) or the default as preferred.
 * Returns the number of modes added.
 */
static int virtio_gpu_conn_get_modes(struct drm_connector *connector)
{
	struct virtio_gpu_output *output =
		drm_connector_to_virtio_gpu_output(connector);
	struct drm_display_mode *mode = NULL;
	int count, width, height;

	if (output->edid) {
		count = drm_add_edid_modes(connector, output->edid);
		if (count)
			return count;
	}

	width  = le32_to_cpu(output->info.r.width);
	height = le32_to_cpu(output->info.r.height);
	count = drm_add_modes_noedid(connector, XRES_MAX, YRES_MAX);

	if (width == 0 || height == 0) {
		/* Host gave no size: fall back to the default mode. */
		drm_set_preferred_mode(connector, XRES_DEF, YRES_DEF);
	} else {
		DRM_DEBUG("add mode: %dx%d\n", width, height);
		mode = drm_cvt_mode(connector->dev, width, height, 60,
				    false, false, false);
		if (!mode)
			return count;
		mode->type |= DRM_MODE_TYPE_PREFERRED;
		drm_mode_probed_add(connector, mode);
		count++;
	}

	return count;
}
/*
 * Filter preferred modes: non-preferred modes always pass; a preferred
 * mode passes only if it matches the default size or lies within 16 px
 * below the host-reported size (tolerance for panel-fit style rounding —
 * presumably; TODO confirm the rationale for the 16 px window).
 */
static enum drm_mode_status virtio_gpu_conn_mode_valid(struct drm_connector *connector,
						       struct drm_display_mode *mode)
{
	struct virtio_gpu_output *output =
		drm_connector_to_virtio_gpu_output(connector);
	int width, height;

	width  = le32_to_cpu(output->info.r.width);
	height = le32_to_cpu(output->info.r.height);

	if (!(mode->type & DRM_MODE_TYPE_PREFERRED))
		return MODE_OK;
	if (mode->hdisplay == XRES_DEF && mode->vdisplay == YRES_DEF)
		return MODE_OK;
	if (mode->hdisplay <= width  && mode->hdisplay >= width - 16 &&
	    mode->vdisplay <= height && mode->vdisplay >= height - 16)
		return MODE_OK;

	DRM_DEBUG("del mode: %dx%d\n", mode->hdisplay, mode->vdisplay);
	return MODE_BAD;
}
/* Encoder helper hooks (all no-op stubs above). */
static const struct drm_encoder_helper_funcs virtio_gpu_enc_helper_funcs = {
	.mode_set = virtio_gpu_enc_mode_set,
	.enable   = virtio_gpu_enc_enable,
	.disable  = virtio_gpu_enc_disable,
};

/* Connector helper hooks: mode probing and validation. */
static const struct drm_connector_helper_funcs virtio_gpu_conn_helper_funcs = {
	.get_modes  = virtio_gpu_conn_get_modes,
	.mode_valid = virtio_gpu_conn_mode_valid,
};
/*
 * Report connector presence straight from the host-provided display info;
 * no probing is involved, so @force is ignored.
 */
static enum drm_connector_status virtio_gpu_conn_detect(
			struct drm_connector *connector,
			bool force)
{
	struct virtio_gpu_output *output =
		drm_connector_to_virtio_gpu_output(connector);

	return output->info.enabled ? connector_status_connected
				    : connector_status_disconnected;
}
/* Unregister and release the connector (embedded in virtio_gpu_output). */
static void virtio_gpu_conn_destroy(struct drm_connector *connector)
{
	drm_connector_unregister(connector);
	drm_connector_cleanup(connector);
}
/* Connector ops: custom detect/destroy, atomic helpers for the rest. */
static const struct drm_connector_funcs virtio_gpu_connector_funcs = {
	.detect                 = virtio_gpu_conn_detect,
	.fill_modes             = drm_helper_probe_single_connector_modes,
	.destroy                = virtio_gpu_conn_destroy,
	.reset                  = drm_atomic_helper_connector_reset,
	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
	.atomic_destroy_state   = drm_atomic_helper_connector_destroy_state,
};
/*
 * Create the full pipeline for one scanout: primary + cursor planes,
 * CRTC, virtual encoder and connector, all embedded in outputs[index].
 *
 * NOTE(review): the return values of drm_crtc_init_with_planes(),
 * drm_connector_init() and drm_simple_encoder_init() are not checked —
 * failures there would go unnoticed; worth confirming upstream intent.
 */
static int vgdev_output_init(struct virtio_gpu_device *vgdev, int index)
{
	struct drm_device *dev = vgdev->ddev;
	struct virtio_gpu_output *output = vgdev->outputs + index;
	struct drm_connector *connector = &output->conn;
	struct drm_encoder *encoder = &output->enc;
	struct drm_crtc *crtc = &output->crtc;
	struct drm_plane *primary, *cursor;

	output->index = index;
	if (index == 0) {
		/* First output starts enabled at the default resolution. */
		output->info.enabled = cpu_to_le32(true);
		output->info.r.width = cpu_to_le32(XRES_DEF);
		output->info.r.height = cpu_to_le32(YRES_DEF);
	}

	primary = virtio_gpu_plane_init(vgdev, DRM_PLANE_TYPE_PRIMARY, index);
	if (IS_ERR(primary))
		return PTR_ERR(primary);
	cursor = virtio_gpu_plane_init(vgdev, DRM_PLANE_TYPE_CURSOR, index);
	if (IS_ERR(cursor))
		return PTR_ERR(cursor);

	drm_crtc_init_with_planes(dev, crtc, primary, cursor,
				  &virtio_gpu_crtc_funcs, NULL);
	drm_crtc_helper_add(crtc, &virtio_gpu_crtc_helper_funcs);

	drm_connector_init(dev, connector, &virtio_gpu_connector_funcs,
			   DRM_MODE_CONNECTOR_VIRTUAL);
	drm_connector_helper_add(connector, &virtio_gpu_conn_helper_funcs);
	if (vgdev->has_edid)
		drm_connector_attach_edid_property(connector);

	drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_VIRTUAL);
	drm_encoder_helper_add(encoder, &virtio_gpu_enc_helper_funcs);
	/* One fixed CRTC per output. */
	encoder->possible_crtcs = 1 << index;

	drm_connector_attach_encoder(connector, encoder);
	drm_connector_register(connector);
	return 0;
}
/*
 * .fb_create hook: wrap a userspace GEM handle in a virtio-gpu
 * framebuffer.  Only the host byte-order XRGB/ARGB formats are accepted.
 *
 * Fix: on virtio_gpu_framebuffer_init() failure this used to return NULL.
 * fb_create callers (drm_internal_framebuffer_create) only check IS_ERR(),
 * so a NULL return was treated as success and later dereferenced.
 * Return ERR_PTR(ret) instead.
 */
static struct drm_framebuffer *
virtio_gpu_user_framebuffer_create(struct drm_device *dev,
				   struct drm_file *file_priv,
				   const struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct drm_gem_object *obj = NULL;
	struct virtio_gpu_framebuffer *virtio_gpu_fb;
	int ret;

	if (mode_cmd->pixel_format != DRM_FORMAT_HOST_XRGB8888 &&
	    mode_cmd->pixel_format != DRM_FORMAT_HOST_ARGB8888)
		return ERR_PTR(-ENOENT);

	/* lookup object associated with res handle */
	obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[0]);
	if (!obj)
		return ERR_PTR(-EINVAL);

	virtio_gpu_fb = kzalloc(sizeof(*virtio_gpu_fb), GFP_KERNEL);
	if (virtio_gpu_fb == NULL) {
		drm_gem_object_put(obj);
		return ERR_PTR(-ENOMEM);
	}

	ret = virtio_gpu_framebuffer_init(dev, virtio_gpu_fb, mode_cmd, obj);
	if (ret) {
		kfree(virtio_gpu_fb);
		drm_gem_object_put(obj);
		return ERR_PTR(ret);
	}
	return &virtio_gpu_fb->base;
}
/* mode_config ops: custom fb_create, stock atomic check/commit. */
static const struct drm_mode_config_funcs virtio_gpu_mode_funcs = {
	.fb_create     = virtio_gpu_user_framebuffer_create,
	.atomic_check  = drm_atomic_helper_check,
	.atomic_commit = drm_atomic_helper_commit,
};
/*
 * Set up KMS: configure mode_config limits/quirks and create one output
 * pipeline per scanout.  Skipped entirely when the device reports no
 * scanouts (headless).
 *
 * NOTE(review): vgdev_output_init() errors are ignored here — confirm
 * whether partial-output failure should abort modeset init.
 */
int virtio_gpu_modeset_init(struct virtio_gpu_device *vgdev)
{
	int i, ret;

	if (!vgdev->num_scanouts)
		return 0;

	ret = drmm_mode_config_init(vgdev->ddev);
	if (ret)
		return ret;

	/* Host-native byte order avoids swizzling in addfb. */
	vgdev->ddev->mode_config.quirk_addfb_prefer_host_byte_order = true;
	vgdev->ddev->mode_config.funcs = &virtio_gpu_mode_funcs;

	/* modes will be validated against the framebuffer size */
	vgdev->ddev->mode_config.min_width = XRES_MIN;
	vgdev->ddev->mode_config.min_height = YRES_MIN;
	vgdev->ddev->mode_config.max_width = XRES_MAX;
	vgdev->ddev->mode_config.max_height = YRES_MAX;

	vgdev->ddev->mode_config.fb_modifiers_not_supported = true;

	for (i = 0 ; i < vgdev->num_scanouts; ++i)
		vgdev_output_init(vgdev, i);

	drm_mode_config_reset(vgdev->ddev);
	return 0;
}
/*
 * Tear down modeset state: free the cached per-scanout EDID blobs.
 * No-op on headless devices.
 */
void virtio_gpu_modeset_fini(struct virtio_gpu_device *vgdev)
{
	int idx;

	if (!vgdev->num_scanouts)
		return;

	for (idx = 0; idx < vgdev->num_scanouts; idx++)
		kfree(vgdev->outputs[idx].edid);
}
| linux-master | drivers/gpu/drm/virtio/virtgpu_display.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2012 Texas Instruments
* Author: Rob Clark <[email protected]>
*/
#include <linux/backlight.h>
#include <linux/gpio/consumer.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <video/display_timing.h>
#include <video/of_display_timing.h>
#include <video/videomode.h>
#include <drm/drm_atomic_state_helper.h>
#include <drm/drm_connector.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
#include "tilcdc_drv.h"
#include "tilcdc_panel.h"
/* Per-device state for the tilcdc generic panel output module. */
struct panel_module {
	struct tilcdc_module base;		/* embedded tilcdc module */
	struct tilcdc_panel_info *info;		/* LCDC raster config from DT */
	struct display_timings *timings;	/* panel timings from DT */
	struct backlight_device *backlight;	/* optional backlight */
	struct gpio_desc *enable_gpio;		/* optional enable GPIO */
};
#define to_panel_module(x) container_of(x, struct panel_module, base)

/*
 * Encoder:
 */

struct panel_encoder {
	struct drm_encoder base;
	struct panel_module *mod;
};
#define to_panel_encoder(x) container_of(x, struct panel_encoder, base)
static void panel_encoder_dpms(struct drm_encoder *encoder, int mode)
{
struct panel_encoder *panel_encoder = to_panel_encoder(encoder);
struct backlight_device *backlight = panel_encoder->mod->backlight;
struct gpio_desc *gpio = panel_encoder->mod->enable_gpio;
if (backlight) {
backlight->props.power = mode == DRM_MODE_DPMS_ON ?
FB_BLANK_UNBLANK : FB_BLANK_POWERDOWN;
backlight_update_status(backlight);
}
if (gpio)
gpiod_set_value_cansleep(gpio,
mode == DRM_MODE_DPMS_ON ? 1 : 0);
}
/* Blank the panel before a mode change. */
static void panel_encoder_prepare(struct drm_encoder *encoder)
{
	panel_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
}

/* Unblank the panel after the mode change completes. */
static void panel_encoder_commit(struct drm_encoder *encoder)
{
	panel_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
}
/* The LCDC CRTC programs the timings; the encoder has nothing to set. */
static void panel_encoder_mode_set(struct drm_encoder *encoder,
				   struct drm_display_mode *mode,
				   struct drm_display_mode *adjusted_mode)
{
	/* nothing needed */
}

/* Legacy (non-atomic) encoder helper hooks. */
static const struct drm_encoder_helper_funcs panel_encoder_helper_funcs = {
	.dpms     = panel_encoder_dpms,
	.prepare  = panel_encoder_prepare,
	.commit   = panel_encoder_commit,
	.mode_set = panel_encoder_mode_set,
};
/*
 * Allocate (devm) and initialize the panel's LVDS encoder.  Returns the
 * encoder or NULL on failure.
 *
 * NOTE(review): the fail path calls drm_encoder_cleanup() even when
 * drm_simple_encoder_init() itself failed — confirm cleanup is safe on a
 * never-initialized encoder.
 */
static struct drm_encoder *panel_encoder_create(struct drm_device *dev,
						struct panel_module *mod)
{
	struct panel_encoder *panel_encoder;
	struct drm_encoder *encoder;
	int ret;

	panel_encoder = devm_kzalloc(dev->dev, sizeof(*panel_encoder),
				     GFP_KERNEL);
	if (!panel_encoder)
		return NULL;

	panel_encoder->mod = mod;

	encoder = &panel_encoder->base;
	/* Single CRTC on tilcdc. */
	encoder->possible_crtcs = 1;

	ret = drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_LVDS);
	if (ret < 0)
		goto fail;

	drm_encoder_helper_add(encoder, &panel_encoder_helper_funcs);

	return encoder;

fail:
	drm_encoder_cleanup(encoder);
	return NULL;
}
/*
 * Connector:
 */

struct panel_connector {
	struct drm_connector base;

	struct drm_encoder *encoder;  /* our connected encoder */
	struct panel_module *mod;
};
#define to_panel_connector(x) container_of(x, struct panel_connector, base)

/* Unregister and release the connector. */
static void panel_connector_destroy(struct drm_connector *connector)
{
	drm_connector_unregister(connector);
	drm_connector_cleanup(connector);
}
/*
 * Convert each DT display timing into a drm mode; the DT native mode is
 * flagged preferred.  Stops early on conversion or allocation failure.
 * Returns the number of modes added.
 */
static int panel_connector_get_modes(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct panel_connector *panel_connector = to_panel_connector(connector);
	struct display_timings *timings = panel_connector->mod->timings;
	int i;

	for (i = 0; i < timings->num_timings; i++) {
		struct drm_display_mode *mode;
		struct videomode vm;

		if (videomode_from_timings(timings, &vm, i))
			break;

		mode = drm_mode_create(dev);
		if (!mode)
			break;

		drm_display_mode_from_videomode(&vm, mode);

		mode->type = DRM_MODE_TYPE_DRIVER;

		if (timings->native_mode == i)
			mode->type |= DRM_MODE_TYPE_PREFERRED;

		drm_mode_set_name(mode);
		drm_mode_probed_add(connector, mode);
	}

	return i;
}
/* There is exactly one encoder per panel connector; return it. */
static struct drm_encoder *panel_connector_best_encoder(
		struct drm_connector *connector)
{
	struct panel_connector *panel_connector = to_panel_connector(connector);
	return panel_connector->encoder;
}

/* Connector ops: custom destroy, atomic state helpers otherwise. */
static const struct drm_connector_funcs panel_connector_funcs = {
	.destroy                = panel_connector_destroy,
	.fill_modes             = drm_helper_probe_single_connector_modes,
	.reset                  = drm_atomic_helper_connector_reset,
	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
	.atomic_destroy_state   = drm_atomic_helper_connector_destroy_state,
};

static const struct drm_connector_helper_funcs panel_connector_helper_funcs = {
	.get_modes    = panel_connector_get_modes,
	.best_encoder = panel_connector_best_encoder,
};
/*
 * Allocate (devm) and initialize the LVDS connector, attach it to
 * @encoder.  Returns the connector or NULL on failure.
 */
static struct drm_connector *panel_connector_create(struct drm_device *dev,
		struct panel_module *mod, struct drm_encoder *encoder)
{
	struct panel_connector *panel_connector;
	struct drm_connector *connector;
	int ret;

	panel_connector = devm_kzalloc(dev->dev, sizeof(*panel_connector),
				       GFP_KERNEL);
	if (!panel_connector)
		return NULL;

	panel_connector->encoder = encoder;
	panel_connector->mod = mod;

	connector = &panel_connector->base;

	drm_connector_init(dev, connector, &panel_connector_funcs,
			   DRM_MODE_CONNECTOR_LVDS);
	drm_connector_helper_add(connector, &panel_connector_helper_funcs);

	/* Fixed panel: no interlace or doublescan modes. */
	connector->interlace_allowed = 0;
	connector->doublescan_allowed = 0;

	ret = drm_connector_attach_encoder(connector, encoder);
	if (ret)
		goto fail;

	return connector;

fail:
	panel_connector_destroy(connector);
	return NULL;
}
/*
 * Module:
 */

/*
 * tilcdc_module_ops.modeset_init: create encoder + connector, register
 * them with the driver, and hand the DT panel-info to the CRTC.
 * devm allocations mean nothing needs freeing on the error paths.
 */
static int panel_modeset_init(struct tilcdc_module *mod, struct drm_device *dev)
{
	struct panel_module *panel_mod = to_panel_module(mod);
	struct tilcdc_drm_private *priv = dev->dev_private;
	struct drm_encoder *encoder;
	struct drm_connector *connector;

	encoder = panel_encoder_create(dev, panel_mod);
	if (!encoder)
		return -ENOMEM;

	connector = panel_connector_create(dev, panel_mod, encoder);
	if (!connector)
		return -ENOMEM;

	priv->encoders[priv->num_encoders++] = encoder;
	priv->connectors[priv->num_connectors++] = connector;

	tilcdc_crtc_set_panel_info(priv->crtc,
				   to_panel_encoder(encoder)->mod->info);
	return 0;
}

static const struct tilcdc_module_ops panel_module_ops = {
	.modeset_init = panel_modeset_init,
};
/*
 * Device:
 */

/* maybe move this somewhere common if it is needed by other outputs? */
/*
 * Parse the "panel-info" child node of @np into a freshly kzalloc'd
 * tilcdc_panel_info.  All listed u32 properties are mandatory (the read
 * results are OR-ed together, so any single failure poisons @ret); the
 * two bool flags are optional.  Returns the info (caller frees) or NULL.
 */
static struct tilcdc_panel_info *of_get_panel_info(struct device_node *np)
{
	struct device_node *info_np;
	struct tilcdc_panel_info *info;
	int ret = 0;

	if (!np) {
		pr_err("%s: no devicenode given\n", __func__);
		return NULL;
	}

	info_np = of_get_child_by_name(np, "panel-info");
	if (!info_np) {
		pr_err("%s: could not find panel-info node\n", __func__);
		return NULL;
	}

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		goto put_node;

	ret |= of_property_read_u32(info_np, "ac-bias", &info->ac_bias);
	ret |= of_property_read_u32(info_np, "ac-bias-intrpt", &info->ac_bias_intrpt);
	ret |= of_property_read_u32(info_np, "dma-burst-sz", &info->dma_burst_sz);
	ret |= of_property_read_u32(info_np, "bpp", &info->bpp);
	ret |= of_property_read_u32(info_np, "fdd", &info->fdd);
	ret |= of_property_read_u32(info_np, "sync-edge", &info->sync_edge);
	ret |= of_property_read_u32(info_np, "sync-ctrl", &info->sync_ctrl);
	ret |= of_property_read_u32(info_np, "raster-order", &info->raster_order);
	ret |= of_property_read_u32(info_np, "fifo-th", &info->fifo_th);

	/* optional: */
	info->tft_alt_mode = of_property_read_bool(info_np, "tft-alt-mode");
	info->invert_pxl_clk = of_property_read_bool(info_np, "invert-pxl-clk");

	if (ret) {
		pr_err("%s: error reading panel-info properties\n", __func__);
		kfree(info);
		info = NULL;
	}

put_node:
	of_node_put(info_np);
	return info;
}
/*
 * Platform probe: gather backlight, enable-GPIO, display timings and
 * panel-info from DT, then register the tilcdc module.  Resources are
 * unwound in reverse order via the goto ladder on failure.
 */
static int panel_probe(struct platform_device *pdev)
{
	struct device_node *node = pdev->dev.of_node;
	struct backlight_device *backlight;
	struct panel_module *panel_mod;
	struct tilcdc_module *mod;
	struct pinctrl *pinctrl;
	int ret;

	/* bail out early if no DT data: */
	if (!node) {
		dev_err(&pdev->dev, "device-tree data is missing\n");
		return -ENXIO;
	}

	panel_mod = devm_kzalloc(&pdev->dev, sizeof(*panel_mod), GFP_KERNEL);
	if (!panel_mod)
		return -ENOMEM;

	/* Optional backlight; holds a device reference we must drop later. */
	backlight = devm_of_find_backlight(&pdev->dev);
	if (IS_ERR(backlight))
		return PTR_ERR(backlight);
	panel_mod->backlight = backlight;

	panel_mod->enable_gpio = devm_gpiod_get_optional(&pdev->dev, "enable",
							 GPIOD_OUT_LOW);
	if (IS_ERR(panel_mod->enable_gpio)) {
		ret = PTR_ERR(panel_mod->enable_gpio);
		dev_err(&pdev->dev, "failed to request enable GPIO\n");
		goto fail_backlight;
	}

	if (panel_mod->enable_gpio)
		dev_info(&pdev->dev, "found enable GPIO\n");

	mod = &panel_mod->base;
	pdev->dev.platform_data = mod;

	tilcdc_module_init(mod, "panel", &panel_module_ops);

	/* Missing pinctrl is non-fatal: warn and continue. */
	pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
	if (IS_ERR(pinctrl))
		dev_warn(&pdev->dev, "pins are not configured\n");

	panel_mod->timings = of_get_display_timings(node);
	if (!panel_mod->timings) {
		dev_err(&pdev->dev, "could not get panel timings\n");
		ret = -EINVAL;
		goto fail_free;
	}

	panel_mod->info = of_get_panel_info(node);
	if (!panel_mod->info) {
		dev_err(&pdev->dev, "could not get panel info\n");
		ret = -EINVAL;
		goto fail_timings;
	}

	return 0;

fail_timings:
	display_timings_release(panel_mod->timings);

fail_free:
	tilcdc_module_cleanup(mod);

fail_backlight:
	if (panel_mod->backlight)
		put_device(&panel_mod->backlight->dev);
	return ret;
}
/*
 * Platform remove: release everything probe acquired — backlight device
 * reference, display timings, module registration and the panel info.
 * (panel_mod itself is devm-managed.)
 */
static void panel_remove(struct platform_device *pdev)
{
	struct tilcdc_module *mod = dev_get_platdata(&pdev->dev);
	struct panel_module *panel_mod = to_panel_module(mod);
	struct backlight_device *backlight = panel_mod->backlight;

	if (backlight)
		put_device(&backlight->dev);

	display_timings_release(panel_mod->timings);

	tilcdc_module_cleanup(mod);
	kfree(panel_mod->info);
}
/* Device-tree compatible strings handled by this driver. */
static const struct of_device_id panel_of_match[] = {
	{ .compatible = "ti,tilcdc,panel", },
	{ },
};
/* Platform driver glue; registered from tilcdc_panel_init(). */
static struct platform_driver panel_driver = {
	.probe = panel_probe,
	.remove_new = panel_remove,
	.driver = {
		.name = "tilcdc-panel",
		.of_match_table = panel_of_match,
	},
};
/* Register the panel platform driver; called from the tilcdc core init. */
int __init tilcdc_panel_init(void)
{
	return platform_driver_register(&panel_driver);
}
/* Unregister the panel platform driver; called from the tilcdc core exit. */
void __exit tilcdc_panel_fini(void)
{
	platform_driver_unregister(&panel_driver);
}
| linux-master | drivers/gpu/drm/tilcdc/tilcdc_panel.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2012 Texas Instruments
* Author: Rob Clark <[email protected]>
*/
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/of_graph.h>
#include <linux/pm_runtime.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
#include "tilcdc_drv.h"
#include "tilcdc_regs.h"
#define TILCDC_VBLANK_SAFETY_THRESHOLD_US 1000
#define TILCDC_PALETTE_SIZE 32
#define TILCDC_PALETTE_FIRST_ENTRY 0x4000
/* Per-CRTC state for the TI LCD controller. */
struct tilcdc_crtc {
	struct drm_crtc base;		/* must be first: to_tilcdc_crtc() */
	struct drm_plane primary;	/* the single (primary) plane */
	const struct tilcdc_panel_info *info;	/* panel config from module */
	struct drm_pending_vblank_event *event;	/* pending page-flip event */
	struct mutex enable_lock;	/* serializes enable/disable paths */
	bool enabled;			/* raster currently running */
	bool shutdown;			/* set once; blocks re-enable */
	wait_queue_head_t frame_done_wq;	/* woken by FRAME_DONE irq */
	bool frame_done;		/* condition for frame_done_wq */
	spinlock_t irq_lock;		/* protects last_vblank/next_fb vs irq */
	unsigned int lcd_fck_rate;	/* cached functional clock rate (Hz) */
	ktime_t last_vblank;		/* timestamp of last EOF interrupt */
	unsigned int hvtotal_us;	/* frame period in microseconds */
	struct drm_framebuffer *next_fb;	/* fb deferred to next vblank */
	/* Only set if an external encoder is connected */
	bool simulate_vesa_sync;
	int sync_lost_count;		/* consecutive SYNC_LOST irq counter */
	bool frame_intact;		/* no sync loss since last vblank */
	struct work_struct recover_work;	/* off/on reset after sync-lost flood */
	dma_addr_t palette_dma_handle;	/* DMA address of palette_base */
	u16 *palette_base;		/* 32-byte palette buffer (coherent) */
	struct completion palette_loaded;	/* signalled by PL_LOAD_DONE irq */
};
#define to_tilcdc_crtc(x) container_of(x, struct tilcdc_crtc, base)
/*
 * Point the LCDC DMA engine at @fb: program the frame buffer base and
 * ceiling addresses, taking the CRTC x/y panning offset into account.
 */
static void set_scanout(struct drm_crtc *crtc, struct drm_framebuffer *fb)
{
	struct drm_device *dev = crtc->dev;
	struct tilcdc_drm_private *priv = dev->dev_private;
	struct drm_gem_dma_object *gem;
	dma_addr_t start, end;
	u64 dma_base_and_ceiling;
	gem = drm_fb_dma_get_gem_obj(fb, 0);
	/* Start address includes the fb offset plus the panning offset. */
	start = gem->dma_addr + fb->offsets[0] +
		crtc->y * fb->pitches[0] +
		crtc->x * fb->format->cpp[0];
	end = start + (crtc->mode.vdisplay * fb->pitches[0]);
	/* Write LCDC_DMA_FB_BASE_ADDR_0_REG and LCDC_DMA_FB_CEILING_ADDR_0_REG
	 * with a single insruction, if available. This should make it more
	 * unlikely that LCDC would fetch the DMA addresses in the middle of
	 * an update.
	 */
	/* Rev 1 expects the ceiling to be the last valid byte, not one past. */
	if (priv->rev == 1)
		end -= 1;
	dma_base_and_ceiling = (u64)end << 32 | start;
	tilcdc_write64(dev, LCDC_DMA_FB_BASE_ADDR_0_REG, dma_base_and_ceiling);
}
/*
 * The driver currently only supports only true color formats. For
 * true color the palette block is bypassed, but a 32 byte palette
 * should still be loaded. The first 16-bit entry must be 0x4000 while
 * all other entries must be zeroed.
 */
static void tilcdc_crtc_load_palette(struct drm_crtc *crtc)
{
	struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct tilcdc_drm_private *priv = dev->dev_private;
	int ret;
	reinit_completion(&tilcdc_crtc->palette_loaded);
	/* Tell the LCDC where the palette is located. */
	tilcdc_write(dev, LCDC_DMA_FB_BASE_ADDR_0_REG,
		     tilcdc_crtc->palette_dma_handle);
	tilcdc_write(dev, LCDC_DMA_FB_CEILING_ADDR_0_REG,
		     (u32) tilcdc_crtc->palette_dma_handle +
		     TILCDC_PALETTE_SIZE - 1);
	/* Set dma load mode for palette loading only. */
	tilcdc_write_mask(dev, LCDC_RASTER_CTRL_REG,
			  LCDC_PALETTE_LOAD_MODE(PALETTE_ONLY),
			  LCDC_PALETTE_LOAD_MODE_MASK);
	/* Enable DMA Palette Loaded Interrupt */
	if (priv->rev == 1)
		tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_V1_PL_INT_ENA);
	else
		tilcdc_write(dev, LCDC_INT_ENABLE_SET_REG, LCDC_V2_PL_INT_ENA);
	/* Enable LCDC DMA and wait for palette to be loaded. */
	tilcdc_clear_irqstatus(dev, 0xffffffff);
	tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ENABLE);
	/* Completed from the irq handler on LCDC_PL_LOAD_DONE. */
	ret = wait_for_completion_timeout(&tilcdc_crtc->palette_loaded,
					  msecs_to_jiffies(50));
	if (ret == 0)
		dev_err(dev->dev, "%s: Palette loading timeout", __func__);
	/* Disable LCDC DMA and DMA Palette Loaded Interrupt. */
	tilcdc_clear(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ENABLE);
	if (priv->rev == 1)
		tilcdc_clear(dev, LCDC_RASTER_CTRL_REG, LCDC_V1_PL_INT_ENA);
	else
		tilcdc_write(dev, LCDC_INT_ENABLE_CLR_REG, LCDC_V2_PL_INT_ENA);
}
/*
 * Clear stale interrupt status and enable the error/status interrupts
 * (sync lost, frame done, FIFO underflow); register layout differs
 * between LCDC revision 1 and 2.
 */
static void tilcdc_crtc_enable_irqs(struct drm_device *dev)
{
	struct tilcdc_drm_private *priv = dev->dev_private;
	tilcdc_clear_irqstatus(dev, 0xffffffff);
	if (priv->rev == 1) {
		tilcdc_set(dev, LCDC_RASTER_CTRL_REG,
			   LCDC_V1_SYNC_LOST_INT_ENA | LCDC_V1_FRAME_DONE_INT_ENA |
			   LCDC_V1_UNDERFLOW_INT_ENA);
	} else {
		tilcdc_write(dev, LCDC_INT_ENABLE_SET_REG,
			     LCDC_V2_UNDERFLOW_INT_ENA |
			     LCDC_FRAME_DONE | LCDC_SYNC_LOST);
	}
}
/* Mask every interrupt the driver may have enabled, per LCDC revision. */
static void tilcdc_crtc_disable_irqs(struct drm_device *dev)
{
	struct tilcdc_drm_private *priv = dev->dev_private;
	/* disable irqs that we might have enabled: */
	if (priv->rev == 1) {
		tilcdc_clear(dev, LCDC_RASTER_CTRL_REG,
			     LCDC_V1_SYNC_LOST_INT_ENA | LCDC_V1_FRAME_DONE_INT_ENA |
			     LCDC_V1_UNDERFLOW_INT_ENA | LCDC_V1_PL_INT_ENA);
		tilcdc_clear(dev, LCDC_DMA_CTRL_REG,
			     LCDC_V1_END_OF_FRAME_INT_ENA);
	} else {
		tilcdc_write(dev, LCDC_INT_ENABLE_CLR_REG,
			     LCDC_V2_UNDERFLOW_INT_ENA | LCDC_V2_PL_INT_ENA |
			     LCDC_V2_END_OF_FRAME0_INT_ENA |
			     LCDC_FRAME_DONE | LCDC_SYNC_LOST);
	}
}
/*
 * Pulse the LCDC main reset. Only revision 2 of the IP has the
 * clock/reset register; revision 1 is a no-op here.
 */
static void reset(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct tilcdc_drm_private *priv = dev->dev_private;
	if (priv->rev != 2)
		return;
	tilcdc_set(dev, LCDC_CLK_RESET_REG, LCDC_CLK_MAIN_RESET);
	/* Hold reset briefly before releasing it. */
	usleep_range(250, 1000);
	tilcdc_clear(dev, LCDC_CLK_RESET_REG, LCDC_CLK_MAIN_RESET);
}
/*
 * Calculate the percentage difference between the requested pixel clock rate
 * and the effective rate resulting from calculating the clock divider value.
 */
static unsigned int tilcdc_pclk_diff(unsigned long rate,
				     unsigned long real_rate)
{
	int want = rate / 100;
	int got = real_rate / 100;
	int delta_pct = ((got - want) * 100) / want;

	return (unsigned int)abs(delta_pct);
}
/*
 * Program the pixel clock for the current mode: try clk_set_rate() with a
 * fixed divider of 2 first, and if the resulting rate is off by more than
 * 5% fall back to adjusting the LCDC internal divider instead.
 */
static void tilcdc_crtc_set_clk(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct tilcdc_drm_private *priv = dev->dev_private;
	struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
	unsigned long clk_rate, real_pclk_rate, pclk_rate;
	unsigned int clkdiv;
	int ret;
	clkdiv = 2; /* first try using a standard divider of 2 */
	/* mode.clock is in KHz, set_rate wants parameter in Hz */
	pclk_rate = crtc->mode.clock * 1000;
	ret = clk_set_rate(priv->clk, pclk_rate * clkdiv);
	clk_rate = clk_get_rate(priv->clk);
	real_pclk_rate = clk_rate / clkdiv;
	if (ret < 0 || tilcdc_pclk_diff(pclk_rate, real_pclk_rate) > 5) {
		/*
		 * If we fail to set the clock rate (some architectures don't
		 * use the common clock framework yet and may not implement
		 * all the clk API calls for every clock), try the next best
		 * thing: adjusting the clock divider, unless clk_get_rate()
		 * failed as well.
		 */
		if (!clk_rate) {
			/* Nothing more we can do. Just bail out. */
			dev_err(dev->dev,
				"failed to set the pixel clock - unable to read current lcdc clock rate\n");
			return;
		}
		clkdiv = DIV_ROUND_CLOSEST(clk_rate, pclk_rate);
		/*
		 * Emit a warning if the real clock rate resulting from the
		 * calculated divider differs much from the requested rate.
		 *
		 * 5% is an arbitrary value - LCDs are usually quite tolerant
		 * about pixel clock rates.
		 */
		real_pclk_rate = clk_rate / clkdiv;
		if (tilcdc_pclk_diff(pclk_rate, real_pclk_rate) > 5) {
			dev_warn(dev->dev,
				 "effective pixel clock rate (%luHz) differs from the requested rate (%luHz)\n",
				 real_pclk_rate, pclk_rate);
		}
	}
	/* Cache for the cpufreq notifier (tilcdc_crtc_update_clk()). */
	tilcdc_crtc->lcd_fck_rate = clk_rate;
	DBG("lcd_clk=%u, mode clock=%d, div=%u",
	    tilcdc_crtc->lcd_fck_rate, crtc->mode.clock, clkdiv);
	/* Configure the LCD clock divisor. */
	tilcdc_write(dev, LCDC_CTRL_REG, LCDC_CLK_DIVISOR(clkdiv) |
		     LCDC_RASTER_MODE);
	if (priv->rev == 2)
		tilcdc_set(dev, LCDC_CLK_ENABLE_REG,
			   LCDC_V2_DMA_CLK_EN | LCDC_V2_LIDD_CLK_EN |
			   LCDC_V2_CORE_CLK_EN);
}
static uint tilcdc_mode_hvtotal(const struct drm_display_mode *mode)
{
return (uint) div_u64(1000llu * mode->htotal * mode->vtotal,
mode->clock);
}
/*
 * Program the LCDC for the CRTC's adjusted mode: DMA burst/FIFO settings,
 * horizontal/vertical timings, display type and pixel format, pixel clock,
 * palette, and the initial scanout address. Must only be called with the
 * raster disabled.
 */
static void tilcdc_crtc_set_mode(struct drm_crtc *crtc)
{
	struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct tilcdc_drm_private *priv = dev->dev_private;
	const struct tilcdc_panel_info *info = tilcdc_crtc->info;
	uint32_t reg, hbp, hfp, hsw, vbp, vfp, vsw;
	struct drm_display_mode *mode = &crtc->state->adjusted_mode;
	struct drm_framebuffer *fb = crtc->primary->state->fb;
	if (WARN_ON(!info))
		return;
	if (WARN_ON(!fb))
		return;
	/* Configure the Burst Size and fifo threshold of DMA: */
	reg = tilcdc_read(dev, LCDC_DMA_CTRL_REG) & ~0x00000770;
	switch (info->dma_burst_sz) {
	case 1:
		reg |= LCDC_DMA_BURST_SIZE(LCDC_DMA_BURST_1);
		break;
	case 2:
		reg |= LCDC_DMA_BURST_SIZE(LCDC_DMA_BURST_2);
		break;
	case 4:
		reg |= LCDC_DMA_BURST_SIZE(LCDC_DMA_BURST_4);
		break;
	case 8:
		reg |= LCDC_DMA_BURST_SIZE(LCDC_DMA_BURST_8);
		break;
	case 16:
		reg |= LCDC_DMA_BURST_SIZE(LCDC_DMA_BURST_16);
		break;
	default:
		dev_err(dev->dev, "invalid burst size\n");
		return;
	}
	reg |= (info->fifo_th << 8);
	tilcdc_write(dev, LCDC_DMA_CTRL_REG, reg);
	/* Configure timings: */
	hbp = mode->htotal - mode->hsync_end;
	hfp = mode->hsync_start - mode->hdisplay;
	hsw = mode->hsync_end - mode->hsync_start;
	vbp = mode->vtotal - mode->vsync_end;
	vfp = mode->vsync_start - mode->vdisplay;
	vsw = mode->vsync_end - mode->vsync_start;
	DBG("%dx%d, hbp=%u, hfp=%u, hsw=%u, vbp=%u, vfp=%u, vsw=%u",
	    mode->hdisplay, mode->vdisplay, hbp, hfp, hsw, vbp, vfp, vsw);
	/* Set AC Bias Period and Number of Transitions per Interrupt: */
	reg = tilcdc_read(dev, LCDC_RASTER_TIMING_2_REG) & ~0x000fff00;
	reg |= LCDC_AC_BIAS_FREQUENCY(info->ac_bias) |
		LCDC_AC_BIAS_TRANSITIONS_PER_INT(info->ac_bias_intrpt);
	/*
	 * subtract one from hfp, hbp, hsw because the hardware uses
	 * a value of 0 as 1
	 */
	if (priv->rev == 2) {
		/* clear bits we're going to set */
		reg &= ~0x78000033;
		/* Rev 2 carries the high bits of hfp/hbp/hsw in TIMING_2. */
		reg |= ((hfp-1) & 0x300) >> 8;
		reg |= ((hbp-1) & 0x300) >> 4;
		reg |= ((hsw-1) & 0x3c0) << 21;
	}
	tilcdc_write(dev, LCDC_RASTER_TIMING_2_REG, reg);
	/* Low bits of the horizontal timings go into TIMING_0. */
	reg = (((mode->hdisplay >> 4) - 1) << 4) |
		(((hbp-1) & 0xff) << 24) |
		(((hfp-1) & 0xff) << 16) |
		(((hsw-1) & 0x3f) << 10);
	if (priv->rev == 2)
		reg |= (((mode->hdisplay >> 4) - 1) & 0x40) >> 3;
	tilcdc_write(dev, LCDC_RASTER_TIMING_0_REG, reg);
	/* Vertical timings; note vbp/vfp are NOT off-by-one encoded. */
	reg = ((mode->vdisplay - 1) & 0x3ff) |
		((vbp & 0xff) << 24) |
		((vfp & 0xff) << 16) |
		(((vsw-1) & 0x3f) << 10);
	tilcdc_write(dev, LCDC_RASTER_TIMING_1_REG, reg);
	/*
	 * be sure to set Bit 10 for the V2 LCDC controller,
	 * otherwise limited to 1024 pixels width, stopping
	 * 1920x1080 being supported.
	 */
	if (priv->rev == 2) {
		if ((mode->vdisplay - 1) & 0x400) {
			tilcdc_set(dev, LCDC_RASTER_TIMING_2_REG,
				   LCDC_LPP_B10);
		} else {
			tilcdc_clear(dev, LCDC_RASTER_TIMING_2_REG,
				     LCDC_LPP_B10);
		}
	}
	/* Configure display type: */
	reg = tilcdc_read(dev, LCDC_RASTER_CTRL_REG) &
		~(LCDC_TFT_MODE | LCDC_MONO_8BIT_MODE | LCDC_MONOCHROME_MODE |
		  LCDC_V2_TFT_24BPP_MODE | LCDC_V2_TFT_24BPP_UNPACK |
		  0x000ff000 /* Palette Loading Delay bits */);
	reg |= LCDC_TFT_MODE; /* no monochrome/passive support */
	if (info->tft_alt_mode)
		reg |= LCDC_TFT_ALT_ENABLE;
	if (priv->rev == 2) {
		switch (fb->format->format) {
		case DRM_FORMAT_BGR565:
		case DRM_FORMAT_RGB565:
			break;
		case DRM_FORMAT_XBGR8888:
		case DRM_FORMAT_XRGB8888:
			reg |= LCDC_V2_TFT_24BPP_UNPACK;
			fallthrough;
		case DRM_FORMAT_BGR888:
		case DRM_FORMAT_RGB888:
			reg |= LCDC_V2_TFT_24BPP_MODE;
			break;
		default:
			dev_err(dev->dev, "invalid pixel format\n");
			return;
		}
	}
	reg |= info->fdd << 12;
	tilcdc_write(dev, LCDC_RASTER_CTRL_REG, reg);
	/* Apply the panel's sync/clock polarity options. */
	if (info->invert_pxl_clk)
		tilcdc_set(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_PIXEL_CLOCK);
	else
		tilcdc_clear(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_PIXEL_CLOCK);
	if (info->sync_ctrl)
		tilcdc_set(dev, LCDC_RASTER_TIMING_2_REG, LCDC_SYNC_CTRL);
	else
		tilcdc_clear(dev, LCDC_RASTER_TIMING_2_REG, LCDC_SYNC_CTRL);
	if (info->sync_edge)
		tilcdc_set(dev, LCDC_RASTER_TIMING_2_REG, LCDC_SYNC_EDGE);
	else
		tilcdc_clear(dev, LCDC_RASTER_TIMING_2_REG, LCDC_SYNC_EDGE);
	if (mode->flags & DRM_MODE_FLAG_NHSYNC)
		tilcdc_set(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_HSYNC);
	else
		tilcdc_clear(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_HSYNC);
	if (mode->flags & DRM_MODE_FLAG_NVSYNC)
		tilcdc_set(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_VSYNC);
	else
		tilcdc_clear(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_VSYNC);
	if (info->raster_order)
		tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ORDER);
	else
		tilcdc_clear(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ORDER);
	tilcdc_crtc_set_clk(crtc);
	tilcdc_crtc_load_palette(crtc);
	set_scanout(crtc, fb);
	drm_mode_copy(&crtc->hwmode, &crtc->state->adjusted_mode);
	/* Cache the frame period for the page-flip race-window check. */
	tilcdc_crtc->hvtotal_us =
		tilcdc_mode_hvtotal(&crtc->hwmode);
}
/*
 * Power up and start the raster engine: runtime-resume, reset, program the
 * mode, enable interrupts and turn vblank handling on. No-op if already
 * enabled or the CRTC has been shut down.
 */
static void tilcdc_crtc_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
	unsigned long flags;
	mutex_lock(&tilcdc_crtc->enable_lock);
	if (tilcdc_crtc->enabled || tilcdc_crtc->shutdown) {
		mutex_unlock(&tilcdc_crtc->enable_lock);
		return;
	}
	pm_runtime_get_sync(dev->dev);
	reset(crtc);
	tilcdc_crtc_set_mode(crtc);
	tilcdc_crtc_enable_irqs(dev);
	tilcdc_clear(dev, LCDC_DMA_CTRL_REG, LCDC_DUAL_FRAME_BUFFER_ENABLE);
	/* Palette was loaded in set_mode(); switch DMA back to pixel data. */
	tilcdc_write_mask(dev, LCDC_RASTER_CTRL_REG,
			  LCDC_PALETTE_LOAD_MODE(DATA_ONLY),
			  LCDC_PALETTE_LOAD_MODE_MASK);
	/* There is no real chance for a race here as the time stamp
	 * is taken before the raster DMA is started. The spin-lock is
	 * taken to have a memory barrier after taking the time-stamp
	 * and to avoid a context switch between taking the stamp and
	 * enabling the raster.
	 */
	spin_lock_irqsave(&tilcdc_crtc->irq_lock, flags);
	tilcdc_crtc->last_vblank = ktime_get();
	tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ENABLE);
	spin_unlock_irqrestore(&tilcdc_crtc->irq_lock, flags);
	drm_crtc_vblank_on(crtc);
	tilcdc_crtc->enabled = true;
	mutex_unlock(&tilcdc_crtc->enable_lock);
}
/* Atomic-helper entry point; defers to the shared enable path. */
static void tilcdc_crtc_atomic_enable(struct drm_crtc *crtc,
				      struct drm_atomic_state *state)
{
	tilcdc_crtc_enable(crtc);
}
/*
 * Stop the raster engine and power down. Waits for the FRAME_DONE
 * interrupt so the current frame completes, flushes any pending vblank
 * event, then masks interrupts and runtime-suspends. When @shutdown is
 * true the CRTC is latched off and cannot be re-enabled.
 */
static void tilcdc_crtc_off(struct drm_crtc *crtc, bool shutdown)
{
	struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	int ret;
	mutex_lock(&tilcdc_crtc->enable_lock);
	if (shutdown)
		tilcdc_crtc->shutdown = true;
	if (!tilcdc_crtc->enabled) {
		mutex_unlock(&tilcdc_crtc->enable_lock);
		return;
	}
	tilcdc_crtc->frame_done = false;
	tilcdc_clear(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ENABLE);
	/*
	 * Wait for framedone irq which will still come before putting
	 * things to sleep..
	 */
	ret = wait_event_timeout(tilcdc_crtc->frame_done_wq,
				 tilcdc_crtc->frame_done,
				 msecs_to_jiffies(500));
	if (ret == 0)
		dev_err(dev->dev, "%s: timeout waiting for framedone\n",
			__func__);
	drm_crtc_vblank_off(crtc);
	/* Complete any event still pending so userspace doesn't hang. */
	spin_lock_irq(&crtc->dev->event_lock);
	if (crtc->state->event) {
		drm_crtc_send_vblank_event(crtc, crtc->state->event);
		crtc->state->event = NULL;
	}
	spin_unlock_irq(&crtc->dev->event_lock);
	tilcdc_crtc_disable_irqs(dev);
	pm_runtime_put_sync(dev->dev);
	tilcdc_crtc->enabled = false;
	mutex_unlock(&tilcdc_crtc->enable_lock);
}
/* Non-final disable: the CRTC may be enabled again later. */
static void tilcdc_crtc_disable(struct drm_crtc *crtc)
{
	tilcdc_crtc_off(crtc, false);
}
/* Atomic-helper entry point; defers to the shared disable path. */
static void tilcdc_crtc_atomic_disable(struct drm_crtc *crtc,
				       struct drm_atomic_state *state)
{
	tilcdc_crtc_disable(crtc);
}
/*
 * Complete the commit: if userspace requested a completion event for this
 * state, deliver it now under the event lock.
 */
static void tilcdc_crtc_atomic_flush(struct drm_crtc *crtc,
				     struct drm_atomic_state *state)
{
	spin_lock_irq(&crtc->dev->event_lock);
	if (crtc->state->event) {
		drm_crtc_send_vblank_event(crtc, crtc->state->event);
		crtc->state->event = NULL;
	}
	spin_unlock_irq(&crtc->dev->event_lock);
}
/* Final disable used on driver teardown; the CRTC stays off afterwards. */
void tilcdc_crtc_shutdown(struct drm_crtc *crtc)
{
	tilcdc_crtc_off(crtc, true);
}
static bool tilcdc_crtc_is_on(struct drm_crtc *crtc)
{
return crtc->state && crtc->state->enable && crtc->state->active;
}
/*
 * Worker queued from the irq handler when a SYNC_LOST flood is detected:
 * recover by cycling the CRTC off and back on under the modeset lock.
 */
static void tilcdc_crtc_recover_work(struct work_struct *work)
{
	struct tilcdc_crtc *tilcdc_crtc =
		container_of(work, struct tilcdc_crtc, recover_work);
	struct drm_crtc *crtc = &tilcdc_crtc->base;
	dev_info(crtc->dev->dev, "%s: Reset CRTC", __func__);
	drm_modeset_lock(&crtc->mutex, NULL);
	/* Only restart if the CRTC is still supposed to be running. */
	if (!tilcdc_crtc_is_on(crtc))
		goto out;
	tilcdc_crtc_disable(crtc);
	tilcdc_crtc_enable(crtc);
out:
	drm_modeset_unlock(&crtc->mutex);
}
/*
 * Tear down the CRTC: shut the hardware off, drain pending work, release
 * the OF port reference and free the DRM CRTC bookkeeping.
 */
static void tilcdc_crtc_destroy(struct drm_crtc *crtc)
{
	struct tilcdc_drm_private *priv = crtc->dev->dev_private;
	tilcdc_crtc_shutdown(crtc);
	/* Make sure recover_work etc. have finished before freeing. */
	flush_workqueue(priv->wq);
	of_node_put(crtc->port);
	drm_crtc_cleanup(crtc);
}
/*
 * Queue a page flip to @fb. If the next vblank is too close
 * (< TILCDC_VBLANK_SAFETY_THRESHOLD_US) the scanout update is deferred to
 * the end-of-frame interrupt to avoid tearing; otherwise it is applied
 * immediately. Returns -EBUSY if a flip is already pending.
 */
int tilcdc_crtc_update_fb(struct drm_crtc *crtc,
			  struct drm_framebuffer *fb,
			  struct drm_pending_vblank_event *event)
{
	struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	if (tilcdc_crtc->event) {
		dev_err(dev->dev, "already pending page flip!\n");
		return -EBUSY;
	}
	tilcdc_crtc->event = event;
	mutex_lock(&tilcdc_crtc->enable_lock);
	if (tilcdc_crtc->enabled) {
		unsigned long flags;
		ktime_t next_vblank;
		s64 tdiff;
		spin_lock_irqsave(&tilcdc_crtc->irq_lock, flags);
		/* Estimate when the next vblank will occur. */
		next_vblank = ktime_add_us(tilcdc_crtc->last_vblank,
					   tilcdc_crtc->hvtotal_us);
		tdiff = ktime_to_us(ktime_sub(next_vblank, ktime_get()));
		if (tdiff < TILCDC_VBLANK_SAFETY_THRESHOLD_US)
			tilcdc_crtc->next_fb = fb;	/* defer to irq */
		else
			set_scanout(crtc, fb);
		spin_unlock_irqrestore(&tilcdc_crtc->irq_lock, flags);
	}
	mutex_unlock(&tilcdc_crtc->enable_lock);
	return 0;
}
/*
 * When an external encoder needs VESA-compliant sync, compensate for the
 * tilcdc quirk of aligning VS to the second HS edge: flip the HSYNC
 * polarity and record the sync width as an HSKEW offset.
 */
static bool tilcdc_crtc_mode_fixup(struct drm_crtc *crtc,
				   const struct drm_display_mode *mode,
				   struct drm_display_mode *adjusted_mode)
{
	struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);

	if (!tilcdc_crtc->simulate_vesa_sync)
		return true;

	adjusted_mode->hskew = mode->hsync_end - mode->hsync_start;
	adjusted_mode->flags |= DRM_MODE_FLAG_HSKEW;

	/* Invert the horizontal sync polarity in the adjusted mode. */
	if (mode->flags & DRM_MODE_FLAG_NHSYNC) {
		adjusted_mode->flags &= ~DRM_MODE_FLAG_NHSYNC;
		adjusted_mode->flags |= DRM_MODE_FLAG_PHSYNC;
	} else {
		adjusted_mode->flags &= ~DRM_MODE_FLAG_PHSYNC;
		adjusted_mode->flags |= DRM_MODE_FLAG_NHSYNC;
	}

	return true;
}
static int tilcdc_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
crtc);
/* If we are not active we don't care */
if (!crtc_state->active)
return 0;
if (state->planes[0].ptr != crtc->primary ||
state->planes[0].state == NULL ||
state->planes[0].state->crtc != crtc) {
dev_dbg(crtc->dev->dev, "CRTC primary plane must be present");
return -EINVAL;
}
return 0;
}
/* DRM vblank hook: unmask the end-of-frame interrupt (per LCDC revision). */
static int tilcdc_crtc_enable_vblank(struct drm_crtc *crtc)
{
	struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct tilcdc_drm_private *priv = dev->dev_private;
	unsigned long flags;
	spin_lock_irqsave(&tilcdc_crtc->irq_lock, flags);
	/* Drop any stale EOF status before enabling the interrupt. */
	tilcdc_clear_irqstatus(dev, LCDC_END_OF_FRAME0);
	if (priv->rev == 1)
		tilcdc_set(dev, LCDC_DMA_CTRL_REG,
			   LCDC_V1_END_OF_FRAME_INT_ENA);
	else
		tilcdc_set(dev, LCDC_INT_ENABLE_SET_REG,
			   LCDC_V2_END_OF_FRAME0_INT_ENA);
	spin_unlock_irqrestore(&tilcdc_crtc->irq_lock, flags);
	return 0;
}
/* DRM vblank hook: mask the end-of-frame interrupt (per LCDC revision). */
static void tilcdc_crtc_disable_vblank(struct drm_crtc *crtc)
{
	struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct tilcdc_drm_private *priv = dev->dev_private;
	unsigned long flags;
	spin_lock_irqsave(&tilcdc_crtc->irq_lock, flags);
	if (priv->rev == 1)
		tilcdc_clear(dev, LCDC_DMA_CTRL_REG,
			     LCDC_V1_END_OF_FRAME_INT_ENA);
	else
		tilcdc_clear(dev, LCDC_INT_ENABLE_SET_REG,
			     LCDC_V2_END_OF_FRAME0_INT_ENA);
	spin_unlock_irqrestore(&tilcdc_crtc->irq_lock, flags);
}
/*
 * DRM .reset hook. Besides resetting the software state, stop the raster
 * if the bootloader (or previous driver instance) left it running, waiting
 * for FRAME_DONE so the hardware quiesces cleanly.
 */
static void tilcdc_crtc_reset(struct drm_crtc *crtc)
{
	struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	int ret;
	drm_atomic_helper_crtc_reset(crtc);
	/* Turn the raster off if it for some reason is on. */
	pm_runtime_get_sync(dev->dev);
	if (tilcdc_read(dev, LCDC_RASTER_CTRL_REG) & LCDC_RASTER_ENABLE) {
		/* Enable DMA Frame Done Interrupt */
		tilcdc_write(dev, LCDC_INT_ENABLE_SET_REG, LCDC_FRAME_DONE);
		tilcdc_clear_irqstatus(dev, 0xffffffff);
		tilcdc_crtc->frame_done = false;
		tilcdc_clear(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ENABLE);
		ret = wait_event_timeout(tilcdc_crtc->frame_done_wq,
					 tilcdc_crtc->frame_done,
					 msecs_to_jiffies(500));
		if (ret == 0)
			dev_err(dev->dev, "%s: timeout waiting for framedone\n",
				__func__);
	}
	pm_runtime_put_sync(dev->dev);
}
/* CRTC vtable; mostly the atomic helpers plus local vblank/reset hooks. */
static const struct drm_crtc_funcs tilcdc_crtc_funcs = {
	.destroy        = tilcdc_crtc_destroy,
	.set_config     = drm_atomic_helper_set_config,
	.page_flip      = drm_atomic_helper_page_flip,
	.reset		= tilcdc_crtc_reset,
	.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
	.enable_vblank	= tilcdc_crtc_enable_vblank,
	.disable_vblank	= tilcdc_crtc_disable_vblank,
};
/*
 * Reject modes the LCDC cannot scan out: width/height limits, register
 * field ranges for the porch/sync values, the DT-configured maximum pixel
 * clock and an overall memory-bandwidth budget.
 */
static enum drm_mode_status
tilcdc_crtc_mode_valid(struct drm_crtc *crtc,
		       const struct drm_display_mode *mode)
{
	struct tilcdc_drm_private *priv = crtc->dev->dev_private;
	unsigned int bandwidth;
	uint32_t hbp, hfp, hsw, vbp, vfp, vsw;
	/*
	 * check to see if the width is within the range that
	 * the LCD Controller physically supports
	 */
	if (mode->hdisplay > priv->max_width)
		return MODE_VIRTUAL_X;
	/* width must be multiple of 16 */
	if (mode->hdisplay & 0xf)
		return MODE_VIRTUAL_X;
	if (mode->vdisplay > 2048)
		return MODE_VIRTUAL_Y;
	DBG("Processing mode %dx%d@%d with pixel clock %d",
	    mode->hdisplay, mode->vdisplay,
	    drm_mode_vrefresh(mode), mode->clock);
	hbp = mode->htotal - mode->hsync_end;
	hfp = mode->hsync_start - mode->hdisplay;
	hsw = mode->hsync_end - mode->hsync_start;
	vbp = mode->vtotal - mode->vsync_end;
	vfp = mode->vsync_start - mode->vdisplay;
	vsw = mode->vsync_end - mode->vsync_start;
	/* The -1 mirrors the register encoding used in set_mode(). */
	if ((hbp-1) & ~0x3ff) {
		DBG("Pruning mode: Horizontal Back Porch out of range");
		return MODE_HBLANK_WIDE;
	}
	if ((hfp-1) & ~0x3ff) {
		DBG("Pruning mode: Horizontal Front Porch out of range");
		return MODE_HBLANK_WIDE;
	}
	if ((hsw-1) & ~0x3ff) {
		DBG("Pruning mode: Horizontal Sync Width out of range");
		return MODE_HSYNC_WIDE;
	}
	if (vbp & ~0xff) {
		DBG("Pruning mode: Vertical Back Porch out of range");
		return MODE_VBLANK_WIDE;
	}
	if (vfp & ~0xff) {
		DBG("Pruning mode: Vertical Front Porch out of range");
		return MODE_VBLANK_WIDE;
	}
	if ((vsw-1) & ~0x3f) {
		DBG("Pruning mode: Vertical Sync Width out of range");
		return MODE_VSYNC_WIDE;
	}
	/*
	 * some devices have a maximum allowed pixel clock
	 * configured from the DT
	 */
	if (mode->clock > priv->max_pixelclock) {
		DBG("Pruning mode: pixel clock too high");
		return MODE_CLOCK_HIGH;
	}
	/*
	 * some devices further limit the max horizontal resolution
	 * configured from the DT
	 */
	if (mode->hdisplay > priv->max_width)
		return MODE_BAD_WIDTH;
	/* filter out modes that would require too much memory bandwidth: */
	bandwidth = mode->hdisplay * mode->vdisplay *
		drm_mode_vrefresh(mode);
	if (bandwidth > priv->max_bandwidth) {
		DBG("Pruning mode: exceeds defined bandwidth limit");
		return MODE_BAD;
	}
	return MODE_OK;
}
/* Helper vtable wiring the mode-set and atomic callbacks defined above. */
static const struct drm_crtc_helper_funcs tilcdc_crtc_helper_funcs = {
	.mode_valid	= tilcdc_crtc_mode_valid,
	.mode_fixup	= tilcdc_crtc_mode_fixup,
	.atomic_check	= tilcdc_crtc_atomic_check,
	.atomic_enable	= tilcdc_crtc_atomic_enable,
	.atomic_disable	= tilcdc_crtc_atomic_disable,
	.atomic_flush	= tilcdc_crtc_atomic_flush,
};
/* Store the panel configuration used later by tilcdc_crtc_set_mode(). */
void tilcdc_crtc_set_panel_info(struct drm_crtc *crtc,
				const struct tilcdc_panel_info *info)
{
	struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
	tilcdc_crtc->info = info;
}
/* Enable/disable the VESA-sync workaround applied in mode_fixup(). */
void tilcdc_crtc_set_simulate_vesa_sync(struct drm_crtc *crtc,
					bool simulate_vesa_sync)
{
	struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
	tilcdc_crtc->simulate_vesa_sync = simulate_vesa_sync;
}
/*
 * Called (e.g. from the cpufreq notifier) when the functional clock may
 * have changed: if the cached rate is stale and the CRTC is running,
 * restart it so the pixel clock divider is recomputed.
 */
void tilcdc_crtc_update_clk(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct tilcdc_drm_private *priv = dev->dev_private;
	struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
	drm_modeset_lock(&crtc->mutex, NULL);
	if (tilcdc_crtc->lcd_fck_rate != clk_get_rate(priv->clk)) {
		if (tilcdc_crtc_is_on(crtc)) {
			pm_runtime_get_sync(dev->dev);
			/* enable() re-runs set_mode() -> set_clk(). */
			tilcdc_crtc_disable(crtc);
			tilcdc_crtc_set_clk(crtc);
			tilcdc_crtc_enable(crtc);
			pm_runtime_put_sync(dev->dev);
		}
	}
	drm_modeset_unlock(&crtc->mutex);
}
/* Consecutive SYNC_LOST irqs tolerated before forcing a CRTC recovery. */
#define SYNC_LOST_COUNT_LIMIT 50
/*
 * Interrupt service routine, called from the tilcdc core irq handler.
 * Handles end-of-frame (vblank + deferred flips), FIFO underflow,
 * palette-load completion, sync-lost recovery and frame-done wakeups.
 */
irqreturn_t tilcdc_crtc_irq(struct drm_crtc *crtc)
{
	struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct tilcdc_drm_private *priv = dev->dev_private;
	uint32_t stat, reg;
	stat = tilcdc_read_irqstatus(dev);
	tilcdc_clear_irqstatus(dev, stat);
	if (stat & LCDC_END_OF_FRAME0) {
		bool skip_event = false;
		ktime_t now;
		now = ktime_get();
		spin_lock(&tilcdc_crtc->irq_lock);
		tilcdc_crtc->last_vblank = now;
		/* Apply a page flip that was deferred in update_fb(). */
		if (tilcdc_crtc->next_fb) {
			set_scanout(crtc, tilcdc_crtc->next_fb);
			tilcdc_crtc->next_fb = NULL;
			/* Event is delivered on the NEXT vblank, when the
			 * deferred fb is actually being scanned out. */
			skip_event = true;
		}
		spin_unlock(&tilcdc_crtc->irq_lock);
		drm_crtc_handle_vblank(crtc);
		if (!skip_event) {
			struct drm_pending_vblank_event *event;
			spin_lock(&dev->event_lock);
			event = tilcdc_crtc->event;
			tilcdc_crtc->event = NULL;
			if (event)
				drm_crtc_send_vblank_event(crtc, event);
			spin_unlock(&dev->event_lock);
		}
		if (tilcdc_crtc->frame_intact)
			tilcdc_crtc->sync_lost_count = 0;
		else
			tilcdc_crtc->frame_intact = true;
	}
	if (stat & LCDC_FIFO_UNDERFLOW)
		dev_err_ratelimited(dev->dev, "%s(0x%08x): FIFO underflow",
				    __func__, stat);
	if (stat & LCDC_PL_LOAD_DONE) {
		/* Wake tilcdc_crtc_load_palette() and mask the interrupt. */
		complete(&tilcdc_crtc->palette_loaded);
		if (priv->rev == 1)
			tilcdc_clear(dev, LCDC_RASTER_CTRL_REG,
				     LCDC_V1_PL_INT_ENA);
		else
			tilcdc_write(dev, LCDC_INT_ENABLE_CLR_REG,
				     LCDC_V2_PL_INT_ENA);
	}
	if (stat & LCDC_SYNC_LOST) {
		dev_err_ratelimited(dev->dev, "%s(0x%08x): Sync lost",
				    __func__, stat);
		tilcdc_crtc->frame_intact = false;
		if (priv->rev == 1) {
			/* Rev 1 recovers by toggling the raster enable bit. */
			reg = tilcdc_read(dev, LCDC_RASTER_CTRL_REG);
			if (reg & LCDC_RASTER_ENABLE) {
				tilcdc_clear(dev, LCDC_RASTER_CTRL_REG,
					     LCDC_RASTER_ENABLE);
				tilcdc_set(dev, LCDC_RASTER_CTRL_REG,
					   LCDC_RASTER_ENABLE);
			}
		} else {
			/* Rev 2: after a flood of sync-lost irqs, schedule a
			 * full off/on recovery and mask the interrupt. */
			if (tilcdc_crtc->sync_lost_count++ >
			    SYNC_LOST_COUNT_LIMIT) {
				dev_err(dev->dev,
					"%s(0x%08x): Sync lost flood detected, recovering",
					__func__, stat);
				queue_work(system_wq,
					   &tilcdc_crtc->recover_work);
				tilcdc_write(dev, LCDC_INT_ENABLE_CLR_REG,
					     LCDC_SYNC_LOST);
				tilcdc_crtc->sync_lost_count = 0;
			}
		}
	}
	if (stat & LCDC_FRAME_DONE) {
		tilcdc_crtc->frame_done = true;
		wake_up(&tilcdc_crtc->frame_done_wq);
		/* rev 1 lcdc appears to hang if irq is not disabled here */
		if (priv->rev == 1)
			tilcdc_clear(dev, LCDC_RASTER_CTRL_REG,
				     LCDC_V1_FRAME_DONE_INT_ENA);
	}
	/* For revision 2 only */
	if (priv->rev == 2) {
		/* Indicate to LCDC that the interrupt service routine has
		 * completed, see 13.3.6.1.6 in AM335x TRM.
		 */
		tilcdc_write(dev, LCDC_END_OF_INT_IND_REG, 0);
	}
	return IRQ_HANDLED;
}
/*
 * Allocate and register the single tilcdc CRTC: set up the coherent
 * palette buffer, the primary plane, locks/waitqueues and the DRM CRTC,
 * and (for componentized setups) resolve the OF graph port.
 * Returns 0 on success or a negative errno.
 */
int tilcdc_crtc_create(struct drm_device *dev)
{
	struct tilcdc_drm_private *priv = dev->dev_private;
	struct tilcdc_crtc *tilcdc_crtc;
	struct drm_crtc *crtc;
	int ret;
	tilcdc_crtc = devm_kzalloc(dev->dev, sizeof(*tilcdc_crtc), GFP_KERNEL);
	if (!tilcdc_crtc)
		return -ENOMEM;
	init_completion(&tilcdc_crtc->palette_loaded);
	/* 32-byte true-color palette; see tilcdc_crtc_load_palette(). */
	tilcdc_crtc->palette_base = dmam_alloc_coherent(dev->dev,
							TILCDC_PALETTE_SIZE,
							&tilcdc_crtc->palette_dma_handle,
							GFP_KERNEL | __GFP_ZERO);
	if (!tilcdc_crtc->palette_base)
		return -ENOMEM;
	/* First 16-bit entry must be 0x4000, the rest stay zero. */
	*tilcdc_crtc->palette_base = TILCDC_PALETTE_FIRST_ENTRY;
	crtc = &tilcdc_crtc->base;
	ret = tilcdc_plane_init(dev, &tilcdc_crtc->primary);
	if (ret < 0)
		goto fail;
	mutex_init(&tilcdc_crtc->enable_lock);
	init_waitqueue_head(&tilcdc_crtc->frame_done_wq);
	spin_lock_init(&tilcdc_crtc->irq_lock);
	INIT_WORK(&tilcdc_crtc->recover_work, tilcdc_crtc_recover_work);
	ret = drm_crtc_init_with_planes(dev, crtc,
					&tilcdc_crtc->primary,
					NULL,
					&tilcdc_crtc_funcs,
					"tilcdc crtc");
	if (ret < 0)
		goto fail;
	drm_crtc_helper_add(crtc, &tilcdc_crtc_helper_funcs);
	if (priv->is_componentized) {
		crtc->port = of_graph_get_port_by_id(dev->dev->of_node, 0);
		if (!crtc->port) { /* This should never happen */
			dev_err(dev->dev, "Port node not found in %pOF\n",
				dev->dev->of_node);
			ret = -EINVAL;
			goto fail;
		}
	}
	priv->crtc = crtc;
	return 0;
fail:
	tilcdc_crtc_destroy(crtc);
	return ret;
}
| linux-master | drivers/gpu/drm/tilcdc/tilcdc_crtc.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2012 Texas Instruments
* Author: Rob Clark <[email protected]>
*/
/* LCDC DRM driver, based on da8xx-fb */
#include <linux/component.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_dma.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_mm.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
#include "tilcdc_drv.h"
#include "tilcdc_external.h"
#include "tilcdc_panel.h"
#include "tilcdc_regs.h"
/* Registry of output modules (panel, external encoders). */
static LIST_HEAD(module_list);
/* Rev 1 LCDC only supports RGB565. */
static const u32 tilcdc_rev1_formats[] = { DRM_FORMAT_RGB565 };
/* Rev 2, straight (non-crossed) red/blue wiring. */
static const u32 tilcdc_straight_formats[] = {	DRM_FORMAT_RGB565,
						DRM_FORMAT_BGR888,
						DRM_FORMAT_XBGR8888 };
/* Rev 2, crossed red/blue wiring. */
static const u32 tilcdc_crossed_formats[] = {	DRM_FORMAT_BGR565,
						DRM_FORMAT_RGB888,
						DRM_FORMAT_XRGB8888 };
/* Formats assumed for legacy DTs that don't describe the wiring. */
static const u32 tilcdc_legacy_formats[] = {	DRM_FORMAT_RGB565,
						DRM_FORMAT_RGB888,
						DRM_FORMAT_XRGB8888 };
/* Record the module's identity and callbacks and add it to the registry. */
void tilcdc_module_init(struct tilcdc_module *mod, const char *name,
		const struct tilcdc_module_ops *funcs)
{
	INIT_LIST_HEAD(&mod->list);
	mod->name = name;
	mod->funcs = funcs;

	list_add(&mod->list, &module_list);
}
/* Remove a module from the global registry (inverse of tilcdc_module_init). */
void tilcdc_module_cleanup(struct tilcdc_module *mod)
{
	list_del(&mod->list);
}
/*
 * Global atomic check: run the standard modeset and plane checks, then a
 * second modeset pass because the plane ->atomic_check can flip
 * ->mode_changed when the pixel format changes.
 *
 * The original ended with "if (ret) return ret; return ret;" - the
 * conditional was dead; return the final check's result directly.
 */
static int tilcdc_atomic_check(struct drm_device *dev,
			       struct drm_atomic_state *state)
{
	int ret;

	ret = drm_atomic_helper_check_modeset(dev, state);
	if (ret)
		return ret;

	ret = drm_atomic_helper_check_planes(dev, state);
	if (ret)
		return ret;

	/*
	 * tilcdc ->atomic_check can update ->mode_changed if pixel format
	 * changes, hence will we check modeset changes again.
	 */
	return drm_atomic_helper_check_modeset(dev, state);
}
/* Mode-config vtable: stock GEM fb creation plus the local atomic check. */
static const struct drm_mode_config_funcs mode_config_funcs = {
	.fb_create = drm_gem_fb_create,
	.atomic_check = tilcdc_atomic_check,
	.atomic_commit = drm_atomic_helper_commit,
};
/*
 * Initialize KMS: let every registered output module create its
 * connectors/encoders, then set the mode-config limits and callbacks.
 */
static void modeset_init(struct drm_device *dev)
{
	struct tilcdc_drm_private *priv = dev->dev_private;
	struct tilcdc_module *mod;
	list_for_each_entry(mod, &module_list, list) {
		DBG("loading module: %s", mod->name);
		mod->funcs->modeset_init(mod, dev);
	}
	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;
	/* max_width is probed from the LCDC revision; height is fixed. */
	dev->mode_config.max_width = priv->max_width;
	dev->mode_config.max_height = 2048;
	dev->mode_config.funcs = &mode_config_funcs;
}
#ifdef CONFIG_CPU_FREQ
/*
 * cpufreq notifier: after a frequency change the LCDC functional clock
 * may have shifted, so re-validate the pixel clock divider.
 */
static int cpufreq_transition(struct notifier_block *nb,
			      unsigned long val, void *data)
{
	struct tilcdc_drm_private *priv = container_of(nb,
						       struct tilcdc_drm_private, freq_transition);
	if (val == CPUFREQ_POSTCHANGE)
		tilcdc_crtc_update_clk(priv->crtc);
	return 0;
}
#endif
/* Top-level interrupt handler; all work happens in the CRTC irq routine. */
static irqreturn_t tilcdc_irq(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct tilcdc_drm_private *priv = dev->dev_private;
	return tilcdc_crtc_irq(priv->crtc);
}
static int tilcdc_irq_install(struct drm_device *dev, unsigned int irq)
{
struct tilcdc_drm_private *priv = dev->dev_private;
int ret;
ret = request_irq(irq, tilcdc_irq, 0, dev->driver->name, dev);
if (ret)
return ret;
priv->irq_enabled = false;
return 0;
}
/* Free the LCDC interrupt if one was installed; safe to call twice. */
static void tilcdc_irq_uninstall(struct drm_device *dev)
{
	struct tilcdc_drm_private *priv = dev->dev_private;
	if (!priv->irq_enabled)
		return;
	free_irq(priv->irq, dev);
	priv->irq_enabled = false;
}
/*
 * DRM operations:
 */
/*
 * Tear down everything tilcdc_init() set up, tolerating a partially
 * initialized device (each resource is released only if present).
 */
static void tilcdc_fini(struct drm_device *dev)
{
	struct tilcdc_drm_private *priv = dev->dev_private;
#ifdef CONFIG_CPU_FREQ
	if (priv->freq_transition.notifier_call)
		cpufreq_unregister_notifier(&priv->freq_transition,
					    CPUFREQ_TRANSITION_NOTIFIER);
#endif
	/* Stop scanout before unregistering from userspace. */
	if (priv->crtc)
		tilcdc_crtc_shutdown(priv->crtc);
	if (priv->is_registered)
		drm_dev_unregister(dev);
	drm_kms_helper_poll_fini(dev);
	tilcdc_irq_uninstall(dev);
	drm_mode_config_cleanup(dev);
	if (priv->clk)
		clk_put(priv->clk);
	if (priv->mmio)
		iounmap(priv->mmio);
	if (priv->wq)
		destroy_workqueue(priv->wq);
	dev->dev_private = NULL;
	pm_runtime_disable(dev->dev);
	drm_dev_put(dev);
}
/*
 * Main driver initialization: allocate the DRM device, map registers,
 * detect the LCDC revision, parse DT limits, create the CRTC, bind any
 * external components (e.g. tda998x bridge) and register the device.
 * All failure paths funnel through tilcdc_fini(), which tolerates
 * partially-initialized state.
 */
static int tilcdc_init(const struct drm_driver *ddrv, struct device *dev)
{
	struct drm_device *ddev;
	struct platform_device *pdev = to_platform_device(dev);
	struct device_node *node = dev->of_node;
	struct tilcdc_drm_private *priv;
	struct resource *res;
	u32 bpp = 0;	/* fbdev depth, chosen from the detected pixel format set */
	int ret;
	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;
	ddev = drm_dev_alloc(ddrv, dev);
	if (IS_ERR(ddev))
		return PTR_ERR(ddev);
	ddev->dev_private = priv;
	platform_set_drvdata(pdev, ddev);
	drm_mode_config_init(ddev);
	/* > 0 means a component (tda998x) was found in the DT graph */
	priv->is_componentized =
		tilcdc_get_external_components(dev, NULL) > 0;
	priv->wq = alloc_ordered_workqueue("tilcdc", 0);
	if (!priv->wq) {
		ret = -ENOMEM;
		goto init_failed;
	}
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(dev, "failed to get memory resource\n");
		ret = -EINVAL;
		goto init_failed;
	}
	priv->mmio = ioremap(res->start, resource_size(res));
	if (!priv->mmio) {
		dev_err(dev, "failed to ioremap\n");
		ret = -ENOMEM;
		goto init_failed;
	}
	priv->clk = clk_get(dev, "fck");
	if (IS_ERR(priv->clk)) {
		dev_err(dev, "failed to get functional clock\n");
		ret = -ENODEV;
		goto init_failed;
	}
	pm_runtime_enable(dev);
	/* Determine LCD IP Version */
	pm_runtime_get_sync(dev);	/* device must be powered to read PID */
	switch (tilcdc_read(ddev, LCDC_PID_REG)) {
	case 0x4c100102:
		priv->rev = 1;
		break;
	case 0x4f200800:
	case 0x4f201000:
		priv->rev = 2;
		break;
	default:
		dev_warn(dev, "Unknown PID Reg value 0x%08x, "
			"defaulting to LCD revision 1\n",
			tilcdc_read(ddev, LCDC_PID_REG));
		priv->rev = 1;
		break;
	}
	pm_runtime_put_sync(dev);
	if (priv->rev == 1) {
		DBG("Revision 1 LCDC supports only RGB565 format");
		priv->pixelformats = tilcdc_rev1_formats;
		priv->num_pixelformats = ARRAY_SIZE(tilcdc_rev1_formats);
		bpp = 16;
	} else {
		/* Rev 2: pixel format set depends on board wiring (DT). */
		const char *str = "\0";
		of_property_read_string(node, "blue-and-red-wiring", &str);
		if (0 == strcmp(str, "crossed")) {
			DBG("Configured for crossed blue and red wires");
			priv->pixelformats = tilcdc_crossed_formats;
			priv->num_pixelformats =
				ARRAY_SIZE(tilcdc_crossed_formats);
			bpp = 32; /* Choose bpp with RGB support for fbdef */
		} else if (0 == strcmp(str, "straight")) {
			DBG("Configured for straight blue and red wires");
			priv->pixelformats = tilcdc_straight_formats;
			priv->num_pixelformats =
				ARRAY_SIZE(tilcdc_straight_formats);
			bpp = 16; /* Choose bpp with RGB support for fbdef */
		} else {
			DBG("Blue and red wiring '%s' unknown, use legacy mode",
				str);
			priv->pixelformats = tilcdc_legacy_formats;
			priv->num_pixelformats =
				ARRAY_SIZE(tilcdc_legacy_formats);
			bpp = 16; /* This is just a guess */
		}
	}
	/* Optional DT limits, with per-revision defaults. */
	if (of_property_read_u32(node, "max-bandwidth", &priv->max_bandwidth))
		priv->max_bandwidth = TILCDC_DEFAULT_MAX_BANDWIDTH;
	DBG("Maximum Bandwidth Value %d", priv->max_bandwidth);
	if (of_property_read_u32(node, "max-width", &priv->max_width)) {
		if (priv->rev == 1)
			priv->max_width = TILCDC_DEFAULT_MAX_WIDTH_V1;
		else
			priv->max_width = TILCDC_DEFAULT_MAX_WIDTH_V2;
	}
	DBG("Maximum Horizontal Pixel Width Value %dpixels", priv->max_width);
	if (of_property_read_u32(node, "max-pixelclock",
					&priv->max_pixelclock))
		priv->max_pixelclock = TILCDC_DEFAULT_MAX_PIXELCLOCK;
	DBG("Maximum Pixel Clock Value %dKHz", priv->max_pixelclock);
	ret = tilcdc_crtc_create(ddev);
	if (ret < 0) {
		dev_err(dev, "failed to create crtc\n");
		goto init_failed;
	}
	modeset_init(ddev);
#ifdef CONFIG_CPU_FREQ
	priv->freq_transition.notifier_call = cpufreq_transition;
	ret = cpufreq_register_notifier(&priv->freq_transition,
			CPUFREQ_TRANSITION_NOTIFIER);
	if (ret) {
		dev_err(dev, "failed to register cpufreq notifier\n");
		/* clear flag so tilcdc_fini() does not unregister */
		priv->freq_transition.notifier_call = NULL;
		goto init_failed;
	}
#endif
	if (priv->is_componentized) {
		ret = component_bind_all(dev, ddev);
		if (ret < 0)
			goto init_failed;
		ret = tilcdc_add_component_encoder(ddev);
		if (ret < 0)
			goto init_failed;
	} else {
		ret = tilcdc_attach_external_device(ddev);
		if (ret)
			goto init_failed;
	}
	/* Panel modules may probe later, so defer rather than hard-fail. */
	if (!priv->external_connector &&
	    ((priv->num_encoders == 0) || (priv->num_connectors == 0))) {
		dev_err(dev, "no encoders/connectors found\n");
		ret = -EPROBE_DEFER;
		goto init_failed;
	}
	ret = drm_vblank_init(ddev, 1);
	if (ret < 0) {
		dev_err(dev, "failed to initialize vblank\n");
		goto init_failed;
	}
	ret = platform_get_irq(pdev, 0);
	if (ret < 0)
		goto init_failed;
	priv->irq = ret;
	ret = tilcdc_irq_install(ddev, priv->irq);
	if (ret < 0) {
		dev_err(dev, "failed to install IRQ handler\n");
		goto init_failed;
	}
	drm_mode_config_reset(ddev);
	drm_kms_helper_poll_init(ddev);
	ret = drm_dev_register(ddev, 0);
	if (ret)
		goto init_failed;
	priv->is_registered = true;
	drm_fbdev_dma_setup(ddev, bpp);
	return 0;
init_failed:
	tilcdc_fini(ddev);
	return ret;
}
#if defined(CONFIG_DEBUG_FS)
/*
 * Register dump table for the debugfs "regs" file. 'rev' is the minimum
 * LCDC revision that has the register; 'save' marks registers that hold
 * configuration state (as opposed to status).
 */
static const struct {
	const char *name;
	uint8_t rev;
	uint8_t save;
	uint32_t reg;
} registers[] = {
#define REG(rev, save, reg) { #reg, rev, save, reg }
		/* exists in revision 1: */
		REG(1, false, LCDC_PID_REG),
		REG(1, true, LCDC_CTRL_REG),
		REG(1, false, LCDC_STAT_REG),
		REG(1, true, LCDC_RASTER_CTRL_REG),
		REG(1, true, LCDC_RASTER_TIMING_0_REG),
		REG(1, true, LCDC_RASTER_TIMING_1_REG),
		REG(1, true, LCDC_RASTER_TIMING_2_REG),
		REG(1, true, LCDC_DMA_CTRL_REG),
		REG(1, true, LCDC_DMA_FB_BASE_ADDR_0_REG),
		REG(1, true, LCDC_DMA_FB_CEILING_ADDR_0_REG),
		REG(1, true, LCDC_DMA_FB_BASE_ADDR_1_REG),
		REG(1, true, LCDC_DMA_FB_CEILING_ADDR_1_REG),
		/* new in revision 2: */
		REG(2, false, LCDC_RAW_STAT_REG),
		REG(2, false, LCDC_MASKED_STAT_REG),
		REG(2, true, LCDC_INT_ENABLE_SET_REG),
		REG(2, false, LCDC_INT_ENABLE_CLR_REG),
		REG(2, false, LCDC_END_OF_INT_IND_REG),
		REG(2, true, LCDC_CLK_ENABLE_REG),
#undef REG
};
#endif
#ifdef CONFIG_DEBUG_FS
static int tilcdc_regs_show(struct seq_file *m, void *arg)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
struct tilcdc_drm_private *priv = dev->dev_private;
unsigned i;
pm_runtime_get_sync(dev->dev);
seq_printf(m, "revision: %d\n", priv->rev);
for (i = 0; i < ARRAY_SIZE(registers); i++)
if (priv->rev >= registers[i].rev)
seq_printf(m, "%s:\t %08x\n", registers[i].name,
tilcdc_read(dev, registers[i].reg));
pm_runtime_put_sync(dev->dev);
return 0;
}
static int tilcdc_mm_show(struct seq_file *m, void *arg)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
struct drm_printer p = drm_seq_file_printer(m);
drm_mm_print(&dev->vma_offset_manager->vm_addr_space_mm, &p);
return 0;
}
static struct drm_info_list tilcdc_debugfs_list[] = {
{ "regs", tilcdc_regs_show, 0, NULL },
{ "mm", tilcdc_mm_show, 0, NULL },
};
static void tilcdc_debugfs_init(struct drm_minor *minor)
{
struct tilcdc_module *mod;
drm_debugfs_create_files(tilcdc_debugfs_list,
ARRAY_SIZE(tilcdc_debugfs_list),
minor->debugfs_root, minor);
list_for_each_entry(mod, &module_list, list)
if (mod->funcs->debugfs_init)
mod->funcs->debugfs_init(mod, minor);
}
#endif
DEFINE_DRM_GEM_DMA_FOPS(fops);

/* DRM driver descriptor: atomic KMS with DMA-backed GEM objects. */
static const struct drm_driver tilcdc_driver = {
	.driver_features    = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
	DRM_GEM_DMA_DRIVER_OPS,
#ifdef CONFIG_DEBUG_FS
	.debugfs_init       = tilcdc_debugfs_init,
#endif
	.fops               = &fops,
	.name               = "tilcdc",
	.desc               = "TI LCD Controller DRM",
	.date               = "20121205",
	.major              = 1,
	.minor              = 0,
};
/*
 * Power management:
 */

/* System suspend: save KMS state via the helper, then park the pins. */
static int tilcdc_pm_suspend(struct device *dev)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	int ret = 0;
	ret = drm_mode_config_helper_suspend(ddev);
	/* Select sleep pin state */
	pinctrl_pm_select_sleep_state(dev);
	return ret;
}

/* System resume: restore pins first, then the saved KMS state. */
static int tilcdc_pm_resume(struct device *dev)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	/* Select default pin state */
	pinctrl_pm_select_default_state(dev);
	return drm_mode_config_helper_resume(ddev);
}

static DEFINE_SIMPLE_DEV_PM_OPS(tilcdc_pm_ops,
				tilcdc_pm_suspend, tilcdc_pm_resume);
/*
 * Platform driver:
 */

/* Component-master bind: full driver init once all components are present. */
static int tilcdc_bind(struct device *dev)
{
	return tilcdc_init(&tilcdc_driver, dev);
}

static void tilcdc_unbind(struct device *dev)
{
	struct drm_device *ddev = dev_get_drvdata(dev);

	/* Check if a subcomponent has already triggered the unloading. */
	if (!ddev->dev_private)
		return;

	tilcdc_fini(dev_get_drvdata(dev));
}

static const struct component_master_ops tilcdc_comp_ops = {
	.bind = tilcdc_bind,
	.unbind = tilcdc_unbind,
};
/*
 * Platform probe: initialize directly when the display pipeline is fully
 * local, or register as a component master when an external component
 * (e.g. tda998x) must bind first.
 */
static int tilcdc_pdev_probe(struct platform_device *pdev)
{
	struct component_match *match = NULL;
	int ncomp;

	/* bail out early if no DT data: */
	if (!pdev->dev.of_node) {
		dev_err(&pdev->dev, "device-tree data is missing\n");
		return -ENXIO;
	}

	ncomp = tilcdc_get_external_components(&pdev->dev, &match);
	if (ncomp < 0)
		return ncomp;

	if (ncomp == 0)
		return tilcdc_init(&tilcdc_driver, &pdev->dev);

	return component_master_add_with_match(&pdev->dev, &tilcdc_comp_ops,
					       match);
}
/*
 * Platform remove: mirror of probe — tear down directly when no external
 * components were involved, otherwise drop the component master.
 */
static int tilcdc_pdev_remove(struct platform_device *pdev)
{
	int ncomp;

	ncomp = tilcdc_get_external_components(&pdev->dev, NULL);
	if (ncomp < 0)
		return ncomp;

	if (ncomp == 0)
		tilcdc_fini(platform_get_drvdata(pdev));
	else
		component_master_del(&pdev->dev, &tilcdc_comp_ops);

	return 0;
}
/* Supported SoCs: AM33xx and DA850 LCD controllers. */
static const struct of_device_id tilcdc_of_match[] = {
	{ .compatible = "ti,am33xx-tilcdc", },
	{ .compatible = "ti,da850-tilcdc", },
	{ },
};
MODULE_DEVICE_TABLE(of, tilcdc_of_match);

static struct platform_driver tilcdc_platform_driver = {
	.probe      = tilcdc_pdev_probe,
	.remove     = tilcdc_pdev_remove,
	.driver     = {
		.name   = "tilcdc",
		.pm     = pm_sleep_ptr(&tilcdc_pm_ops),
		.of_match_table = tilcdc_of_match,
	},
};
/* Module entry: register the built-in panel sub-driver, then the platform driver. */
static int __init tilcdc_drm_init(void)
{
	if (drm_firmware_drivers_only())
		return -ENODEV;

	DBG("init");
	tilcdc_panel_init();
	return platform_driver_register(&tilcdc_platform_driver);
}
/* Module exit: unregister in the reverse order of tilcdc_drm_init(). */
static void __exit tilcdc_drm_fini(void)
{
	DBG("fini");
	platform_driver_unregister(&tilcdc_platform_driver);
	tilcdc_panel_fini();
}
module_init(tilcdc_drm_init);
module_exit(tilcdc_drm_fini);

/* Module metadata (author string was missing the closing '>'). */
MODULE_AUTHOR("Rob Clark <[email protected]>");
MODULE_DESCRIPTION("TI LCD Controller DRM Driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/gpu/drm/tilcdc/tilcdc_drv.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2015 Texas Instruments
* Author: Jyri Sarha <[email protected]>
*/
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include "tilcdc_drv.h"
/* Plane ops: stock atomic helpers; tilcdc needs no custom plane state. */
static const struct drm_plane_funcs tilcdc_plane_funcs = {
	.update_plane	= drm_atomic_helper_update_plane,
	.disable_plane	= drm_atomic_helper_disable_plane,
	.destroy	= drm_plane_cleanup,
	.reset		= drm_atomic_helper_plane_reset,
	.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
};
/*
 * Validate a plane update. The LCDC has a single full-screen primary plane,
 * so the framebuffer must sit at (0,0), match the CRTC mode exactly, and
 * have a pitch equal to the display width. A pixel-format change is allowed
 * but forces a full modeset (raster registers must be reprogrammed).
 */
static int tilcdc_plane_atomic_check(struct drm_plane *plane,
				     struct drm_atomic_state *state)
{
	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
									   plane);
	struct drm_crtc_state *crtc_state;
	struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
									   plane);
	unsigned int pitch;

	/* plane being disabled: nothing to validate */
	if (!new_state->crtc)
		return 0;

	if (WARN_ON(!new_state->fb))
		return -EINVAL;

	if (new_state->crtc_x || new_state->crtc_y) {
		dev_err(plane->dev->dev, "%s: crtc position must be zero.",
			__func__);
		return -EINVAL;
	}

	crtc_state = drm_atomic_get_existing_crtc_state(state,
							new_state->crtc);
	/* we should have a crtc state if the plane is attached to a crtc */
	if (WARN_ON(!crtc_state))
		return 0;

	if (crtc_state->mode.hdisplay != new_state->crtc_w ||
	    crtc_state->mode.vdisplay != new_state->crtc_h) {
		dev_err(plane->dev->dev,
			"%s: Size must match mode (%dx%d == %dx%d)", __func__,
			crtc_state->mode.hdisplay, crtc_state->mode.vdisplay,
			new_state->crtc_w, new_state->crtc_h);
		return -EINVAL;
	}

	/* DMA scans lines back-to-back: fb pitch must equal mode width */
	pitch = crtc_state->mode.hdisplay *
		new_state->fb->format->cpp[0];
	if (new_state->fb->pitches[0] != pitch) {
		dev_err(plane->dev->dev,
			"Invalid pitch: fb and crtc widths must be the same");
		return -EINVAL;
	}

	if (old_state->fb && new_state->fb->format != old_state->fb->format) {
		dev_dbg(plane->dev->dev,
			"%s(): pixel format change requires mode_change\n",
			__func__);
		crtc_state->mode_changed = true;
	}

	return 0;
}
/*
 * Commit a plane update: hand the new framebuffer (and any pending
 * page-flip event) to the CRTC. On success the CRTC owns the event,
 * so clear it from the state to prevent a double completion.
 */
static void tilcdc_plane_atomic_update(struct drm_plane *plane,
				       struct drm_atomic_state *state)
{
	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
									   plane);

	if (!new_state->crtc)
		return;

	if (WARN_ON(!new_state->fb || !new_state->crtc->state))
		return;

	if (tilcdc_crtc_update_fb(new_state->crtc,
				  new_state->fb,
				  new_state->crtc->state->event) == 0) {
		new_state->crtc->state->event = NULL;
	}
}
/* Atomic helper hooks for the primary plane. */
static const struct drm_plane_helper_funcs plane_helper_funcs = {
	.atomic_check = tilcdc_plane_atomic_check,
	.atomic_update = tilcdc_plane_atomic_update,
};
/*
 * Create the single primary plane for the LCDC, advertising the pixel
 * format list selected at probe time, and attach the atomic helpers.
 * Returns 0 on success or a negative errno.
 */
int tilcdc_plane_init(struct drm_device *dev,
		      struct drm_plane *plane)
{
	struct tilcdc_drm_private *priv = dev->dev_private;
	int err;

	err = drm_universal_plane_init(dev, plane, 1, &tilcdc_plane_funcs,
				       priv->pixelformats,
				       priv->num_pixelformats,
				       NULL, DRM_PLANE_TYPE_PRIMARY, NULL);
	if (err) {
		dev_err(dev->dev, "Failed to initialize plane: %d\n", err);
		return err;
	}

	drm_plane_helper_add(plane, &plane_helper_funcs);

	return 0;
}
| linux-master | drivers/gpu/drm/tilcdc/tilcdc_plane.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2015 Texas Instruments
* Author: Jyri Sarha <[email protected]>
*/
#include <linux/component.h>
#include <linux/of_graph.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_of.h>
#include <drm/drm_simple_kms_helper.h>
#include "tilcdc_drv.h"
#include "tilcdc_external.h"
/* Raster timings used when driving a tda998x HDMI encoder. */
static const struct tilcdc_panel_info panel_info_tda998x = {
	.ac_bias                = 255,
	.ac_bias_intrpt         = 0,
	.dma_burst_sz           = 16,
	.bpp                    = 16,
	.fdd                    = 0x80,
	.tft_alt_mode           = 0,
	.invert_pxl_clk         = 1,	/* tda998x samples on the opposite edge */
	.sync_edge              = 1,
	.sync_ctrl              = 1,
	.raster_order           = 0,
};

/* Generic raster timings used for bridges/panels other than tda998x. */
static const struct tilcdc_panel_info panel_info_default = {
	.ac_bias                = 255,
	.ac_bias_intrpt         = 0,
	.dma_burst_sz           = 16,
	.bpp                    = 16,
	.fdd                    = 0x80,
	.tft_alt_mode           = 0,
	.sync_edge              = 0,
	.sync_ctrl              = 1,
	.raster_order           = 0,
};
static
struct drm_connector *tilcdc_encoder_find_connector(struct drm_device *ddev,
struct drm_encoder *encoder)
{
struct drm_connector *connector;
list_for_each_entry(connector, &ddev->mode_config.connector_list, head) {
if (drm_connector_has_possible_encoder(connector, encoder))
return connector;
}
dev_err(ddev->dev, "No connector found for %s encoder (id %d)\n",
encoder->name, encoder->base.id);
return NULL;
}
/*
 * Hook up an encoder created by a bound component (tda998x): find the
 * first encoder usable by our CRTC, locate its connector, and configure
 * the CRTC with tda998x-specific timings.
 */
int tilcdc_add_component_encoder(struct drm_device *ddev)
{
	struct tilcdc_drm_private *priv = ddev->dev_private;
	struct drm_encoder *encoder = NULL, *iter;

	list_for_each_entry(iter, &ddev->mode_config.encoder_list, head)
		if (iter->possible_crtcs & (1 << priv->crtc->index)) {
			encoder = iter;
			break;
		}

	if (!encoder) {
		dev_err(ddev->dev, "%s: No suitable encoder found\n", __func__);
		return -ENODEV;
	}

	priv->external_connector =
		tilcdc_encoder_find_connector(ddev, encoder);

	if (!priv->external_connector)
		return -ENODEV;

	/* Only tda998x is supported at the moment. */
	tilcdc_crtc_set_simulate_vesa_sync(priv->crtc, true);
	tilcdc_crtc_set_panel_info(priv->crtc, &panel_info_tda998x);

	return 0;
}
/*
 * Attach @bridge to our (single) encoder, apply the default panel timings,
 * and cache the connector the bridge created. Returns 0 or negative errno.
 */
static
int tilcdc_attach_bridge(struct drm_device *ddev, struct drm_bridge *bridge)
{
	struct tilcdc_drm_private *priv = ddev->dev_private;
	int ret;

	/* single CRTC, so the encoder can only ever use CRTC 0 */
	priv->external_encoder->possible_crtcs = BIT(0);

	ret = drm_bridge_attach(priv->external_encoder, bridge, NULL, 0);
	if (ret)
		return ret;

	tilcdc_crtc_set_panel_info(priv->crtc, &panel_info_default);

	priv->external_connector =
		tilcdc_encoder_find_connector(ddev, priv->external_encoder);

	if (!priv->external_connector)
		return -ENODEV;

	return 0;
}
/*
 * Attach a DT-described panel or bridge on port 0. A panel is wrapped in a
 * panel-bridge so both cases end up attaching a bridge to a freshly created
 * dummy encoder. Returns 0 when nothing is connected (-ENODEV from the DT
 * lookup is not an error), or a negative errno on failure.
 */
int tilcdc_attach_external_device(struct drm_device *ddev)
{
	struct tilcdc_drm_private *priv = ddev->dev_private;
	struct drm_bridge *bridge;
	struct drm_panel *panel;
	int ret;

	ret = drm_of_find_panel_or_bridge(ddev->dev->of_node, 0, 0,
					  &panel, &bridge);
	if (ret == -ENODEV)
		return 0;
	else if (ret)
		return ret;

	priv->external_encoder = devm_kzalloc(ddev->dev,
					      sizeof(*priv->external_encoder),
					      GFP_KERNEL);
	if (!priv->external_encoder)
		return -ENOMEM;

	ret = drm_simple_encoder_init(ddev, priv->external_encoder,
				      DRM_MODE_ENCODER_NONE);
	if (ret) {
		dev_err(ddev->dev, "drm_encoder_init() failed %d\n", ret);
		return ret;
	}

	if (panel) {
		/* wrap the panel so it can be attached like any bridge */
		bridge = devm_drm_panel_bridge_add_typed(ddev->dev, panel,
							 DRM_MODE_CONNECTOR_DPI);
		if (IS_ERR(bridge)) {
			ret = PTR_ERR(bridge);
			goto err_encoder_cleanup;
		}
	}

	ret = tilcdc_attach_bridge(ddev, bridge);
	if (ret)
		goto err_encoder_cleanup;

	return 0;

err_encoder_cleanup:
	drm_encoder_cleanup(priv->external_encoder);
	return ret;
}
/* Component-match callback: true when @dev is backed by OF node @data. */
static int dev_match_of(struct device *dev, void *data)
{
	return data == dev->of_node;
}
/*
 * Look up the remote device on OF graph port 0. Returns 1 (and adds a
 * component match when @match is non-NULL) if it is a tda998x, otherwise 0.
 * A NULL remote node is handled implicitly: of_device_is_compatible(NULL,..)
 * is false, so we fall into the "no external component" path.
 */
int tilcdc_get_external_components(struct device *dev,
				   struct component_match **match)
{
	struct device_node *node;

	node = of_graph_get_remote_node(dev->of_node, 0, 0);

	if (!of_device_is_compatible(node, "nxp,tda998x")) {
		of_node_put(node);
		return 0;
	}

	if (match)
		drm_of_component_match_add(dev, match, dev_match_of, node);
	of_node_put(node);
	return 1;
}
| linux-master | drivers/gpu/drm/tilcdc/tilcdc_external.c |
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
*
* Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
/*
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
*/
#define pr_fmt(fmt) "[TTM] " fmt
#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_tt.h>
#include <drm/drm_drv.h>
#include <drm/drm_managed.h>
/*
 * Wait for kernel fences on @bo to signal before servicing a fault.
 * Returns 0 when the BO is idle, VM_FAULT_RETRY when the wait was done
 * (or deferred) with mmap_lock dropped, or a fault error code.
 * Caller holds bo->base.resv; on the RETRY path that lock is released.
 */
static vm_fault_t ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
				struct vm_fault *vmf)
{
	long err = 0;

	/*
	 * Quick non-stalling check for idle.
	 */
	if (dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_KERNEL))
		return 0;

	/*
	 * If possible, avoid waiting for GPU with mmap_lock
	 * held. We only do this if the fault allows retry and this
	 * is the first attempt.
	 */
	if (fault_flag_allow_retry_first(vmf->flags)) {
		if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
			return VM_FAULT_RETRY;

		/* hold a BO reference across the unlocked wait */
		ttm_bo_get(bo);
		mmap_read_unlock(vmf->vma->vm_mm);
		(void)dma_resv_wait_timeout(bo->base.resv,
					    DMA_RESV_USAGE_KERNEL, true,
					    MAX_SCHEDULE_TIMEOUT);
		dma_resv_unlock(bo->base.resv);
		ttm_bo_put(bo);
		return VM_FAULT_RETRY;
	}

	/*
	 * Ordinary wait.
	 */
	err = dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_KERNEL, true,
				    MAX_SCHEDULE_TIMEOUT);
	if (unlikely(err < 0)) {
		return (err != -ERESTARTSYS) ? VM_FAULT_SIGBUS :
			VM_FAULT_NOPAGE;
	}

	return 0;
}
/*
 * PFN of @page_offset within an iomem-backed BO: use the driver's hook
 * when provided, otherwise derive it from the bus offset of the resource.
 */
static unsigned long ttm_bo_io_mem_pfn(struct ttm_buffer_object *bo,
				       unsigned long page_offset)
{
	struct ttm_device *bdev = bo->bdev;

	if (!bdev->funcs->io_mem_pfn)
		return (bo->resource->bus.offset >> PAGE_SHIFT) + page_offset;

	return bdev->funcs->io_mem_pfn(bo, page_offset);
}
/**
 * ttm_bo_vm_reserve - Reserve a buffer object in a retryable vm callback
 * @bo: The buffer object
 * @vmf: The fault structure handed to the callback
 *
 * vm callbacks like fault() and *_mkwrite() allow for the mmap_lock to be dropped
 * during long waits, and after the wait the callback will be restarted. This
 * is to allow other threads using the same virtual memory space concurrent
 * access to map(), unmap() completely unrelated buffer objects. TTM buffer
 * object reservations sometimes wait for GPU and should therefore be
 * considered long waits. This function reserves the buffer object interruptibly
 * taking this into account. Starvation is avoided by the vm system not
 * allowing too many repeated restarts.
 * This function is intended to be used in customized fault() and _mkwrite()
 * handlers.
 *
 * Return:
 *    0 on success and the bo was reserved.
 *    VM_FAULT_RETRY if blocking wait.
 *    VM_FAULT_NOPAGE if blocking wait and retrying was not allowed.
 */
vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo,
			     struct vm_fault *vmf)
{
	/*
	 * Work around locking order reversal in fault / nopfn
	 * between mmap_lock and bo_reserve: Perform a trylock operation
	 * for reserve, and if it fails, retry the fault after waiting
	 * for the buffer to become unreserved.
	 */
	if (unlikely(!dma_resv_trylock(bo->base.resv))) {
		/*
		 * If the fault allows retry and this is the first
		 * fault attempt, we try to release the mmap_lock
		 * before waiting
		 */
		if (fault_flag_allow_retry_first(vmf->flags)) {
			if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
				/* blocking-wait for the resv, then drop it:
				 * the restarted fault retries the trylock */
				ttm_bo_get(bo);
				mmap_read_unlock(vmf->vma->vm_mm);
				if (!dma_resv_lock_interruptible(bo->base.resv,
								 NULL))
					dma_resv_unlock(bo->base.resv);
				ttm_bo_put(bo);
			}

			return VM_FAULT_RETRY;
		}

		if (dma_resv_lock_interruptible(bo->base.resv, NULL))
			return VM_FAULT_NOPAGE;
	}

	/*
	 * Refuse to fault imported pages. This should be handled
	 * (if at all) by redirecting mmap to the exporter.
	 */
	if (bo->ttm && (bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL)) {
		if (!(bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL_MAPPABLE)) {
			dma_resv_unlock(bo->base.resv);
			return VM_FAULT_SIGBUS;
		}
	}

	return 0;
}
EXPORT_SYMBOL(ttm_bo_vm_reserve);
/**
 * ttm_bo_vm_fault_reserved - TTM fault helper
 * @vmf: The struct vm_fault given as argument to the fault callback
 * @prot: The page protection to be used for this memory area.
 * @num_prefault: Maximum number of prefault pages. The caller may want to
 * specify this based on madvice settings and the size of the GPU object
 * backed by the memory.
 *
 * This function inserts one or more page table entries pointing to the
 * memory backing the buffer object, and then returns a return code
 * instructing the caller to retry the page access.
 *
 * Return:
 *   VM_FAULT_NOPAGE on success or pending signal
 *   VM_FAULT_SIGBUS on unspecified error
 *   VM_FAULT_OOM on out-of-memory
 *   VM_FAULT_RETRY if retryable wait
 */
vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
				    pgprot_t prot,
				    pgoff_t num_prefault)
{
	struct vm_area_struct *vma = vmf->vma;
	struct ttm_buffer_object *bo = vma->vm_private_data;
	struct ttm_device *bdev = bo->bdev;
	unsigned long page_offset;
	unsigned long page_last;
	unsigned long pfn;
	struct ttm_tt *ttm = NULL;
	struct page *page;
	int err;
	pgoff_t i;
	vm_fault_t ret = VM_FAULT_NOPAGE;
	unsigned long address = vmf->address;

	/*
	 * Wait for buffer data in transit, due to a pipelined
	 * move.
	 */
	ret = ttm_bo_vm_fault_idle(bo, vmf);
	if (unlikely(ret != 0))
		return ret;

	err = ttm_mem_io_reserve(bdev, bo->resource);
	if (unlikely(err != 0))
		return VM_FAULT_SIGBUS;

	/* translate the faulting VA into a page offset inside the BO */
	page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
		vma->vm_pgoff - drm_vma_node_start(&bo->base.vma_node);
	page_last = vma_pages(vma) + vma->vm_pgoff -
		drm_vma_node_start(&bo->base.vma_node);

	if (unlikely(page_offset >= PFN_UP(bo->base.size)))
		return VM_FAULT_SIGBUS;

	prot = ttm_io_prot(bo, bo->resource, prot);
	if (!bo->resource->bus.is_iomem) {
		/* system-memory backed: make sure the TT pages exist */
		struct ttm_operation_ctx ctx = {
			.interruptible = true,
			.no_wait_gpu = false,
			.force_alloc = true
		};

		ttm = bo->ttm;
		err = ttm_tt_populate(bdev, bo->ttm, &ctx);
		if (err) {
			if (err == -EINTR || err == -ERESTARTSYS ||
			    err == -EAGAIN)
				return VM_FAULT_NOPAGE;

			pr_debug("TTM fault hit %pe.\n", ERR_PTR(err));
			return VM_FAULT_SIGBUS;
		}
	} else {
		/* Iomem should not be marked encrypted */
		prot = pgprot_decrypted(prot);
	}

	/*
	 * Speculatively prefault a number of pages. Only error on
	 * first page.
	 */
	for (i = 0; i < num_prefault; ++i) {
		if (bo->resource->bus.is_iomem) {
			pfn = ttm_bo_io_mem_pfn(bo, page_offset);
		} else {
			page = ttm->pages[page_offset];
			if (unlikely(!page && i == 0)) {
				return VM_FAULT_OOM;
			} else if (unlikely(!page)) {
				break;
			}
			pfn = page_to_pfn(page);
		}

		/*
		 * Note that the value of @prot at this point may differ from
		 * the value of @vma->vm_page_prot in the caching- and
		 * encryption bits. This is because the exact location of the
		 * data may not be known at mmap() time and may also change
		 * at arbitrary times while the data is mmap'ed.
		 * See vmf_insert_pfn_prot() for a discussion.
		 */
		ret = vmf_insert_pfn_prot(vma, address, pfn, prot);

		/* Never error on prefaulted PTEs */
		if (unlikely((ret & VM_FAULT_ERROR))) {
			if (i == 0)
				return VM_FAULT_NOPAGE;
			else
				break;
		}

		address += PAGE_SIZE;
		if (unlikely(++page_offset >= page_last))
			break;
	}
	return ret;
}
EXPORT_SYMBOL(ttm_bo_vm_fault_reserved);
/* drmm release action: free the dummy page mapped for unplugged devices. */
static void ttm_bo_release_dummy_page(struct drm_device *dev, void *res)
{
	__free_page((struct page *)res);
}
/*
 * Fault handler path for an unplugged device: map the whole VMA to a single
 * shared zero page so userspace keeps running without touching hardware.
 * The page is freed via a drmm release action when the drm_device goes away.
 */
vm_fault_t ttm_bo_vm_dummy_page(struct vm_fault *vmf, pgprot_t prot)
{
	struct vm_area_struct *vma = vmf->vma;
	struct ttm_buffer_object *bo = vma->vm_private_data;
	struct drm_device *ddev = bo->base.dev;
	vm_fault_t ret = VM_FAULT_NOPAGE;
	unsigned long address;
	unsigned long pfn;
	struct page *page;

	/* Allocate new dummy page to map all the VA range in this VMA to it*/
	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page)
		return VM_FAULT_OOM;

	/* Set the page to be freed using drmm release action */
	if (drmm_add_action_or_reset(ddev, ttm_bo_release_dummy_page, page))
		return VM_FAULT_OOM;

	pfn = page_to_pfn(page);

	/* Prefault the entire VMA range right away to avoid further faults */
	for (address = vma->vm_start; address < vma->vm_end;
	     address += PAGE_SIZE)
		ret = vmf_insert_pfn_prot(vma, address, pfn, prot);

	return ret;
}
EXPORT_SYMBOL(ttm_bo_vm_dummy_page);
/*
 * Default .fault handler for TTM BOs: reserve the BO, then either insert
 * real backing pages (device alive) or the dummy page (device unplugged).
 * The reservation is dropped here except on the RETRY-without-NOWAIT path,
 * where the helpers have already released it.
 */
vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	pgprot_t prot;
	struct ttm_buffer_object *bo = vma->vm_private_data;
	struct drm_device *ddev = bo->base.dev;
	vm_fault_t ret;
	int idx;

	ret = ttm_bo_vm_reserve(bo, vmf);
	if (ret)
		return ret;

	prot = vma->vm_page_prot;
	if (drm_dev_enter(ddev, &idx)) {
		ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT);
		drm_dev_exit(idx);
	} else {
		/* device was unplugged: back the VMA with a dummy page */
		ret = ttm_bo_vm_dummy_page(vmf, prot);
	}
	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
		return ret;

	dma_resv_unlock(bo->base.resv);

	return ret;
}
EXPORT_SYMBOL(ttm_bo_vm_fault);
/* VMA open: take a BO reference for the new mapping (e.g. after fork). */
void ttm_bo_vm_open(struct vm_area_struct *vma)
{
	struct ttm_buffer_object *bo = vma->vm_private_data;

	WARN_ON(bo->bdev->dev_mapping != vma->vm_file->f_mapping);

	ttm_bo_get(bo);
}
EXPORT_SYMBOL(ttm_bo_vm_open);

/* VMA close: drop the reference taken at mmap/open time. */
void ttm_bo_vm_close(struct vm_area_struct *vma)
{
	struct ttm_buffer_object *bo = vma->vm_private_data;

	ttm_bo_put(bo);
	vma->vm_private_data = NULL;
}
EXPORT_SYMBOL(ttm_bo_vm_close);
/*
 * Copy @len bytes between @buf and the BO's system-memory backing at
 * @offset, in @write direction, kmapping one page at a time so no large
 * virtual mapping is needed. Returns @len on success or a negative errno.
 */
static int ttm_bo_vm_access_kmap(struct ttm_buffer_object *bo,
				 unsigned long offset,
				 uint8_t *buf, int len, int write)
{
	unsigned long page = offset >> PAGE_SHIFT;
	unsigned long bytes_left = len;
	int ret;

	/* Copy a page at a time, that way no extra virtual address
	 * mapping is needed
	 */
	offset -= page << PAGE_SHIFT;	/* now the offset within the first page */
	do {
		unsigned long bytes = min(bytes_left, PAGE_SIZE - offset);
		struct ttm_bo_kmap_obj map;
		void *ptr;
		bool is_iomem;

		ret = ttm_bo_kmap(bo, page, 1, &map);
		if (ret)
			return ret;

		ptr = (uint8_t *)ttm_kmap_obj_virtual(&map, &is_iomem) + offset;
		WARN_ON_ONCE(is_iomem);
		if (write)
			memcpy(ptr, buf, bytes);
		else
			memcpy(buf, ptr, bytes);
		ttm_bo_kunmap(&map);

		page++;
		buf += bytes;
		bytes_left -= bytes;
		offset = 0;	/* subsequent pages start at their beginning */
	} while (bytes_left);

	return len;
}
/*
 * .access VMA hook (used by e.g. ptrace / /proc/<pid>/mem): bounds-check
 * the request against the BO, reserve it, and route system/TT memory
 * through the kmap helper or anything else through the driver's
 * access_memory hook. Returns bytes transferred or a negative errno.
 */
int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
		     void *buf, int len, int write)
{
	struct ttm_buffer_object *bo = vma->vm_private_data;
	unsigned long offset = (addr) - vma->vm_start +
		((vma->vm_pgoff - drm_vma_node_start(&bo->base.vma_node))
		 << PAGE_SHIFT);
	int ret;

	if (len < 1 || (offset + len) > bo->base.size)
		return -EIO;

	ret = ttm_bo_reserve(bo, true, false, NULL);
	if (ret)
		return ret;

	switch (bo->resource->mem_type) {
	case TTM_PL_SYSTEM:
		fallthrough;
	case TTM_PL_TT:
		ret = ttm_bo_vm_access_kmap(bo, offset, buf, len, write);
		break;
	default:
		/* VRAM and friends need driver-specific access */
		if (bo->bdev->funcs->access_memory)
			ret = bo->bdev->funcs->access_memory(
				bo, offset, buf, len, write);
		else
			ret = -EIO;
	}

	ttm_bo_unreserve(bo);

	return ret;
}
EXPORT_SYMBOL(ttm_bo_vm_access);
/* Default VMA operations installed by ttm_bo_mmap_obj(). */
static const struct vm_operations_struct ttm_bo_vm_ops = {
	.fault = ttm_bo_vm_fault,
	.open = ttm_bo_vm_open,
	.close = ttm_bo_vm_close,
	.access = ttm_bo_vm_access,
};
/**
 * ttm_bo_mmap_obj - mmap memory backed by a ttm buffer object.
 *
 * @vma:       vma as input from the fbdev mmap method.
 * @bo:        The bo backing the address space.
 *
 * Maps a buffer object.
 *
 * Return: 0 on success, -EINVAL for copy-on-write mappings.
 */
int ttm_bo_mmap_obj(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
{
	/* Enforce no COW since would have really strange behavior with it. */
	if (is_cow_mapping(vma->vm_flags))
		return -EINVAL;

	ttm_bo_get(bo);

	/*
	 * Drivers may want to override the vm_ops field. Otherwise we
	 * use TTM's default callbacks.
	 */
	if (!vma->vm_ops)
		vma->vm_ops = &ttm_bo_vm_ops;

	/*
	 * Note: We're transferring the bo reference to
	 * vma->vm_private_data here.
	 */

	vma->vm_private_data = bo;

	vm_flags_set(vma, VM_PFNMAP | VM_IO | VM_DONTEXPAND | VM_DONTDUMP);
	return 0;
}
EXPORT_SYMBOL(ttm_bo_mmap_obj);
| linux-master | drivers/gpu/drm/ttm/ttm_bo_vm.c |
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
*
* Copyright (c) 2007-2010 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
/*
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
*/
#include <drm/ttm/ttm_device.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_range_manager.h>
#include <drm/ttm/ttm_bo.h>
#include <drm/drm_mm.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
/*
* Currently we use a spinlock for the lock, but a mutex *may* be
* more appropriate to reduce scheduling latency if the range manager
* ends up with very fragmented allocation patterns.
*/
struct ttm_range_manager {
struct ttm_resource_manager manager;
struct drm_mm mm;
spinlock_t lock;
};
/* Upcast from the embedded base manager to the range manager. */
static inline struct ttm_range_manager *
to_range_manager(struct ttm_resource_manager *man)
{
    return container_of(man, struct ttm_range_manager, manager);
}
static int ttm_range_man_alloc(struct ttm_resource_manager *man,
struct ttm_buffer_object *bo,
const struct ttm_place *place,
struct ttm_resource **res)
{
struct ttm_range_manager *rman = to_range_manager(man);
struct ttm_range_mgr_node *node;
struct drm_mm *mm = &rman->mm;
enum drm_mm_insert_mode mode;
unsigned long lpfn;
int ret;
lpfn = place->lpfn;
if (!lpfn)
lpfn = man->size;
node = kzalloc(struct_size(node, mm_nodes, 1), GFP_KERNEL);
if (!node)
return -ENOMEM;
mode = DRM_MM_INSERT_BEST;
if (place->flags & TTM_PL_FLAG_TOPDOWN)
mode = DRM_MM_INSERT_HIGH;
ttm_resource_init(bo, place, &node->base);
spin_lock(&rman->lock);
ret = drm_mm_insert_node_in_range(mm, &node->mm_nodes[0],
PFN_UP(node->base.size),
bo->page_alignment, 0,
place->fpfn, lpfn, mode);
spin_unlock(&rman->lock);
if (unlikely(ret)) {
ttm_resource_fini(man, &node->base);
kfree(node);
return ret;
}
node->base.start = node->mm_nodes[0].start;
*res = &node->base;
return 0;
}
/* Return a range previously handed out by ttm_range_man_alloc(). */
static void ttm_range_man_free(struct ttm_resource_manager *man,
                               struct ttm_resource *res)
{
    struct ttm_range_manager *rman = to_range_manager(man);
    struct ttm_range_mgr_node *node = to_ttm_range_mgr_node(res);

    /* Give the range back to the drm_mm under the manager lock. */
    spin_lock(&rman->lock);
    drm_mm_remove_node(&node->mm_nodes[0]);
    spin_unlock(&rman->lock);

    ttm_resource_fini(man, res);
    kfree(node);
}
/*
 * Report whether @res overlaps the placement window of @place, i.e.
 * whether evicting it could make room for an allocation of @size bytes.
 */
static bool ttm_range_man_intersects(struct ttm_resource_manager *man,
                                     struct ttm_resource *res,
                                     const struct ttm_place *place,
                                     size_t size)
{
    struct drm_mm_node *node = &to_ttm_range_mgr_node(res)->mm_nodes[0];
    u32 num_pages = PFN_UP(size);

    /* Overlap iff [fpfn, lpfn) intersects [start, start + num_pages). */
    return place->fpfn < node->start + num_pages &&
           (!place->lpfn || place->lpfn > node->start);
}
/*
 * Report whether @res already lies completely inside the placement
 * window of @place, so no move would be required.
 */
static bool ttm_range_man_compatible(struct ttm_resource_manager *man,
                                     struct ttm_resource *res,
                                     const struct ttm_place *place,
                                     size_t size)
{
    struct drm_mm_node *node = &to_ttm_range_mgr_node(res)->mm_nodes[0];
    u32 num_pages = PFN_UP(size);

    /* Compatible iff [start, start + num_pages) fits in [fpfn, lpfn). */
    return node->start >= place->fpfn &&
           (!place->lpfn || node->start + num_pages <= place->lpfn);
}
/* Dump the drm_mm allocation state to @printer, under the manager lock. */
static void ttm_range_man_debug(struct ttm_resource_manager *man,
                                struct drm_printer *printer)
{
    struct ttm_range_manager *rman = to_range_manager(man);

    spin_lock(&rman->lock);
    drm_mm_print(&rman->mm, printer);
    spin_unlock(&rman->lock);
}
/* vfunc table plugging the range manager into the TTM resource core. */
static const struct ttm_resource_manager_func ttm_range_manager_func = {
    .alloc = ttm_range_man_alloc,
    .free = ttm_range_man_free,
    .intersects = ttm_range_man_intersects,
    .compatible = ttm_range_man_compatible,
    .debug = ttm_range_man_debug
};
/**
 * ttm_range_man_init_nocheck - Initialise a generic range manager for the
 * selected memory type.
 *
 * @bdev: ttm device
 * @type: memory manager type
 * @use_tt: if the memory manager uses tt
 * @p_size: size of area to be managed in pages.
 *
 * Allocates a range manager, sets up its drm_mm over [0, @p_size),
 * installs it in @bdev's slot for @type and marks it used.
 *
 * Return: %0 on success or a negative error code on failure
 */
int ttm_range_man_init_nocheck(struct ttm_device *bdev,
                               unsigned type, bool use_tt,
                               unsigned long p_size)
{
    struct ttm_range_manager *rman;
    struct ttm_resource_manager *man;

    rman = kzalloc(sizeof(*rman), GFP_KERNEL);
    if (!rman)
        return -ENOMEM;

    spin_lock_init(&rman->lock);
    drm_mm_init(&rman->mm, 0, p_size);

    man = &rman->manager;
    man->use_tt = use_tt;
    man->func = &ttm_range_manager_func;
    ttm_resource_manager_init(man, bdev, p_size);

    ttm_set_driver_manager(bdev, type, &rman->manager);
    ttm_resource_manager_set_used(man, true);
    return 0;
}
EXPORT_SYMBOL(ttm_range_man_init_nocheck);
/**
 * ttm_range_man_fini_nocheck - Remove the generic range manager from a slot
 * and tear it down.
 *
 * @bdev: ttm device
 * @type: memory manager type
 *
 * Return: %0 on success or a negative error code on failure
 */
int ttm_range_man_fini_nocheck(struct ttm_device *bdev,
                               unsigned type)
{
    struct ttm_resource_manager *man = ttm_manager_type(bdev, type);
    struct ttm_range_manager *rman;
    int ret;

    /*
     * Check for a missing manager before deriving any pointers from it:
     * the original code computed to_range_manager(man) and &rman->mm
     * first, which is pointer arithmetic on a NULL base when the slot
     * is empty (undefined behavior).
     */
    if (!man)
        return 0;

    rman = to_range_manager(man);

    ttm_resource_manager_set_used(man, false);

    /* Empty the manager; may fail, e.g. on signal or fence wait error. */
    ret = ttm_resource_manager_evict_all(bdev, man);
    if (ret)
        return ret;

    spin_lock(&rman->lock);
    drm_mm_takedown(&rman->mm);
    spin_unlock(&rman->lock);

    ttm_resource_manager_cleanup(man);
    ttm_set_driver_manager(bdev, type, NULL);
    kfree(rman);
    return 0;
}
EXPORT_SYMBOL(ttm_range_man_fini_nocheck);
| linux-master | drivers/gpu/drm/ttm/ttm_range_manager.c |
/*
* Copyright 2020 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Christian König
*/
#include <linux/iosys-map.h>
#include <linux/io-mapping.h>
#include <linux/scatterlist.h>
#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_resource.h>
/**
 * ttm_lru_bulk_move_init - initialize a bulk move structure
 * @bulk: the structure to init
 *
 * For now just memset the structure to zero: every pos cursor starts
 * out empty (first == last == NULL).
 */
void ttm_lru_bulk_move_init(struct ttm_lru_bulk_move *bulk)
{
    memset(bulk, 0, sizeof(*bulk));
}
EXPORT_SYMBOL(ttm_lru_bulk_move_init);
/**
 * ttm_lru_bulk_move_tail - bulk move range of resources to the LRU tail.
 *
 * @bulk: bulk move structure
 *
 * Bulk move BOs to the LRU tail, only valid to use when driver makes sure that
 * resource order never changes. Should be called with &ttm_device.lru_lock held.
 */
void ttm_lru_bulk_move_tail(struct ttm_lru_bulk_move *bulk)
{
    unsigned i, j;

    /* One pos cursor exists per (memory type, priority) combination. */
    for (i = 0; i < TTM_NUM_MEM_TYPES; ++i) {
        for (j = 0; j < TTM_MAX_BO_PRIORITY; ++j) {
            struct ttm_lru_bulk_move_pos *pos = &bulk->pos[i][j];
            struct ttm_resource_manager *man;

            /* Empty cursor: nothing tracked for this type/priority. */
            if (!pos->first)
                continue;

            lockdep_assert_held(&pos->first->bo->bdev->lru_lock);
            dma_resv_assert_held(pos->first->bo->base.resv);
            dma_resv_assert_held(pos->last->bo->base.resv);

            /* Splice the whole [first, last] span to the LRU tail. */
            man = ttm_manager_type(pos->first->bo->bdev, i);
            list_bulk_move_tail(&man->lru[j], &pos->first->lru,
                                &pos->last->lru);
        }
    }
}
EXPORT_SYMBOL(ttm_lru_bulk_move_tail);
/*
 * Return the bulk move pos object for this resource, selected by the
 * resource's memory type and its BO's priority.
 */
static struct ttm_lru_bulk_move_pos *
ttm_lru_bulk_move_pos(struct ttm_lru_bulk_move *bulk, struct ttm_resource *res)
{
    return &bulk->pos[res->mem_type][res->bo->priority];
}
/* Move @res to the tail of the bulk move range, keeping the cursor valid. */
static void ttm_lru_bulk_move_pos_tail(struct ttm_lru_bulk_move_pos *pos,
                                       struct ttm_resource *res)
{
    /* Already the tail: nothing to do. */
    if (pos->last == res)
        return;

    /* If @res was the head, the next entry becomes the new head. */
    if (pos->first == res)
        pos->first = list_next_entry(res, lru);

    list_move(&res->lru, &pos->last->lru);
    pos->last = res;
}
/* Add @res to the bulk_move cursor matching its type and priority. */
static void ttm_lru_bulk_move_add(struct ttm_lru_bulk_move *bulk,
                                  struct ttm_resource *res)
{
    struct ttm_lru_bulk_move_pos *pos = ttm_lru_bulk_move_pos(bulk, res);

    /* Non-empty range: append behind the current tail. */
    if (pos->first) {
        ttm_lru_bulk_move_pos_tail(pos, res);
        return;
    }

    /* First entry: @res is both head and tail of the range. */
    pos->first = res;
    pos->last = res;
}
/*
 * Remove @res from its bulk_move range, adjusting the cursor endpoints.
 * If @res was the only entry (or the cursor is corrupt, which triggers the
 * WARN), the range becomes empty; otherwise a cursor endpoint is moved
 * inward, or @res is pushed to the tail so it leaves the [first, last] span.
 */
static void ttm_lru_bulk_move_del(struct ttm_lru_bulk_move *bulk,
                                  struct ttm_resource *res)
{
    struct ttm_lru_bulk_move_pos *pos = ttm_lru_bulk_move_pos(bulk, res);

    if (unlikely(WARN_ON(!pos->first || !pos->last) ||
                 (pos->first == res && pos->last == res))) {
        /* Sole entry (or inconsistent cursor): range is now empty. */
        pos->first = NULL;
        pos->last = NULL;
    } else if (pos->first == res) {
        /* Head removed: advance head to the next entry. */
        pos->first = list_next_entry(res, lru);
    } else if (pos->last == res) {
        /* Tail removed: pull tail back to the previous entry. */
        pos->last = list_prev_entry(res, lru);
    } else {
        /* Interior entry: move it past the tail, out of the range. */
        list_move(&res->lru, &pos->last->lru);
    }
}
/*
 * Add the resource to a bulk move if the BO is configured for it.
 * Pinned BOs are deliberately kept out of bulk moves.
 */
void ttm_resource_add_bulk_move(struct ttm_resource *res,
                                struct ttm_buffer_object *bo)
{
    if (bo->bulk_move && !bo->pin_count)
        ttm_lru_bulk_move_add(bo->bulk_move, res);
}
/*
 * Remove the resource from a bulk move if the BO is configured for it.
 * Mirrors ttm_resource_add_bulk_move(): pinned BOs were never added.
 */
void ttm_resource_del_bulk_move(struct ttm_resource *res,
                                struct ttm_buffer_object *bo)
{
    if (bo->bulk_move && !bo->pin_count)
        ttm_lru_bulk_move_del(bo->bulk_move, res);
}
/*
 * Move @res to the tail of the list it belongs on: the device's pinned
 * list, its BO's bulk move range, or the manager's per-priority LRU.
 * Caller must hold the device LRU lock.
 */
void ttm_resource_move_to_lru_tail(struct ttm_resource *res)
{
    struct ttm_buffer_object *bo = res->bo;
    struct ttm_device *bdev = bo->bdev;

    lockdep_assert_held(&bo->bdev->lru_lock);

    if (bo->pin_count) {
        list_move_tail(&res->lru, &bdev->pinned);
        return;
    }

    if (bo->bulk_move) {
        ttm_lru_bulk_move_pos_tail(ttm_lru_bulk_move_pos(bo->bulk_move,
                                                         res),
                                   res);
        return;
    }

    list_move_tail(&res->lru,
                   &ttm_manager_type(bdev, res->mem_type)->lru[bo->priority]);
}
/**
 * ttm_resource_init - resource object constructor
 * @bo: buffer object this resource is allocated for
 * @place: placement of the resource
 * @res: the resource object to initialize
 *
 * Initialize a new resource object. Counterpart of ttm_resource_fini().
 */
void ttm_resource_init(struct ttm_buffer_object *bo,
                       const struct ttm_place *place,
                       struct ttm_resource *res)
{
    struct ttm_resource_manager *man;

    res->start = 0;
    res->size = bo->base.size;
    res->mem_type = place->mem_type;
    res->placement = place->flags;
    res->bus.addr = NULL;
    res->bus.offset = 0;
    res->bus.is_iomem = false;
    res->bus.caching = ttm_cached;
    res->bo = bo;

    man = ttm_manager_type(bo->bdev, place->mem_type);
    spin_lock(&bo->bdev->lru_lock);
    /* Pinned BOs live on the device's pinned list, not on any LRU. */
    if (bo->pin_count)
        list_add_tail(&res->lru, &bo->bdev->pinned);
    else
        list_add_tail(&res->lru, &man->lru[bo->priority]);
    /* Account the new resource against the manager. */
    man->usage += res->size;
    spin_unlock(&bo->bdev->lru_lock);
}
EXPORT_SYMBOL(ttm_resource_init);
/**
 * ttm_resource_fini - resource destructor
 * @man: the resource manager this resource belongs to
 * @res: the resource to clean up
 *
 * Should be used by resource manager backends to clean up the TTM resource
 * objects before freeing the underlying structure. Makes sure the resource is
 * removed from the LRU before destruction.
 * Counterpart of ttm_resource_init().
 */
void ttm_resource_fini(struct ttm_resource_manager *man,
                       struct ttm_resource *res)
{
    struct ttm_device *bdev = man->bdev;

    spin_lock(&bdev->lru_lock);
    list_del_init(&res->lru);
    /* Undo the usage accounting done in ttm_resource_init(). */
    man->usage -= res->size;
    spin_unlock(&bdev->lru_lock);
}
EXPORT_SYMBOL(ttm_resource_fini);
/*
 * Allocate a new resource for @bo at @place via the matching manager's
 * alloc vfunc, then register it with the BO's bulk move (if any).
 * Returns 0 on success or the manager's negative error code.
 */
int ttm_resource_alloc(struct ttm_buffer_object *bo,
                       const struct ttm_place *place,
                       struct ttm_resource **res_ptr)
{
    struct ttm_resource_manager *man =
        ttm_manager_type(bo->bdev, place->mem_type);
    int ret;

    ret = man->func->alloc(man, bo, place, res_ptr);
    if (ret)
        return ret;

    spin_lock(&bo->bdev->lru_lock);
    ttm_resource_add_bulk_move(*res_ptr, bo);
    spin_unlock(&bo->bdev->lru_lock);
    return 0;
}
/*
 * Free a resource previously allocated with ttm_resource_alloc():
 * deregister it from the BO's bulk move, hand it back to its manager's
 * free vfunc and NULL out the caller's pointer. NULL *res is a no-op.
 */
void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource **res)
{
    struct ttm_resource_manager *man;

    if (!*res)
        return;

    spin_lock(&bo->bdev->lru_lock);
    ttm_resource_del_bulk_move(*res, bo);
    spin_unlock(&bo->bdev->lru_lock);
    man = ttm_manager_type(bo->bdev, (*res)->mem_type);
    man->func->free(man, *res);
    *res = NULL;
}
EXPORT_SYMBOL(ttm_resource_free);
/**
 * ttm_resource_intersects - test for intersection
 *
 * @bdev: TTM device structure
 * @res: The resource to test
 * @place: The placement to test
 * @size: How many bytes the new allocation needs.
 *
 * Test if @res intersects with @place and @size. Used for testing if evictions
 * are worthwhile or not.
 *
 * Returns true if the res placement intersects with @place and @size.
 */
bool ttm_resource_intersects(struct ttm_device *bdev,
                             struct ttm_resource *res,
                             const struct ttm_place *place,
                             size_t size)
{
    struct ttm_resource_manager *man;

    if (!res)
        return false;

    man = ttm_manager_type(bdev, res->mem_type);
    /* No placement constraint or no backend test: assume intersection. */
    if (!place || !man->func->intersects)
        return true;

    return man->func->intersects(man, res, place, size);
}
/**
 * ttm_resource_compatible - test for compatibility
 *
 * @bdev: TTM device structure
 * @res: The resource to test
 * @place: The placement to test
 * @size: How many bytes the new allocation needs.
 *
 * Test if @res is compatible with @place and @size.
 *
 * Returns true if the res placement is compatible with @place and @size.
 */
bool ttm_resource_compatible(struct ttm_device *bdev,
                             struct ttm_resource *res,
                             const struct ttm_place *place,
                             size_t size)
{
    struct ttm_resource_manager *man;

    if (!res || !place)
        return false;

    man = ttm_manager_type(bdev, res->mem_type);

    /* Backends without a compatible() hook accept any placement. */
    return man->func->compatible ?
        man->func->compatible(man, res, place, size) : true;
}
/*
 * Check whether @res satisfies at least one of @places. A TEMPORARY
 * resource is never considered compatible. For a matching place the
 * memory type must agree, and if the place demands CONTIGUOUS memory
 * the resource must already be contiguous.
 */
static bool ttm_resource_places_compat(struct ttm_resource *res,
                                       const struct ttm_place *places,
                                       unsigned num_placement)
{
    struct ttm_buffer_object *bo = res->bo;
    struct ttm_device *bdev = bo->bdev;
    unsigned i;

    if (res->placement & TTM_PL_FLAG_TEMPORARY)
        return false;

    for (i = 0; i < num_placement; i++) {
        const struct ttm_place *heap = &places[i];

        /* Let the manager backend veto this place first. */
        if (!ttm_resource_compatible(bdev, res, heap, bo->base.size))
            continue;

        if ((res->mem_type == heap->mem_type) &&
            (!(heap->flags & TTM_PL_FLAG_CONTIGUOUS) ||
             (res->placement & TTM_PL_FLAG_CONTIGUOUS)))
            return true;
    }

    return false;
}
/**
 * ttm_resource_compat - check if resource is compatible with placement
 *
 * @res: the resource to check
 * @placement: the placement to check against
 *
 * Checks the preferred placements first, then falls back to the busy
 * placements — but only when the busy list actually differs from the
 * preferred one, to avoid re-testing the same places.
 *
 * Returns true if the placement is compatible.
 */
bool ttm_resource_compat(struct ttm_resource *res,
                         struct ttm_placement *placement)
{
    if (ttm_resource_places_compat(res, placement->placement,
                                   placement->num_placement))
        return true;

    if ((placement->busy_placement != placement->placement ||
         placement->num_busy_placement > placement->num_placement) &&
        ttm_resource_places_compat(res, placement->busy_placement,
                                   placement->num_busy_placement))
        return true;

    return false;
}
/*
 * Re-point @res at @bo. Done under the LRU lock so concurrent LRU
 * walkers never observe a half-updated back pointer.
 */
void ttm_resource_set_bo(struct ttm_resource *res,
                         struct ttm_buffer_object *bo)
{
    spin_lock(&bo->bdev->lru_lock);
    res->bo = bo;
    spin_unlock(&bo->bdev->lru_lock);
}
/**
 * ttm_resource_manager_init
 *
 * @man: memory manager object to init
 * @bdev: ttm device this manager belongs to
 * @size: size of managed resources in arbitrary units
 *
 * Initialise core parts of a manager object: move fence tracking,
 * usage accounting and one empty LRU list per priority.
 */
void ttm_resource_manager_init(struct ttm_resource_manager *man,
                               struct ttm_device *bdev,
                               uint64_t size)
{
    unsigned int prio;

    spin_lock_init(&man->move_lock);
    man->move = NULL;
    man->bdev = bdev;
    man->size = size;
    man->usage = 0;

    for (prio = 0; prio < TTM_MAX_BO_PRIORITY; ++prio)
        INIT_LIST_HEAD(&man->lru[prio]);
}
EXPORT_SYMBOL(ttm_resource_manager_init);
/**
 * ttm_resource_manager_evict_all
 *
 * @bdev: device to use
 * @man: manager to use
 *
 * Evict all the objects out of a memory manager until it is empty.
 * Part of memory manager cleanup sequence.
 *
 * Return: 0 on success, negative error code from eviction or fence wait.
 */
int ttm_resource_manager_evict_all(struct ttm_device *bdev,
                                   struct ttm_resource_manager *man)
{
    struct ttm_operation_ctx ctx = {
        .interruptible = false,
        .no_wait_gpu = false,
        .force_alloc = true
    };
    struct dma_fence *fence;
    int ret;
    unsigned i;

    /*
     * Can't use standard list traversal since we're unlocking.
     */

    spin_lock(&bdev->lru_lock);
    for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
        while (!list_empty(&man->lru[i])) {
            /* Eviction may sleep, so drop the lock around it. */
            spin_unlock(&bdev->lru_lock);
            ret = ttm_mem_evict_first(bdev, man, NULL, &ctx,
                                      NULL);
            if (ret)
                return ret;
            spin_lock(&bdev->lru_lock);
        }
    }
    spin_unlock(&bdev->lru_lock);

    /* Wait for any outstanding move fence before declaring it empty. */
    spin_lock(&man->move_lock);
    fence = dma_fence_get(man->move);
    spin_unlock(&man->move_lock);

    if (fence) {
        ret = dma_fence_wait(fence, false);
        dma_fence_put(fence);
        if (ret)
            return ret;
    }

    return 0;
}
EXPORT_SYMBOL(ttm_resource_manager_evict_all);
/**
 * ttm_resource_manager_usage
 *
 * @man: A memory manager object.
 *
 * Return how many resources are currently used, sampled under the
 * device LRU lock for a consistent snapshot.
 */
uint64_t ttm_resource_manager_usage(struct ttm_resource_manager *man)
{
    uint64_t used;

    spin_lock(&man->bdev->lru_lock);
    used = man->usage;
    spin_unlock(&man->bdev->lru_lock);

    return used;
}
EXPORT_SYMBOL(ttm_resource_manager_usage);
/**
 * ttm_resource_manager_debug
 *
 * @man: manager type to dump.
 * @p: printer to use for debug.
 *
 * Prints the generic manager state, then delegates to the backend's
 * optional debug hook for backend-specific details.
 */
void ttm_resource_manager_debug(struct ttm_resource_manager *man,
                                struct drm_printer *p)
{
    drm_printf(p, "  use_type: %d\n", man->use_type);
    drm_printf(p, "  use_tt: %d\n", man->use_tt);
    drm_printf(p, "  size: %llu\n", man->size);
    drm_printf(p, "  usage: %llu\n", ttm_resource_manager_usage(man));
    if (man->func->debug)
        man->func->debug(man, p);
}
EXPORT_SYMBOL(ttm_resource_manager_debug);
/**
 * ttm_resource_manager_first
 *
 * @man: resource manager to iterate over
 * @cursor: cursor to record the position
 *
 * Returns the first resource from the resource manager, or NULL if all
 * LRU lists are empty. Caller must hold the device LRU lock.
 */
struct ttm_resource *
ttm_resource_manager_first(struct ttm_resource_manager *man,
                           struct ttm_resource_cursor *cursor)
{
    struct ttm_resource *res;

    lockdep_assert_held(&man->bdev->lru_lock);

    /*
     * Scan priorities in order; the "return" inside the list walk
     * yields the first entry of the first non-empty LRU list while
     * leaving cursor->priority pointing at that list.
     */
    for (cursor->priority = 0; cursor->priority < TTM_MAX_BO_PRIORITY;
         ++cursor->priority)
        list_for_each_entry(res, &man->lru[cursor->priority], lru)
            return res;

    return NULL;
}
/**
 * ttm_resource_manager_next
 *
 * @man: resource manager to iterate over
 * @cursor: cursor to record the position
 * @res: the current resource pointer
 *
 * Returns the next resource from the resource manager, advancing to the
 * next non-empty priority list when the current one is exhausted, or
 * NULL at the end. Caller must hold the device LRU lock.
 */
struct ttm_resource *
ttm_resource_manager_next(struct ttm_resource_manager *man,
                          struct ttm_resource_cursor *cursor,
                          struct ttm_resource *res)
{
    lockdep_assert_held(&man->bdev->lru_lock);

    /* First try the entry after @res on the current priority list. */
    list_for_each_entry_continue(res, &man->lru[cursor->priority], lru)
        return res;

    /* Otherwise fall through to the first entry of a later priority. */
    for (++cursor->priority; cursor->priority < TTM_MAX_BO_PRIORITY;
         ++cursor->priority)
        list_for_each_entry(res, &man->lru[cursor->priority], lru)
            return res;

    return NULL;
}
/*
 * Map page @i of the sg-backed iomap iterator. A small cache remembers
 * the sg entry covering the last-seen page range so sequential access
 * doesn't rescan the scatterlist; a backwards jump resets the cache and
 * restarts the walk from the first sg entry.
 */
static void ttm_kmap_iter_iomap_map_local(struct ttm_kmap_iter *iter,
                                          struct iosys_map *dmap,
                                          pgoff_t i)
{
    struct ttm_kmap_iter_iomap *iter_io =
        container_of(iter, typeof(*iter_io), base);
    void __iomem *addr;

retry:
    /* Walk forward until the cached sg entry covers page @i. */
    while (i >= iter_io->cache.end) {
        iter_io->cache.sg = iter_io->cache.sg ?
            sg_next(iter_io->cache.sg) : iter_io->st->sgl;
        iter_io->cache.i = iter_io->cache.end;
        iter_io->cache.end += sg_dma_len(iter_io->cache.sg) >>
            PAGE_SHIFT;
        iter_io->cache.offs = sg_dma_address(iter_io->cache.sg) -
            iter_io->start;
    }

    /* Page @i lies before the cached range: reset and rescan. */
    if (i < iter_io->cache.i) {
        iter_io->cache.end = 0;
        iter_io->cache.sg = NULL;
        goto retry;
    }

    addr = io_mapping_map_local_wc(iter_io->iomap, iter_io->cache.offs +
                                   (((resource_size_t)i - iter_io->cache.i)
                                    << PAGE_SHIFT));
    iosys_map_set_vaddr_iomem(dmap, addr);
}
/* Undo a mapping created by ttm_kmap_iter_iomap_map_local(). */
static void ttm_kmap_iter_iomap_unmap_local(struct ttm_kmap_iter *iter,
                                            struct iosys_map *map)
{
    io_mapping_unmap_local(map->vaddr_iomem);
}
/* kmap iterator ops for sg-backed io_mapping memory (not TT pages). */
static const struct ttm_kmap_iter_ops ttm_kmap_iter_io_ops = {
    .map_local =  ttm_kmap_iter_iomap_map_local,
    .unmap_local = ttm_kmap_iter_iomap_unmap_local,
    .maps_tt = false,
};
/**
 * ttm_kmap_iter_iomap_init - Initialize a struct ttm_kmap_iter_iomap
 * @iter_io: The struct ttm_kmap_iter_iomap to initialize.
 * @iomap: The struct io_mapping representing the underlying linear io_memory.
 * @st: sg_table into @iomap, representing the memory of the struct
 * ttm_resource.
 * @start: Offset that needs to be subtracted from @st to make
 * sg_dma_address(st->sgl) - @start == 0 for @iomap start.
 *
 * Return: Pointer to the embedded struct ttm_kmap_iter.
 */
struct ttm_kmap_iter *
ttm_kmap_iter_iomap_init(struct ttm_kmap_iter_iomap *iter_io,
                         struct io_mapping *iomap,
                         struct sg_table *st,
                         resource_size_t start)
{
    /* Start with an empty sg cache; map_local fills it lazily. */
    memset(&iter_io->cache, 0, sizeof(iter_io->cache));

    iter_io->start = start;
    iter_io->st = st;
    iter_io->iomap = iomap;
    iter_io->base.ops = &ttm_kmap_iter_io_ops;

    return &iter_io->base;
}
EXPORT_SYMBOL(ttm_kmap_iter_iomap_init);
/**
* DOC: Linear io iterator
*
* This code should die in the not too near future. Best would be if we could
* make io-mapping use memremap for all io memory, and have memremap
* implement a kmap_local functionality. We could then strip a huge amount of
* code. These linear io iterators are implemented to mimic old functionality,
* and they don't use kmap_local semantics at all internally. Rather ioremap or
* friends, and at least on 32-bit they add global TLB flushes and points
* of failure.
*/
/*
 * Map page @i of a linear io region: just offset the base mapping by
 * @i pages, no per-page map/unmap needed.
 */
static void ttm_kmap_iter_linear_io_map_local(struct ttm_kmap_iter *iter,
                                              struct iosys_map *dmap,
                                              pgoff_t i)
{
    struct ttm_kmap_iter_linear_io *iter_io =
        container_of(iter, typeof(*iter_io), base);

    *dmap = iter_io->dmap;
    iosys_map_incr(dmap, i * PAGE_SIZE);
}
/* Ops for the linear io iterator; no unmap_local since nothing is mapped per page. */
static const struct ttm_kmap_iter_ops ttm_kmap_iter_linear_io_ops = {
    .map_local =  ttm_kmap_iter_linear_io_map_local,
    .maps_tt = false,
};
/**
 * ttm_kmap_iter_linear_io_init - Initialize an iterator for linear io memory
 * @iter_io: The iterator to initialize
 * @bdev: The TTM device
 * @mem: The ttm resource representing the iomap.
 *
 * This function is for internal TTM use only. It sets up a memcpy kmap iterator
 * pointing at a linear chunk of io memory.
 *
 * Return: A pointer to the embedded struct ttm_kmap_iter or error pointer on
 * failure.
 */
struct ttm_kmap_iter *
ttm_kmap_iter_linear_io_init(struct ttm_kmap_iter_linear_io *iter_io,
                             struct ttm_device *bdev,
                             struct ttm_resource *mem)
{
    int ret;

    ret = ttm_mem_io_reserve(bdev, mem);
    if (ret)
        goto out_err;
    /* Only io memory can be linearly mapped here; TT goes elsewhere. */
    if (!mem->bus.is_iomem) {
        ret = -EINVAL;
        goto out_io_free;
    }

    if (mem->bus.addr) {
        /* Driver already provided a kernel virtual address. */
        iosys_map_set_vaddr(&iter_io->dmap, mem->bus.addr);
        iter_io->needs_unmap = false;
    } else {
        iter_io->needs_unmap = true;
        memset(&iter_io->dmap, 0, sizeof(iter_io->dmap));
        /* Map with the caching mode the resource asks for ... */
        if (mem->bus.caching == ttm_write_combined)
            iosys_map_set_vaddr_iomem(&iter_io->dmap,
                                      ioremap_wc(mem->bus.offset,
                                                 mem->size));
        else if (mem->bus.caching == ttm_cached)
            iosys_map_set_vaddr(&iter_io->dmap,
                                memremap(mem->bus.offset, mem->size,
                                         MEMREMAP_WB |
                                         MEMREMAP_WT |
                                         MEMREMAP_WC));

        /* If uncached requested or if mapping cached or wc failed */
        if (iosys_map_is_null(&iter_io->dmap))
            iosys_map_set_vaddr_iomem(&iter_io->dmap,
                                      ioremap(mem->bus.offset,
                                              mem->size));

        if (iosys_map_is_null(&iter_io->dmap)) {
            ret = -ENOMEM;
            goto out_io_free;
        }
    }

    iter_io->base.ops = &ttm_kmap_iter_linear_io_ops;
    return &iter_io->base;

out_io_free:
    ttm_mem_io_free(bdev, mem);
out_err:
    return ERR_PTR(ret);
}
/**
 * ttm_kmap_iter_linear_io_fini - Clean up an iterator for linear io memory
 * @iter_io: The iterator to clean up
 * @bdev: The TTM device
 * @mem: The ttm resource representing the iomap.
 *
 * This function is for internal TTM use only. It cleans up a memcpy kmap
 * iterator initialized by ttm_kmap_iter_linear_io_init.
 */
void
ttm_kmap_iter_linear_io_fini(struct ttm_kmap_iter_linear_io *iter_io,
                             struct ttm_device *bdev,
                             struct ttm_resource *mem)
{
    /* Only unmap what init mapped itself (not driver-provided bus.addr). */
    if (iter_io->needs_unmap && iosys_map_is_set(&iter_io->dmap)) {
        if (iter_io->dmap.is_iomem)
            iounmap(iter_io->dmap.vaddr_iomem);
        else
            memunmap(iter_io->dmap.vaddr);
    }

    ttm_mem_io_free(bdev, mem);
}
#if defined(CONFIG_DEBUG_FS)

/* seq_file backend for the per-manager debugfs stats file. */
static int ttm_resource_manager_show(struct seq_file *m, void *unused)
{
    struct ttm_resource_manager *man =
        (struct ttm_resource_manager *)m->private;
    struct drm_printer p = drm_seq_file_printer(m);
    ttm_resource_manager_debug(man, &p);
    return 0;
}
DEFINE_SHOW_ATTRIBUTE(ttm_resource_manager);

#endif
/**
 * ttm_resource_manager_create_debugfs - Create debugfs entry for specified
 * resource manager.
 * @man: The TTM resource manager for which the debugfs stats file be creates
 * @parent: debugfs directory in which the file will reside
 * @name: The filename to create.
 *
 * This function setups up a debugfs file that can be used to look
 * at debug statistics of the specified ttm_resource_manager.
 */
void ttm_resource_manager_create_debugfs(struct ttm_resource_manager *man,
                                         struct dentry *parent,
                                         const char *name)
{
#if defined(CONFIG_DEBUG_FS)
    /* Read-only for everyone; @man becomes the file's private data. */
    debugfs_create_file(name, 0444, parent, man,
                        &ttm_resource_manager_fops);
#endif
}
EXPORT_SYMBOL(ttm_resource_manager_create_debugfs);
| linux-master | drivers/gpu/drm/ttm/ttm_resource.c |
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
*
* Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
/*
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
*/
#include <linux/vmalloc.h>
#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_tt.h>
#include <drm/drm_cache.h>
/*
 * struct ttm_transfer_obj - ghost BO used during accelerated moves
 * @base: the placeholder buffer object holding the old placement
 * @bo: the original BO, referenced until the ghost is destroyed
 */
struct ttm_transfer_obj {
    struct ttm_buffer_object base;
    struct ttm_buffer_object *bo;
};
/*
 * Reserve the io space for @mem via the driver hook, unless it is
 * already reserved (bus.offset or bus.addr set) or the driver has no
 * io_mem_reserve callback.
 */
int ttm_mem_io_reserve(struct ttm_device *bdev,
                       struct ttm_resource *mem)
{
    if (mem->bus.offset || mem->bus.addr)
        return 0;

    mem->bus.is_iomem = false;
    if (!bdev->funcs->io_mem_reserve)
        return 0;

    return bdev->funcs->io_mem_reserve(bdev, mem);
}
/*
 * Release io space reserved with ttm_mem_io_reserve(). A NULL @mem or
 * one that was never reserved is a no-op. Clears the bus fields so a
 * later reserve starts fresh.
 */
void ttm_mem_io_free(struct ttm_device *bdev,
                     struct ttm_resource *mem)
{
    if (!mem)
        return;

    if (!mem->bus.offset && !mem->bus.addr)
        return;

    if (bdev->funcs->io_mem_free)
        bdev->funcs->io_mem_free(bdev, mem);

    mem->bus.offset = 0;
    mem->bus.addr = NULL;
}
/**
 * ttm_move_memcpy - Helper to perform a memcpy ttm move operation.
 * @clear: Whether to clear rather than copy.
 * @num_pages: Number of pages of the operation.
 * @dst_iter: A struct ttm_kmap_iter representing the destination resource.
 * @src_iter: A struct ttm_kmap_iter representing the source resource.
 *
 * This function is intended to be able to move out async under a
 * dma-fence if desired.
 */
void ttm_move_memcpy(bool clear,
                     u32 num_pages,
                     struct ttm_kmap_iter *dst_iter,
                     struct ttm_kmap_iter *src_iter)
{
    const struct ttm_kmap_iter_ops *dst_ops = dst_iter->ops;
    const struct ttm_kmap_iter_ops *src_ops = src_iter->ops;
    struct iosys_map src_map, dst_map;
    pgoff_t i;

    /* Single TTM move. NOP */
    if (dst_ops->maps_tt && src_ops->maps_tt)
        return;

    /* Don't move nonexistent data. Clear destination instead. */
    if (clear) {
        for (i = 0; i < num_pages; ++i) {
            dst_ops->map_local(dst_iter, &dst_map, i);
            if (dst_map.is_iomem)
                memset_io(dst_map.vaddr_iomem, 0, PAGE_SIZE);
            else
                memset(dst_map.vaddr, 0, PAGE_SIZE);
            if (dst_ops->unmap_local)
                dst_ops->unmap_local(dst_iter, &dst_map);
        }
        return;
    }

    /* Copy page by page, mapping each side locally as we go. */
    for (i = 0; i < num_pages; ++i) {
        dst_ops->map_local(dst_iter, &dst_map, i);
        src_ops->map_local(src_iter, &src_map, i);

        drm_memcpy_from_wc(&dst_map, &src_map, PAGE_SIZE);

        if (src_ops->unmap_local)
            src_ops->unmap_local(src_iter, &src_map);
        if (dst_ops->unmap_local)
            dst_ops->unmap_local(dst_iter, &dst_map);
    }
}
EXPORT_SYMBOL(ttm_move_memcpy);
/**
 * ttm_bo_move_memcpy
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @ctx: operation context
 * @dst_mem: struct ttm_resource indicating where to move.
 *
 * Fallback move function for a mappable buffer object in mappable memory.
 * The function will, if successful,
 * free any old aperture space, and set (@new_mem)->mm_node to NULL,
 * and update the (@bo)->mem placement flags. If unsuccessful, the old
 * data remains untouched, and it's up to the caller to free the
 * memory space indicated by @new_mem.
 * Returns:
 * !0: Failure.
 */
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
                       struct ttm_operation_ctx *ctx,
                       struct ttm_resource *dst_mem)
{
    struct ttm_device *bdev = bo->bdev;
    struct ttm_resource_manager *dst_man =
        ttm_manager_type(bo->bdev, dst_mem->mem_type);
    struct ttm_tt *ttm = bo->ttm;
    struct ttm_resource *src_mem = bo->resource;
    struct ttm_resource_manager *src_man;
    /* One union per side: either a TT iterator or a linear io iterator. */
    union {
        struct ttm_kmap_iter_tt tt;
        struct ttm_kmap_iter_linear_io io;
    } _dst_iter, _src_iter;
    struct ttm_kmap_iter *dst_iter, *src_iter;
    bool clear;
    int ret = 0;

    if (WARN_ON(!src_mem))
        return -EINVAL;

    src_man = ttm_manager_type(bdev, src_mem->mem_type);
    /* Make sure TT pages exist if they are (or will be) the backing store. */
    if (ttm && ((ttm->page_flags & TTM_TT_FLAG_SWAPPED) ||
                dst_man->use_tt)) {
        ret = ttm_tt_populate(bdev, ttm, ctx);
        if (ret)
            return ret;
    }

    /* Prefer a linear io mapping; fall back to TT pages on -EINVAL. */
    dst_iter = ttm_kmap_iter_linear_io_init(&_dst_iter.io, bdev, dst_mem);
    if (PTR_ERR(dst_iter) == -EINVAL && dst_man->use_tt)
        dst_iter = ttm_kmap_iter_tt_init(&_dst_iter.tt, bo->ttm);
    if (IS_ERR(dst_iter))
        return PTR_ERR(dst_iter);

    src_iter = ttm_kmap_iter_linear_io_init(&_src_iter.io, bdev, src_mem);
    if (PTR_ERR(src_iter) == -EINVAL && src_man->use_tt)
        src_iter = ttm_kmap_iter_tt_init(&_src_iter.tt, bo->ttm);
    if (IS_ERR(src_iter)) {
        ret = PTR_ERR(src_iter);
        goto out_src_iter;
    }

    /* Unpopulated TT source holds no data: clear instead of copying. */
    clear = src_iter->ops->maps_tt && (!ttm || !ttm_tt_is_populated(ttm));
    if (!(clear && ttm && !(ttm->page_flags & TTM_TT_FLAG_ZERO_ALLOC)))
        ttm_move_memcpy(clear, PFN_UP(dst_mem->size), dst_iter, src_iter);

    if (!src_iter->ops->maps_tt)
        ttm_kmap_iter_linear_io_fini(&_src_iter.io, bdev, src_mem);
    ttm_bo_move_sync_cleanup(bo, dst_mem);

out_src_iter:
    if (!dst_iter->ops->maps_tt)
        ttm_kmap_iter_linear_io_fini(&_dst_iter.io, bdev, dst_mem);

    return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);
/* Destructor of a ghost BO: drops the reference on the original BO. */
static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
    struct ttm_transfer_obj *fbo;

    fbo = container_of(bo, struct ttm_transfer_obj, base);
    dma_resv_fini(&fbo->base.base._resv);
    ttm_bo_put(fbo->bo);
    kfree(fbo);
}
/**
 * ttm_buffer_object_transfer
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 * Returns:
 * !0: Failure.
 */
static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
                                      struct ttm_buffer_object **new_obj)
{
    struct ttm_transfer_obj *fbo;
    int ret;

    fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
    if (!fbo)
        return -ENOMEM;

    /* Shallow-copy the original BO, then patch up non-copyable members. */
    fbo->base = *bo;

    /**
     * Fix up members that we shouldn't copy directly:
     * TODO: Explicit member copy would probably be better here.
     */

    atomic_inc(&ttm_glob.bo_count);
    drm_vma_node_reset(&fbo->base.base.vma_node);

    /* The ghost starts with its own fresh refcount and destructor. */
    kref_init(&fbo->base.kref);
    fbo->base.destroy = &ttm_transfered_destroy;
    fbo->base.pin_count = 0;
    if (bo->type != ttm_bo_type_sg)
        fbo->base.base.resv = &fbo->base.base._resv;

    dma_resv_init(&fbo->base.base._resv);
    fbo->base.base.dev = NULL;
    ret = dma_resv_trylock(&fbo->base.base._resv);
    WARN_ON(!ret);

    if (fbo->base.resource) {
        /* The ghost takes over the old placement from @bo. */
        ttm_resource_set_bo(fbo->base.resource, &fbo->base);
        bo->resource = NULL;
        ttm_bo_set_bulk_move(&fbo->base, NULL);
    } else {
        fbo->base.bulk_move = NULL;
    }

    ret = dma_resv_reserve_fences(&fbo->base.base._resv, 1);
    if (ret) {
        kfree(fbo);
        return ret;
    }

    /* Hold the original BO alive until the ghost is destroyed. */
    ttm_bo_get(bo);
    fbo->bo = bo;

    ttm_bo_move_to_lru_tail_unlocked(&fbo->base);

    *new_obj = &fbo->base;
    return 0;
}
/**
 * ttm_io_prot
 *
 * @bo: ttm buffer object
 * @res: ttm resource object
 * @tmp: Page protection flag for a normal, cached mapping.
 *
 * Utility function that returns the pgprot_t that should be used for
 * setting up a PTE with the caching model of @res: the TT caching mode
 * when the manager is TT-backed, otherwise the bus caching mode.
 */
pgprot_t ttm_io_prot(struct ttm_buffer_object *bo, struct ttm_resource *res,
                     pgprot_t tmp)
{
    struct ttm_resource_manager *man;
    enum ttm_caching caching;

    man = ttm_manager_type(bo->bdev, res->mem_type);
    caching = man->use_tt ? bo->ttm->caching : res->bus.caching;

    return ttm_prot_from_caching(caching, tmp);
}
EXPORT_SYMBOL(ttm_io_prot);
/*
 * Set up a kernel mapping of @size bytes at @offset into an io-memory
 * backed buffer object, either by reusing an already mapped bus address
 * or by ioremapping with the caching mode the resource requests.
 */
static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
			  unsigned long offset,
			  unsigned long size,
			  struct ttm_bo_kmap_obj *map)
{
	struct ttm_resource *mem = bo->resource;

	if (mem->bus.addr) {
		/* Bus space is already mapped; just offset into it. */
		map->bo_kmap_type = ttm_bo_map_premapped;
		map->virtual = ((u8 *)mem->bus.addr) + offset;
	} else {
		resource_size_t phys = mem->bus.offset + offset;

		map->bo_kmap_type = ttm_bo_map_iomap;
		if (mem->bus.caching == ttm_write_combined)
			map->virtual = ioremap_wc(phys, size);
#ifdef CONFIG_X86
		else if (mem->bus.caching == ttm_cached)
			map->virtual = ioremap_cache(phys, size);
#endif
		else
			map->virtual = ioremap(phys, size);
	}
	return map->virtual ? 0 : -ENOMEM;
}
/*
 * Map @num_pages starting at @start_page of a TT-backed (system memory)
 * buffer object into kernel address space: kmap for the single-page
 * cached case, vmap otherwise.
 */
static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
			   unsigned long start_page,
			   unsigned long num_pages,
			   struct ttm_bo_kmap_obj *map)
{
	struct ttm_resource *mem = bo->resource;
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false
	};
	struct ttm_tt *ttm = bo->ttm;
	pgprot_t prot;
	int ret;

	BUG_ON(!ttm);

	/* Make sure backing pages actually exist before mapping them. */
	ret = ttm_tt_populate(bo->bdev, ttm, &ctx);
	if (ret)
		return ret;

	if (num_pages == 1 && ttm->caching == ttm_cached) {
		/*
		 * We're mapping a single page, and the desired
		 * page protection is consistent with the bo.
		 */
		map->bo_kmap_type = ttm_bo_map_kmap;
		map->page = ttm->pages[start_page];
		map->virtual = kmap(map->page);
	} else {
		/*
		 * We need to use vmap to get the desired page protection
		 * or to make the buffer object look contiguous.
		 */
		prot = ttm_io_prot(bo, mem, PAGE_KERNEL);
		map->bo_kmap_type = ttm_bo_map_vmap;
		map->virtual = vmap(ttm->pages + start_page, num_pages,
				    0, prot);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}
/**
 * ttm_bo_kmap
 *
 * @bo: The buffer object.
 * @start_page: The first page to map.
 * @num_pages: Number of pages to map.
 * @map: pointer to a struct ttm_bo_kmap_obj representing the map.
 *
 * Sets up a kernel virtual mapping, using ioremap, vmap or kmap to the
 * data in the buffer object. The ttm_kmap_obj_virtual function can then be
 * used to obtain a virtual address to the data.
 *
 * Returns
 * -ENOMEM: Out of memory.
 * -EINVAL: Invalid range.
 */
int ttm_bo_kmap(struct ttm_buffer_object *bo,
		unsigned long start_page, unsigned long num_pages,
		struct ttm_bo_kmap_obj *map)
{
	unsigned long total_pages = PFN_UP(bo->resource->size);
	int ret;

	map->virtual = NULL;
	map->bo = bo;

	/* Reject requests reaching beyond the buffer object. */
	if (num_pages > total_pages ||
	    start_page + num_pages > total_pages)
		return -EINVAL;

	ret = ttm_mem_io_reserve(bo->bdev, bo->resource);
	if (ret)
		return ret;

	if (!bo->resource->bus.is_iomem)
		return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);

	return ttm_bo_ioremap(bo, start_page << PAGE_SHIFT,
			      num_pages << PAGE_SHIFT, map);
}
EXPORT_SYMBOL(ttm_bo_kmap);
/**
 * ttm_bo_kunmap
 *
 * @map: Object describing the map to unmap.
 *
 * Unmaps a kernel map set up by ttm_bo_kmap.
 */
void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
	if (!map->virtual)
		return;

	/* Undo the mapping with the counterpart of however it was set up. */
	switch (map->bo_kmap_type) {
	case ttm_bo_map_iomap:
		iounmap(map->virtual);
		break;
	case ttm_bo_map_vmap:
		vunmap(map->virtual);
		break;
	case ttm_bo_map_kmap:
		kunmap(map->page);
		break;
	case ttm_bo_map_premapped:
		/* Nothing was mapped by us — nothing to undo. */
		break;
	default:
		BUG();
	}
	ttm_mem_io_free(map->bo->bdev, map->bo->resource);
	map->virtual = NULL;
	map->page = NULL;
}
EXPORT_SYMBOL(ttm_bo_kunmap);
/**
 * ttm_bo_vmap
 *
 * @bo: The buffer object.
 * @map: pointer to a struct iosys_map representing the map.
 *
 * Sets up a kernel virtual mapping, using ioremap or vmap to the
 * data in the buffer object. The parameter @map returns the virtual
 * address as struct iosys_map. Unmap the buffer with ttm_bo_vunmap().
 *
 * Returns
 * -ENOMEM: Out of memory.
 * -EINVAL: Invalid range.
 */
int ttm_bo_vmap(struct ttm_buffer_object *bo, struct iosys_map *map)
{
	struct ttm_resource *mem = bo->resource;
	int ret;

	dma_resv_assert_held(bo->base.resv);

	ret = ttm_mem_io_reserve(bo->bdev, mem);
	if (ret)
		return ret;

	if (mem->bus.is_iomem) {
		void __iomem *vaddr_iomem;

		/* Reuse a premapped bus address if available, otherwise
		 * pick the ioremap flavour matching the bus caching mode.
		 */
		if (mem->bus.addr)
			vaddr_iomem = (void __iomem *)mem->bus.addr;
		else if (mem->bus.caching == ttm_write_combined)
			vaddr_iomem = ioremap_wc(mem->bus.offset,
						 bo->base.size);
#ifdef CONFIG_X86
		else if (mem->bus.caching == ttm_cached)
			vaddr_iomem = ioremap_cache(mem->bus.offset,
						    bo->base.size);
#endif
		else
			vaddr_iomem = ioremap(mem->bus.offset, bo->base.size);

		if (!vaddr_iomem)
			return -ENOMEM;

		iosys_map_set_vaddr_iomem(map, vaddr_iomem);

	} else {
		struct ttm_operation_ctx ctx = {
			.interruptible = false,
			.no_wait_gpu = false
		};
		struct ttm_tt *ttm = bo->ttm;
		pgprot_t prot;
		void *vaddr;

		/* Make sure backing pages exist before mapping them. */
		ret = ttm_tt_populate(bo->bdev, ttm, &ctx);
		if (ret)
			return ret;

		/*
		 * We need to use vmap to get the desired page protection
		 * or to make the buffer object look contiguous.
		 */
		prot = ttm_io_prot(bo, mem, PAGE_KERNEL);
		vaddr = vmap(ttm->pages, ttm->num_pages, 0, prot);
		if (!vaddr)
			return -ENOMEM;

		iosys_map_set_vaddr(map, vaddr);
	}

	return 0;
}
EXPORT_SYMBOL(ttm_bo_vmap);
/**
 * ttm_bo_vunmap
 *
 * @bo: The buffer object.
 * @map: Object describing the map to unmap.
 *
 * Unmaps a kernel map set up by ttm_bo_vmap().
 */
void ttm_bo_vunmap(struct ttm_buffer_object *bo, struct iosys_map *map)
{
	struct ttm_resource *mem = bo->resource;

	dma_resv_assert_held(bo->base.resv);

	if (iosys_map_is_null(map))
		return;

	if (!map->is_iomem)
		vunmap(map->vaddr);
	else if (!mem->bus.addr)
		/* Only unmap what ttm_bo_vmap() itself ioremapped;
		 * premapped bus addresses are not ours to unmap.
		 */
		iounmap(map->vaddr_iomem);
	iosys_map_clear(map);

	ttm_mem_io_free(bo->bdev, bo->resource);
}
EXPORT_SYMBOL(ttm_bo_vunmap);
/*
 * Wait (up to 15s) for all bookkeeping fences on @bo to signal, then
 * free its resource (and its TT unless the destination needs one).
 * Returns -EBUSY on timeout or a negative error from the wait.
 */
static int ttm_bo_wait_free_node(struct ttm_buffer_object *bo,
				 bool dst_use_tt)
{
	long ret;

	ret = dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP,
				    false, 15 * HZ);
	if (ret == 0)
		return -EBUSY;	/* timed out */
	if (ret < 0)
		return ret;

	if (!dst_use_tt)
		ttm_bo_tt_destroy(bo);
	ttm_resource_free(bo, &bo->resource);
	return 0;
}
/*
 * Park the old backing memory of @bo on a "ghost" object together with
 * @fence, so it is released only once the accelerated move has finished.
 */
static int ttm_bo_move_to_ghost(struct ttm_buffer_object *bo,
				struct dma_fence *fence,
				bool dst_use_tt)
{
	struct ttm_buffer_object *ghost_obj;
	int ret;

	/**
	 * This should help pipeline ordinary buffer moves.
	 *
	 * Hang old buffer memory on a new buffer object,
	 * and leave it to be released when the GPU
	 * operation has completed.
	 */
	ret = ttm_buffer_object_transfer(bo, &ghost_obj);
	if (ret)
		return ret;

	dma_resv_add_fence(&ghost_obj->base._resv, fence,
			   DMA_RESV_USAGE_KERNEL);

	/**
	 * If we're not moving to fixed memory, the TTM object
	 * needs to stay alive. Otherwise hang it on the ghost
	 * bo to be unbound and destroyed.
	 */
	if (dst_use_tt)
		ghost_obj->ttm = NULL;
	else
		bo->ttm = NULL;

	dma_resv_unlock(&ghost_obj->base._resv);
	/* Drop our reference; the ghost frees itself once idle. */
	ttm_bo_put(ghost_obj);
	return 0;
}
/*
 * Pipelined eviction: record @fence as the source manager's most recent
 * eviction fence and free the resource immediately.
 */
static void ttm_bo_move_pipeline_evict(struct ttm_buffer_object *bo,
				       struct dma_fence *fence)
{
	struct ttm_device *bdev = bo->bdev;
	struct ttm_resource_manager *from;

	from = ttm_manager_type(bdev, bo->resource->mem_type);

	/**
	 * BO doesn't have a TTM we need to bind/unbind. Just remember
	 * this eviction and free up the allocation
	 */
	spin_lock(&from->move_lock);
	if (!from->move || dma_fence_is_later(fence, from->move)) {
		/* Keep only the latest eviction fence per manager. */
		dma_fence_put(from->move);
		from->move = dma_fence_get(fence);
	}
	spin_unlock(&from->move_lock);

	ttm_resource_free(bo, &bo->resource);
}
/**
 * ttm_bo_move_accel_cleanup - cleanup helper for hw copies
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @fence: A fence object that signals when moving is complete.
 * @evict: This is an evict move. Don't return until the buffer is idle.
 * @pipeline: evictions are to be pipelined.
 * @new_mem: struct ttm_resource indicating where to move.
 *
 * Accelerated move function to be called when an accelerated move
 * has been scheduled. The function will create a new temporary buffer object
 * representing the old placement, and put the sync object on both buffer
 * objects. After that the newly created buffer object is unref'd to be
 * destroyed when the move is complete. This will help pipeline
 * buffer moves.
 */
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
			      struct dma_fence *fence,
			      bool evict,
			      bool pipeline,
			      struct ttm_resource *new_mem)
{
	struct ttm_device *bdev = bo->bdev;
	struct ttm_resource_manager *from = ttm_manager_type(bdev, bo->resource->mem_type);
	struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type);
	int ret = 0;

	dma_resv_add_fence(bo->base.resv, fence, DMA_RESV_USAGE_KERNEL);
	if (!evict)
		/* Ordinary move: park the old placement on a ghost object. */
		ret = ttm_bo_move_to_ghost(bo, fence, man->use_tt);
	else if (!from->use_tt && pipeline)
		/* Pipelined eviction from fixed (non-TT) memory. */
		ttm_bo_move_pipeline_evict(bo, fence);
	else
		/* Otherwise wait for idle before freeing the old node. */
		ret = ttm_bo_wait_free_node(bo, man->use_tt);

	if (ret)
		return ret;

	ttm_bo_assign_mem(bo, new_mem);

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
/**
 * ttm_bo_move_sync_cleanup - cleanup by waiting for the move to finish
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_mem: struct ttm_resource indicating where to move.
 *
 * Special case of ttm_bo_move_accel_cleanup where the bo is guaranteed
 * by the caller to be idle. Typically used after memcpy buffer moves.
 */
void ttm_bo_move_sync_cleanup(struct ttm_buffer_object *bo,
			      struct ttm_resource *new_mem)
{
	struct ttm_device *bdev = bo->bdev;
	struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type);
	int ret;

	/* The BO is idle per the caller's contract, so the wait inside
	 * should complete immediately; a failure indicates API misuse.
	 */
	ret = ttm_bo_wait_free_node(bo, man->use_tt);
	if (WARN_ON(ret))
		return;

	ttm_bo_assign_mem(bo, new_mem);
}
EXPORT_SYMBOL(ttm_bo_move_sync_cleanup);
/**
 * ttm_bo_pipeline_gutting - purge the contents of a bo
 * @bo: The buffer object
 *
 * Purge the contents of a bo, async if the bo is not idle.
 * After a successful call, the bo is left unpopulated in
 * system placement. The function may wait uninterruptible
 * for idle on OOM.
 *
 * Return: 0 if successful, negative error code on failure.
 */
int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
{
	struct ttm_buffer_object *ghost;
	struct ttm_tt *ttm;
	int ret;

	/* If already idle, no need for ghost object dance. */
	if (dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP)) {
		if (!bo->ttm) {
			/* See comment below about clearing. */
			ret = ttm_tt_create(bo, true);
			if (ret)
				return ret;
		} else {
			ttm_tt_unpopulate(bo->bdev, bo->ttm);
			if (bo->type == ttm_bo_type_device)
				ttm_tt_mark_for_clear(bo->ttm);
		}
		ttm_resource_free(bo, &bo->resource);
		return 0;
	}

	/*
	 * We need an unpopulated ttm_tt after giving our current one,
	 * if any, to the ghost object. And we can't afford to fail
	 * creating one *after* the operation. If the bo subsequently gets
	 * resurrected, make sure it's cleared (if ttm_bo_type_device)
	 * to avoid leaking sensitive information to user-space.
	 */
	ttm = bo->ttm;
	bo->ttm = NULL;
	ret = ttm_tt_create(bo, true);
	/* After the swap, @ttm holds the fresh tt; bo keeps its old one
	 * so it can be handed to the ghost below.
	 */
	swap(bo->ttm, ttm);
	if (ret)
		return ret;

	ret = ttm_buffer_object_transfer(bo, &ghost);
	if (ret)
		goto error_destroy_tt;

	ret = dma_resv_copy_fences(&ghost->base._resv, bo->base.resv);
	/* Last resort, wait for the BO to be idle when we are OOM */
	if (ret) {
		dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP,
				      false, MAX_SCHEDULE_TIMEOUT);
	}

	dma_resv_unlock(&ghost->base._resv);
	ttm_bo_put(ghost);
	/* Install the fresh, unpopulated tt on the gutted BO. */
	bo->ttm = ttm;
	return 0;

error_destroy_tt:
	ttm_tt_destroy(bo->bdev, ttm);
	return ret;
}
| linux-master | drivers/gpu/drm/ttm/ttm_bo_util.c |
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/*
* Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
* Copyright 2020 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Christian König
*/
#define pr_fmt(fmt) "[TTM DEVICE] " fmt
#include <linux/mm.h>
#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_device.h>
#include <drm/ttm/ttm_tt.h>
#include <drm/ttm/ttm_placement.h>
#include "ttm_module.h"
/*
* ttm_global_mutex - protecting the global state
*/
static DEFINE_MUTEX(ttm_global_mutex);
static unsigned ttm_glob_use_count;
struct ttm_global ttm_glob;
EXPORT_SYMBOL(ttm_glob);
struct dentry *ttm_debugfs_root;
/* Drop one user of the global TTM state; tear everything down on the
 * last put (pools, debugfs, the shared dummy read page).
 */
static void ttm_global_release(void)
{
	struct ttm_global *glob = &ttm_glob;

	mutex_lock(&ttm_global_mutex);
	if (--ttm_glob_use_count > 0)
		goto out;	/* other devices still use the global state */

	ttm_pool_mgr_fini();
	debugfs_remove(ttm_debugfs_root);

	__free_page(glob->dummy_read_page);
	memset(glob, 0, sizeof(*glob));
out:
	mutex_unlock(&ttm_global_mutex);
}
/*
 * Take a reference on the global TTM state, initialising it on first
 * use: page pool managers, the debugfs directory and the shared dummy
 * read page. Undone by ttm_global_release().
 */
static int ttm_global_init(void)
{
	struct ttm_global *glob = &ttm_glob;
	unsigned long num_pages, num_dma32;
	struct sysinfo si;
	int ret = 0;

	mutex_lock(&ttm_global_mutex);
	if (++ttm_glob_use_count > 1)
		goto out;	/* already initialised by a previous user */

	si_meminfo(&si);

	/* Debugfs is optional; continue with a NULL root on failure. */
	ttm_debugfs_root = debugfs_create_dir("ttm", NULL);
	if (IS_ERR(ttm_debugfs_root)) {
		ttm_debugfs_root = NULL;
	}

	/* Limit the number of pages in the pool to about 50% of the total
	 * system memory.
	 */
	num_pages = ((u64)si.totalram * si.mem_unit) >> PAGE_SHIFT;
	num_pages /= 2;

	/* But for DMA32 we limit ourself to only use 2GiB maximum. */
	num_dma32 = (u64)(si.totalram - si.totalhigh) * si.mem_unit
		>> PAGE_SHIFT;
	num_dma32 = min(num_dma32, 2UL << (30 - PAGE_SHIFT));

	ttm_pool_mgr_init(num_pages);
	ttm_tt_mgr_init(num_pages, num_dma32);

	/* Shared zeroed page handed out for reads of unpopulated BOs. */
	glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);

	if (unlikely(glob->dummy_read_page == NULL)) {
		ret = -ENOMEM;
		goto out;
	}

	INIT_LIST_HEAD(&glob->device_list);
	atomic_set(&glob->bo_count, 0);

	debugfs_create_atomic_t("buffer_objects", 0444, ttm_debugfs_root,
				&glob->bo_count);

out:
	/* On failure, undo the debugfs dir and the use count taken above. */
	if (ret && ttm_debugfs_root)
		debugfs_remove(ttm_debugfs_root);
	if (ret)
		--ttm_glob_use_count;
	mutex_unlock(&ttm_global_mutex);
	return ret;
}
/*
 * A buffer object shrink method that tries to swap out the first
 * buffer object on the global::swap_lru list.
 *
 * Walks all registered devices and returns the first positive (pages
 * swapped) or negative (error) result from ttm_device_swapout().
 */
int ttm_global_swapout(struct ttm_operation_ctx *ctx, gfp_t gfp_flags)
{
	struct ttm_global *glob = &ttm_glob;
	struct ttm_device *bdev;
	int ret = 0;

	mutex_lock(&ttm_global_mutex);
	list_for_each_entry(bdev, &glob->device_list, device_list) {
		ret = ttm_device_swapout(bdev, ctx, gfp_flags);
		if (ret > 0) {
			/* Rotate the device to the tail for fairness. */
			list_move_tail(&bdev->device_list, &glob->device_list);
			break;
		}
	}
	mutex_unlock(&ttm_global_mutex);
	return ret;
}
/*
 * Try to swap out one buffer object backed by @bdev.
 *
 * Returns the number of pages swapped out (> 0) on success, 0 when no
 * suitable BO was found, or a negative error code.
 */
int ttm_device_swapout(struct ttm_device *bdev, struct ttm_operation_ctx *ctx,
		       gfp_t gfp_flags)
{
	struct ttm_resource_cursor cursor;
	struct ttm_resource_manager *man;
	struct ttm_resource *res;
	unsigned i;
	int ret;

	spin_lock(&bdev->lru_lock);
	/* Only TT-backed (system-memory) domains can be swapped out. */
	for (i = TTM_PL_SYSTEM; i < TTM_NUM_MEM_TYPES; ++i) {
		man = ttm_manager_type(bdev, i);
		if (!man || !man->use_tt)
			continue;

		ttm_resource_manager_for_each_res(man, &cursor, res) {
			struct ttm_buffer_object *bo = res->bo;
			uint32_t num_pages;

			/* Skip resources racing with a concurrent move. */
			if (!bo || bo->resource != res)
				continue;

			num_pages = PFN_UP(bo->base.size);
			ret = ttm_bo_swapout(bo, ctx, gfp_flags);
			/* ttm_bo_swapout has dropped the lru_lock */
			if (!ret)
				return num_pages;
			if (ret != -EBUSY)
				return ret;
		}
	}
	spin_unlock(&bdev->lru_lock);
	return 0;
}
EXPORT_SYMBOL(ttm_device_swapout);
/**
 * ttm_device_init
 *
 * @bdev: A pointer to a struct ttm_device to initialize.
 * @funcs: Function table for the device.
 * @dev: The core kernel device pointer for DMA mappings and allocations.
 * @mapping: The address space to use for this bo.
 * @vma_manager: A pointer to a vma manager.
 * @use_dma_alloc: If coherent DMA allocation API should be used.
 * @use_dma32: If we should use GFP_DMA32 for device memory allocations.
 *
 * Initializes a struct ttm_device:
 * Returns:
 * !0: Failure.
 */
int ttm_device_init(struct ttm_device *bdev, const struct ttm_device_funcs *funcs,
		    struct device *dev, struct address_space *mapping,
		    struct drm_vma_offset_manager *vma_manager,
		    bool use_dma_alloc, bool use_dma32)
{
	struct ttm_global *glob = &ttm_glob;
	int ret;

	if (WARN_ON(vma_manager == NULL))
		return -EINVAL;

	/* Take a reference on the global TTM state first. */
	ret = ttm_global_init();
	if (ret)
		return ret;

	/* WQ_MEM_RECLAIM so the workqueue can make progress while the
	 * system is reclaiming memory.
	 */
	bdev->wq = alloc_workqueue("ttm", WQ_MEM_RECLAIM | WQ_HIGHPRI, 16);
	if (!bdev->wq) {
		ttm_global_release();
		return -ENOMEM;
	}

	bdev->funcs = funcs;

	ttm_sys_man_init(bdev);
	ttm_pool_init(&bdev->pool, dev, NUMA_NO_NODE, use_dma_alloc, use_dma32);

	bdev->vma_manager = vma_manager;
	spin_lock_init(&bdev->lru_lock);
	INIT_LIST_HEAD(&bdev->pinned);
	bdev->dev_mapping = mapping;
	mutex_lock(&ttm_global_mutex);
	list_add_tail(&bdev->device_list, &glob->device_list);
	mutex_unlock(&ttm_global_mutex);

	return 0;
}
EXPORT_SYMBOL(ttm_device_init);
/**
 * ttm_device_fini - tear down a struct ttm_device
 * @bdev: Pointer to the struct ttm_device to finalize.
 *
 * Reverses ttm_device_init(): disables the system manager, unlinks the
 * device from the global device list, flushes and destroys the
 * workqueue, and releases the pool and the global TTM state.
 */
void ttm_device_fini(struct ttm_device *bdev)
{
	struct ttm_resource_manager *man;
	unsigned i;

	man = ttm_manager_type(bdev, TTM_PL_SYSTEM);
	ttm_resource_manager_set_used(man, false);
	ttm_set_driver_manager(bdev, TTM_PL_SYSTEM, NULL);

	mutex_lock(&ttm_global_mutex);
	list_del(&bdev->device_list);
	mutex_unlock(&ttm_global_mutex);

	drain_workqueue(bdev->wq);
	destroy_workqueue(bdev->wq);

	spin_lock(&bdev->lru_lock);
	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
		/* Check the list matching the reported priority index;
		 * the previous code always tested lru[0] while printing i.
		 */
		if (list_empty(&man->lru[i]))
			pr_debug("Swap list %d was clean\n", i);
	spin_unlock(&bdev->lru_lock);

	ttm_pool_fini(&bdev->pool);
	ttm_global_release();
}
EXPORT_SYMBOL(ttm_device_fini);
/*
 * Unpopulate the TTs of all buffer objects on @list so their DMA
 * mappings are torn down. The lru_lock is dropped around the actual
 * unpopulate because it can sleep.
 */
static void ttm_device_clear_lru_dma_mappings(struct ttm_device *bdev,
					      struct list_head *list)
{
	struct ttm_resource *res;

	spin_lock(&bdev->lru_lock);
	while ((res = list_first_entry_or_null(list, typeof(*res), lru))) {
		struct ttm_buffer_object *bo = res->bo;

		/* Take ref against racing releases once lru_lock is unlocked */
		if (!ttm_bo_get_unless_zero(bo))
			continue;

		/* Unlink before dropping the lock so no one else picks
		 * this resource up again.
		 */
		list_del_init(&res->lru);
		spin_unlock(&bdev->lru_lock);

		if (bo->ttm)
			ttm_tt_unpopulate(bo->bdev, bo->ttm);

		ttm_bo_put(bo);
		spin_lock(&bdev->lru_lock);
	}
	spin_unlock(&bdev->lru_lock);
}
/*
 * Tear down the DMA mappings of all pinned and LRU-listed buffer
 * objects of @bdev (only TT-backed domains carry such mappings).
 */
void ttm_device_clear_dma_mappings(struct ttm_device *bdev)
{
	struct ttm_resource_manager *man;
	unsigned int i, j;

	ttm_device_clear_lru_dma_mappings(bdev, &bdev->pinned);
	for (i = TTM_PL_SYSTEM; i < TTM_NUM_MEM_TYPES; ++i) {
		man = ttm_manager_type(bdev, i);
		if (!man || !man->use_tt)
			continue;

		for (j = 0; j < TTM_MAX_BO_PRIORITY; ++j)
			ttm_device_clear_lru_dma_mappings(bdev, &man->lru[j]);
	}
}
EXPORT_SYMBOL(ttm_device_clear_dma_mappings);
| linux-master | drivers/gpu/drm/ttm/ttm_device.c |
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
*
* Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_bo.h>
/*
 * Unlock, in reverse order, all buffers on @list that come before (and
 * not including) @entry — used to back out of a partially completed
 * reservation pass.
 */
static void ttm_eu_backoff_reservation_reverse(struct list_head *list,
					       struct ttm_validate_buffer *entry)
{
	list_for_each_entry_continue_reverse(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		dma_resv_unlock(bo->base.resv);
	}
}
/*
 * Release all reservations taken by ttm_eu_reserve_buffers(), bumping
 * each BO to the LRU tail, and finish the ww_acquire ticket if any.
 */
void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
				struct list_head *list)
{
	struct ttm_validate_buffer *entry;

	if (list_empty(list))
		return;

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		ttm_bo_move_to_lru_tail_unlocked(bo);
		dma_resv_unlock(bo->base.resv);
	}

	if (ticket)
		ww_acquire_fini(ticket);
}
EXPORT_SYMBOL(ttm_eu_backoff_reservation);
/*
 * Reserve buffers for validation.
 *
 * If a buffer in the list is marked for CPU access, we back off and
 * wait for that buffer to become free for GPU access.
 *
 * If a buffer is reserved for another validation, the validator with
 * the highest validation sequence backs off and waits for that buffer
 * to become unreserved. This prevents deadlocks when validating multiple
 * buffers in different orders.
 */
int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
			   struct list_head *list, bool intr,
			   struct list_head *dups)
{
	struct ttm_validate_buffer *entry;
	int ret;

	if (list_empty(list))
		return 0;

	if (ticket)
		ww_acquire_init(ticket, &reservation_ww_class);

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;
		unsigned int num_fences;

		ret = ttm_bo_reserve(bo, intr, (ticket == NULL), ticket);
		if (ret == -EALREADY && dups) {
			/* Same BO appears twice: move the duplicate onto
			 * @dups and continue from the previous entry.
			 */
			struct ttm_validate_buffer *safe = entry;
			entry = list_prev_entry(entry, head);
			list_del(&safe->head);
			list_add(&safe->head, dups);
			continue;
		}

		/* Always reserve at least one fence slot. */
		num_fences = max(entry->num_shared, 1u);
		if (!ret) {
			ret = dma_resv_reserve_fences(bo->base.resv,
						      num_fences);
			if (!ret)
				continue;
		}

		/* uh oh, we lost out, drop every reservation and try
		 * to only reserve this buffer, then start over if
		 * this succeeds.
		 */
		ttm_eu_backoff_reservation_reverse(list, entry);

		if (ret == -EDEADLK) {
			ret = ttm_bo_reserve_slowpath(bo, intr, ticket);
		}

		if (!ret)
			ret = dma_resv_reserve_fences(bo->base.resv,
						      num_fences);

		if (unlikely(ret != 0)) {
			if (ticket) {
				ww_acquire_done(ticket);
				ww_acquire_fini(ticket);
			}
			return ret;
		}

		/* move this item to the front of the list,
		 * forces correct iteration of the loop without keeping track
		 */
		list_del(&entry->head);
		list_add(&entry->head, list);
	}

	return 0;
}
EXPORT_SYMBOL(ttm_eu_reserve_buffers);
/*
 * Add @fence to every reserved buffer on @list (read usage when shared
 * fence slots were requested, write usage otherwise), move the BOs to
 * the LRU tail and drop their reservations.
 */
void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
				 struct list_head *list,
				 struct dma_fence *fence)
{
	struct ttm_validate_buffer *entry;

	if (list_empty(list))
		return;

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		dma_resv_add_fence(bo->base.resv, fence, entry->num_shared ?
				   DMA_RESV_USAGE_READ : DMA_RESV_USAGE_WRITE);
		ttm_bo_move_to_lru_tail_unlocked(bo);
		dma_resv_unlock(bo->base.resv);
	}

	if (ticket)
		ww_acquire_fini(ticket);
}
EXPORT_SYMBOL(ttm_eu_fence_buffer_objects);
| linux-master | drivers/gpu/drm/ttm/ttm_execbuf_util.c |
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
*
* Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
/*
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
* Jerome Glisse
*/
#include <linux/module.h>
#include <linux/device.h>
#include <linux/pgtable.h>
#include <linux/sched.h>
#include <linux/debugfs.h>
#include <drm/drm_sysfs.h>
#include <drm/ttm/ttm_caching.h>
#include "ttm_module.h"
/**
* DOC: TTM
*
* TTM is a memory manager for accelerator devices with dedicated memory.
*
* The basic idea is that resources are grouped together in buffer objects of
* certain size and TTM handles lifetime, movement and CPU mappings of those
* objects.
*
* TODO: Add more design background and information here.
*/
/**
 * ttm_prot_from_caching - Modify the page protection according to the
 * ttm caching mode
 * @caching: The ttm caching mode
 * @tmp: The original page protection
 *
 * Return: The modified page protection
 */
pgprot_t ttm_prot_from_caching(enum ttm_caching caching, pgprot_t tmp)
{
	/* Cached mappings need no adjustment */
	if (caching == ttm_cached)
		return tmp;

#if defined(__i386__) || defined(__x86_64__)
	if (caching == ttm_write_combined)
		tmp = pgprot_writecombine(tmp);
#ifndef CONFIG_UML
	else if (boot_cpu_data.x86 > 3)
		tmp = pgprot_noncached(tmp);
#endif /* CONFIG_UML */
#endif /* __i386__ || __x86_64__ */

#if defined(__ia64__) || defined(__arm__) || defined(__aarch64__) || \
	defined(__powerpc__) || defined(__mips__) || defined(__loongarch__)
	if (caching == ttm_write_combined)
		tmp = pgprot_writecombine(tmp);
	else
		tmp = pgprot_noncached(tmp);
#endif

#if defined(__sparc__)
	/* sparc has no write-combining — always map uncached */
	tmp = pgprot_noncached(tmp);
#endif
	return tmp;
}
MODULE_AUTHOR("Thomas Hellstrom, Jerome Glisse");
MODULE_DESCRIPTION("TTM memory manager subsystem (for DRM device)");
MODULE_LICENSE("GPL and additional rights");
| linux-master | drivers/gpu/drm/ttm/ttm_module.c |
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
*
* Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
/*
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
*/
#define pr_fmt(fmt) "[TTM] " fmt
#include <linux/sched.h>
#include <linux/shmem_fs.h>
#include <linux/file.h>
#include <linux/module.h>
#include <drm/drm_cache.h>
#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_tt.h>
#include "ttm_module.h"
static unsigned long ttm_pages_limit;
MODULE_PARM_DESC(pages_limit, "Limit for the allocated pages");
module_param_named(pages_limit, ttm_pages_limit, ulong, 0644);
static unsigned long ttm_dma32_pages_limit;
MODULE_PARM_DESC(dma32_pages_limit, "Limit for the allocated DMA32 pages");
module_param_named(dma32_pages_limit, ttm_dma32_pages_limit, ulong, 0644);
static atomic_long_t ttm_pages_allocated;
static atomic_long_t ttm_dma32_pages_allocated;
/*
 * Allocates a ttm structure for the given BO.
 * Derives the tt page flags from the BO type and delegates the actual
 * (possibly driver-subclassed) allocation to the device hook.
 */
int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc)
{
	struct ttm_device *bdev = bo->bdev;
	uint32_t page_flags = 0;

	dma_resv_assert_held(bo->base.resv);

	if (bo->ttm)
		return 0;	/* already created */

	switch (bo->type) {
	case ttm_bo_type_device:
		if (zero_alloc)
			page_flags |= TTM_TT_FLAG_ZERO_ALLOC;
		break;
	case ttm_bo_type_kernel:
		break;
	case ttm_bo_type_sg:
		page_flags |= TTM_TT_FLAG_EXTERNAL;
		break;
	default:
		pr_err("Illegal buffer object type\n");
		return -EINVAL;
	}

	bo->ttm = bdev->funcs->ttm_tt_create(bo, page_flags);
	if (unlikely(bo->ttm == NULL))
		return -ENOMEM;

	/* EXTERNAL_MAPPABLE only makes sense together with EXTERNAL. */
	WARN_ON(bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL_MAPPABLE &&
		!(bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL));

	return 0;
}
/*
 * Allocates storage for pointers to the pages that back the ttm.
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
static int ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
{
	/* Tie the element size to the array's type (struct page *)
	 * instead of the incidental sizeof(void*).
	 */
	ttm->pages = kvcalloc(ttm->num_pages, sizeof(*ttm->pages),
			      GFP_KERNEL);
	if (!ttm->pages)
		return -ENOMEM;

	return 0;
}
/*
 * Allocate the page directory together with the DMA address array in a
 * single allocation; dma_address points right behind the pages array,
 * which is why ttm_tt_fini() only frees ttm->pages for this layout.
 */
static int ttm_dma_tt_alloc_page_directory(struct ttm_tt *ttm)
{
	ttm->pages = kvcalloc(ttm->num_pages, sizeof(*ttm->pages) +
			      sizeof(*ttm->dma_address), GFP_KERNEL);
	if (!ttm->pages)
		return -ENOMEM;

	ttm->dma_address = (void *)(ttm->pages + ttm->num_pages);
	return 0;
}
/* SG TTs carry no page array of their own — only DMA addresses. */
static int ttm_sg_tt_alloc_page_directory(struct ttm_tt *ttm)
{
	ttm->dma_address = kvcalloc(ttm->num_pages, sizeof(*ttm->dma_address),
				    GFP_KERNEL);
	if (!ttm->dma_address)
		return -ENOMEM;

	return 0;
}
/* Destroy @ttm through the driver's ttm_tt_destroy hook. */
void ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
{
	bdev->funcs->ttm_tt_destroy(bdev, ttm);
}
/*
 * Initialise the common ttm_tt fields from the BO and the arguments.
 * num_pages covers the page-aligned BO size plus @extra_pages.
 */
static void ttm_tt_init_fields(struct ttm_tt *ttm,
			       struct ttm_buffer_object *bo,
			       uint32_t page_flags,
			       enum ttm_caching caching,
			       unsigned long extra_pages)
{
	ttm->num_pages = (PAGE_ALIGN(bo->base.size) >> PAGE_SHIFT) + extra_pages;
	ttm->page_flags = page_flags;
	ttm->dma_address = NULL;
	ttm->swap_storage = NULL;
	ttm->sg = bo->sg;
	ttm->caching = caching;
}
/*
 * Initialise @ttm for @bo and allocate its page directory.
 * Returns -ENOMEM if the directory allocation fails.
 */
int ttm_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
		uint32_t page_flags, enum ttm_caching caching,
		unsigned long extra_pages)
{
	ttm_tt_init_fields(ttm, bo, page_flags, caching, extra_pages);

	if (ttm_tt_alloc_page_directory(ttm)) {
		pr_err("Failed allocating page table\n");
		return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_tt_init);
/* Release everything owned by @ttm: swap storage and directories. */
void ttm_tt_fini(struct ttm_tt *ttm)
{
	/* Finalizing a still-populated tt indicates a driver bug. */
	WARN_ON(ttm->page_flags & TTM_TT_FLAG_PRIV_POPULATED);

	if (ttm->swap_storage)
		fput(ttm->swap_storage);
	ttm->swap_storage = NULL;

	/*
	 * For the DMA layout dma_address lives inside the pages
	 * allocation, so exactly one of the two pointers is kvfree'd.
	 */
	if (ttm->pages)
		kvfree(ttm->pages);
	else
		kvfree(ttm->dma_address);
	ttm->pages = NULL;
	ttm->dma_address = NULL;
}
EXPORT_SYMBOL(ttm_tt_fini);
/*
 * Like ttm_tt_init(), but picks the directory layout based on whether
 * the pages are external (SG, DMA addresses only) or need the combined
 * page/DMA-address array.
 */
int ttm_sg_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
		   uint32_t page_flags, enum ttm_caching caching)
{
	int ret;

	ttm_tt_init_fields(ttm, bo, page_flags, caching, 0);

	if (page_flags & TTM_TT_FLAG_EXTERNAL)
		ret = ttm_sg_tt_alloc_page_directory(ttm);
	else
		ret = ttm_dma_tt_alloc_page_directory(ttm);
	if (ret) {
		pr_err("Failed allocating page table\n");
		return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_sg_tt_init);
/*
 * Copy the contents of @ttm back from its shmem swap storage into the
 * (already allocated) backing pages and drop the swap file.
 * Returns 0 on success or a negative error code; on error the swap
 * storage is kept so the operation can be retried.
 */
int ttm_tt_swapin(struct ttm_tt *ttm)
{
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	gfp_t gfp_mask;
	int i, ret;

	swap_storage = ttm->swap_storage;
	BUG_ON(swap_storage == NULL);

	swap_space = swap_storage->f_mapping;
	gfp_mask = mapping_gfp_mask(swap_space);

	for (i = 0; i < ttm->num_pages; ++i) {
		/* Pull page i back from the shmem file... */
		from_page = shmem_read_mapping_page_gfp(swap_space, i,
							gfp_mask);
		if (IS_ERR(from_page)) {
			ret = PTR_ERR(from_page);
			goto out_err;
		}
		to_page = ttm->pages[i];
		if (unlikely(to_page == NULL)) {
			ret = -ENOMEM;
			goto out_err;
		}

		/* ...and copy it into the tt's backing page. */
		copy_highpage(to_page, from_page);
		put_page(from_page);
	}

	fput(swap_storage);
	ttm->swap_storage = NULL;
	ttm->page_flags &= ~TTM_TT_FLAG_SWAPPED;

	return 0;

out_err:
	return ret;
}
/**
 * ttm_tt_swapout - swap out tt object
 *
 * @bdev: TTM device structure.
 * @ttm: The struct ttm_tt.
 * @gfp_flags: Flags to use for memory allocation.
 *
 * Swapout a TT object to a shmem_file, return number of pages swapped out or
 * negative error code.
 */
int ttm_tt_swapout(struct ttm_device *bdev, struct ttm_tt *ttm,
		   gfp_t gfp_flags)
{
	loff_t size = (loff_t)ttm->num_pages << PAGE_SHIFT;
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	int i, ret;

	swap_storage = shmem_file_setup("ttm swap", size, 0);
	if (IS_ERR(swap_storage)) {
		pr_err("Failed allocating swap storage\n");
		return PTR_ERR(swap_storage);
	}

	swap_space = swap_storage->f_mapping;
	/* Restrict to what the shmem mapping itself allows. */
	gfp_flags &= mapping_gfp_mask(swap_space);

	for (i = 0; i < ttm->num_pages; ++i) {
		from_page = ttm->pages[i];
		/* Holes in the page array are simply skipped. */
		if (unlikely(from_page == NULL))
			continue;
		to_page = shmem_read_mapping_page_gfp(swap_space, i, gfp_flags);
		if (IS_ERR(to_page)) {
			ret = PTR_ERR(to_page);
			goto out_err;
		}
		copy_highpage(to_page, from_page);
		/* Mark dirty so shmem writes it out; accessed for LRU aging. */
		set_page_dirty(to_page);
		mark_page_accessed(to_page);
		put_page(to_page);
	}

	/* Data is safe in shmem now, release the backing pages. */
	ttm_tt_unpopulate(bdev, ttm);
	ttm->swap_storage = swap_storage;
	ttm->page_flags |= TTM_TT_FLAG_SWAPPED;
	return ttm->num_pages;

out_err:
	fput(swap_storage);
	return ret;
}
/*
 * Allocate backing pages for a ttm_tt, honouring the global page limits
 * (swapping other BOs out if needed), and swap previously swapped-out
 * contents back in.  No-op if the tt is already populated.
 */
int ttm_tt_populate(struct ttm_device *bdev,
		    struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
	int ret;

	if (!ttm)
		return -EINVAL;

	if (ttm_tt_is_populated(ttm))
		return 0;

	/* Externally provided pages are not accounted against the limits. */
	if (!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL)) {
		atomic_long_add(ttm->num_pages, &ttm_pages_allocated);
		if (bdev->pool.use_dma32)
			atomic_long_add(ttm->num_pages,
					&ttm_dma32_pages_allocated);
	}

	/* Over the limit: swap out other BOs until we fit or swapout fails. */
	while (atomic_long_read(&ttm_pages_allocated) > ttm_pages_limit ||
	       atomic_long_read(&ttm_dma32_pages_allocated) >
	       ttm_dma32_pages_limit) {
		ret = ttm_global_swapout(ctx, GFP_KERNEL);
		if (ret == 0)
			break;
		if (ret < 0)
			goto error;
	}

	if (bdev->funcs->ttm_tt_populate)
		ret = bdev->funcs->ttm_tt_populate(bdev, ttm, ctx);
	else
		ret = ttm_pool_alloc(&bdev->pool, ttm, ctx);
	if (ret)
		goto error;

	ttm->page_flags |= TTM_TT_FLAG_PRIV_POPULATED;
	/* Restore contents saved by a previous ttm_tt_swapout(). */
	if (unlikely(ttm->page_flags & TTM_TT_FLAG_SWAPPED)) {
		ret = ttm_tt_swapin(ttm);
		if (unlikely(ret != 0)) {
			ttm_tt_unpopulate(bdev, ttm);
			return ret;
		}
	}
	return 0;

error:
	/* Undo the accounting done above. */
	if (!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL)) {
		atomic_long_sub(ttm->num_pages, &ttm_pages_allocated);
		if (bdev->pool.use_dma32)
			atomic_long_sub(ttm->num_pages,
					&ttm_dma32_pages_allocated);
	}
	return ret;
}
EXPORT_SYMBOL(ttm_tt_populate);
/*
 * Free the backing pages of a populated ttm_tt and drop the accounting
 * added by ttm_tt_populate().  No-op if not populated.
 */
void ttm_tt_unpopulate(struct ttm_device *bdev, struct ttm_tt *ttm)
{
	if (!ttm_tt_is_populated(ttm))
		return;

	if (bdev->funcs->ttm_tt_unpopulate)
		bdev->funcs->ttm_tt_unpopulate(bdev, ttm);
	else
		ttm_pool_free(&bdev->pool, ttm);

	if (!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL)) {
		atomic_long_sub(ttm->num_pages, &ttm_pages_allocated);
		if (bdev->pool.use_dma32)
			atomic_long_sub(ttm->num_pages,
					&ttm_dma32_pages_allocated);
	}

	ttm->page_flags &= ~TTM_TT_FLAG_PRIV_POPULATED;
}
#ifdef CONFIG_DEBUG_FS
/* Test the shrinker functions and dump the result */
static int ttm_tt_debugfs_shrink_show(struct seq_file *m, void *data)
{
	/* Non-interruptible, may wait for GPU. */
	struct ttm_operation_ctx ctx = { false, false };

	seq_printf(m, "%d\n", ttm_global_swapout(&ctx, GFP_KERNEL));
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(ttm_tt_debugfs_shrink);
#endif
/*
 * ttm_tt_mgr_init - register with the MM shrinker
 *
 * Register with the MM shrinker for swapping out BOs.
 */
void ttm_tt_mgr_init(unsigned long num_pages, unsigned long num_dma32_pages)
{
#ifdef CONFIG_DEBUG_FS
	debugfs_create_file("tt_shrink", 0400, ttm_debugfs_root, NULL,
			    &ttm_tt_debugfs_shrink_fops);
#endif

	/* Only apply the defaults if no limit was set (e.g. via module param). */
	if (!ttm_pages_limit)
		ttm_pages_limit = num_pages;

	if (!ttm_dma32_pages_limit)
		ttm_dma32_pages_limit = num_dma32_pages;
}
/* Map page @i of the tt with the iterator's caching-derived protection. */
static void ttm_kmap_iter_tt_map_local(struct ttm_kmap_iter *iter,
				       struct iosys_map *dmap,
				       pgoff_t i)
{
	struct ttm_kmap_iter_tt *iter_tt =
		container_of(iter, typeof(*iter_tt), base);

	iosys_map_set_vaddr(dmap, kmap_local_page_prot(iter_tt->tt->pages[i],
						       iter_tt->prot));
}
/* Undo a mapping established by ttm_kmap_iter_tt_map_local(). */
static void ttm_kmap_iter_tt_unmap_local(struct ttm_kmap_iter *iter,
					 struct iosys_map *map)
{
	kunmap_local(map->vaddr);
}
/* kmap iterator ops for iterating over tt-backed resources. */
static const struct ttm_kmap_iter_ops ttm_kmap_iter_tt_ops = {
	.map_local = ttm_kmap_iter_tt_map_local,
	.unmap_local = ttm_kmap_iter_tt_unmap_local,
	.maps_tt = true,
};
/**
 * ttm_kmap_iter_tt_init - Initialize a struct ttm_kmap_iter_tt
 * @iter_tt: The struct ttm_kmap_iter_tt to initialize.
 * @tt: Struct ttm_tt holding page pointers of the struct ttm_resource.
 *
 * Return: Pointer to the embedded struct ttm_kmap_iter.
 */
struct ttm_kmap_iter *
ttm_kmap_iter_tt_init(struct ttm_kmap_iter_tt *iter_tt,
		      struct ttm_tt *tt)
{
	iter_tt->base.ops = &ttm_kmap_iter_tt_ops;
	iter_tt->tt = tt;
	/* Without a tt there is no caching info; fall back to PAGE_KERNEL. */
	if (tt)
		iter_tt->prot = ttm_prot_from_caching(tt->caching, PAGE_KERNEL);
	else
		iter_tt->prot = PAGE_KERNEL;

	return &iter_tt->base;
}
EXPORT_SYMBOL(ttm_kmap_iter_tt_init);
/* Return the global limit on TTM-allocated pages. */
unsigned long ttm_tt_pages_limit(void)
{
	return ttm_pages_limit;
}
EXPORT_SYMBOL(ttm_tt_pages_limit);
| linux-master | drivers/gpu/drm/ttm/ttm_tt.c |
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
*
* Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
/*
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
* Keith Packard.
*/
#define pr_fmt(fmt) "[TTM] " fmt
#include <drm/ttm/ttm_device.h>
#include <drm/ttm/ttm_tt.h>
#include <drm/ttm/ttm_resource.h>
#include <linux/agp_backend.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <asm/agp.h>
/*
 * AGP-backed ttm_tt: embeds the generic ttm_tt plus the AGP memory object
 * and the bridge used to bind/unbind it.
 */
struct ttm_agp_backend {
	struct ttm_tt ttm;	/* must be first: container_of() is used on it */
	struct agp_memory *mem;
	struct agp_bridge_data *bridge;
};
/*
 * Bind a ttm_tt's pages into the AGP aperture at bo_mem->start.
 * Missing pages are substituted with the global dummy read page.
 * Returns 0 on success or a negative errno from the AGP layer.
 */
int ttm_agp_bind(struct ttm_tt *ttm, struct ttm_resource *bo_mem)
{
	struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm);
	struct page *dummy_read_page = ttm_glob.dummy_read_page;
	struct agp_memory *mem;
	int ret, cached = ttm->caching == ttm_cached;
	unsigned i;

	/* Already bound/allocated: nothing to do. */
	if (agp_be->mem)
		return 0;

	mem = agp_allocate_memory(agp_be->bridge, ttm->num_pages, AGP_USER_MEMORY);
	if (unlikely(mem == NULL))
		return -ENOMEM;

	mem->page_count = 0;
	for (i = 0; i < ttm->num_pages; i++) {
		struct page *page = ttm->pages[i];

		if (!page)
			page = dummy_read_page;

		mem->pages[mem->page_count++] = page;
	}
	agp_be->mem = mem;

	/* Pages were copied above, no extra cache flush needed by AGP. */
	mem->is_flushed = 1;
	mem->type = (cached) ? AGP_USER_CACHED_MEMORY : AGP_USER_MEMORY;

	ret = agp_bind_memory(mem, bo_mem->start);
	if (ret)
		pr_err("AGP Bind memory failed\n");

	return ret;
}
EXPORT_SYMBOL(ttm_agp_bind);
/*
 * Unbind AGP memory if bound, otherwise free the (unbound) allocation.
 * NOTE(review): when the memory is bound we only unbind and return without
 * freeing; agp_be->mem is then released later via ttm_agp_destroy() —
 * presumably after a second unbind call finds it unbound. Confirm against
 * callers before changing this flow.
 */
void ttm_agp_unbind(struct ttm_tt *ttm)
{
	struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm);

	if (agp_be->mem) {
		if (agp_be->mem->is_bound) {
			agp_unbind_memory(agp_be->mem);
			return;
		}
		agp_free_memory(agp_be->mem);
		agp_be->mem = NULL;
	}
}
EXPORT_SYMBOL(ttm_agp_unbind);
/*
 * ttm_agp_is_bound - check whether an AGP memory object is attached.
 * @ttm: The struct ttm_tt to query; may be NULL.
 *
 * Returns true if the backend currently holds an agp_memory allocation.
 */
bool ttm_agp_is_bound(struct ttm_tt *ttm)
{
	struct ttm_agp_backend *agp_be;

	/*
	 * Check for NULL before performing the container_of() pointer
	 * arithmetic; the original computed an offset from a possibly
	 * NULL pointer first, which is undefined behavior even though
	 * the result was never dereferenced.
	 */
	if (!ttm)
		return false;

	agp_be = container_of(ttm, struct ttm_agp_backend, ttm);
	return agp_be->mem != NULL;
}
EXPORT_SYMBOL(ttm_agp_is_bound);
/* Tear down an AGP-backed ttm_tt: unbind/free AGP memory, then free it. */
void ttm_agp_destroy(struct ttm_tt *ttm)
{
	struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm);

	if (agp_be->mem)
		ttm_agp_unbind(ttm);
	ttm_tt_fini(ttm);
	kfree(agp_be);
}
EXPORT_SYMBOL(ttm_agp_destroy);
/*
 * Allocate and initialize an AGP-backed ttm_tt for @bo.
 * Returns the embedded ttm_tt on success, NULL on allocation or init failure.
 */
struct ttm_tt *ttm_agp_tt_create(struct ttm_buffer_object *bo,
				 struct agp_bridge_data *bridge,
				 uint32_t page_flags)
{
	struct ttm_agp_backend *agp_be;

	agp_be = kmalloc(sizeof(*agp_be), GFP_KERNEL);
	if (!agp_be)
		return NULL;

	agp_be->mem = NULL;
	agp_be->bridge = bridge;

	/* AGP apertures are mapped write-combined. */
	if (ttm_tt_init(&agp_be->ttm, bo, page_flags, ttm_write_combined, 0)) {
		kfree(agp_be);
		return NULL;
	}

	return &agp_be->ttm;
}
EXPORT_SYMBOL(ttm_agp_tt_create);
| linux-master | drivers/gpu/drm/ttm/ttm_agp_backend.c |
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
*
* Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
/*
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
*/
#define pr_fmt(fmt) "[TTM] " fmt
#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_tt.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/atomic.h>
#include <linux/dma-resv.h>
#include "ttm_module.h"
/*
 * Dump the requested placements and the state of the corresponding
 * resource managers; used when eviction/placement fails.
 */
static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
				   struct ttm_placement *placement)
{
	struct drm_printer p = drm_debug_printer(TTM_PFX);
	struct ttm_resource_manager *man;
	int i, mem_type;

	for (i = 0; i < placement->num_placement; i++) {
		mem_type = placement->placement[i].mem_type;
		drm_printf(&p, "  placement[%d]=0x%08X (%d)\n",
			   i, placement->placement[i].flags, mem_type);
		man = ttm_manager_type(bo->bdev, mem_type);
		ttm_resource_manager_debug(man, &p);
	}
}
/**
 * ttm_bo_move_to_lru_tail
 *
 * @bo: The buffer object.
 *
 * Move this BO to the tail of all lru lists used to lookup and reserve an
 * object. This function must be called with struct ttm_global::lru_lock
 * held, and is used to make a BO less likely to be considered for eviction.
 */
void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo)
{
	dma_resv_assert_held(bo->base.resv);
	/* A BO without a resource is not on any LRU. */
	if (bo->resource)
		ttm_resource_move_to_lru_tail(bo->resource);
}
EXPORT_SYMBOL(ttm_bo_move_to_lru_tail);
/**
 * ttm_bo_set_bulk_move - update BOs bulk move object
 *
 * @bo: The buffer object.
 * @bulk: bulk move structure
 *
 * Update the BOs bulk move object, making sure that resources are added/removed
 * as well. A bulk move allows to move many resource on the LRU at once,
 * resulting in much less overhead of maintaining the LRU.
 * The only requirement is that the resources stay together on the LRU and are
 * never separated. This is enforced by setting the bulk_move structure on a BO.
 * ttm_lru_bulk_move_tail() should be used to move all resources to the tail of
 * their LRU list.
 */
void ttm_bo_set_bulk_move(struct ttm_buffer_object *bo,
			  struct ttm_lru_bulk_move *bulk)
{
	dma_resv_assert_held(bo->base.resv);

	if (bo->bulk_move == bulk)
		return;

	/* lru_lock protects membership of the resource in the bulk move. */
	spin_lock(&bo->bdev->lru_lock);
	if (bo->resource)
		ttm_resource_del_bulk_move(bo->resource, bo);
	bo->bulk_move = bulk;
	if (bo->resource)
		ttm_resource_add_bulk_move(bo->resource, bo);
	spin_unlock(&bo->bdev->lru_lock);
}
EXPORT_SYMBOL(ttm_bo_set_bulk_move);
/*
 * Move a BO to the memory described by @mem: create/populate a ttm if the
 * new placement needs one, reserve a fence slot and call the driver move
 * callback.  Returns -EMULTIHOP (with *hop filled by the driver) when an
 * intermediate placement is required.
 */
static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
				  struct ttm_resource *mem, bool evict,
				  struct ttm_operation_ctx *ctx,
				  struct ttm_place *hop)
{
	struct ttm_device *bdev = bo->bdev;
	bool old_use_tt, new_use_tt;
	int ret;

	old_use_tt = !bo->resource || ttm_manager_type(bdev, bo->resource->mem_type)->use_tt;
	new_use_tt = ttm_manager_type(bdev, mem->mem_type)->use_tt;

	ttm_bo_unmap_virtual(bo);

	/*
	 * Create and bind a ttm if required.
	 */

	if (new_use_tt) {
		/* Zero init the new TTM structure if the old location should
		 * have used one as well.
		 */
		ret = ttm_tt_create(bo, old_use_tt);
		if (ret)
			goto out_err;

		if (mem->mem_type != TTM_PL_SYSTEM) {
			ret = ttm_tt_populate(bo->bdev, bo->ttm, ctx);
			if (ret)
				goto out_err;
		}
	}

	ret = dma_resv_reserve_fences(bo->base.resv, 1);
	if (ret)
		goto out_err;

	ret = bdev->funcs->move(bo, evict, ctx, mem, hop);
	if (ret) {
		/* Multihop is not an error; caller retries via the hop. */
		if (ret == -EMULTIHOP)
			return ret;
		goto out_err;
	}

	ctx->bytes_moved += bo->base.size;
	return 0;

out_err:
	/* Only drop the ttm we may have created above. */
	if (!old_use_tt)
		ttm_bo_tt_destroy(bo);
	return ret;
}
/*
 * Call bo::reserved.
 * Will release GPU memory type usage on destruction.
 * This is the place to put in driver specific hooks to release
 * driver private resources.
 * Will release the bo::reserved lock.
 */
static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
{
	if (bo->bdev->funcs->delete_mem_notify)
		bo->bdev->funcs->delete_mem_notify(bo);

	ttm_bo_tt_destroy(bo);
	ttm_resource_free(bo, &bo->resource);
}
/*
 * Give a dying BO its own private reservation object so that it no longer
 * shares fences with other BOs (e.g. from the same GEM object group),
 * copying the current fences over first.
 */
static int ttm_bo_individualize_resv(struct ttm_buffer_object *bo)
{
	int r;

	/* Already using the embedded resv, nothing to do. */
	if (bo->base.resv == &bo->base._resv)
		return 0;

	BUG_ON(!dma_resv_trylock(&bo->base._resv));

	r = dma_resv_copy_fences(&bo->base._resv, bo->base.resv);
	dma_resv_unlock(&bo->base._resv);
	if (r)
		return r;

	if (bo->type != ttm_bo_type_sg) {
		/* This works because the BO is about to be destroyed and nobody
		 * reference it any more. The only tricky case is the trylock on
		 * the resv object while holding the lru_lock.
		 */
		spin_lock(&bo->bdev->lru_lock);
		bo->base.resv = &bo->base._resv;
		spin_unlock(&bo->bdev->lru_lock);
	}

	return r;
}
/*
 * Kick software signaling for all fences on the BO's private resv so
 * delayed destruction does not wait forever on fences that need it.
 */
static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
{
	struct dma_resv *resv = &bo->base._resv;
	struct dma_resv_iter cursor;
	struct dma_fence *fence;

	dma_resv_iter_begin(&cursor, resv, DMA_RESV_USAGE_BOOKKEEP);
	dma_resv_for_each_fence_unlocked(&cursor, fence) {
		/* Fences without a signaled callback rely on sw signaling. */
		if (!fence->ops->signaled)
			dma_fence_enable_sw_signaling(fence);
	}
	dma_resv_iter_end(&cursor);
}
/**
 * ttm_bo_cleanup_refs
 * If bo idle, remove from lru lists, and unref.
 * If not idle, block if possible.
 *
 * Must be called with lru_lock and reservation held, this function
 * will drop the lru lock and optionally the reservation lock before returning.
 *
 * @bo: The buffer object to clean-up
 * @interruptible: Any sleeps should occur interruptibly.
 * @no_wait_gpu: Never wait for gpu. Return -EBUSY instead.
 * @unlock_resv: Unlock the reservation lock as well.
 */
static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
			       bool interruptible, bool no_wait_gpu,
			       bool unlock_resv)
{
	struct dma_resv *resv = &bo->base._resv;
	int ret;

	if (dma_resv_test_signaled(resv, DMA_RESV_USAGE_BOOKKEEP))
		ret = 0;
	else
		ret = -EBUSY;

	if (ret && !no_wait_gpu) {
		long lret;

		/* Drop the locks before sleeping on the fences. */
		if (unlock_resv)
			dma_resv_unlock(bo->base.resv);
		spin_unlock(&bo->bdev->lru_lock);

		lret = dma_resv_wait_timeout(resv, DMA_RESV_USAGE_BOOKKEEP,
					     interruptible,
					     30 * HZ);

		if (lret < 0)
			return lret;
		else if (lret == 0)
			return -EBUSY;

		spin_lock(&bo->bdev->lru_lock);
		if (unlock_resv && !dma_resv_trylock(bo->base.resv)) {
			/*
			 * We raced, and lost, someone else holds the reservation now,
			 * and is probably busy in ttm_bo_cleanup_memtype_use.
			 *
			 * Even if it's not the case, because we finished waiting any
			 * delayed destruction would succeed, so just return success
			 * here.
			 */
			spin_unlock(&bo->bdev->lru_lock);
			return 0;
		}
		ret = 0;
	}

	if (ret) {
		/* Still busy and not allowed to wait: bail out. */
		if (unlock_resv)
			dma_resv_unlock(bo->base.resv);
		spin_unlock(&bo->bdev->lru_lock);
		return ret;
	}

	spin_unlock(&bo->bdev->lru_lock);
	ttm_bo_cleanup_memtype_use(bo);

	if (unlock_resv)
		dma_resv_unlock(bo->base.resv);

	return 0;
}
/*
 * Block for the dma_resv object to become idle, lock the buffer and clean up
 * the resource and tt object.
 */
static void ttm_bo_delayed_delete(struct work_struct *work)
{
	struct ttm_buffer_object *bo;

	bo = container_of(work, typeof(*bo), delayed_delete);

	dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP, false,
			      MAX_SCHEDULE_TIMEOUT);
	dma_resv_lock(bo->base.resv, NULL);
	ttm_bo_cleanup_memtype_use(bo);
	dma_resv_unlock(bo->base.resv);
	/* Drop the reference taken when the work was queued. */
	ttm_bo_put(bo);
}
/*
 * Final kref release: destroy the BO immediately if it is idle, otherwise
 * resurrect the kref and queue delayed destruction until the fences signal.
 */
static void ttm_bo_release(struct kref *kref)
{
	struct ttm_buffer_object *bo =
	    container_of(kref, struct ttm_buffer_object, kref);
	struct ttm_device *bdev = bo->bdev;
	int ret;

	WARN_ON_ONCE(bo->pin_count);
	WARN_ON_ONCE(bo->bulk_move);

	if (!bo->deleted) {
		ret = ttm_bo_individualize_resv(bo);
		if (ret) {
			/* Last resort, if we fail to allocate memory for the
			 * fences block for the BO to become idle
			 */
			dma_resv_wait_timeout(bo->base.resv,
					      DMA_RESV_USAGE_BOOKKEEP, false,
					      30 * HZ);
		}

		if (bo->bdev->funcs->release_notify)
			bo->bdev->funcs->release_notify(bo);

		drm_vma_offset_remove(bdev->vma_manager, &bo->base.vma_node);
		ttm_mem_io_free(bdev, bo->resource);

		if (!dma_resv_test_signaled(bo->base.resv,
					    DMA_RESV_USAGE_BOOKKEEP) ||
		    (want_init_on_free() && (bo->ttm != NULL)) ||
		    !dma_resv_trylock(bo->base.resv)) {
			/* The BO is not idle, resurrect it for delayed destroy */
			ttm_bo_flush_all_fences(bo);
			bo->deleted = true;

			spin_lock(&bo->bdev->lru_lock);

			/*
			 * Make pinned bos immediately available to
			 * shrinkers, now that they are queued for
			 * destruction.
			 *
			 * FIXME: QXL is triggering this. Can be removed when the
			 * driver is fixed.
			 */
			if (bo->pin_count) {
				bo->pin_count = 0;
				ttm_resource_move_to_lru_tail(bo->resource);
			}

			/* Resurrect the refcount for the delayed-delete work. */
			kref_init(&bo->kref);
			spin_unlock(&bo->bdev->lru_lock);

			INIT_WORK(&bo->delayed_delete, ttm_bo_delayed_delete);
			queue_work(bdev->wq, &bo->delayed_delete);
			return;
		}

		ttm_bo_cleanup_memtype_use(bo);
		dma_resv_unlock(bo->base.resv);
	}

	atomic_dec(&ttm_glob.bo_count);
	bo->destroy(bo);
}
/**
 * ttm_bo_put
 *
 * @bo: The buffer object.
 *
 * Unreference a buffer object.
 */
void ttm_bo_put(struct ttm_buffer_object *bo)
{
	kref_put(&bo->kref, ttm_bo_release);
}
EXPORT_SYMBOL(ttm_bo_put);
/*
 * Handle a driver-requested multihop: allocate space in the intermediate
 * (hop) placement and move the BO there so the caller can retry the final
 * move.  On failure the hop resource is freed again.
 */
static int ttm_bo_bounce_temp_buffer(struct ttm_buffer_object *bo,
				     struct ttm_resource **mem,
				     struct ttm_operation_ctx *ctx,
				     struct ttm_place *hop)
{
	struct ttm_placement hop_placement;
	struct ttm_resource *hop_mem;
	int ret;

	hop_placement.num_placement = hop_placement.num_busy_placement = 1;
	hop_placement.placement = hop_placement.busy_placement = hop;

	/* find space in the bounce domain */
	ret = ttm_bo_mem_space(bo, &hop_placement, &hop_mem, ctx);
	if (ret)
		return ret;
	/* move to the bounce domain */
	ret = ttm_bo_handle_move_mem(bo, hop_mem, false, ctx, NULL);
	if (ret) {
		ttm_resource_free(bo, &hop_mem);
		return ret;
	}
	return 0;
}
/*
 * Evict a BO to the placement chosen by the driver's evict_flags callback,
 * bouncing through intermediate placements if the driver requests it.
 * If the driver returns no placement at all, gut the BO instead.
 */
static int ttm_bo_evict(struct ttm_buffer_object *bo,
			struct ttm_operation_ctx *ctx)
{
	struct ttm_device *bdev = bo->bdev;
	struct ttm_resource *evict_mem;
	struct ttm_placement placement;
	struct ttm_place hop;
	int ret = 0;

	memset(&hop, 0, sizeof(hop));

	dma_resv_assert_held(bo->base.resv);

	placement.num_placement = 0;
	placement.num_busy_placement = 0;
	bdev->funcs->evict_flags(bo, &placement);

	if (!placement.num_placement && !placement.num_busy_placement) {
		ret = ttm_bo_wait_ctx(bo, ctx);
		if (ret)
			return ret;

		/*
		 * Since we've already synced, this frees backing store
		 * immediately.
		 */
		return ttm_bo_pipeline_gutting(bo);
	}

	ret = ttm_bo_mem_space(bo, &placement, &evict_mem, ctx);
	if (ret) {
		if (ret != -ERESTARTSYS) {
			pr_err("Failed to find memory space for buffer 0x%p eviction\n",
			       bo);
			ttm_bo_mem_space_debug(bo, &placement);
		}
		goto out;
	}

	/* Retry through bounce placements until the move settles or fails. */
	do {
		ret = ttm_bo_handle_move_mem(bo, evict_mem, true, ctx, &hop);
		if (ret != -EMULTIHOP)
			break;

		ret = ttm_bo_bounce_temp_buffer(bo, &evict_mem, ctx, &hop);
	} while (!ret);

	if (ret) {
		ttm_resource_free(bo, &evict_mem);
		if (ret != -ERESTARTSYS && ret != -EINTR)
			pr_err("Buffer eviction failed\n");
	}
out:
	return ret;
}
/**
* ttm_bo_eviction_valuable
*
* @bo: The buffer object to evict
* @place: the placement we need to make room for
*
* Check if it is valuable to evict the BO to make room for the given placement.
*/
bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
const struct ttm_place *place)
{
struct ttm_resource *res = bo->resource;
struct ttm_device *bdev = bo->bdev;
dma_resv_assert_held(bo->base.resv);
if (bo->resource->mem_type == TTM_PL_SYSTEM)
return true;
/* Don't evict this BO if it's outside of the
* requested placement range
*/
return ttm_resource_intersects(bdev, res, place, bo->base.size);
}
EXPORT_SYMBOL(ttm_bo_eviction_valuable);
/*
 * Check the target bo is allowable to be evicted or swapout, including cases:
 *
 * a. if share same reservation object with ctx->resv, have assumption
 * reservation objects should already be locked, so not lock again and
 * return true directly when either the operation allow_res_evict
 * or the target bo already is in delayed free list;
 *
 * b. Otherwise, trylock it.
 */
static bool ttm_bo_evict_swapout_allowable(struct ttm_buffer_object *bo,
					   struct ttm_operation_ctx *ctx,
					   const struct ttm_place *place,
					   bool *locked, bool *busy)
{
	bool ret = false;

	/* Pinned BOs can never be evicted or swapped out. */
	if (bo->pin_count) {
		*locked = false;
		if (busy)
			*busy = false;
		return false;
	}

	if (bo->base.resv == ctx->resv) {
		dma_resv_assert_held(bo->base.resv);
		if (ctx->allow_res_evict)
			ret = true;
		*locked = false;
		if (busy)
			*busy = false;
	} else {
		ret = dma_resv_trylock(bo->base.resv);
		*locked = ret;
		if (busy)
			*busy = !ret;
	}

	/* Ask the driver whether this eviction actually helps @place. */
	if (ret && place && (bo->resource->mem_type != place->mem_type ||
		!bo->bdev->funcs->eviction_valuable(bo, place))) {
		ret = false;
		if (*locked) {
			dma_resv_unlock(bo->base.resv);
			*locked = false;
		}
	}

	return ret;
}
/**
 * ttm_mem_evict_wait_busy - wait for a busy BO to become available
 *
 * @busy_bo: BO which couldn't be locked with trylock
 * @ctx: operation context
 * @ticket: acquire ticket
 *
 * Try to lock a busy buffer object to avoid failing eviction.
 */
static int ttm_mem_evict_wait_busy(struct ttm_buffer_object *busy_bo,
				   struct ttm_operation_ctx *ctx,
				   struct ww_acquire_ctx *ticket)
{
	int r;

	if (!busy_bo || !ticket)
		return -EBUSY;

	if (ctx->interruptible)
		r = dma_resv_lock_interruptible(busy_bo->base.resv,
							  ticket);
	else
		r = dma_resv_lock(busy_bo->base.resv, ticket);

	/*
	 * TODO: It would be better to keep the BO locked until allocation is at
	 * least tried one more time, but that would mean a much larger rework
	 * of TTM.
	 */
	if (!r)
		dma_resv_unlock(busy_bo->base.resv);

	/* -EDEADLK means the ticket lost the ww ordering; report busy. */
	return r == -EDEADLK ? -EBUSY : r;
}
/*
 * Walk the manager's LRU and evict the first BO whose eviction is both
 * possible (lockable, unpinned) and valuable for @place.  If every
 * candidate was busy, wait on the first busy one so the caller can retry.
 */
int ttm_mem_evict_first(struct ttm_device *bdev,
			struct ttm_resource_manager *man,
			const struct ttm_place *place,
			struct ttm_operation_ctx *ctx,
			struct ww_acquire_ctx *ticket)
{
	struct ttm_buffer_object *bo = NULL, *busy_bo = NULL;
	struct ttm_resource_cursor cursor;
	struct ttm_resource *res;
	bool locked = false;
	int ret;

	spin_lock(&bdev->lru_lock);
	ttm_resource_manager_for_each_res(man, &cursor, res) {
		bool busy;

		if (!ttm_bo_evict_swapout_allowable(res->bo, ctx, place,
						    &locked, &busy)) {
			/* Remember the first busy BO not held by our ticket. */
			if (busy && !busy_bo && ticket !=
			    dma_resv_locking_ctx(res->bo->base.resv))
				busy_bo = res->bo;
			continue;
		}

		if (ttm_bo_get_unless_zero(res->bo)) {
			bo = res->bo;
			break;
		}
		/* BO is being destroyed; drop the lock we just took. */
		if (locked)
			dma_resv_unlock(res->bo->base.resv);
	}

	if (!bo) {
		if (busy_bo && !ttm_bo_get_unless_zero(busy_bo))
			busy_bo = NULL;
		spin_unlock(&bdev->lru_lock);
		ret = ttm_mem_evict_wait_busy(busy_bo, ctx, ticket);
		if (busy_bo)
			ttm_bo_put(busy_bo);
		return ret;
	}

	if (bo->deleted) {
		/* Already on its way out; just finish the cleanup. */
		ret = ttm_bo_cleanup_refs(bo, ctx->interruptible,
					  ctx->no_wait_gpu, locked);
		ttm_bo_put(bo);
		return ret;
	}

	spin_unlock(&bdev->lru_lock);

	ret = ttm_bo_evict(bo, ctx);
	if (locked)
		ttm_bo_unreserve(bo);
	else
		ttm_bo_move_to_lru_tail_unlocked(bo);

	ttm_bo_put(bo);
	return ret;
}
/**
 * ttm_bo_pin - Pin the buffer object.
 * @bo: The buffer object to pin
 *
 * Make sure the buffer is not evicted any more during memory pressure.
 * @bo must be unpinned again by calling ttm_bo_unpin().
 */
void ttm_bo_pin(struct ttm_buffer_object *bo)
{
	dma_resv_assert_held(bo->base.resv);
	WARN_ON_ONCE(!kref_read(&bo->kref));
	spin_lock(&bo->bdev->lru_lock);
	/* Pinned BOs may not take part in bulk LRU moves. */
	if (bo->resource)
		ttm_resource_del_bulk_move(bo->resource, bo);
	++bo->pin_count;
	spin_unlock(&bo->bdev->lru_lock);
}
EXPORT_SYMBOL(ttm_bo_pin);
/**
 * ttm_bo_unpin - Unpin the buffer object.
 * @bo: The buffer object to unpin
 *
 * Allows the buffer object to be evicted again during memory pressure.
 */
void ttm_bo_unpin(struct ttm_buffer_object *bo)
{
	dma_resv_assert_held(bo->base.resv);
	WARN_ON_ONCE(!kref_read(&bo->kref));
	/* Guard against unbalanced unpin. */
	if (WARN_ON_ONCE(!bo->pin_count))
		return;

	spin_lock(&bo->bdev->lru_lock);
	--bo->pin_count;
	/* Re-add to its bulk move now that it is evictable again. */
	if (bo->resource)
		ttm_resource_add_bulk_move(bo->resource, bo);
	spin_unlock(&bo->bdev->lru_lock);
}
EXPORT_SYMBOL(ttm_bo_unpin);
/*
 * Add the last move fence to the BO as kernel dependency and reserve a new
 * fence slot.
 */
static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
				 struct ttm_resource_manager *man,
				 struct ttm_resource *mem,
				 bool no_wait_gpu)
{
	struct dma_fence *fence;
	int ret;

	/* Take a reference under the manager's move_lock. */
	spin_lock(&man->move_lock);
	fence = dma_fence_get(man->move);
	spin_unlock(&man->move_lock);

	if (!fence)
		return 0;

	if (no_wait_gpu) {
		ret = dma_fence_is_signaled(fence) ? 0 : -EBUSY;
		dma_fence_put(fence);
		return ret;
	}

	dma_resv_add_fence(bo->base.resv, fence, DMA_RESV_USAGE_KERNEL);

	/* Refill the slot consumed by the fence we just added. */
	ret = dma_resv_reserve_fences(bo->base.resv, 1);
	dma_fence_put(fence);
	return ret;
}
/*
 * Repeatedly evict memory from the LRU for @mem_type until we create enough
 * space, or we've evicted everything and there isn't enough space.
 */
static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
				  const struct ttm_place *place,
				  struct ttm_resource **mem,
				  struct ttm_operation_ctx *ctx)
{
	struct ttm_device *bdev = bo->bdev;
	struct ttm_resource_manager *man;
	struct ww_acquire_ctx *ticket;
	int ret;

	man = ttm_manager_type(bdev, place->mem_type);
	ticket = dma_resv_locking_ctx(bo->base.resv);
	do {
		ret = ttm_resource_alloc(bo, place, mem);
		if (likely(!ret))
			break;
		/* Anything other than "no space" is a hard failure. */
		if (unlikely(ret != -ENOSPC))
			return ret;
		ret = ttm_mem_evict_first(bdev, man, place, ctx,
					  ticket);
		if (unlikely(ret != 0))
			return ret;
	} while (1);

	return ttm_bo_add_move_fence(bo, man, *mem, ctx->no_wait_gpu);
}
/**
 * ttm_bo_mem_space
 *
 * @bo: Pointer to a struct ttm_buffer_object. the data of which
 * we want to allocate space for.
 * @placement: Proposed new placement for the buffer object.
 * @mem: A struct ttm_resource.
 * @ctx: if and how to sleep, lock buffers and alloc memory
 *
 * Allocate memory space for the buffer object pointed to by @bo, using
 * the placement flags in @placement, potentially evicting other idle buffer objects.
 * This function may sleep while waiting for space to become available.
 * Returns:
 * -EBUSY: No space available (only if no_wait == 1).
 * -ENOMEM: Could not allocate memory for the buffer object, either due to
 * fragmentation or concurrent allocators.
 * -ERESTARTSYS: An interruptible sleep was interrupted by a signal.
 */
int ttm_bo_mem_space(struct ttm_buffer_object *bo,
			struct ttm_placement *placement,
			struct ttm_resource **mem,
			struct ttm_operation_ctx *ctx)
{
	struct ttm_device *bdev = bo->bdev;
	bool type_found = false;
	int i, ret;

	ret = dma_resv_reserve_fences(bo->base.resv, 1);
	if (unlikely(ret))
		return ret;

	/* First pass: try the preferred placements without evicting. */
	for (i = 0; i < placement->num_placement; ++i) {
		const struct ttm_place *place = &placement->placement[i];
		struct ttm_resource_manager *man;

		man = ttm_manager_type(bdev, place->mem_type);
		if (!man || !ttm_resource_manager_used(man))
			continue;

		type_found = true;
		ret = ttm_resource_alloc(bo, place, mem);
		if (ret == -ENOSPC)
			continue;
		if (unlikely(ret))
			goto error;

		ret = ttm_bo_add_move_fence(bo, man, *mem, ctx->no_wait_gpu);
		if (unlikely(ret)) {
			ttm_resource_free(bo, mem);
			if (ret == -EBUSY)
				continue;

			goto error;
		}
		return 0;
	}

	/* Second pass: busy placements, evicting other BOs as needed. */
	for (i = 0; i < placement->num_busy_placement; ++i) {
		const struct ttm_place *place = &placement->busy_placement[i];
		struct ttm_resource_manager *man;

		man = ttm_manager_type(bdev, place->mem_type);
		if (!man || !ttm_resource_manager_used(man))
			continue;

		type_found = true;
		ret = ttm_bo_mem_force_space(bo, place, mem, ctx);
		if (likely(!ret))
			return 0;

		if (ret && ret != -EBUSY)
			goto error;
	}

	ret = -ENOMEM;
	if (!type_found) {
		pr_err(TTM_PFX "No compatible memory type found\n");
		ret = -EINVAL;
	}

error:
	return ret;
}
EXPORT_SYMBOL(ttm_bo_mem_space);
/*
 * Allocate space per @placement and move the BO there, looping through
 * driver-requested bounce placements until the final move succeeds.
 */
static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
			      struct ttm_placement *placement,
			      struct ttm_operation_ctx *ctx)
{
	struct ttm_resource *mem;
	struct ttm_place hop;
	int ret;

	dma_resv_assert_held(bo->base.resv);

	/*
	 * Determine where to move the buffer.
	 *
	 * If driver determines move is going to need
	 * an extra step then it will return -EMULTIHOP
	 * and the buffer will be moved to the temporary
	 * stop and the driver will be called to make
	 * the second hop.
	 */
	ret = ttm_bo_mem_space(bo, placement, &mem, ctx);
	if (ret)
		return ret;
bounce:
	ret = ttm_bo_handle_move_mem(bo, mem, false, ctx, &hop);
	if (ret == -EMULTIHOP) {
		ret = ttm_bo_bounce_temp_buffer(bo, &mem, ctx, &hop);
		if (ret)
			goto out;
		/* try and move to final place now. */
		goto bounce;
	}
out:
	if (ret)
		ttm_resource_free(bo, &mem);
	return ret;
}
/**
 * ttm_bo_validate
 *
 * @bo: The buffer object.
 * @placement: Proposed placement for the buffer object.
 * @ctx: validation parameters.
 *
 * Changes placement and caching policy of the buffer object
 * according proposed placement.
 * Returns
 * -EINVAL on invalid proposed placement.
 * -ENOMEM on out-of-memory condition.
 * -EBUSY if no_wait is true and buffer busy.
 * -ERESTARTSYS if interrupted by a signal.
 */
int ttm_bo_validate(struct ttm_buffer_object *bo,
		    struct ttm_placement *placement,
		    struct ttm_operation_ctx *ctx)
{
	int ret;

	dma_resv_assert_held(bo->base.resv);

	/*
	 * Remove the backing store if no placement is given.
	 */
	if (!placement->num_placement && !placement->num_busy_placement)
		return ttm_bo_pipeline_gutting(bo);

	/* Check whether we need to move buffer. */
	if (bo->resource && ttm_resource_compat(bo->resource, placement))
		return 0;

	/* Moving of pinned BOs is forbidden */
	if (bo->pin_count)
		return -EINVAL;

	ret = ttm_bo_move_buffer(bo, placement, ctx);
	if (ret)
		return ret;

	/*
	 * We might need to add a TTM.
	 */
	if (!bo->resource || bo->resource->mem_type == TTM_PL_SYSTEM) {
		ret = ttm_tt_create(bo, true);
		if (ret)
			return ret;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_bo_validate);
/**
* ttm_bo_init_reserved
*
* @bdev: Pointer to a ttm_device struct.
* @bo: Pointer to a ttm_buffer_object to be initialized.
* @type: Requested type of buffer object.
* @placement: Initial placement for buffer object.
* @alignment: Data alignment in pages.
* @ctx: TTM operation context for memory allocation.
* @sg: Scatter-gather table.
* @resv: Pointer to a dma_resv, or NULL to let ttm allocate one.
* @destroy: Destroy function. Use NULL for kfree().
*
* This function initializes a pre-allocated struct ttm_buffer_object.
* As this object may be part of a larger structure, this function,
* together with the @destroy function, enables driver-specific objects
* derived from a ttm_buffer_object.
*
* On successful return, the caller owns an object kref to @bo. The kref and
* list_kref are usually set to 1, but note that in some situations, other
* tasks may already be holding references to @bo as well.
* Furthermore, if resv == NULL, the buffer's reservation lock will be held,
* and it is the caller's responsibility to call ttm_bo_unreserve.
*
* If a failure occurs, the function will call the @destroy function. Thus,
* after a failure, dereferencing @bo is illegal and will likely cause memory
* corruption.
*
* Returns
* -ENOMEM: Out of memory.
* -EINVAL: Invalid placement flags.
* -ERESTARTSYS: Interrupted by signal while sleeping waiting for resources.
*/
int ttm_bo_init_reserved(struct ttm_device *bdev, struct ttm_buffer_object *bo,
			 enum ttm_bo_type type, struct ttm_placement *placement,
			 uint32_t alignment, struct ttm_operation_ctx *ctx,
			 struct sg_table *sg, struct dma_resv *resv,
			 void (*destroy) (struct ttm_buffer_object *))
{
	int ret;
	/* Caller's reference; dropped via ttm_bo_put() on the error paths. */
	kref_init(&bo->kref);
	bo->bdev = bdev;
	bo->type = type;
	bo->page_alignment = alignment;
	bo->destroy = destroy;
	bo->pin_count = 0;
	bo->sg = sg;
	bo->bulk_move = NULL;
	/* Use the caller-provided reservation object, or the embedded one. */
	if (resv)
		bo->base.resv = resv;
	else
		bo->base.resv = &bo->base._resv;
	atomic_inc(&ttm_glob.bo_count);
	/*
	 * For ttm_bo_type_device buffers, allocate
	 * address space from the device.
	 */
	if (bo->type == ttm_bo_type_device || bo->type == ttm_bo_type_sg) {
		ret = drm_vma_offset_add(bdev->vma_manager, &bo->base.vma_node,
					 PFN_UP(bo->base.size));
		if (ret)
			goto err_put;
	}
	/* passed reservation objects should already be locked,
	 * since otherwise lockdep will be angered in radeon.
	 */
	if (!resv)
		WARN_ON(!dma_resv_trylock(bo->base.resv));
	else
		dma_resv_assert_held(resv);
	/* Move the BO into its initial placement; may allocate backing store. */
	ret = ttm_bo_validate(bo, placement, ctx);
	if (unlikely(ret))
		goto err_unlock;
	return 0;
err_unlock:
	/* Only drop the lock we took ourselves; the caller owns @resv. */
	if (!resv)
		dma_resv_unlock(bo->base.resv);
err_put:
	/* Dropping the last ref invokes @destroy, so @bo is dead after this. */
	ttm_bo_put(bo);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_init_reserved);
/**
* ttm_bo_init_validate
*
* @bdev: Pointer to a ttm_device struct.
* @bo: Pointer to a ttm_buffer_object to be initialized.
* @type: Requested type of buffer object.
* @placement: Initial placement for buffer object.
* @alignment: Data alignment in pages.
 * @interruptible: If needing to sleep to wait for GPU resources,
 * sleep interruptible.
* @sg: Scatter-gather table.
* @resv: Pointer to a dma_resv, or NULL to let ttm allocate one.
* @destroy: Destroy function. Use NULL for kfree().
*
* This function initializes a pre-allocated struct ttm_buffer_object.
* As this object may be part of a larger structure, this function,
* together with the @destroy function,
* enables driver-specific objects derived from a ttm_buffer_object.
*
* On successful return, the caller owns an object kref to @bo. The kref and
* list_kref are usually set to 1, but note that in some situations, other
* tasks may already be holding references to @bo as well.
*
 * If a failure occurs, the function will call the @destroy function. Thus,
* after a failure, dereferencing @bo is illegal and will likely cause memory
* corruption.
*
* Returns
* -ENOMEM: Out of memory.
* -EINVAL: Invalid placement flags.
* -ERESTARTSYS: Interrupted by signal while sleeping waiting for resources.
*/
int ttm_bo_init_validate(struct ttm_device *bdev, struct ttm_buffer_object *bo,
			 enum ttm_bo_type type, struct ttm_placement *placement,
			 uint32_t alignment, bool interruptible,
			 struct sg_table *sg, struct dma_resv *resv,
			 void (*destroy) (struct ttm_buffer_object *))
{
	struct ttm_operation_ctx ctx = {
		.interruptible = interruptible,
		.no_wait_gpu = false,
	};
	int ret;

	/* Initialize and validate; on success the BO comes back reserved. */
	ret = ttm_bo_init_reserved(bdev, bo, type, placement, alignment, &ctx,
				   sg, resv, destroy);
	if (ret)
		return ret;

	/* Drop the reservation only if we took it ourselves (resv == NULL). */
	if (!resv)
		ttm_bo_unreserve(bo);

	return 0;
}
EXPORT_SYMBOL(ttm_bo_init_validate);
/*
* buffer object vm functions.
*/
/**
* ttm_bo_unmap_virtual
*
* @bo: tear down the virtual mappings for this BO
*/
void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
{
	struct ttm_device *bdev = bo->bdev;
	/* Zap all userspace CPU mappings of this BO ... */
	drm_vma_node_unmap(&bo->base.vma_node, bdev->dev_mapping);
	/* ... and release any ioremap of the backing resource. */
	ttm_mem_io_free(bdev, bo->resource);
}
EXPORT_SYMBOL(ttm_bo_unmap_virtual);
/**
* ttm_bo_wait_ctx - wait for buffer idle.
*
* @bo: The buffer object.
* @ctx: defines how to wait
*
* Waits for the buffer to be idle. Used timeout depends on the context.
 * Returns -EBUSY if wait timed out, -ERESTARTSYS if interrupted by a signal or
* zero on success.
*/
int ttm_bo_wait_ctx(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx)
{
	long ret;
	/* Non-blocking mode: just poll the fences. */
	if (ctx->no_wait_gpu) {
		if (dma_resv_test_signaled(bo->base.resv,
					   DMA_RESV_USAGE_BOOKKEEP))
			return 0;
		else
			return -EBUSY;
	}
	/* Blocking wait with a fixed 15s timeout on all bookkeeping fences. */
	ret = dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP,
				    ctx->interruptible, 15 * HZ);
	if (unlikely(ret < 0))
		return ret;
	/* Zero from dma_resv_wait_timeout() means the timeout expired. */
	if (unlikely(ret == 0))
		return -EBUSY;
	return 0;
}
EXPORT_SYMBOL(ttm_bo_wait_ctx);
/*
 * ttm_bo_swapout - Swap the backing pages of a BO out to shmem.
 *
 * @bo: The buffer object, expected locked by the LRU walk.
 * @ctx: Operation context controlling waits.
 * @gfp_flags: Allocation flags for the swap storage.
 *
 * Moves the BO to cached SYSTEM placement if necessary, waits for idle and
 * hands the TT pages to the swap backend. Returns 0 on success, -ENOSPC if
 * the BO is busy, or another negative error code. Called with
 * bo->bdev->lru_lock held, which is dropped on the way out.
 */
int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
		   gfp_t gfp_flags)
{
	struct ttm_place place;
	bool locked;
	long ret;
	/*
	 * While the bo may already reside in SYSTEM placement, set
	 * SYSTEM as new placement to cover also the move further below.
	 * The driver may use the fact that we're moving from SYSTEM
	 * as an indication that we're about to swap out.
	 */
	memset(&place, 0, sizeof(place));
	place.mem_type = bo->resource->mem_type;
	if (!ttm_bo_evict_swapout_allowable(bo, ctx, &place, &locked, NULL))
		return -EBUSY;
	/* Skip BOs without swappable TT pages, or whose refcount hit zero. */
	if (!bo->ttm || !ttm_tt_is_populated(bo->ttm) ||
	    bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL ||
	    bo->ttm->page_flags & TTM_TT_FLAG_SWAPPED ||
	    !ttm_bo_get_unless_zero(bo)) {
		if (locked)
			dma_resv_unlock(bo->base.resv);
		return -EBUSY;
	}
	/* A dying BO just gets its delayed destruction finished instead. */
	if (bo->deleted) {
		ret = ttm_bo_cleanup_refs(bo, false, false, locked);
		ttm_bo_put(bo);
		return ret == -EBUSY ? -ENOSPC : ret;
	}
	/* TODO: Cleanup the locking */
	spin_unlock(&bo->bdev->lru_lock);
	/*
	 * Move to system cached
	 */
	if (bo->resource->mem_type != TTM_PL_SYSTEM) {
		struct ttm_resource *evict_mem;
		struct ttm_place hop;
		memset(&hop, 0, sizeof(hop));
		place.mem_type = TTM_PL_SYSTEM;
		ret = ttm_resource_alloc(bo, &place, &evict_mem);
		if (unlikely(ret))
			goto out;
		ret = ttm_bo_handle_move_mem(bo, evict_mem, true, ctx, &hop);
		if (unlikely(ret != 0)) {
			/* Fixed message typo: "swaput" -> "swapout". */
			WARN(ret == -EMULTIHOP, "Unexpected multihop in swapout - likely driver bug.\n");
			ttm_resource_free(bo, &evict_mem);
			goto out;
		}
	}
	/*
	 * Make sure BO is idle.
	 */
	ret = ttm_bo_wait_ctx(bo, ctx);
	if (unlikely(ret != 0))
		goto out;
	ttm_bo_unmap_virtual(bo);
	/*
	 * Swap out. Buffer will be swapped in again as soon as
	 * anyone tries to access a ttm page.
	 */
	if (bo->bdev->funcs->swap_notify)
		bo->bdev->funcs->swap_notify(bo);
	if (ttm_tt_is_populated(bo->ttm))
		ret = ttm_tt_swapout(bo->bdev, bo->ttm, gfp_flags);
out:
	/*
	 * Unreserve without putting on LRU to avoid swapping out an
	 * already swapped buffer.
	 */
	if (locked)
		dma_resv_unlock(bo->base.resv);
	ttm_bo_put(bo);
	return ret == -EBUSY ? -ENOSPC : ret;
}
/* Tear down and release the TT object backing @bo, if any. */
void ttm_bo_tt_destroy(struct ttm_buffer_object *bo)
{
	struct ttm_tt *tt = bo->ttm;

	if (!tt)
		return;

	ttm_tt_unpopulate(bo->bdev, tt);
	ttm_tt_destroy(bo->bdev, tt);
	bo->ttm = NULL;
}
| linux-master | drivers/gpu/drm/ttm/ttm_bo.c |
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
#include <drm/ttm/ttm_resource.h>
#include <drm/ttm/ttm_device.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/slab.h>
#include "ttm_module.h"
/* Allocate a bare ttm_resource for the system memory domain. */
static int ttm_sys_man_alloc(struct ttm_resource_manager *man,
			     struct ttm_buffer_object *bo,
			     const struct ttm_place *place,
			     struct ttm_resource **res)
{
	struct ttm_resource *new_res;

	new_res = kzalloc(sizeof(*new_res), GFP_KERNEL);
	if (!new_res)
		return -ENOMEM;

	ttm_resource_init(bo, place, new_res);
	*res = new_res;
	return 0;
}
/* Release a system-domain resource previously returned by ttm_sys_man_alloc(). */
static void ttm_sys_man_free(struct ttm_resource_manager *man,
			     struct ttm_resource *res)
{
	ttm_resource_fini(man, res);
	kfree(res);
}
/* Manager ops for the built-in system memory domain. */
static const struct ttm_resource_manager_func ttm_sys_manager_func = {
	.alloc = ttm_sys_man_alloc,
	.free = ttm_sys_man_free,
};
/* Set up the TTM_PL_SYSTEM resource manager embedded in @bdev. */
void ttm_sys_man_init(struct ttm_device *bdev)
{
	struct ttm_resource_manager *man = &bdev->sysman;
	/*
	 * Initialize the system memory buffer type.
	 * Other types need to be driver / IOCTL initialized.
	 */
	man->use_tt = true;
	man->func = &ttm_sys_manager_func;
	/* Size 0: system memory is not a bounded domain. */
	ttm_resource_manager_init(man, bdev, 0);
	ttm_set_driver_manager(bdev, TTM_PL_SYSTEM, man);
	ttm_resource_manager_set_used(man, true);
}
| linux-master | drivers/gpu/drm/ttm/ttm_sys_manager.c |
// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
* Copyright 2020 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Christian König
*/
/* Pooling of allocated pages is necessary because changing the caching
* attributes on x86 of the linear mapping requires a costly cross CPU TLB
* invalidate for those addresses.
*
* Additional to that allocations from the DMA coherent API are pooled as well
* cause they are rather slow compared to alloc_pages+map.
*/
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/sched/mm.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif
#include <drm/ttm/ttm_pool.h>
#include <drm/ttm/ttm_tt.h>
#include <drm/ttm/ttm_bo.h>
#include "ttm_module.h"
/**
 * struct ttm_pool_dma - Helper object for coherent DMA mappings
 *
 * @addr: original DMA address returned for the mapping
 * @vaddr: original vaddr return for the mapping and order in the lower bits
 */
struct ttm_pool_dma {
	dma_addr_t addr;
	unsigned long vaddr;
};
/* Upper bound on pooled pages; 0 means "use the default given at init". */
static unsigned long page_pool_size;
MODULE_PARM_DESC(page_pool_size, "Number of pages in the WC/UC/DMA pool");
module_param(page_pool_size, ulong, 0644);
/* Total number of pages currently sitting in any pool. */
static atomic_long_t allocated_pages;
/* Global pools shared by all devices (x86 only), indexed by order. */
static struct ttm_pool_type global_write_combined[MAX_ORDER + 1];
static struct ttm_pool_type global_uncached[MAX_ORDER + 1];
static struct ttm_pool_type global_dma32_write_combined[MAX_ORDER + 1];
static struct ttm_pool_type global_dma32_uncached[MAX_ORDER + 1];
/* Protects shrinker_list; the list is rotated for round-robin shrinking. */
static spinlock_t shrinker_lock;
static struct list_head shrinker_list;
static struct shrinker mm_shrinker;
/* Allocate pages of size 1 << order with the given gfp_flags */
static struct page *ttm_pool_alloc_page(struct ttm_pool *pool, gfp_t gfp_flags,
					unsigned int order)
{
	unsigned long attr = DMA_ATTR_FORCE_CONTIGUOUS;
	struct ttm_pool_dma *dma;
	struct page *p;
	void *vaddr;
	/* Don't set the __GFP_COMP flag for higher order allocations.
	 * Mapping pages directly into an userspace process and calling
	 * put_page() on a TTM allocated page is illegal.
	 */
	if (order)
		gfp_flags |= __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN |
			__GFP_KSWAPD_RECLAIM;
	/* Plain page allocation path: stash the order in page->private. */
	if (!pool->use_dma_alloc) {
		p = alloc_pages_node(pool->nid, gfp_flags, order);
		if (p)
			p->private = order;
		return p;
	}
	/* DMA-coherent path: track addr/vaddr in a ttm_pool_dma helper. */
	dma = kmalloc(sizeof(*dma), GFP_KERNEL);
	if (!dma)
		return NULL;
	if (order)
		attr |= DMA_ATTR_NO_WARN;
	vaddr = dma_alloc_attrs(pool->dev, (1ULL << order) * PAGE_SIZE,
				&dma->addr, gfp_flags, attr);
	if (!vaddr)
		goto error_free;
	/* TODO: This is an illegal abuse of the DMA API, but we need to rework
	 * TTM page fault handling and extend the DMA API to clean this up.
	 */
	if (is_vmalloc_addr(vaddr))
		p = vmalloc_to_page(vaddr);
	else
		p = virt_to_page(vaddr);
	/* The order rides in the low bits of the page-aligned vaddr. */
	dma->vaddr = (unsigned long)vaddr | order;
	p->private = (unsigned long)dma;
	return p;
error_free:
	kfree(dma);
	return NULL;
}
/* Reset the caching and pages of size 1 << order */
static void ttm_pool_free_page(struct ttm_pool *pool, enum ttm_caching caching,
			       unsigned int order, struct page *p)
{
	unsigned long attr = DMA_ATTR_FORCE_CONTIGUOUS;
	struct ttm_pool_dma *dma;
	void *vaddr;
#ifdef CONFIG_X86
	/* We don't care that set_pages_wb is inefficient here. This is only
	 * used when we have to shrink and CPU overhead is irrelevant then.
	 */
	if (caching != ttm_cached && !PageHighMem(p))
		set_pages_wb(p, 1 << order);
#endif
	/* Non-DMA pages (pool may be NULL when freeing from a global pool). */
	if (!pool || !pool->use_dma_alloc) {
		__free_pages(p, order);
		return;
	}
	if (order)
		attr |= DMA_ATTR_NO_WARN;
	/* Recover the mapping info stashed by ttm_pool_alloc_page(). */
	dma = (void *)p->private;
	vaddr = (void *)(dma->vaddr & PAGE_MASK);
	dma_free_attrs(pool->dev, (1UL << order) * PAGE_SIZE, vaddr, dma->addr,
		       attr);
	kfree(dma);
}
/* Apply a new caching attribute to the pages in [first, last). */
static int ttm_pool_apply_caching(struct page **first, struct page **last,
				  enum ttm_caching caching)
{
#ifdef CONFIG_X86
	unsigned int count = last - first;

	if (!count)
		return 0;

	if (caching == ttm_write_combined)
		return set_pages_array_wc(first, count);
	if (caching == ttm_uncached)
		return set_pages_array_uc(first, count);
#endif
	/* ttm_cached, or architectures without linear-map attribute games. */
	return 0;
}
/* Map pages of 1 << order size and fill the DMA address array */
static int ttm_pool_map(struct ttm_pool *pool, unsigned int order,
			struct page *p, dma_addr_t **dma_addr)
{
	dma_addr_t addr;
	unsigned int i;
	if (pool->use_dma_alloc) {
		/* Already mapped by dma_alloc_attrs(); reuse the address. */
		struct ttm_pool_dma *dma = (void *)p->private;
		addr = dma->addr;
	} else {
		size_t size = (1ULL << order) * PAGE_SIZE;
		addr = dma_map_page(pool->dev, p, 0, size, DMA_BIDIRECTIONAL);
		if (dma_mapping_error(pool->dev, addr))
			return -EFAULT;
	}
	/* Fill one dma_addr_t per 4K page and advance the caller's cursor. */
	for (i = 1 << order; i ; --i) {
		*(*dma_addr)++ = addr;
		addr += PAGE_SIZE;
	}
	return 0;
}
/* Unmap pages of 1 << order size */
static void ttm_pool_unmap(struct ttm_pool *pool, dma_addr_t dma_addr,
			   unsigned int num_pages)
{
	/* Unmapped while freeing the page */
	if (pool->use_dma_alloc)
		return;
	dma_unmap_page(pool->dev, dma_addr, (long)num_pages << PAGE_SHIFT,
		       DMA_BIDIRECTIONAL);
}
/* Return a (possibly compound) page into a specific pool_type. */
static void ttm_pool_type_give(struct ttm_pool_type *pt, struct page *p)
{
	unsigned int num_pages = 1 << pt->order;
	unsigned int i;

	/* Scrub the pages so pooled memory never leaks old contents. */
	for (i = 0; i < num_pages; ++i) {
		if (PageHighMem(p))
			clear_highpage(p + i);
		else
			clear_page(page_address(p + i));
	}

	spin_lock(&pt->lock);
	list_add(&p->lru, &pt->pages);
	spin_unlock(&pt->lock);

	atomic_long_add(num_pages, &allocated_pages);
}
/* Pop one entry from a pool_type; NULL when the pool is empty. */
static struct page *ttm_pool_type_take(struct ttm_pool_type *pt)
{
	struct page *page;

	spin_lock(&pt->lock);
	page = list_first_entry_or_null(&pt->pages, typeof(*page), lru);
	if (page) {
		list_del(&page->lru);
		atomic_long_sub(1 << pt->order, &allocated_pages);
	}
	spin_unlock(&pt->lock);

	return page;
}
/* Initialize and add a pool type to the global shrinker list */
static void ttm_pool_type_init(struct ttm_pool_type *pt, struct ttm_pool *pool,
			       enum ttm_caching caching, unsigned int order)
{
	pt->pool = pool;
	pt->caching = caching;
	pt->order = order;
	spin_lock_init(&pt->lock);
	INIT_LIST_HEAD(&pt->pages);
	/* Register with the shrinker so its pages can be reclaimed. */
	spin_lock(&shrinker_lock);
	list_add_tail(&pt->shrinker_list, &shrinker_list);
	spin_unlock(&shrinker_lock);
}
/* Remove a pool_type from the global shrinker list and free all pages */
static void ttm_pool_type_fini(struct ttm_pool_type *pt)
{
	struct page *p;
	/* Unregister first so the shrinker cannot pick this pool anymore. */
	spin_lock(&shrinker_lock);
	list_del(&pt->shrinker_list);
	spin_unlock(&shrinker_lock);
	while ((p = ttm_pool_type_take(pt)))
		ttm_pool_free_page(pt->pool, pt->caching, pt->order, p);
}
/* Return the pool_type to use for the given caching and order. */
static struct ttm_pool_type *ttm_pool_select_type(struct ttm_pool *pool,
						  enum ttm_caching caching,
						  unsigned int order)
{
	/* DMA-coherent and NUMA-bound pools keep their own per-pool types. */
	if (pool->use_dma_alloc || pool->nid != NUMA_NO_NODE)
		return &pool->caching[caching].orders[order];

#ifdef CONFIG_X86
	if (caching == ttm_write_combined)
		return pool->use_dma32 ?
			&global_dma32_write_combined[order] :
			&global_write_combined[order];

	if (caching == ttm_uncached)
		return pool->use_dma32 ?
			&global_dma32_uncached[order] :
			&global_uncached[order];
#endif

	/* No pooling for this combination; pages are freed directly. */
	return NULL;
}
/* Free one entry from the least recently used pool; returns pages freed. */
static unsigned int ttm_pool_shrink(void)
{
	struct ttm_pool_type *pt;
	struct page *p;

	/* Rotate the list so pools are shrunk round-robin. */
	spin_lock(&shrinker_lock);
	pt = list_first_entry(&shrinker_list, typeof(*pt), shrinker_list);
	list_move_tail(&pt->shrinker_list, &shrinker_list);
	spin_unlock(&shrinker_lock);

	p = ttm_pool_type_take(pt);
	if (!p)
		return 0;

	ttm_pool_free_page(pt->pool, pt->caching, pt->order, p);
	return 1 << pt->order;
}
/* Return the allocation order recorded for a page. */
static unsigned int ttm_pool_page_order(struct ttm_pool *pool, struct page *p)
{
	struct ttm_pool_dma *dma;

	if (!pool->use_dma_alloc)
		return p->private;

	/* DMA path stashes the order in the low bits of the vaddr. */
	dma = (void *)p->private;
	return dma->vaddr & ~PAGE_MASK;
}
/* Called when we got a page, either from a pool or newly allocated */
static int ttm_pool_page_allocated(struct ttm_pool *pool, unsigned int order,
				   struct page *p, dma_addr_t **dma_addr,
				   unsigned long *num_pages,
				   struct page ***pages)
{
	unsigned int i;
	int r;
	/* Fill the DMA address array if the caller requested mappings. */
	if (*dma_addr) {
		r = ttm_pool_map(pool, order, p, dma_addr);
		if (r)
			return r;
	}
	/* Account the pages and append them to the caller's page vector. */
	*num_pages -= 1 << order;
	for (i = 1 << order; i; --i, ++(*pages), ++p)
		**pages = p;
	return 0;
}
/**
* ttm_pool_free_range() - Free a range of TTM pages
* @pool: The pool used for allocating.
* @tt: The struct ttm_tt holding the page pointers.
* @caching: The page caching mode used by the range.
* @start_page: index for first page to free.
* @end_page: index for last page to free + 1.
*
* During allocation the ttm_tt page-vector may be populated with ranges of
* pages with different attributes if allocation hit an error without being
* able to completely fulfill the allocation. This function can be used
* to free these individual ranges.
*/
static void ttm_pool_free_range(struct ttm_pool *pool, struct ttm_tt *tt,
				enum ttm_caching caching,
				pgoff_t start_page, pgoff_t end_page)
{
	struct page **pages = tt->pages;
	unsigned int order;
	pgoff_t i, nr;
	/* Walk in allocation-order-sized strides; *pages is each head page. */
	for (i = start_page; i < end_page; i += nr, pages += nr) {
		struct ttm_pool_type *pt = NULL;
		order = ttm_pool_page_order(pool, *pages);
		nr = (1UL << order);
		if (tt->dma_address)
			ttm_pool_unmap(pool, tt->dma_address[i], nr);
		/* Pool the pages when a matching pool exists, else free. */
		pt = ttm_pool_select_type(pool, caching, order);
		if (pt)
			ttm_pool_type_give(pt, *pages);
		else
			ttm_pool_free_page(pool, caching, order, *pages);
	}
}
/**
* ttm_pool_alloc - Fill a ttm_tt object
*
* @pool: ttm_pool to use
* @tt: ttm_tt object to fill
* @ctx: operation context
*
* Fill the ttm_tt object with pages and also make sure to DMA map them when
* necessary.
*
 * Returns: 0 on success, negative error code otherwise.
*/
int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
		   struct ttm_operation_ctx *ctx)
{
	pgoff_t num_pages = tt->num_pages;
	dma_addr_t *dma_addr = tt->dma_address;
	/* 'caching' marks the first page not yet converted to tt->caching;
	 * 'pages' is the next free slot in the page vector.
	 */
	struct page **caching = tt->pages;
	struct page **pages = tt->pages;
	enum ttm_caching page_caching;
	gfp_t gfp_flags = GFP_USER;
	pgoff_t caching_divide;
	unsigned int order;
	struct page *p;
	int r;
	WARN_ON(!num_pages || ttm_tt_is_populated(tt));
	WARN_ON(dma_addr && !pool->dev);
	if (tt->page_flags & TTM_TT_FLAG_ZERO_ALLOC)
		gfp_flags |= __GFP_ZERO;
	if (ctx->gfp_retry_mayfail)
		gfp_flags |= __GFP_RETRY_MAYFAIL;
	if (pool->use_dma32)
		gfp_flags |= GFP_DMA32;
	else
		gfp_flags |= GFP_HIGHUSER;
	/* Start at the largest useful order and step down when allocations
	 * fail or fewer pages remain than the current order covers.
	 */
	for (order = min_t(unsigned int, MAX_ORDER, __fls(num_pages));
	     num_pages;
	     order = min_t(unsigned int, order, __fls(num_pages))) {
		struct ttm_pool_type *pt;
		page_caching = tt->caching;
		pt = ttm_pool_select_type(pool, tt->caching, order);
		p = pt ? ttm_pool_type_take(pt) : NULL;
		if (p) {
			/* Pooled pages already carry the right caching, so
			 * flush the pending conversion range up to here.
			 */
			r = ttm_pool_apply_caching(caching, pages,
						   tt->caching);
			if (r)
				goto error_free_page;
			caching = pages;
			do {
				r = ttm_pool_page_allocated(pool, order, p,
							    &dma_addr,
							    &num_pages,
							    &pages);
				if (r)
					goto error_free_page;
				caching = pages;
				if (num_pages < (1 << order))
					break;
				p = ttm_pool_type_take(pt);
			} while (p);
		}
		/* Freshly allocated pages come back cached; conversion to
		 * tt->caching is deferred and batched.
		 */
		page_caching = ttm_cached;
		while (num_pages >= (1 << order) &&
		       (p = ttm_pool_alloc_page(pool, gfp_flags, order))) {
			if (PageHighMem(p)) {
				r = ttm_pool_apply_caching(caching, pages,
							   tt->caching);
				if (r)
					goto error_free_page;
				caching = pages;
			}
			r = ttm_pool_page_allocated(pool, order, p, &dma_addr,
						    &num_pages, &pages);
			if (r)
				goto error_free_page;
			if (PageHighMem(p))
				caching = pages;
		}
		if (!p) {
			/* Retry with a smaller order before giving up. */
			if (order) {
				--order;
				continue;
			}
			r = -ENOMEM;
			goto error_free_all;
		}
	}
	/* Convert the remaining tail of fresh pages to the final caching. */
	r = ttm_pool_apply_caching(caching, pages, tt->caching);
	if (r)
		goto error_free_all;
	return 0;
error_free_page:
	ttm_pool_free_page(pool, page_caching, order, p);
error_free_all:
	/* Free what was allocated so far, split at the caching boundary:
	 * pages before it carry tt->caching, pages after are still cached.
	 */
	num_pages = tt->num_pages - num_pages;
	caching_divide = caching - tt->pages;
	ttm_pool_free_range(pool, tt, tt->caching, 0, caching_divide);
	ttm_pool_free_range(pool, tt, ttm_cached, caching_divide, num_pages);
	return r;
}
EXPORT_SYMBOL(ttm_pool_alloc);
/**
* ttm_pool_free - Free the backing pages from a ttm_tt object
*
* @pool: Pool to give pages back to.
* @tt: ttm_tt object to unpopulate
*
 * Give the backing pages back to a pool or free them
*/
void ttm_pool_free(struct ttm_pool *pool, struct ttm_tt *tt)
{
	ttm_pool_free_range(pool, tt, tt->caching, 0, tt->num_pages);
	/* Trim the pools back below the global page limit. */
	while (atomic_long_read(&allocated_pages) > page_pool_size)
		ttm_pool_shrink();
}
EXPORT_SYMBOL(ttm_pool_free);
/**
* ttm_pool_init - Initialize a pool
*
* @pool: the pool to initialize
* @dev: device for DMA allocations and mappings
* @nid: NUMA node to use for allocations
* @use_dma_alloc: true if coherent DMA alloc should be used
* @use_dma32: true if GFP_DMA32 should be used
*
* Initialize the pool and its pool types.
*/
void ttm_pool_init(struct ttm_pool *pool, struct device *dev,
		   int nid, bool use_dma_alloc, bool use_dma32)
{
	unsigned int i, j;
	/* DMA allocations need a device to allocate from. */
	WARN_ON(!dev && use_dma_alloc);
	pool->dev = dev;
	pool->nid = nid;
	pool->use_dma_alloc = use_dma_alloc;
	pool->use_dma32 = use_dma32;
	/* Per-pool types are only needed when the global pools don't apply
	 * (see ttm_pool_select_type()).
	 */
	if (use_dma_alloc || nid != NUMA_NO_NODE) {
		for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
			for (j = 0; j <= MAX_ORDER; ++j)
				ttm_pool_type_init(&pool->caching[i].orders[j],
						   pool, i, j);
	}
}
EXPORT_SYMBOL(ttm_pool_init);
/**
* ttm_pool_fini - Cleanup a pool
*
* @pool: the pool to clean up
*
* Free all pages in the pool and unregister the types from the global
* shrinker.
*/
void ttm_pool_fini(struct ttm_pool *pool)
{
	unsigned int i, j;
	/* Only per-pool types were created; global pools live on. */
	if (pool->use_dma_alloc || pool->nid != NUMA_NO_NODE) {
		for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
			for (j = 0; j <= MAX_ORDER; ++j)
				ttm_pool_type_fini(&pool->caching[i].orders[j]);
	}
	/* We removed the pool types from the LRU, but we need to also make sure
	 * that no shrinker is concurrently freeing pages from the pool.
	 */
	synchronize_shrinkers();
}
EXPORT_SYMBOL(ttm_pool_fini);
/* As long as pages are available make sure to release at least one */
static unsigned long ttm_pool_shrinker_scan(struct shrinker *shrink,
struct shrink_control *sc)
{
unsigned long num_freed = 0;
do
num_freed += ttm_pool_shrink();
while (!num_freed && atomic_long_read(&allocated_pages));
return num_freed;
}
/* Shrinker count callback: pages available, or SHRINK_EMPTY if none. */
static unsigned long ttm_pool_shrinker_count(struct shrinker *shrink,
					     struct shrink_control *sc)
{
	unsigned long avail = atomic_long_read(&allocated_pages);

	if (!avail)
		return SHRINK_EMPTY;

	return avail;
}
#ifdef CONFIG_DEBUG_FS
/* Count the entries currently sitting in a pool_type. */
static unsigned int ttm_pool_type_count(struct ttm_pool_type *pt)
{
	unsigned int n = 0;
	struct page *page;

	spin_lock(&pt->lock);
	/* Linear walk is fine; this is debugfs-only. */
	list_for_each_entry(page, &pt->pages, lru)
		n++;
	spin_unlock(&pt->lock);

	return n;
}
/* Print the column header listing all page orders. */
static void ttm_pool_debugfs_header(struct seq_file *m)
{
	unsigned int order;

	seq_puts(m, "\t ");
	for (order = 0; order <= MAX_ORDER; ++order)
		seq_printf(m, " ---%2u---", order);
	seq_puts(m, "\n");
}
/* Dump information about the different pool types */
static void ttm_pool_debugfs_orders(struct ttm_pool_type *pt,
				    struct seq_file *m)
{
	unsigned int i;
	/* One column per order, matching ttm_pool_debugfs_header(). */
	for (i = 0; i <= MAX_ORDER; ++i)
		seq_printf(m, " %8u", ttm_pool_type_count(&pt[i]));
	seq_puts(m, "\n");
}
/* Dump the total amount of allocated pages */
static void ttm_pool_debugfs_footer(struct seq_file *m)
{
	seq_printf(m, "\ntotal\t: %8lu of %8lu\n",
		   atomic_long_read(&allocated_pages), page_pool_size);
}
/* Dump the information for the global pools */
static int ttm_pool_debugfs_globals_show(struct seq_file *m, void *data)
{
	ttm_pool_debugfs_header(m);
	/* shrinker_lock keeps the pool lists stable while dumping. */
	spin_lock(&shrinker_lock);
	seq_puts(m, "wc\t:");
	ttm_pool_debugfs_orders(global_write_combined, m);
	seq_puts(m, "uc\t:");
	ttm_pool_debugfs_orders(global_uncached, m);
	seq_puts(m, "wc 32\t:");
	ttm_pool_debugfs_orders(global_dma32_write_combined, m);
	seq_puts(m, "uc 32\t:");
	ttm_pool_debugfs_orders(global_dma32_uncached, m);
	spin_unlock(&shrinker_lock);
	ttm_pool_debugfs_footer(m);
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(ttm_pool_debugfs_globals);
/**
* ttm_pool_debugfs - Debugfs dump function for a pool
*
* @pool: the pool to dump the information for
* @m: seq_file to dump to
*
* Make a debugfs dump with the per pool and global information.
*/
int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m)
{
	unsigned int i;
	/* Pools without DMA allocation use the global pools only. */
	if (!pool->use_dma_alloc) {
		seq_puts(m, "unused\n");
		return 0;
	}
	ttm_pool_debugfs_header(m);
	spin_lock(&shrinker_lock);
	for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
		seq_puts(m, "DMA ");
		/* i indexes enum ttm_caching; label each row accordingly. */
		switch (i) {
		case ttm_cached:
			seq_puts(m, "\t:");
			break;
		case ttm_write_combined:
			seq_puts(m, "wc\t:");
			break;
		case ttm_uncached:
			seq_puts(m, "uc\t:");
			break;
		}
		ttm_pool_debugfs_orders(pool->caching[i].orders, m);
	}
	spin_unlock(&shrinker_lock);
	ttm_pool_debugfs_footer(m);
	return 0;
}
EXPORT_SYMBOL(ttm_pool_debugfs);
/* Test the shrinker functions and dump the result */
static int ttm_pool_debugfs_shrink_show(struct seq_file *m, void *data)
{
	struct shrink_control sc = { .gfp_mask = GFP_NOFS };
	/* Pretend we are in reclaim so lockdep checks the shrinker paths. */
	fs_reclaim_acquire(GFP_KERNEL);
	seq_printf(m, "%lu/%lu\n", ttm_pool_shrinker_count(&mm_shrinker, &sc),
		   ttm_pool_shrinker_scan(&mm_shrinker, &sc));
	fs_reclaim_release(GFP_KERNEL);
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(ttm_pool_debugfs_shrink);
#endif
/**
* ttm_pool_mgr_init - Initialize globals
*
* @num_pages: default number of pages
*
* Initialize the global locks and lists for the MM shrinker.
*/
int ttm_pool_mgr_init(unsigned long num_pages)
{
	unsigned int i;
	/* Honor the module parameter if set, otherwise use the default. */
	if (!page_pool_size)
		page_pool_size = num_pages;
	spin_lock_init(&shrinker_lock);
	INIT_LIST_HEAD(&shrinker_list);
	/* Create the four global pools for every order. */
	for (i = 0; i <= MAX_ORDER; ++i) {
		ttm_pool_type_init(&global_write_combined[i], NULL,
				   ttm_write_combined, i);
		ttm_pool_type_init(&global_uncached[i], NULL, ttm_uncached, i);
		ttm_pool_type_init(&global_dma32_write_combined[i], NULL,
				   ttm_write_combined, i);
		ttm_pool_type_init(&global_dma32_uncached[i], NULL,
				   ttm_uncached, i);
	}
#ifdef CONFIG_DEBUG_FS
	debugfs_create_file("page_pool", 0444, ttm_debugfs_root, NULL,
			    &ttm_pool_debugfs_globals_fops);
	debugfs_create_file("page_pool_shrink", 0400, ttm_debugfs_root, NULL,
			    &ttm_pool_debugfs_shrink_fops);
#endif
	mm_shrinker.count_objects = ttm_pool_shrinker_count;
	mm_shrinker.scan_objects = ttm_pool_shrinker_scan;
	mm_shrinker.seeks = 1;
	return register_shrinker(&mm_shrinker, "drm-ttm_pool");
}
/**
* ttm_pool_mgr_fini - Finalize globals
*
* Cleanup the global pools and unregister the MM shrinker.
*/
void ttm_pool_mgr_fini(void)
{
	unsigned int i;
	/* Drain and unregister all global pools. */
	for (i = 0; i <= MAX_ORDER; ++i) {
		ttm_pool_type_fini(&global_write_combined[i]);
		ttm_pool_type_fini(&global_uncached[i]);
		ttm_pool_type_fini(&global_dma32_write_combined[i]);
		ttm_pool_type_fini(&global_dma32_uncached[i]);
	}
	unregister_shrinker(&mm_shrinker);
	WARN_ON(!list_empty(&shrinker_list));
}
| linux-master | drivers/gpu/drm/ttm/ttm_pool.c |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.