python_code
stringlengths 0
1.8M
| repo_name
stringclasses 7
values | file_path
stringlengths 5
99
|
---|---|---|
// SPDX-License-Identifier: GPL-2.0+
#include <linux/device.h>
#include <linux/regmap.h>
#include <linux/mfd/syscon.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include "pl111_nomadik.h"
/* Nomadik PMU control register offset and the CLCD/MDIF mux bit within it */
#define PMU_CTRL_OFFSET 0x0000
#define PMU_CTRL_LCDNDIF BIT(26)
/**
 * pl111_nomadik_init() - mux the Nomadik PMU display output to CLCD mode
 * @dev: the PL111 device, used for logging only
 *
 * Clears the LCDNDIF bit in the PMU control register so the CLCD (and not
 * the MDIF block) drives the display. Silently does nothing when the
 * Nomadik PMU syscon is absent.
 */
void pl111_nomadik_init(struct device *dev)
{
	struct regmap *pmu_regmap;

	/*
	 * Just bail out if this is not found, we could be running
	 * multiplatform on something else than Nomadik.
	 */
	pmu_regmap =
		syscon_regmap_lookup_by_compatible("stericsson,nomadik-pmu");
	if (IS_ERR(pmu_regmap))
		return;

	/*
	 * This bit in the PMU controller multiplexes the two graphics
	 * blocks found in the Nomadik STn8815. The other one is called
	 * MDIF (Master Display Interface) and gets muxed out here.
	 */
	regmap_update_bits(pmu_regmap,
			   PMU_CTRL_OFFSET,
			   PMU_CTRL_LCDNDIF,
			   0);
	dev_info(dev, "set Nomadik PMU mux to CLCD mode\n");
}
EXPORT_SYMBOL_GPL(pl111_nomadik_init);
| linux-master | drivers/gpu/drm/pl111/pl111_nomadik.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright © 2017 Broadcom
*/
#include <linux/seq_file.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_file.h>
#include "pl111_drm.h"
/* Pair a register offset with its stringified name for the debugfs dump */
#define REGDEF(reg) { reg, #reg }
static const struct {
	u32 reg;
	const char *name;
} pl111_reg_defs[] = {
	REGDEF(CLCD_TIM0),
	REGDEF(CLCD_TIM1),
	REGDEF(CLCD_TIM2),
	REGDEF(CLCD_TIM3),
	REGDEF(CLCD_UBAS),
	REGDEF(CLCD_LBAS),
	REGDEF(CLCD_PL111_CNTL),
	REGDEF(CLCD_PL111_IENB),
	REGDEF(CLCD_PL111_RIS),
	REGDEF(CLCD_PL111_MIS),
	REGDEF(CLCD_PL111_ICR),
	REGDEF(CLCD_PL111_UCUR),
	REGDEF(CLCD_PL111_LCUR),
};
static int pl111_debugfs_regs(struct seq_file *m, void *unused)
{
struct drm_info_node *node = (struct drm_info_node *)m->private;
struct drm_device *dev = node->minor->dev;
struct pl111_drm_dev_private *priv = dev->dev_private;
int i;
for (i = 0; i < ARRAY_SIZE(pl111_reg_defs); i++) {
seq_printf(m, "%s (0x%04x): 0x%08x\n",
pl111_reg_defs[i].name, pl111_reg_defs[i].reg,
readl(priv->regs + pl111_reg_defs[i].reg));
}
return 0;
}
/* Single debugfs entry: "regs" runs the register dump above */
static const struct drm_info_list pl111_debugfs_list[] = {
	{"regs", pl111_debugfs_regs, 0},
};
/* Register the "regs" file in this DRM minor's debugfs directory */
void
pl111_debugfs_init(struct drm_minor *minor)
{
	drm_debugfs_create_files(pl111_debugfs_list,
				 ARRAY_SIZE(pl111_debugfs_list),
				 minor->debugfs_root, minor);
}
| linux-master | drivers/gpu/drm/pl111/pl111_debugfs.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* (C) COPYRIGHT 2012-2013 ARM Limited. All rights reserved.
*
* Parts of this file were based on sources as follows:
*
* Copyright (c) 2006-2008 Intel Corporation
* Copyright (c) 2007 Dave Airlie <[email protected]>
* Copyright (C) 2011 Texas Instruments
*/
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-buf.h>
#include <linux/media-bus-format.h>
#include <linux/of_graph.h>
#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_vblank.h>
#include "pl111_drm.h"
/*
 * Interrupt handler: forward the next-base-update interrupt as a vblank
 * event and acknowledge whatever was pending.
 */
irqreturn_t pl111_irq(int irq, void *data)
{
	struct pl111_drm_dev_private *priv = data;
	irqreturn_t ret = IRQ_NONE;
	u32 stat = readl(priv->regs + CLCD_PL111_MIS);

	if (!stat)
		return IRQ_NONE;

	if (stat & CLCD_IRQ_NEXTBASE_UPDATE) {
		drm_crtc_handle_vblank(&priv->pipe.crtc);
		ret = IRQ_HANDLED;
	}

	/* Clear everything we observed in one go */
	writel(stat, priv->regs + CLCD_PL111_ICR);

	return ret;
}
/*
 * Reject modes whose memory bandwidth exceeds the variant's budget.
 * The estimate uses the variant's default framebuffer depth, not the
 * actual framebuffer format (which is not known at mode-validation time).
 */
static enum drm_mode_status
pl111_mode_valid(struct drm_simple_display_pipe *pipe,
		 const struct drm_display_mode *mode)
{
	struct drm_device *drm = pipe->crtc.dev;
	struct pl111_drm_dev_private *priv = drm->dev_private;
	/* Bytes per pixel, rounded up from the variant's fb depth in bits */
	u32 cpp = DIV_ROUND_UP(priv->variant->fb_depth, 8);
	u64 bw;

	/*
	 * We use the pixelclock to also account for interlaced modes, the
	 * resulting bandwidth is in bytes per second.
	 */
	bw = mode->clock * 1000ULL; /* In Hz */
	bw = bw * mode->hdisplay * mode->vdisplay * cpp;
	bw = div_u64(bw, mode->htotal * mode->vtotal);

	/*
	 * If no bandwidth constraints, anything goes, else
	 * check if we are too fast.
	 */
	if (priv->memory_bw && (bw > priv->memory_bw)) {
		DRM_DEBUG_KMS("%d x %d @ %d Hz, %d cpp, bw %llu too fast\n",
			      mode->hdisplay, mode->vdisplay,
			      mode->clock * 1000, cpp, bw);
		return MODE_BAD;
	}
	DRM_DEBUG_KMS("%d x %d @ %d Hz, %d cpp, bw %llu bytes/s OK\n",
		      mode->hdisplay, mode->vdisplay,
		      mode->clock * 1000, cpp, bw);
	return MODE_OK;
}
/*
 * Atomic check: validate mode width, framebuffer alignment and pitch,
 * and force a full modeset when the pixel format changes.
 */
static int pl111_display_check(struct drm_simple_display_pipe *pipe,
			       struct drm_plane_state *pstate,
			       struct drm_crtc_state *cstate)
{
	const struct drm_display_mode *mode = &cstate->mode;
	struct drm_framebuffer *old_fb = pipe->plane.state->fb;
	struct drm_framebuffer *fb = pstate->fb;
	u32 offset;

	/* The hardware programs pixels-per-line in units of 16 */
	if (mode->hdisplay % 16)
		return -EINVAL;

	if (!fb)
		return 0;

	offset = drm_fb_dma_get_gem_addr(fb, pstate, 0);

	/* FB base address must be dword aligned. */
	if (offset & 3)
		return -EINVAL;

	/* There's no pitch register -- the mode's hdisplay controls it. */
	if (fb->pitches[0] != mode->hdisplay * fb->format->cpp[0])
		return -EINVAL;

	/*
	 * We can't change the FB format in a flicker-free manner (and
	 * only update it during CRTC enable).
	 */
	if (old_fb && old_fb->format != fb->format)
		cstate->mode_changed = true;

	return 0;
}
/*
 * Program mode timings, sync/clock polarity and pixel format, then run
 * the power-up sequence: enable the controller, wait for the contrast
 * voltage to settle, call the board variant hook, switch on LCD power
 * and finally enable vblank.
 */
static void pl111_display_enable(struct drm_simple_display_pipe *pipe,
				 struct drm_crtc_state *cstate,
				 struct drm_plane_state *plane_state)
{
	struct drm_crtc *crtc = &pipe->crtc;
	struct drm_plane *plane = &pipe->plane;
	struct drm_device *drm = crtc->dev;
	struct pl111_drm_dev_private *priv = drm->dev_private;
	const struct drm_display_mode *mode = &cstate->mode;
	struct drm_framebuffer *fb = plane->state->fb;
	struct drm_connector *connector = priv->connector;
	struct drm_bridge *bridge = priv->bridge;
	bool grayscale = false;
	u32 cntl;
	u32 ppl, hsw, hfp, hbp;	/* horizontal timing fields (TIM0) */
	u32 lpp, vsw, vfp, vbp;	/* vertical timing fields (TIM1) */
	u32 cpl, tim2;
	int ret;

	/* A pixel clock failure is logged but does not abort the modeset */
	ret = clk_set_rate(priv->clk, mode->clock * 1000);
	if (ret) {
		dev_err(drm->dev,
			"Failed to set pixel clock rate to %d: %d\n",
			mode->clock * 1000, ret);
	}

	clk_prepare_enable(priv->clk);

	/* Most timing fields are programmed minus one */
	ppl = (mode->hdisplay / 16) - 1;
	hsw = mode->hsync_end - mode->hsync_start - 1;
	hfp = mode->hsync_start - mode->hdisplay - 1;
	hbp = mode->htotal - mode->hsync_end - 1;

	lpp = mode->vdisplay - 1;
	vsw = mode->vsync_end - mode->vsync_start - 1;
	vfp = mode->vsync_start - mode->vdisplay;
	vbp = mode->vtotal - mode->vsync_end;

	cpl = mode->hdisplay - 1;

	writel((ppl << 2) |
	       (hsw << 8) |
	       (hfp << 16) |
	       (hbp << 24),
	       priv->regs + CLCD_TIM0);
	writel(lpp |
	       (vsw << 10) |
	       (vfp << 16) |
	       (vbp << 24),
	       priv->regs + CLCD_TIM1);

	/* TIM2 is shared with the clock divider code, so take the lock */
	spin_lock(&priv->tim2_lock);

	/* Keep the clock divider bits as-is and rebuild everything else */
	tim2 = readl(priv->regs + CLCD_TIM2);
	tim2 &= (TIM2_BCD | TIM2_PCD_LO_MASK | TIM2_PCD_HI_MASK);

	if (priv->variant->broken_clockdivider)
		tim2 |= TIM2_BCD;

	/* Invert the sync signals when the mode asks for active-low */
	if (mode->flags & DRM_MODE_FLAG_NHSYNC)
		tim2 |= TIM2_IHS;

	if (mode->flags & DRM_MODE_FLAG_NVSYNC)
		tim2 |= TIM2_IVS;

	if (connector) {
		if (connector->display_info.bus_flags & DRM_BUS_FLAG_DE_LOW)
			tim2 |= TIM2_IOE;

		if (connector->display_info.bus_flags &
		    DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE)
			tim2 |= TIM2_IPC;

		if (connector->display_info.num_bus_formats == 1 &&
		    connector->display_info.bus_formats[0] ==
		    MEDIA_BUS_FMT_Y8_1X8)
			grayscale = true;

		/*
		 * The AC pin bias frequency is set to max count when using
		 * grayscale so at least once in a while we will reverse
		 * polarity and get rid of any DC built up that could
		 * damage the display.
		 */
		if (grayscale)
			tim2 |= TIM2_ACB_MASK;
	}

	if (bridge) {
		const struct drm_bridge_timings *btimings = bridge->timings;

		/*
		 * Here is when things get really fun. Sometimes the bridge
		 * timings are such that the signal out from PL11x is not
		 * stable before the receiving bridge (such as a dumb VGA DAC
		 * or similar) samples it. If that happens, we compensate by
		 * the only method we have: output the data on the opposite
		 * edge of the clock so it is for sure stable when it gets
		 * sampled.
		 *
		 * The PL111 manual does not contain proper timing diagrams
		 * or data for these details, but we know from experiments
		 * that the setup time is more than 3000 picoseconds (3 ns).
		 * If we have a bridge that requires the signal to be stable
		 * earlier than 3000 ps before the clock pulse, we have to
		 * output the data on the opposite edge to avoid flicker.
		 */
		if (btimings && btimings->setup_time_ps >= 3000)
			tim2 ^= TIM2_IPC;
	}

	/* Clocks-per-line field, bits 16 and up */
	tim2 |= cpl << 16;
	writel(tim2, priv->regs + CLCD_TIM2);
	spin_unlock(&priv->tim2_lock);

	/* Nothing programmed into TIM3 */
	writel(0, priv->regs + CLCD_TIM3);

	/*
	 * Detect grayscale bus format. We do not support a grayscale mode
	 * toward userspace, instead we expose an RGB24 buffer and then the
	 * hardware will activate its grayscaler to convert to the grayscale
	 * format.
	 */
	if (grayscale)
		cntl = CNTL_LCDEN | CNTL_LCDMONO8;
	else
		/* Else we assume TFT display */
		cntl = CNTL_LCDEN | CNTL_LCDTFT | CNTL_LCDVCOMP(1);

	/* On the ST Micro variant, assume all 24 bits are connected */
	if (priv->variant->st_bitmux_control)
		cntl |= CNTL_ST_CDWID_24;

	/*
	 * Note that the ARM hardware's format reader takes 'r' from
	 * the low bit, while DRM formats list channels from high bit
	 * to low bit as you read left to right. The ST Micro version of
	 * the PL110 (LCDC) however uses the standard DRM format.
	 */
	switch (fb->format->format) {
	case DRM_FORMAT_BGR888:
		/* Only supported on the ST Micro variant */
		if (priv->variant->st_bitmux_control)
			cntl |= CNTL_ST_LCDBPP24_PACKED | CNTL_BGR;
		break;
	case DRM_FORMAT_RGB888:
		/* Only supported on the ST Micro variant */
		if (priv->variant->st_bitmux_control)
			cntl |= CNTL_ST_LCDBPP24_PACKED;
		break;
	case DRM_FORMAT_ABGR8888:
	case DRM_FORMAT_XBGR8888:
		if (priv->variant->st_bitmux_control)
			cntl |= CNTL_LCDBPP24 | CNTL_BGR;
		else
			cntl |= CNTL_LCDBPP24;
		break;
	case DRM_FORMAT_ARGB8888:
	case DRM_FORMAT_XRGB8888:
		if (priv->variant->st_bitmux_control)
			cntl |= CNTL_LCDBPP24;
		else
			cntl |= CNTL_LCDBPP24 | CNTL_BGR;
		break;
	case DRM_FORMAT_BGR565:
		if (priv->variant->is_pl110)
			cntl |= CNTL_LCDBPP16;
		else if (priv->variant->st_bitmux_control)
			cntl |= CNTL_LCDBPP16 | CNTL_ST_1XBPP_565 | CNTL_BGR;
		else
			cntl |= CNTL_LCDBPP16_565;
		break;
	case DRM_FORMAT_RGB565:
		if (priv->variant->is_pl110)
			cntl |= CNTL_LCDBPP16 | CNTL_BGR;
		else if (priv->variant->st_bitmux_control)
			cntl |= CNTL_LCDBPP16 | CNTL_ST_1XBPP_565;
		else
			cntl |= CNTL_LCDBPP16_565 | CNTL_BGR;
		break;
	case DRM_FORMAT_ABGR1555:
	case DRM_FORMAT_XBGR1555:
		cntl |= CNTL_LCDBPP16;
		if (priv->variant->st_bitmux_control)
			cntl |= CNTL_ST_1XBPP_5551 | CNTL_BGR;
		break;
	case DRM_FORMAT_ARGB1555:
	case DRM_FORMAT_XRGB1555:
		cntl |= CNTL_LCDBPP16;
		if (priv->variant->st_bitmux_control)
			cntl |= CNTL_ST_1XBPP_5551;
		else
			cntl |= CNTL_BGR;
		break;
	case DRM_FORMAT_ABGR4444:
	case DRM_FORMAT_XBGR4444:
		cntl |= CNTL_LCDBPP16_444;
		if (priv->variant->st_bitmux_control)
			cntl |= CNTL_ST_1XBPP_444 | CNTL_BGR;
		break;
	case DRM_FORMAT_ARGB4444:
	case DRM_FORMAT_XRGB4444:
		cntl |= CNTL_LCDBPP16_444;
		if (priv->variant->st_bitmux_control)
			cntl |= CNTL_ST_1XBPP_444;
		else
			cntl |= CNTL_BGR;
		break;
	default:
		WARN_ONCE(true, "Unknown FB format 0x%08x\n",
			  fb->format->format);
		break;
	}

	/* The PL110 in Integrator/Versatile does the BGR routing externally */
	if (priv->variant->external_bgr)
		cntl &= ~CNTL_BGR;

	/* Power sequence: first enable and chill */
	writel(cntl, priv->regs + priv->ctrl);

	/*
	 * We expect this delay to stabilize the contrast
	 * voltage Vee as stipulated by the manual
	 */
	msleep(20);

	if (priv->variant_display_enable)
		priv->variant_display_enable(drm, fb->format->format);

	/* Power Up */
	cntl |= CNTL_LCDPWR;
	writel(cntl, priv->regs + priv->ctrl);

	if (!priv->variant->broken_vblank)
		drm_crtc_vblank_on(crtc);
}
/*
 * Mirror of pl111_display_enable(): vblank off, LCD power down, settle
 * delay, variant hook, controller disable, then gate the pixel clock.
 */
static void pl111_display_disable(struct drm_simple_display_pipe *pipe)
{
	struct drm_crtc *crtc = &pipe->crtc;
	struct drm_device *drm = crtc->dev;
	struct pl111_drm_dev_private *priv = drm->dev_private;
	u32 cntl;

	if (!priv->variant->broken_vblank)
		drm_crtc_vblank_off(crtc);

	/* Power Down: drop LCDPWR first, keeping the controller enabled */
	cntl = readl(priv->regs + priv->ctrl);
	if (cntl & CNTL_LCDPWR) {
		cntl &= ~CNTL_LCDPWR;
		writel(cntl, priv->regs + priv->ctrl);
	}

	/*
	 * We expect this delay to stabilize the contrast voltage Vee as
	 * stipulated by the manual
	 */
	msleep(20);

	if (priv->variant_display_disable)
		priv->variant_display_disable(drm);

	/* Disable the controller entirely */
	writel(0, priv->regs + priv->ctrl);

	clk_disable_unprepare(priv->clk);
}
/*
 * Plane update: point the upper base register at the new framebuffer
 * and deliver any pending pageflip event, deferring it to the next
 * vblank when the CRTC is active and a vblank reference can be taken.
 */
static void pl111_display_update(struct drm_simple_display_pipe *pipe,
				 struct drm_plane_state *old_pstate)
{
	struct drm_crtc *crtc = &pipe->crtc;
	struct drm_device *drm = crtc->dev;
	struct pl111_drm_dev_private *priv = drm->dev_private;
	struct drm_pending_vblank_event *event = crtc->state->event;
	struct drm_plane *plane = &pipe->plane;
	struct drm_plane_state *pstate = plane->state;
	struct drm_framebuffer *fb = pstate->fb;

	if (fb) {
		u32 addr = drm_fb_dma_get_gem_addr(fb, pstate, 0);

		writel(addr, priv->regs + CLCD_UBAS);
	}

	if (event) {
		crtc->state->event = NULL;

		spin_lock_irq(&crtc->dev->event_lock);
		if (crtc->state->active && drm_crtc_vblank_get(crtc) == 0)
			drm_crtc_arm_vblank_event(crtc, event);
		else
			/* Cannot wait for a vblank, complete immediately */
			drm_crtc_send_vblank_event(crtc, event);
		spin_unlock_irq(&crtc->dev->event_lock);
	}
}
/* Unmask the next-base-update interrupt, our vblank source */
static int pl111_display_enable_vblank(struct drm_simple_display_pipe *pipe)
{
	struct pl111_drm_dev_private *priv = pipe->crtc.dev->dev_private;

	writel(CLCD_IRQ_NEXTBASE_UPDATE, priv->regs + priv->ienb);

	return 0;
}
/* Mask all interrupts, silencing the vblank source */
static void pl111_display_disable_vblank(struct drm_simple_display_pipe *pipe)
{
	struct pl111_drm_dev_private *priv = pipe->crtc.dev->dev_private;

	writel(0, priv->regs + priv->ienb);
}
/*
 * Deliberately not const: pl111_display_init() patches in the vblank
 * callbacks at runtime when the variant's vblank is not broken.
 */
static struct drm_simple_display_pipe_funcs pl111_display_funcs = {
	.mode_valid = pl111_mode_valid,
	.check = pl111_display_check,
	.enable = pl111_display_enable,
	.disable = pl111_display_disable,
	.update = pl111_display_update,
};
/*
 * Scan all programmable divider values and pick the one whose output
 * rate is closest to @rate. When @set_parent is true, the parent rate
 * is re-rounded for each candidate divider and the best parent rate is
 * returned through @prate; otherwise *prate is used as-is.
 *
 * Returns the best divider value (at least 1).
 */
static int pl111_clk_div_choose_div(struct clk_hw *hw, unsigned long rate,
				    unsigned long *prate, bool set_parent)
{
	int best_div = 1, div;
	struct clk_hw *parent = clk_hw_get_parent(hw);
	unsigned long best_prate = 0;
	unsigned long best_diff = ~0ul;
	/* Largest divider that fits the split PCD_LO/PCD_HI bitfields */
	int max_div = (1 << (TIM2_PCD_LO_BITS + TIM2_PCD_HI_BITS)) - 1;

	for (div = 1; div < max_div; div++) {
		unsigned long this_prate, div_rate, diff;

		if (set_parent)
			this_prate = clk_hw_round_rate(parent, rate * div);
		else
			this_prate = *prate;
		div_rate = DIV_ROUND_UP_ULL(this_prate, div);
		/*
		 * Compute |rate - div_rate| with an explicit branch: the
		 * operands are unsigned long, so a "negative" difference
		 * would wrap to a huge value before abs() could fix it up.
		 */
		diff = (rate > div_rate) ? rate - div_rate : div_rate - rate;

		if (diff < best_diff) {
			best_div = div;
			best_diff = diff;
			best_prate = this_prate;
		}
	}

	*prate = best_prate;
	return best_div;
}
/*
 * clk_ops round_rate: choose the best divider (allowing the parent rate
 * to move) and report the resulting output rate.
 */
static long pl111_clk_div_round_rate(struct clk_hw *hw, unsigned long rate,
				     unsigned long *prate)
{
	/* This also updates *prate to the best parent rate found */
	int best_div = pl111_clk_div_choose_div(hw, rate, prate, true);

	return DIV_ROUND_UP_ULL(*prate, best_div);
}
/* clk_ops recalc_rate: read back the divider currently in TIM2 */
static unsigned long pl111_clk_div_recalc_rate(struct clk_hw *hw,
					       unsigned long prate)
{
	struct pl111_drm_dev_private *priv =
		container_of(hw, struct pl111_drm_dev_private, clk_div);
	u32 tim2 = readl(priv->regs + CLCD_TIM2);
	int div;

	/* Divider bypassed: output equals the parent rate */
	if (tim2 & TIM2_BCD)
		return prate;

	/* Reassemble the divider from its split low/high bitfields */
	div = tim2 & TIM2_PCD_LO_MASK;
	div |= (tim2 & TIM2_PCD_HI_MASK) >>
		(TIM2_PCD_HI_SHIFT - TIM2_PCD_LO_BITS);
	/* The hardware divides by the programmed value plus two */
	div += 2;

	return DIV_ROUND_UP_ULL(prate, div);
}
/* clk_ops set_rate: program the chosen divider into TIM2 */
static int pl111_clk_div_set_rate(struct clk_hw *hw, unsigned long rate,
				  unsigned long prate)
{
	struct pl111_drm_dev_private *priv =
		container_of(hw, struct pl111_drm_dev_private, clk_div);
	int div = pl111_clk_div_choose_div(hw, rate, &prate, false);
	u32 tim2;

	/* TIM2 is shared with the modeset code, so take the lock */
	spin_lock(&priv->tim2_lock);
	tim2 = readl(priv->regs + CLCD_TIM2);
	tim2 &= ~(TIM2_BCD | TIM2_PCD_LO_MASK | TIM2_PCD_HI_MASK);

	if (div == 1) {
		/* Divide-by-one means bypassing the clock divider */
		tim2 |= TIM2_BCD;
	} else {
		/* The hardware divides by the programmed value plus two */
		div -= 2;
		tim2 |= div & TIM2_PCD_LO_MASK;
		tim2 |= (div >> TIM2_PCD_LO_BITS) << TIM2_PCD_HI_SHIFT;
	}

	writel(tim2, priv->regs + CLCD_TIM2);
	spin_unlock(&priv->tim2_lock);

	return 0;
}
/* Clock framework operations for the internal pixel clock divider */
static const struct clk_ops pl111_clk_div_ops = {
	.recalc_rate = pl111_clk_div_recalc_rate,
	.round_rate = pl111_clk_div_round_rate,
	.set_rate = pl111_clk_div_set_rate,
};
/*
 * Register the internal TIM2 clock divider on top of the "clcdclk"
 * parent, unless the variant's divider is broken, in which case the
 * parent clock is used directly as the pixel clock.
 */
static int
pl111_init_clock_divider(struct drm_device *drm)
{
	struct pl111_drm_dev_private *priv = drm->dev_private;
	struct clk *parent = devm_clk_get(drm->dev, "clcdclk");
	struct clk_hw *div = &priv->clk_div;
	const char *parent_name;
	struct clk_init_data init = {
		.name = "pl111_div",
		.ops = &pl111_clk_div_ops,
		.parent_names = &parent_name,
		.num_parents = 1,
		.flags = CLK_SET_RATE_PARENT,
	};
	int ret;

	if (IS_ERR(parent)) {
		dev_err(drm->dev, "CLCD: unable to get clcdclk.\n");
		return PTR_ERR(parent);
	}

	spin_lock_init(&priv->tim2_lock);

	/* If the clock divider is broken, use the parent directly */
	if (priv->variant->broken_clockdivider) {
		priv->clk = parent;
		return 0;
	}
	parent_name = __clk_get_name(parent);
	/*
	 * NOTE(review): &init is stack memory; this relies on the clk core
	 * copying the init data during registration — confirm against the
	 * clk provider API.
	 */
	div->init = &init;

	ret = devm_clk_hw_register(drm->dev, div);

	priv->clk = div->clk;
	return ret;
}
/*
 * Set up the pixel clock divider and the simple display pipe, wiring in
 * the vblank callbacks only when the variant supports them.
 */
int pl111_display_init(struct drm_device *drm)
{
	struct pl111_drm_dev_private *priv = drm->dev_private;
	int ret;

	ret = pl111_init_clock_divider(drm);
	if (ret)
		return ret;

	if (!priv->variant->broken_vblank) {
		pl111_display_funcs.enable_vblank = pl111_display_enable_vblank;
		pl111_display_funcs.disable_vblank = pl111_display_disable_vblank;
	}

	return drm_simple_display_pipe_init(drm, &priv->pipe,
					    &pl111_display_funcs,
					    priv->variant->formats,
					    priv->variant->nformats,
					    NULL,
					    priv->connector);
}
| linux-master | drivers/gpu/drm/pl111/pl111_display.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Versatile family (ARM reference designs) handling for the PL11x.
* This is based on code and know-how in the previous frame buffer
* driver in drivers/video/fbdev/amba-clcd.c:
* Copyright (C) 2001 ARM Limited, by David A Rusling
* Updated to 2.5 by Deep Blue Solutions Ltd.
* Major contributions and discoveries by Russell King.
*/
#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/vexpress.h>
#include <drm/drm_fourcc.h>
#include "pl111_versatile.h"
#include "pl111_drm.h"
/* Syscon regmap shared by the enable/disable callbacks below */
static struct regmap *versatile_syscon_map;

/*
 * We detect the different syscon types from the compatible strings.
 */
enum versatile_clcd {
	INTEGRATOR_IMPD1,
	INTEGRATOR_CLCD_CM,
	VERSATILE_CLCD,
	REALVIEW_CLCD_EB,
	REALVIEW_CLCD_PB1176,
	REALVIEW_CLCD_PB11MP,
	REALVIEW_CLCD_PBA8,
	REALVIEW_CLCD_PBX,
	VEXPRESS_CLCD_V2M,
};
/* Map the system controller compatible string to a board type */
static const struct of_device_id versatile_clcd_of_match[] = {
	{
		.compatible = "arm,core-module-integrator",
		.data = (void *)INTEGRATOR_CLCD_CM,
	},
	{
		.compatible = "arm,versatile-sysreg",
		.data = (void *)VERSATILE_CLCD,
	},
	{
		.compatible = "arm,realview-eb-syscon",
		.data = (void *)REALVIEW_CLCD_EB,
	},
	{
		.compatible = "arm,realview-pb1176-syscon",
		.data = (void *)REALVIEW_CLCD_PB1176,
	},
	{
		.compatible = "arm,realview-pb11mp-syscon",
		.data = (void *)REALVIEW_CLCD_PB11MP,
	},
	{
		.compatible = "arm,realview-pba8-syscon",
		.data = (void *)REALVIEW_CLCD_PBA8,
	},
	{
		.compatible = "arm,realview-pbx-syscon",
		.data = (void *)REALVIEW_CLCD_PBX,
	},
	{
		.compatible = "arm,vexpress-muxfpga",
		.data = (void *)VEXPRESS_CLCD_V2M,
	},
	{},
};
/* Separate match table for the optional IM-PD1 daughterboard syscon */
static const struct of_device_id impd1_clcd_of_match[] = {
	{
		.compatible = "arm,im-pd1-syscon",
		.data = (void *)INTEGRATOR_IMPD1,
	},
	{},
};
/*
 * Core module CLCD control on the Integrator/CP, bits
 * 8 thru 19 of the CM_CONTROL register controls a bunch
 * of CLCD settings.
 */
#define INTEGRATOR_HDR_CTRL_OFFSET 0x0C
#define INTEGRATOR_CLCD_LCDBIASEN BIT(8)
#define INTEGRATOR_CLCD_LCDBIASUP BIT(9)
#define INTEGRATOR_CLCD_LCDBIASDN BIT(10)
/* Bits 11,12,13 controls the LCD or VGA bridge type */
#define INTEGRATOR_CLCD_LCDMUX_LCD24 BIT(11)
#define INTEGRATOR_CLCD_LCDMUX_SHARP (BIT(11)|BIT(12))
#define INTEGRATOR_CLCD_LCDMUX_VGA555 BIT(13)
#define INTEGRATOR_CLCD_LCDMUX_VGA24 (BIT(11)|BIT(12)|BIT(13))
#define INTEGRATOR_CLCD_LCD0_EN BIT(14)
#define INTEGRATOR_CLCD_LCD1_EN BIT(15)
/* R/L flip on Sharp */
#define INTEGRATOR_CLCD_LCD_STATIC1 BIT(16)
/* U/D flip on Sharp */
#define INTEGRATOR_CLCD_LCD_STATIC2 BIT(17)
/* No connection on Sharp */
#define INTEGRATOR_CLCD_LCD_STATIC BIT(18)
/* 0 = 24bit VGA, 1 = 18bit VGA */
#define INTEGRATOR_CLCD_LCD_N24BITEN BIT(19)
/* Covers all the CLCD control bits, 19..8 */
#define INTEGRATOR_CLCD_MASK GENMASK(19, 8)
/* Program the Integrator core module mux for the given DRM pixel format */
static void pl111_integrator_enable(struct drm_device *drm, u32 format)
{
	u32 val;

	dev_info(drm->dev, "enable Integrator CLCD connectors\n");

	/* FIXME: really needed? */
	val = INTEGRATOR_CLCD_LCD_STATIC1 | INTEGRATOR_CLCD_LCD_STATIC2 |
		INTEGRATOR_CLCD_LCD0_EN | INTEGRATOR_CLCD_LCD1_EN;

	switch (format) {
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_XRGB8888:
		/* 24bit formats */
		val |= INTEGRATOR_CLCD_LCDMUX_VGA24;
		break;
	case DRM_FORMAT_XBGR1555:
	case DRM_FORMAT_XRGB1555:
		/* Pseudocolor, RGB555, BGR555 */
		val |= INTEGRATOR_CLCD_LCDMUX_VGA555;
		break;
	default:
		dev_err(drm->dev, "unhandled format on Integrator 0x%08x\n",
			format);
		break;
	}

	regmap_update_bits(versatile_syscon_map,
			   INTEGRATOR_HDR_CTRL_OFFSET,
			   INTEGRATOR_CLCD_MASK,
			   val);
}
/* IM-PD1 display control register and its output-select/enable fields */
#define IMPD1_CTRL_OFFSET 0x18
#define IMPD1_CTRL_DISP_LCD (0 << 0)
#define IMPD1_CTRL_DISP_VGA (1 << 0)
#define IMPD1_CTRL_DISP_LCD1 (2 << 0)
#define IMPD1_CTRL_DISP_ENABLE (1 << 2)
#define IMPD1_CTRL_DISP_MASK (7 << 0)
/* Route the IM-PD1 display to the VGA connector and switch it on */
static void pl111_impd1_enable(struct drm_device *drm, u32 format)
{
	dev_info(drm->dev, "enable IM-PD1 CLCD connectors\n");

	regmap_update_bits(versatile_syscon_map,
			   IMPD1_CTRL_OFFSET,
			   IMPD1_CTRL_DISP_MASK,
			   IMPD1_CTRL_DISP_VGA | IMPD1_CTRL_DISP_ENABLE);
}
/* Clear the IM-PD1 display select/enable bits, turning the output off */
static void pl111_impd1_disable(struct drm_device *drm)
{
	dev_info(drm->dev, "disable IM-PD1 CLCD connectors\n");

	regmap_update_bits(versatile_syscon_map,
			   IMPD1_CTRL_OFFSET,
			   IMPD1_CTRL_DISP_MASK,
			   0);
}
/*
 * This configuration register in the Versatile and RealView
 * family is uniformly present but appears more and more
 * unutilized starting with the RealView series.
 */
#define SYS_CLCD 0x50
/* Bits [1:0] select the pixel wiring mode */
#define SYS_CLCD_MODE_MASK (BIT(0)|BIT(1))
#define SYS_CLCD_MODE_888 0
#define SYS_CLCD_MODE_5551 BIT(0)
#define SYS_CLCD_MODE_565_R_LSB BIT(1)
#define SYS_CLCD_MODE_565_B_LSB (BIT(0)|BIT(1))
/* Bits [5:2] control the connector power switches */
#define SYS_CLCD_CONNECTOR_MASK (BIT(2)|BIT(3)|BIT(4)|BIT(5))
#define SYS_CLCD_NLCDIOON BIT(2)
#define SYS_CLCD_VDDPOSSWITCH BIT(3)
#define SYS_CLCD_PWR3V5SWITCH BIT(4)
#define SYS_CLCD_VDDNEGSWITCH BIT(5)
/* Cut power to the Versatile CLCD connector */
static void pl111_versatile_disable(struct drm_device *drm)
{
	dev_info(drm->dev, "disable Versatile CLCD connectors\n");

	regmap_update_bits(versatile_syscon_map,
			   SYS_CLCD,
			   SYS_CLCD_CONNECTOR_MASK,
			   0);
}
/* Configure the Versatile pixel-wiring mux for @format and power on */
static void pl111_versatile_enable(struct drm_device *drm, u32 format)
{
	u32 val = 0;

	dev_info(drm->dev, "enable Versatile CLCD connectors\n");

	switch (format) {
	case DRM_FORMAT_ABGR8888:
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ARGB8888:
	case DRM_FORMAT_XRGB8888:
		val |= SYS_CLCD_MODE_888;
		break;
	case DRM_FORMAT_BGR565:
		val |= SYS_CLCD_MODE_565_R_LSB;
		break;
	case DRM_FORMAT_RGB565:
		val |= SYS_CLCD_MODE_565_B_LSB;
		break;
	case DRM_FORMAT_ABGR1555:
	case DRM_FORMAT_XBGR1555:
	case DRM_FORMAT_ARGB1555:
	case DRM_FORMAT_XRGB1555:
		val |= SYS_CLCD_MODE_5551;
		break;
	default:
		dev_err(drm->dev, "unhandled format on Versatile 0x%08x\n",
			format);
		break;
	}

	/* Set up the MUX */
	regmap_update_bits(versatile_syscon_map,
			   SYS_CLCD,
			   SYS_CLCD_MODE_MASK,
			   val);

	/* Then enable the display */
	regmap_update_bits(versatile_syscon_map,
			   SYS_CLCD,
			   SYS_CLCD_CONNECTOR_MASK,
			   SYS_CLCD_NLCDIOON | SYS_CLCD_PWR3V5SWITCH);
}
/* Cut power to the RealView CLCD connector */
static void pl111_realview_clcd_disable(struct drm_device *drm)
{
	dev_info(drm->dev, "disable RealView CLCD connectors\n");

	regmap_update_bits(versatile_syscon_map,
			   SYS_CLCD,
			   SYS_CLCD_CONNECTOR_MASK,
			   0);
}
/* Power up the RealView CLCD connector; @format is not used here */
static void pl111_realview_clcd_enable(struct drm_device *drm, u32 format)
{
	dev_info(drm->dev, "enable RealView CLCD connectors\n");

	regmap_update_bits(versatile_syscon_map,
			   SYS_CLCD,
			   SYS_CLCD_CONNECTOR_MASK,
			   SYS_CLCD_NLCDIOON | SYS_CLCD_PWR3V5SWITCH);
}
/* PL110 pixel formats for Integrator, vanilla PL110 */
static const u32 pl110_integrator_pixel_formats[] = {
	DRM_FORMAT_ABGR8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_ABGR1555,
	DRM_FORMAT_XBGR1555,
	DRM_FORMAT_ARGB1555,
	DRM_FORMAT_XRGB1555,
};

/* Extended PL110 pixel formats for Integrator and Versatile */
static const u32 pl110_versatile_pixel_formats[] = {
	DRM_FORMAT_ABGR8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_BGR565, /* Uses external PLD */
	DRM_FORMAT_RGB565, /* Uses external PLD */
	DRM_FORMAT_ABGR1555,
	DRM_FORMAT_XBGR1555,
	DRM_FORMAT_ARGB1555,
	DRM_FORMAT_XRGB1555,
};

/* Full PL111 format list, also covering the 4444 variants */
static const u32 pl111_realview_pixel_formats[] = {
	DRM_FORMAT_ABGR8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_BGR565,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_ABGR1555,
	DRM_FORMAT_XBGR1555,
	DRM_FORMAT_ARGB1555,
	DRM_FORMAT_XRGB1555,
	DRM_FORMAT_ABGR4444,
	DRM_FORMAT_XBGR4444,
	DRM_FORMAT_ARGB4444,
	DRM_FORMAT_XRGB4444,
};
/*
 * The Integrator variant is a PL110 with a bunch of broken, or not
 * yet implemented features
 */
static const struct pl111_variant_data pl110_integrator = {
	.name = "PL110 Integrator",
	.is_pl110 = true,
	.broken_clockdivider = true,
	.broken_vblank = true,
	.formats = pl110_integrator_pixel_formats,
	.nformats = ARRAY_SIZE(pl110_integrator_pixel_formats),
	.fb_depth = 16,
};

/*
 * The IM-PD1 variant is a PL110 with a bunch of broken, or not
 * yet implemented features
 */
static const struct pl111_variant_data pl110_impd1 = {
	.name = "PL110 IM-PD1",
	.is_pl110 = true,
	.broken_clockdivider = true,
	.broken_vblank = true,
	.formats = pl110_integrator_pixel_formats,
	.nformats = ARRAY_SIZE(pl110_integrator_pixel_formats),
	/* NOTE(review): 15 bpp here (not 16) — presumably RGB555; confirm */
	.fb_depth = 15,
};

/*
 * This is the in-between PL110 variant found in the ARM Versatile,
 * supporting RGB565/BGR565
 */
static const struct pl111_variant_data pl110_versatile = {
	.name = "PL110 Versatile",
	.is_pl110 = true,
	.external_bgr = true,
	.formats = pl110_versatile_pixel_formats,
	.nformats = ARRAY_SIZE(pl110_versatile_pixel_formats),
	.fb_depth = 16,
};

/*
 * RealView PL111 variant, the only real difference from the vanilla
 * PL111 is that we select 16bpp framebuffer by default to be able
 * to get 1024x768 without saturating the memory bus.
 */
static const struct pl111_variant_data pl111_realview = {
	.name = "PL111 RealView",
	.formats = pl111_realview_pixel_formats,
	.nformats = ARRAY_SIZE(pl111_realview_pixel_formats),
	.fb_depth = 16,
};

/*
 * Versatile Express PL111 variant, again we just push the maximum
 * BPP to 16 to be able to get 1024x768 without saturating the memory
 * bus. The clockdivider also seems broken on the Versatile Express.
 */
static const struct pl111_variant_data pl111_vexpress = {
	.name = "PL111 Versatile Express",
	.formats = pl111_realview_pixel_formats,
	.nformats = ARRAY_SIZE(pl111_realview_pixel_formats),
	.fb_depth = 16,
	.broken_clockdivider = true,
};
/* Values written to the V2M muxfpga register to select the DVI source */
#define VEXPRESS_FPGAMUX_MOTHERBOARD 0x00
#define VEXPRESS_FPGAMUX_DAUGHTERBOARD_1 0x01
#define VEXPRESS_FPGAMUX_DAUGHTERBOARD_2 0x02
/*
 * On the Versatile Express the DVI output can be fed either by the
 * motherboard CLCD or by graphics on a daughterboard (core tile).
 * Decide which device should own the DVI and program the FPGA mux
 * accordingly. Returns -ENODEV when this particular device loses the
 * arbitration and should not be activated.
 */
static int pl111_vexpress_clcd_init(struct device *dev, struct device_node *np,
				    struct pl111_drm_dev_private *priv)
{
	struct platform_device *pdev;
	struct device_node *root;
	struct device_node *child;
	struct device_node *ct_clcd = NULL;
	struct regmap *map;
	bool has_coretile_clcd = false;
	bool has_coretile_hdlcd = false;
	bool mux_motherboard = true;
	u32 val;
	int ret;

	if (!IS_ENABLED(CONFIG_VEXPRESS_CONFIG))
		return -ENODEV;

	/*
	 * Check if we have a CLCD or HDLCD on the core tile by checking if a
	 * CLCD or HDLCD is available in the root of the device tree.
	 */
	root = of_find_node_by_path("/");
	if (!root)
		return -EINVAL;

	for_each_available_child_of_node(root, child) {
		if (of_device_is_compatible(child, "arm,pl111")) {
			has_coretile_clcd = true;
			ct_clcd = child;
			of_node_put(child);
			break;
		}
		if (of_device_is_compatible(child, "arm,hdlcd")) {
			has_coretile_hdlcd = true;
			of_node_put(child);
			break;
		}
	}

	of_node_put(root);

	/*
	 * If there is a coretile HDLCD and it has a driver,
	 * do not mux the CLCD on the motherboard to the DVI.
	 */
	if (has_coretile_hdlcd && IS_ENABLED(CONFIG_DRM_HDLCD))
		mux_motherboard = false;

	/*
	 * On the Vexpress CA9 we let the CLCD on the coretile
	 * take precedence, so also in this case do not mux the
	 * motherboard to the DVI.
	 */
	if (has_coretile_clcd)
		mux_motherboard = false;

	if (mux_motherboard) {
		dev_info(dev, "DVI muxed to motherboard CLCD\n");
		val = VEXPRESS_FPGAMUX_MOTHERBOARD;
	} else if (ct_clcd == dev->of_node) {
		dev_info(dev,
			 "DVI muxed to daughterboard 1 (core tile) CLCD\n");
		val = VEXPRESS_FPGAMUX_DAUGHTERBOARD_1;
	} else {
		/* Some other device owns the graphics output, bow out */
		dev_info(dev, "core tile graphics present\n");
		dev_info(dev, "this device will be deactivated\n");
		return -ENODEV;
	}

	/* Call into deep Vexpress configuration API */
	pdev = of_find_device_by_node(np);
	if (!pdev) {
		dev_err(dev, "can't find the sysreg device, deferring\n");
		return -EPROBE_DEFER;
	}

	map = devm_regmap_init_vexpress_config(&pdev->dev);
	if (IS_ERR(map)) {
		platform_device_put(pdev);
		return PTR_ERR(map);
	}

	/* Register 0 of the muxfpga syscon selects the DVI source */
	ret = regmap_write(map, 0, val);
	platform_device_put(pdev);
	if (ret) {
		dev_err(dev, "error setting DVI muxmode\n");
		return -ENODEV;
	}

	priv->variant = &pl111_vexpress;
	dev_info(dev, "initializing Versatile Express PL111\n");

	return 0;
}
/*
 * Detect which ARM reference design (Integrator, IM-PD1, Versatile,
 * RealView or Versatile Express) we are running on, stash its syscon
 * regmap and install the matching variant data and display callbacks
 * into @priv. Returns 0 (doing nothing) on non-ARM reference designs.
 */
int pl111_versatile_init(struct device *dev, struct pl111_drm_dev_private *priv)
{
	const struct of_device_id *clcd_id;
	enum versatile_clcd versatile_clcd_type;
	struct device_node *np;
	struct regmap *map;

	np = of_find_matching_node_and_match(NULL, versatile_clcd_of_match,
					     &clcd_id);
	if (!np) {
		/* Non-ARM reference designs, just bail out */
		return 0;
	}

	versatile_clcd_type = (enum versatile_clcd)clcd_id->data;

	/* Versatile Express special handling */
	if (versatile_clcd_type == VEXPRESS_CLCD_V2M) {
		int ret = pl111_vexpress_clcd_init(dev, np, priv);
		of_node_put(np);
		if (ret)
			dev_err(dev, "Versatile Express init failed - %d", ret);
		return ret;
	}

	/*
	 * On the Integrator, check if we should use the IM-PD1 instead,
	 * if we find it, it will take precedence. This is on the Integrator/AP
	 * which only has this option for PL110 graphics.
	 */
	if (versatile_clcd_type == INTEGRATOR_CLCD_CM) {
		np = of_find_matching_node_and_match(NULL, impd1_clcd_of_match,
						     &clcd_id);
		if (np)
			versatile_clcd_type = (enum versatile_clcd)clcd_id->data;
	}

	map = syscon_node_to_regmap(np);
	of_node_put(np);
	if (IS_ERR(map)) {
		dev_err(dev, "no Versatile syscon regmap\n");
		return PTR_ERR(map);
	}

	switch (versatile_clcd_type) {
	case INTEGRATOR_CLCD_CM:
		versatile_syscon_map = map;
		priv->variant = &pl110_integrator;
		priv->variant_display_enable = pl111_integrator_enable;
		dev_info(dev, "set up callbacks for Integrator PL110\n");
		break;
	case INTEGRATOR_IMPD1:
		versatile_syscon_map = map;
		priv->variant = &pl110_impd1;
		priv->variant_display_enable = pl111_impd1_enable;
		priv->variant_display_disable = pl111_impd1_disable;
		dev_info(dev, "set up callbacks for IM-PD1 PL110\n");
		break;
	case VERSATILE_CLCD:
		versatile_syscon_map = map;
		/* This can do RGB565 with external PLD */
		priv->variant = &pl110_versatile;
		priv->variant_display_enable = pl111_versatile_enable;
		priv->variant_display_disable = pl111_versatile_disable;
		/*
		 * The Versatile has a variant halfway between PL110
		 * and PL111 where these two registers have already been
		 * swapped.
		 */
		priv->ienb = CLCD_PL111_IENB;
		priv->ctrl = CLCD_PL111_CNTL;
		dev_info(dev, "set up callbacks for Versatile PL110\n");
		break;
	case REALVIEW_CLCD_EB:
	case REALVIEW_CLCD_PB1176:
	case REALVIEW_CLCD_PB11MP:
	case REALVIEW_CLCD_PBA8:
	case REALVIEW_CLCD_PBX:
		versatile_syscon_map = map;
		priv->variant = &pl111_realview;
		priv->variant_display_enable = pl111_realview_clcd_enable;
		priv->variant_display_disable = pl111_realview_clcd_disable;
		dev_info(dev, "set up callbacks for RealView PL111\n");
		break;
	default:
		dev_info(dev, "unknown Versatile system controller\n");
		break;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(pl111_versatile_init);
| linux-master | drivers/gpu/drm/pl111/pl111_versatile.c |
// SPDX-License-Identifier: GPL-2.0+
#include "vkms_drv.h"
#include <drm/drm_atomic_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_probe_helper.h>
/* Virtual connector: plain atomic helpers, no hardware to probe */
static const struct drm_connector_funcs vkms_connector_funcs = {
	.fill_modes = drm_helper_probe_single_connector_modes,
	.destroy = drm_connector_cleanup,
	.reset = drm_atomic_helper_connector_reset,
	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
/* The virtual encoder has no hardware state; it only needs cleanup. */
static const struct drm_encoder_funcs vkms_encoder_funcs = {
	.destroy = drm_encoder_cleanup,
};
static int vkms_conn_get_modes(struct drm_connector *connector)
{
	int n_modes;

	/*
	 * A virtual display has no EDID: expose the standard mode list up
	 * to the driver maximum and mark the default resolution preferred.
	 */
	n_modes = drm_add_modes_noedid(connector, XRES_MAX, YRES_MAX);
	drm_set_preferred_mode(connector, XRES_DEF, YRES_DEF);

	return n_modes;
}
/* Probe helper: only mode enumeration is needed for the virtual connector. */
static const struct drm_connector_helper_funcs vkms_conn_helper_funcs = {
	.get_modes    = vkms_conn_get_modes,
};
/* Create one overlay plane and bind it to @crtc. Returns 0 or -errno. */
static int vkms_add_overlay_plane(struct vkms_device *vkmsdev, int index,
				  struct drm_crtc *crtc)
{
	struct vkms_plane *plane;

	plane = vkms_plane_init(vkmsdev, DRM_PLANE_TYPE_OVERLAY, index);
	if (IS_ERR(plane))
		return PTR_ERR(plane);

	/* Only set the CRTC mask if the plane init left it unset. */
	if (!plane->base.possible_crtcs)
		plane->base.possible_crtcs = drm_crtc_mask(crtc);

	return 0;
}
/*
 * vkms_output_init - build the full virtual display pipeline
 * @vkmsdev: device being initialized
 * @index: possible-CRTC index passed down to the plane constructors
 *
 * Creates the primary plane (mandatory), optional overlay and cursor
 * planes, the CRTC, connector and encoder, and optionally the writeback
 * connector, then resets the mode configuration.
 *
 * Returns 0 on success or a negative errno; on error, resources created
 * earlier in this function are unwound via the goto ladder.
 */
int vkms_output_init(struct vkms_device *vkmsdev, int index)
{
	struct vkms_output *output = &vkmsdev->output;
	struct drm_device *dev = &vkmsdev->drm;
	struct drm_connector *connector = &output->connector;
	struct drm_encoder *encoder = &output->encoder;
	struct drm_crtc *crtc = &output->crtc;
	struct vkms_plane *primary, *cursor = NULL;
	int ret;
	int writeback;
	unsigned int n;

	primary = vkms_plane_init(vkmsdev, DRM_PLANE_TYPE_PRIMARY, index);
	if (IS_ERR(primary))
		return PTR_ERR(primary);

	if (vkmsdev->config->overlay) {
		for (n = 0; n < NUM_OVERLAY_PLANES; n++) {
			ret = vkms_add_overlay_plane(vkmsdev, index, crtc);
			if (ret)
				return ret;
		}
	}

	if (vkmsdev->config->cursor) {
		cursor = vkms_plane_init(vkmsdev, DRM_PLANE_TYPE_CURSOR, index);
		if (IS_ERR(cursor))
			return PTR_ERR(cursor);
	}

	/*
	 * Do not compute &cursor->base when no cursor plane was created:
	 * taking the address of a member of a NULL pointer is undefined
	 * behaviour even though 'base' is the first member.
	 */
	ret = vkms_crtc_init(dev, crtc, &primary->base,
			     cursor ? &cursor->base : NULL);
	if (ret)
		return ret;

	ret = drm_connector_init(dev, connector, &vkms_connector_funcs,
				 DRM_MODE_CONNECTOR_VIRTUAL);
	if (ret) {
		DRM_ERROR("Failed to init connector\n");
		goto err_connector;
	}

	drm_connector_helper_add(connector, &vkms_conn_helper_funcs);

	ret = drm_encoder_init(dev, encoder, &vkms_encoder_funcs,
			       DRM_MODE_ENCODER_VIRTUAL, NULL);
	if (ret) {
		DRM_ERROR("Failed to init encoder\n");
		goto err_encoder;
	}
	/* Single CRTC: the encoder can only ever drive CRTC 0. */
	encoder->possible_crtcs = 1;

	ret = drm_connector_attach_encoder(connector, encoder);
	if (ret) {
		DRM_ERROR("Failed to attach connector to encoder\n");
		goto err_attach;
	}

	if (vkmsdev->config->writeback) {
		writeback = vkms_enable_writeback_connector(vkmsdev);
		/* Writeback is optional: log but do not fail the output. */
		if (writeback)
			DRM_ERROR("Failed to init writeback connector\n");
	}

	drm_mode_config_reset(dev);

	return 0;

err_attach:
	drm_encoder_cleanup(encoder);

err_encoder:
	drm_connector_cleanup(connector);

err_connector:
	drm_crtc_cleanup(crtc);

	return ret;
}
| linux-master | drivers/gpu/drm/vkms/vkms_output.c |
// SPDX-License-Identifier: GPL-2.0+
#include <linux/kernel.h>
#include <linux/minmax.h>
#include <drm/drm_blend.h>
#include <drm/drm_rect.h>
#include <drm/drm_fixed.h>
#include "vkms_formats.h"
static size_t pixel_offset(const struct vkms_frame_info *frame_info, int x, int y)
{
return frame_info->offset + (y * frame_info->pitch)
+ (x * frame_info->cpp);
}
/*
* packed_pixels_addr - Get the pointer to pixel of a given pair of coordinates
*
* @frame_info: Buffer metadata
* @x: The x(width) coordinate of the 2D buffer
* @y: The y(Heigth) coordinate of the 2D buffer
*
* Takes the information stored in the frame_info, a pair of coordinates, and
* returns the address of the first color channel.
* This function assumes the channels are packed together, i.e. a color channel
* comes immediately after another in the memory. And therefore, this function
* doesn't work for YUV with chroma subsampling (e.g. YUV420 and NV21).
*/
static void *packed_pixels_addr(const struct vkms_frame_info *frame_info,
int x, int y)
{
size_t offset = pixel_offset(frame_info, x, y);
return (u8 *)frame_info->map[0].vaddr + offset;
}
/* Address of the source pixels for CRTC row @y, in source coordinates. */
static void *get_packed_src_addr(const struct vkms_frame_info *frame_info, int y)
{
	int src_x = frame_info->src.x1 >> 16;
	int src_y = (frame_info->src.y1 >> 16) + y - frame_info->rotated.y1;

	return packed_pixels_addr(frame_info, src_x, src_y);
}
static int get_x_position(const struct vkms_frame_info *frame_info, int limit, int x)
{
if (frame_info->rotation & (DRM_MODE_REFLECT_X | DRM_MODE_ROTATE_270))
return limit - x - 1;
return x;
}
static void ARGB8888_to_argb_u16(u8 *src_pixels, struct pixel_argb_u16 *out_pixel)
{
	/*
	 * Scale each 8-bit channel to 16 bits.  The factor 257 is
	 * (2^16 - 1) / (2^8 - 1), so 0x00 maps to 0x0000 and 0xff maps
	 * exactly to 0xffff.  The same idea applies to the other RGB
	 * conversions.  Bytes are little-endian: B, G, R, A.
	 */
	out_pixel->b = (u16)src_pixels[0] * 257;
	out_pixel->g = (u16)src_pixels[1] * 257;
	out_pixel->r = (u16)src_pixels[2] * 257;
	out_pixel->a = (u16)src_pixels[3] * 257;
}
static void XRGB8888_to_argb_u16(u8 *src_pixels, struct pixel_argb_u16 *out_pixel)
{
	/* The X byte is ignored; the pixel is treated as fully opaque. */
	out_pixel->b = (u16)src_pixels[0] * 257;
	out_pixel->g = (u16)src_pixels[1] * 257;
	out_pixel->r = (u16)src_pixels[2] * 257;
	out_pixel->a = (u16)0xffff;
}
static void ARGB16161616_to_argb_u16(u8 *src_pixels, struct pixel_argb_u16 *out_pixel)
{
	u16 *chan = (u16 *)src_pixels;

	/* Channels are stored little-endian in the order B, G, R, A. */
	out_pixel->b = le16_to_cpu(chan[0]);
	out_pixel->g = le16_to_cpu(chan[1]);
	out_pixel->r = le16_to_cpu(chan[2]);
	out_pixel->a = le16_to_cpu(chan[3]);
}
static void XRGB16161616_to_argb_u16(u8 *src_pixels, struct pixel_argb_u16 *out_pixel)
{
	u16 *chan = (u16 *)src_pixels;

	/* The X channel is ignored; the pixel is treated as fully opaque. */
	out_pixel->b = le16_to_cpu(chan[0]);
	out_pixel->g = le16_to_cpu(chan[1]);
	out_pixel->r = le16_to_cpu(chan[2]);
	out_pixel->a = (u16)0xffff;
}
static void RGB565_to_argb_u16(u8 *src_pixels, struct pixel_argb_u16 *out_pixel)
{
	u16 rgb_565 = le16_to_cpu(*(u16 *)src_pixels);

	/* Fixed-point expansion ratios from 5/6-bit channels to 16 bits. */
	s64 fp_rb_ratio = drm_fixp_div(drm_int2fixp(65535), drm_int2fixp(31));
	s64 fp_g_ratio = drm_fixp_div(drm_int2fixp(65535), drm_int2fixp(63));

	s64 fp_r = drm_int2fixp((rgb_565 >> 11) & 0x1f);
	s64 fp_g = drm_int2fixp((rgb_565 >> 5) & 0x3f);
	s64 fp_b = drm_int2fixp(rgb_565 & 0x1f);

	out_pixel->a = (u16)0xffff;
	out_pixel->r = drm_fixp2int_round(drm_fixp_mul(fp_r, fp_rb_ratio));
	out_pixel->g = drm_fixp2int_round(drm_fixp_mul(fp_g, fp_g_ratio));
	out_pixel->b = drm_fixp2int_round(drm_fixp_mul(fp_b, fp_rb_ratio));
}
/**
 * vkms_compose_row - compose a single row of a plane
 * @stage_buffer: output line with the composed pixels
 * @plane: state of the plane that is being composed
 * @y: y coordinate of the row
 *
 * This function composes a single row of a plane. It gets the source pixels
 * through the y coordinate (see get_packed_src_addr()) and goes linearly
 * through the source pixel, reading the pixels and converting it to
 * ARGB16161616 (see the pixel_read() callback). For rotate-90 and rotate-270,
 * the source pixels are not traversed linearly. The source pixels are queried
 * on each iteration in order to traverse the pixels vertically.
 */
void vkms_compose_row(struct line_buffer *stage_buffer, struct vkms_plane_state *plane, int y)
{
	struct pixel_argb_u16 *out_pixels = stage_buffer->pixels;
	struct vkms_frame_info *frame_info = plane->frame_info;
	u8 *src_pixels = get_packed_src_addr(frame_info, y);
	/* Clamp to the staging buffer so we never write past its end. */
	int limit = min_t(size_t, drm_rect_width(&frame_info->dst), stage_buffer->n_pixels);

	for (size_t x = 0; x < limit; x++, src_pixels += frame_info->cpp) {
		/* Destination column, accounting for horizontal mirroring. */
		int x_pos = get_x_position(frame_info, limit, x);

		/*
		 * For 90/270 degree rotations the source is walked
		 * column-wise: recompute the source address every pixel
		 * instead of relying on the linear increment above.
		 */
		if (drm_rotation_90_or_270(frame_info->rotation))
			src_pixels = get_packed_src_addr(frame_info, x + frame_info->rotated.y1)
				+ frame_info->cpp * y;

		plane->pixel_read(src_pixels, &out_pixels[x_pos]);
	}
}
/*
* The following functions take an line of argb_u16 pixels from the
* src_buffer, convert them to a specific format, and store them in the
* destination.
*
* They are used in the `compose_active_planes` to convert and store a line
* from the src_buffer to the writeback buffer.
*/
static void argb_u16_to_ARGB8888(u8 *dst_pixels, struct pixel_argb_u16 *in_pixel)
{
	/*
	 * The format is little-endian, so the bytes are laid out in
	 * memory as blue, green, red, alpha:
	 *
	 *   | Addr     | = blue channel
	 *   | Addr + 1 | = green channel
	 *   | Addr + 2 | = red channel
	 *   | Addr + 3 | = alpha channel
	 *
	 * Each 16-bit channel is scaled down to 8 bits with rounding
	 * (257 = 0xffff / 0xff).
	 */
	dst_pixels[0] = DIV_ROUND_CLOSEST(in_pixel->b, 257);
	dst_pixels[1] = DIV_ROUND_CLOSEST(in_pixel->g, 257);
	dst_pixels[2] = DIV_ROUND_CLOSEST(in_pixel->r, 257);
	dst_pixels[3] = DIV_ROUND_CLOSEST(in_pixel->a, 257);
}
static void argb_u16_to_XRGB8888(u8 *dst_pixels, struct pixel_argb_u16 *in_pixel)
{
	/* Alpha is discarded; the X byte is written as 0xff. */
	dst_pixels[0] = DIV_ROUND_CLOSEST(in_pixel->b, 257);
	dst_pixels[1] = DIV_ROUND_CLOSEST(in_pixel->g, 257);
	dst_pixels[2] = DIV_ROUND_CLOSEST(in_pixel->r, 257);
	dst_pixels[3] = 0xff;
}
static void argb_u16_to_ARGB16161616(u8 *dst_pixels, struct pixel_argb_u16 *in_pixel)
{
	u16 *chan = (u16 *)dst_pixels;

	/* Store the channels little-endian in the order B, G, R, A. */
	chan[0] = cpu_to_le16(in_pixel->b);
	chan[1] = cpu_to_le16(in_pixel->g);
	chan[2] = cpu_to_le16(in_pixel->r);
	chan[3] = cpu_to_le16(in_pixel->a);
}
static void argb_u16_to_XRGB16161616(u8 *dst_pixels, struct pixel_argb_u16 *in_pixel)
{
	u16 *chan = (u16 *)dst_pixels;

	/* Alpha is discarded; 0xffff is endian-neutral so no swap needed. */
	chan[0] = cpu_to_le16(in_pixel->b);
	chan[1] = cpu_to_le16(in_pixel->g);
	chan[2] = cpu_to_le16(in_pixel->r);
	chan[3] = 0xffff;
}
static void argb_u16_to_RGB565(u8 *dst_pixels, struct pixel_argb_u16 *in_pixel)
{
	u16 *pixels = (u16 *)dst_pixels;

	s64 fp_rb_ratio = drm_fixp_div(drm_int2fixp(65535), drm_int2fixp(31));
	s64 fp_g_ratio = drm_fixp_div(drm_int2fixp(65535), drm_int2fixp(63));

	s64 fp_r = drm_int2fixp(in_pixel->r);
	s64 fp_g = drm_int2fixp(in_pixel->g);
	s64 fp_b = drm_int2fixp(in_pixel->b);

	/*
	 * Round to nearest instead of truncating, matching the rounding
	 * used by RGB565_to_argb_u16(); truncation made a read/write
	 * round trip systematically darken pixels.
	 */
	u16 r = drm_fixp2int_round(drm_fixp_div(fp_r, fp_rb_ratio));
	u16 g = drm_fixp2int_round(drm_fixp_div(fp_g, fp_g_ratio));
	u16 b = drm_fixp2int_round(drm_fixp_div(fp_b, fp_rb_ratio));

	*pixels = cpu_to_le16(r << 11 | g << 5 | b);
}
/* Convert one composed line and store it in the writeback buffer at row @y. */
void vkms_writeback_row(struct vkms_writeback_job *wb,
			const struct line_buffer *src_buffer, int y)
{
	struct vkms_frame_info *frame_info = &wb->wb_frame_info;
	struct pixel_argb_u16 *in_pixels = src_buffer->pixels;
	u8 *dst_pixels = packed_pixels_addr(frame_info, frame_info->dst.x1, y);
	/* Clamp to the source buffer so we never read past its end. */
	int x_limit = min_t(size_t, drm_rect_width(&frame_info->dst), src_buffer->n_pixels);

	for (size_t x = 0; x < x_limit; x++, dst_pixels += frame_info->cpp)
		wb->pixel_write(dst_pixels, &in_pixels[x]);
}
/*
 * Look up the reader that converts @format to struct pixel_argb_u16.
 * Returns NULL for unsupported formats.
 */
void *get_pixel_conversion_function(u32 format)
{
	switch (format) {
	case DRM_FORMAT_ARGB8888:
		return ARGB8888_to_argb_u16;
	case DRM_FORMAT_XRGB8888:
		return XRGB8888_to_argb_u16;
	case DRM_FORMAT_ARGB16161616:
		return ARGB16161616_to_argb_u16;
	case DRM_FORMAT_XRGB16161616:
		return XRGB16161616_to_argb_u16;
	case DRM_FORMAT_RGB565:
		return RGB565_to_argb_u16;
	default:
		return NULL;
	}
}
/*
 * Look up the writer that converts struct pixel_argb_u16 to @format.
 * Returns NULL for unsupported formats.
 */
void *get_pixel_write_function(u32 format)
{
	switch (format) {
	case DRM_FORMAT_ARGB8888:
		return argb_u16_to_ARGB8888;
	case DRM_FORMAT_XRGB8888:
		return argb_u16_to_XRGB8888;
	case DRM_FORMAT_ARGB16161616:
		return argb_u16_to_ARGB16161616;
	case DRM_FORMAT_XRGB16161616:
		return argb_u16_to_XRGB16161616;
	case DRM_FORMAT_RGB565:
		return argb_u16_to_RGB565;
	default:
		return NULL;
	}
}
| linux-master | drivers/gpu/drm/vkms/vkms_formats.c |
// SPDX-License-Identifier: GPL-2.0+
#include <linux/iosys-map.h>
#include <drm/drm_atomic.h>
#include <drm/drm_edid.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_writeback.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_gem_shmem_helper.h>
#include "vkms_drv.h"
#include "vkms_formats.h"
/* Pixel formats the writeback connector can emit. */
static const u32 vkms_wb_formats[] = {
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XRGB16161616,
	DRM_FORMAT_ARGB16161616,
	DRM_FORMAT_RGB565
};
/* Writeback connector ops; state handling delegated to the atomic helpers. */
static const struct drm_connector_funcs vkms_wb_connector_funcs = {
	.fill_modes = drm_helper_probe_single_connector_modes,
	.destroy = drm_connector_cleanup,
	.reset = drm_atomic_helper_connector_reset,
	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
/*
 * Validate a queued writeback job: the target framebuffer must match the
 * CRTC resolution and pass the generic writeback-encoder checks.
 */
static int vkms_wb_encoder_atomic_check(struct drm_encoder *encoder,
					struct drm_crtc_state *crtc_state,
					struct drm_connector_state *conn_state)
{
	const struct drm_display_mode *mode = &crtc_state->mode;
	struct drm_framebuffer *fb;
	int ret;

	/* Nothing to validate when no writeback job is queued. */
	if (!conn_state->writeback_job || !conn_state->writeback_job->fb)
		return 0;

	fb = conn_state->writeback_job->fb;
	if (fb->width != mode->hdisplay || fb->height != mode->vdisplay) {
		DRM_DEBUG_KMS("Invalid framebuffer size %ux%u\n",
			      fb->width, fb->height);
		return -EINVAL;
	}

	ret = drm_atomic_helper_check_wb_encoder_state(encoder, conn_state);
	if (ret < 0)
		return ret;

	return 0;
}
/* Only atomic_check is needed for the virtual writeback encoder. */
static const struct drm_encoder_helper_funcs vkms_wb_encoder_helper_funcs = {
	.atomic_check = vkms_wb_encoder_atomic_check,
};
static int vkms_wb_connector_get_modes(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
return drm_add_modes_noedid(connector, dev->mode_config.max_width,
dev->mode_config.max_height);
}
/*
 * Allocate per-job state and map the target framebuffer so the composer
 * can write into it.  Undone by vkms_wb_cleanup_job().
 */
static int vkms_wb_prepare_job(struct drm_writeback_connector *wb_connector,
			       struct drm_writeback_job *job)
{
	struct vkms_writeback_job *vkmsjob;
	int err;

	/* Jobs without a framebuffer need no preparation. */
	if (!job->fb)
		return 0;

	vkmsjob = kzalloc(sizeof(*vkmsjob), GFP_KERNEL);
	if (!vkmsjob)
		return -ENOMEM;

	err = drm_gem_fb_vmap(job->fb, vkmsjob->wb_frame_info.map, vkmsjob->data);
	if (err) {
		DRM_ERROR("vmap failed: %d\n", err);
		kfree(vkmsjob);
		return err;
	}

	/* Hold a framebuffer reference for the lifetime of the job. */
	vkmsjob->wb_frame_info.fb = job->fb;
	drm_framebuffer_get(vkmsjob->wb_frame_info.fb);

	job->priv = vkmsjob;

	return 0;
}
/* Undo vkms_wb_prepare_job(): unmap, drop the fb reference, idle the composer. */
static void vkms_wb_cleanup_job(struct drm_writeback_connector *connector,
				struct drm_writeback_job *job)
{
	struct vkms_writeback_job *vkmsjob = job->priv;

	if (!job->fb)
		return;

	drm_gem_fb_vunmap(job->fb, vkmsjob->wb_frame_info.map);
	drm_framebuffer_put(vkmsjob->wb_frame_info.fb);

	/* The job is finished: release the composer reference taken at commit. */
	vkms_set_composer(&drm_device_to_vkms_device(job->fb->dev)->output, false);

	kfree(vkmsjob);
}
/*
 * Commit a writeback job: enable the composer, publish the job to the
 * composer state under composer_lock, fill in the frame metadata and
 * queue the job for completion signalling.
 */
static void vkms_wb_atomic_commit(struct drm_connector *conn,
				  struct drm_atomic_state *state)
{
	struct drm_connector_state *connector_state = drm_atomic_get_new_connector_state(state,
											 conn);
	struct vkms_device *vkmsdev = drm_device_to_vkms_device(conn->dev);
	struct vkms_output *output = &vkmsdev->output;
	struct drm_writeback_connector *wb_conn = &output->wb_connector;
	struct drm_connector_state *conn_state = wb_conn->base.state;
	struct vkms_crtc_state *crtc_state = output->composer_state;
	struct drm_framebuffer *fb = connector_state->writeback_job->fb;
	u16 crtc_height = crtc_state->base.crtc->mode.vdisplay;
	u16 crtc_width = crtc_state->base.crtc->mode.hdisplay;
	struct vkms_writeback_job *active_wb;
	struct vkms_frame_info *wb_frame_info;
	u32 wb_format = fb->format->format;

	if (!conn_state)
		return;

	/* Keep the composer running while a writeback job is pending;
	 * the matching disable happens in vkms_wb_cleanup_job(). */
	vkms_set_composer(&vkmsdev->output, true);

	active_wb = conn_state->writeback_job->priv;
	wb_frame_info = &active_wb->wb_frame_info;

	/* Hand the job to the composer worker under composer_lock. */
	spin_lock_irq(&output->composer_lock);
	crtc_state->active_writeback = active_wb;
	crtc_state->wb_pending = true;
	spin_unlock_irq(&output->composer_lock);

	wb_frame_info->offset = fb->offsets[0];
	wb_frame_info->pitch = fb->pitches[0];
	wb_frame_info->cpp = fb->format->cpp[0];

	drm_writeback_queue_job(wb_conn, connector_state);
	active_wb->pixel_write = get_pixel_write_function(wb_format);
	/* The writeback buffer covers the full CRTC area. */
	drm_rect_init(&wb_frame_info->src, 0, 0, crtc_width, crtc_height);
	drm_rect_init(&wb_frame_info->dst, 0, 0, crtc_width, crtc_height);
}
/* Writeback connector helpers: mode list plus job prepare/commit/cleanup. */
static const struct drm_connector_helper_funcs vkms_wb_conn_helper_funcs = {
	.get_modes = vkms_wb_connector_get_modes,
	.prepare_writeback_job = vkms_wb_prepare_job,
	.cleanup_writeback_job = vkms_wb_cleanup_job,
	.atomic_commit = vkms_wb_atomic_commit,
};
/*
 * Register the writeback connector on the device.  The final argument is
 * the possible-CRTCs mask (bit 0: the single vkms CRTC).
 * Returns 0 on success or a negative errno.
 */
int vkms_enable_writeback_connector(struct vkms_device *vkmsdev)
{
	struct drm_writeback_connector *wb = &vkmsdev->output.wb_connector;

	drm_connector_helper_add(&wb->base, &vkms_wb_conn_helper_funcs);

	return drm_writeback_connector_init(&vkmsdev->drm, wb,
					    &vkms_wb_connector_funcs,
					    &vkms_wb_encoder_helper_funcs,
					    vkms_wb_formats,
					    ARRAY_SIZE(vkms_wb_formats),
					    1);
}
| linux-master | drivers/gpu/drm/vkms/vkms_writeback.c |
// SPDX-License-Identifier: GPL-2.0+
#include <linux/crc32.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_fixed.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_vblank.h>
#include <linux/minmax.h>
#include "vkms_drv.h"
/*
 * Premultiplied "over" operator for one channel: @src already carries its
 * alpha, so @dst contributes only the (0xffff - alpha) remainder.
 */
static u16 pre_mul_blend_channel(u16 src, u16 dst, u16 alpha)
{
	u32 new_color;

	/*
	 * Promote to u32 explicitly: a bare u16 operand promotes to signed
	 * int, and 0xffff * 0xffff exceeds INT_MAX, which is signed
	 * overflow (UB outside of -fno-strict-overflow builds).
	 */
	new_color = (u32)src * 0xffff + (u32)dst * (0xffffu - alpha);

	return DIV_ROUND_CLOSEST(new_color, 0xffff);
}
/**
 * pre_mul_alpha_blend - alpha blending equation
 * @frame_info: Source framebuffer's metadata
 * @stage_buffer: The line with the pixels from src_plane
 * @output_buffer: A line buffer that receives all the blends output
 *
 * Using the information from the `frame_info`, this blends only the
 * necessary pixels from the `stage_buffer` to the `output_buffer`
 * using premultiplied blend formula.
 *
 * The current DRM assumption is that pixel color values have been already
 * pre-multiplied with the alpha channel values. See more
 * drm_plane_create_blend_mode_property(). Also, this formula assumes a
 * completely opaque background.
 */
static void pre_mul_alpha_blend(struct vkms_frame_info *frame_info,
				struct line_buffer *stage_buffer,
				struct line_buffer *output_buffer)
{
	/* Offset into the output line where this plane's dst rect starts. */
	int x_dst = frame_info->dst.x1;
	struct pixel_argb_u16 *out = output_buffer->pixels + x_dst;
	struct pixel_argb_u16 *in = stage_buffer->pixels;
	/* Clamp to the staging buffer so we never read past its end. */
	int x_limit = min_t(size_t, drm_rect_width(&frame_info->dst),
			    stage_buffer->n_pixels);

	for (int x = 0; x < x_limit; x++) {
		/* The background is opaque, so the result always is too. */
		out[x].a = (u16)0xffff;
		out[x].r = pre_mul_blend_channel(in[x].r, out[x].r, in[x].a);
		out[x].g = pre_mul_blend_channel(in[x].g, out[x].g, in[x].a);
		out[x].b = pre_mul_blend_channel(in[x].b, out[x].b, in[x].a);
	}
}
/* Map CRTC row @y to the plane's source row, honoring rotation/reflection. */
static int get_y_pos(struct vkms_frame_info *frame_info, int y)
{
	/* Vertical mirroring flips the row within the rotated rect. */
	if (frame_info->rotation & DRM_MODE_REFLECT_Y)
		return drm_rect_height(&frame_info->rotated) - y - 1;

	/* 90/270 degree rotations exchange rows and columns. */
	switch (frame_info->rotation & DRM_MODE_ROTATE_MASK) {
	case DRM_MODE_ROTATE_90:
		return frame_info->rotated.x2 - y - 1;
	case DRM_MODE_ROTATE_270:
		return y + frame_info->rotated.x1;
	default:
		return y;
	}
}
/* Is row @pos inside the plane's (rotation-adjusted) vertical extent? */
static bool check_limit(struct vkms_frame_info *frame_info, int pos)
{
	/* For 90/270 the vertical extent is the rotated rect's width. */
	if (drm_rotation_90_or_270(frame_info->rotation))
		return pos >= 0 && pos < drm_rect_width(&frame_info->rotated);

	return pos >= frame_info->rotated.y1 && pos < frame_info->rotated.y2;
}
/* Reset every pixel of @output_buffer to @background_color. */
static void fill_background(const struct pixel_argb_u16 *background_color,
			    struct line_buffer *output_buffer)
{
	size_t i;

	for (i = 0; i < output_buffer->n_pixels; i++)
		output_buffer->pixels[i] = *background_color;
}
/* Linear interpolation lerp(a, b, t) = a + (b - a) * t, t in fixed point. */
static u16 lerp_u16(u16 a, u16 b, s64 t)
{
	s64 a_fp = drm_int2fixp(a);
	s64 b_fp = drm_int2fixp(b);

	return drm_fixp2int(a_fp + drm_fixp_mul(b_fp - a_fp, t));
}
/* Fixed-point LUT index for a 16-bit channel value. */
static s64 get_lut_index(const struct vkms_color_lut *lut, u16 channel_value)
{
	return drm_fixp_mul(drm_int2fixp(channel_value),
			    lut->channel_value2index_ratio);
}
/*
 * This enum is related to the positions of the variables inside
 * `struct drm_color_lut`, so the order of both needs to be the same.
 * The values are used to index into a drm_color_lut entry viewed as an
 * array of four __u16 fields (see apply_lut_to_channel_value()).
 */
enum lut_channel {
	LUT_RED = 0,
	LUT_GREEN,
	LUT_BLUE,
	LUT_RESERVED
};
/*
 * Look up @channel_value in the gamma LUT, linearly interpolating between
 * the two nearest entries when the fixed-point index is fractional.
 */
static u16 apply_lut_to_channel_value(const struct vkms_color_lut *lut, u16 channel_value,
				      enum lut_channel channel)
{
	s64 lut_index = get_lut_index(lut, channel_value);

	/*
	 * This checks if `struct drm_color_lut` has any gap added by the compiler
	 * between the struct fields.
	 */
	static_assert(sizeof(struct drm_color_lut) == sizeof(__u16) * 4);

	/* Entries bracketing the fractional index (equal when it is exact). */
	u16 *floor_lut_value = (__u16 *)&lut->base[drm_fixp2int(lut_index)];
	u16 *ceil_lut_value = (__u16 *)&lut->base[drm_fixp2int_ceil(lut_index)];

	u16 floor_channel_value = floor_lut_value[channel];
	u16 ceil_channel_value = ceil_lut_value[channel];

	/* The masked bits are the fractional part used as the lerp factor. */
	return lerp_u16(floor_channel_value, ceil_channel_value,
			lut_index & DRM_FIXED_DECIMAL_MASK);
}
/* Run the gamma LUT over a composed line; no-op when no LUT is set. */
static void apply_lut(const struct vkms_crtc_state *crtc_state, struct line_buffer *output_buffer)
{
	size_t x;

	if (!crtc_state->gamma_lut.base || !crtc_state->gamma_lut.lut_length)
		return;

	for (x = 0; x < output_buffer->n_pixels; x++) {
		struct pixel_argb_u16 *pixel = &output_buffer->pixels[x];

		pixel->r = apply_lut_to_channel_value(&crtc_state->gamma_lut, pixel->r, LUT_RED);
		pixel->g = apply_lut_to_channel_value(&crtc_state->gamma_lut, pixel->g, LUT_GREEN);
		pixel->b = apply_lut_to_channel_value(&crtc_state->gamma_lut, pixel->b, LUT_BLUE);
	}
}
/**
 * blend - blend the pixels from all planes and compute crc
 * @wb: The writeback frame buffer metadata
 * @crtc_state: The crtc state
 * @crc32: The crc output of the final frame
 * @output_buffer: A buffer of a row that will receive the result of the blend(s)
 * @stage_buffer: The line with the pixels from plane being blend to the output
 * @row_size: The size, in bytes, of a single row
 *
 * This function blends the pixels (Using the `pre_mul_alpha_blend`)
 * from all planes, calculates the crc32 of the output from the former step,
 * and, if necessary, convert and store the output to the writeback buffer.
 */
static void blend(struct vkms_writeback_job *wb,
		  struct vkms_crtc_state *crtc_state,
		  u32 *crc32, struct line_buffer *stage_buffer,
		  struct line_buffer *output_buffer, size_t row_size)
{
	struct vkms_plane_state **plane = crtc_state->active_planes;
	u32 n_active_planes = crtc_state->num_active_planes;
	int y_pos;

	const struct pixel_argb_u16 background_color = { .a = 0xffff };

	size_t crtc_y_limit = crtc_state->base.crtc->mode.vdisplay;

	for (size_t y = 0; y < crtc_y_limit; y++) {
		fill_background(&background_color, output_buffer);

		/* The active planes are composed associatively in z-order. */
		for (size_t i = 0; i < n_active_planes; i++) {
			y_pos = get_y_pos(plane[i]->frame_info, y);

			if (!check_limit(plane[i]->frame_info, y_pos))
				continue;

			vkms_compose_row(stage_buffer, plane[i], y_pos);
			pre_mul_alpha_blend(plane[i]->frame_info, stage_buffer,
					    output_buffer);
		}

		apply_lut(crtc_state, output_buffer);

		*crc32 = crc32_le(*crc32, (void *)output_buffer->pixels, row_size);

		/*
		 * output_buffer holds the composed CRTC row 'y', so write it
		 * back at row 'y'.  Using 'y_pos' here was wrong: it is the
		 * source row of whichever plane was iterated last (wrong with
		 * rotation) and uninitialized when no plane is active.
		 */
		if (wb)
			vkms_writeback_row(wb, output_buffer, y);
	}
}
/* Verify every active plane has a reader and a pending wb job a writer. */
static int check_format_funcs(struct vkms_crtc_state *crtc_state,
			      struct vkms_writeback_job *active_wb)
{
	struct vkms_plane_state **planes = crtc_state->active_planes;
	u32 n_planes = crtc_state->num_active_planes;
	size_t i;

	for (i = 0; i < n_planes; i++) {
		if (!planes[i]->pixel_read)
			return -1;
	}

	if (active_wb && !active_wb->pixel_write)
		return -1;

	return 0;
}
/* Verify every active plane's framebuffer is actually mapped. */
static int check_iosys_map(struct vkms_crtc_state *crtc_state)
{
	struct vkms_plane_state **plane_state = crtc_state->active_planes;
	u32 n_planes = crtc_state->num_active_planes;
	size_t i;

	for (i = 0; i < n_planes; i++) {
		if (iosys_map_is_null(&plane_state[i]->frame_info->map[0]))
			return -1;
	}

	return 0;
}
/*
 * Allocate the per-line staging and output buffers, run blend() over the
 * whole frame, and free the buffers.  Returns 0 or a negative errno.
 */
static int compose_active_planes(struct vkms_writeback_job *active_wb,
				 struct vkms_crtc_state *crtc_state,
				 u32 *crc32)
{
	size_t line_width, pixel_size = sizeof(struct pixel_argb_u16);
	struct line_buffer output_buffer, stage_buffer;
	int ret = 0;

	/*
	 * This check exists so we can call `crc32_le` for the entire line
	 * instead doing it for each channel of each pixel in case
	 * `struct `pixel_argb_u16` had any gap added by the compiler
	 * between the struct fields.
	 */
	static_assert(sizeof(struct pixel_argb_u16) == 8);

	if (WARN_ON(check_iosys_map(crtc_state)))
		return -EINVAL;

	if (WARN_ON(check_format_funcs(crtc_state, active_wb)))
		return -EINVAL;

	line_width = crtc_state->base.crtc->mode.hdisplay;
	stage_buffer.n_pixels = line_width;
	output_buffer.n_pixels = line_width;

	/*
	 * Note: the stage buffer is the intermediate (per-plane) line and
	 * the output buffer is the final composed line; the two error
	 * messages below were previously swapped.
	 */
	stage_buffer.pixels = kvmalloc(line_width * pixel_size, GFP_KERNEL);
	if (!stage_buffer.pixels) {
		DRM_ERROR("Cannot allocate memory for intermediate line buffer");
		return -ENOMEM;
	}

	output_buffer.pixels = kvmalloc(line_width * pixel_size, GFP_KERNEL);
	if (!output_buffer.pixels) {
		DRM_ERROR("Cannot allocate memory for the output line buffer");
		ret = -ENOMEM;
		goto free_stage_buffer;
	}

	blend(active_wb, crtc_state, crc32, &stage_buffer,
	      &output_buffer, line_width * pixel_size);

	kvfree(output_buffer.pixels);
free_stage_buffer:
	kvfree(stage_buffer.pixels);

	return ret;
}
/**
 * vkms_composer_worker - ordered work_struct to compute CRC
 *
 * @work: work_struct
 *
 * Work handler for composing and computing CRCs. work_struct scheduled in
 * an ordered workqueue that's periodically scheduled to run by
 * vkms_vblank_simulate() and flushed at vkms_atomic_commit_tail().
 */
void vkms_composer_worker(struct work_struct *work)
{
	struct vkms_crtc_state *crtc_state = container_of(work,
						struct vkms_crtc_state,
						composer_work);
	struct drm_crtc *crtc = crtc_state->base.crtc;
	struct vkms_writeback_job *active_wb = crtc_state->active_writeback;
	struct vkms_output *out = drm_crtc_to_vkms_output(crtc);
	bool crc_pending, wb_pending;
	u64 frame_start, frame_end;
	u32 crc32 = 0;
	int ret;

	/*
	 * Snapshot and clear the pending-work flags under composer_lock so
	 * the vblank hrtimer and this worker never see half-updated state.
	 */
	spin_lock_irq(&out->composer_lock);
	frame_start = crtc_state->frame_start;
	frame_end = crtc_state->frame_end;
	crc_pending = crtc_state->crc_pending;
	wb_pending = crtc_state->wb_pending;
	crtc_state->frame_start = 0;
	crtc_state->frame_end = 0;
	crtc_state->crc_pending = false;

	/* Refresh the cached gamma LUT metadata from the CRTC state blob. */
	if (crtc->state->gamma_lut) {
		s64 max_lut_index_fp;
		s64 u16_max_fp = drm_int2fixp(0xffff);

		crtc_state->gamma_lut.base = (struct drm_color_lut *)crtc->state->gamma_lut->data;
		crtc_state->gamma_lut.lut_length =
			crtc->state->gamma_lut->length / sizeof(struct drm_color_lut);
		max_lut_index_fp = drm_int2fixp(crtc_state->gamma_lut.lut_length  - 1);
		crtc_state->gamma_lut.channel_value2index_ratio = drm_fixp_div(max_lut_index_fp,
									       u16_max_fp);

	} else {
		crtc_state->gamma_lut.base = NULL;
	}

	spin_unlock_irq(&out->composer_lock);

	/*
	 * We raced with the vblank hrtimer and previous work already computed
	 * the crc, nothing to do.
	 */
	if (!crc_pending)
		return;

	if (wb_pending)
		ret = compose_active_planes(active_wb, crtc_state, &crc32);
	else
		ret = compose_active_planes(NULL, crtc_state, &crc32);

	if (ret)
		return;

	if (wb_pending) {
		drm_writeback_signal_completion(&out->wb_connector, 0);
		spin_lock_irq(&out->composer_lock);
		crtc_state->wb_pending = false;
		spin_unlock_irq(&out->composer_lock);
	}

	/*
	 * The worker can fall behind the vblank hrtimer, make sure we catch up.
	 */
	while (frame_start <= frame_end)
		drm_crtc_add_crc_entry(crtc, true, frame_start++, &crc32);
}
/* VKMS exposes a single CRC source, "auto". */
static const char * const pipe_crc_sources[] = {"auto"};

const char *const *vkms_get_crc_sources(struct drm_crtc *crtc,
					size_t *count)
{
	*count = ARRAY_SIZE(pipe_crc_sources);
	return pipe_crc_sources;
}
/*
 * Parse a debugfs CRC source name.  NULL disables CRC generation and
 * only "auto" enables it; any other name is rejected with -EINVAL.
 */
static int vkms_crc_parse_source(const char *src_name, bool *enabled)
{
	if (!src_name) {
		*enabled = false;
		return 0;
	}

	if (strcmp(src_name, "auto") == 0) {
		*enabled = true;
		return 0;
	}

	*enabled = false;
	return -EINVAL;
}
/*
 * Validate a CRC source name from debugfs.  On success reports that one
 * CRC value is produced per frame.
 */
int vkms_verify_crc_source(struct drm_crtc *crtc, const char *src_name,
			   size_t *values_cnt)
{
	bool enabled;

	if (vkms_crc_parse_source(src_name, &enabled) < 0) {
		DRM_DEBUG_DRIVER("unknown source %s\n", src_name);
		return -EINVAL;
	}

	*values_cnt = 1;

	return 0;
}
/*
 * Enable or disable the composer.  Each enable holds a vblank reference
 * so the fake vblank keeps firing while composition is needed; the
 * reference is dropped when a previously-enabled composer is updated.
 */
void vkms_set_composer(struct vkms_output *out, bool enabled)
{
	bool old_enabled;

	/* Take the vblank reference before publishing the new state. */
	if (enabled)
		drm_crtc_vblank_get(&out->crtc);

	spin_lock_irq(&out->lock);
	old_enabled = out->composer_enabled;
	out->composer_enabled = enabled;
	spin_unlock_irq(&out->lock);

	/* Drop the reference held by the previous enable, if any. */
	if (old_enabled)
		drm_crtc_vblank_put(&out->crtc);
}
/* debugfs entry point: (un)gate the composer to match the CRC source. */
int vkms_set_crc_source(struct drm_crtc *crtc, const char *src_name)
{
	struct vkms_output *out = drm_crtc_to_vkms_output(crtc);
	bool enabled = false;
	int ret;

	ret = vkms_crc_parse_source(src_name, &enabled);

	/* Even a failed parse leaves 'enabled' false, disabling the composer. */
	vkms_set_composer(out, enabled);

	return ret;
}
| linux-master | drivers/gpu/drm/vkms/vkms_composer.c |
// SPDX-License-Identifier: GPL-2.0+
/**
* DOC: vkms (Virtual Kernel Modesetting)
*
* VKMS is a software-only model of a KMS driver that is useful for testing
* and for running X (or similar) on headless machines. VKMS aims to enable
* a virtual display with no need of a hardware display capability, releasing
* the GPU in DRM API tests.
*/
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <drm/drm_gem.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_generic.h>
#include <drm/drm_file.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_managed.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_vblank.h>
#include "vkms_drv.h"
#include <drm/drm_print.h>
#include <drm/drm_debugfs.h>
#define DRIVER_NAME "vkms"
#define DRIVER_DESC "Virtual Kernel Mode Setting"
#define DRIVER_DATE "20180514"
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
/* Device configuration built from the module parameters below. */
static struct vkms_config *default_config;

/* Expose a cursor plane (default: on). */
static bool enable_cursor = true;
module_param_named(enable_cursor, enable_cursor, bool, 0444);
MODULE_PARM_DESC(enable_cursor, "Enable/Disable cursor support");

/* Expose a writeback connector (default: on). */
static bool enable_writeback = true;
module_param_named(enable_writeback, enable_writeback, bool, 0444);
MODULE_PARM_DESC(enable_writeback, "Enable/Disable writeback connector support");

/* Expose overlay planes (default: off). */
static bool enable_overlay;
module_param_named(enable_overlay, enable_overlay, bool, 0444);
MODULE_PARM_DESC(enable_overlay, "Enable/Disable overlay support");

DEFINE_DRM_GEM_FOPS(vkms_driver_fops);
/* Final device teardown: destroy the composer workqueue if it exists. */
static void vkms_release(struct drm_device *dev)
{
	struct vkms_device *vkms = drm_device_to_vkms_device(dev);
	struct workqueue_struct *wq = vkms->output.composer_workq;

	if (wq)
		destroy_workqueue(wq);
}
/*
 * Custom commit tail: identical to the default helper sequence, plus a
 * flush of each CRTC's composer work after the flip is done so CRC
 * computation for the old state finishes before its planes are cleaned up.
 */
static void vkms_atomic_commit_tail(struct drm_atomic_state *old_state)
{
	struct drm_device *dev = old_state->dev;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state;
	int i;

	drm_atomic_helper_commit_modeset_disables(dev, old_state);

	drm_atomic_helper_commit_planes(dev, old_state, 0);

	drm_atomic_helper_commit_modeset_enables(dev, old_state);

	drm_atomic_helper_fake_vblank(old_state);

	drm_atomic_helper_commit_hw_done(old_state);

	drm_atomic_helper_wait_for_flip_done(dev, old_state);

	/* Let the composer drain before the old framebuffers go away. */
	for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) {
		struct vkms_crtc_state *vkms_state =
			to_vkms_crtc_state(old_crtc_state);

		flush_work(&vkms_state->composer_work);
	}

	drm_atomic_helper_cleanup_planes(dev, old_state);
}
/* debugfs: dump the feature knobs the device was instantiated with. */
static int vkms_config_show(struct seq_file *m, void *data)
{
	struct drm_debugfs_entry *entry = m->private;
	struct vkms_device *vkmsdev = drm_device_to_vkms_device(entry->dev);

	seq_printf(m, "writeback=%d\n", vkmsdev->config->writeback);
	seq_printf(m, "cursor=%d\n", vkmsdev->config->cursor);
	seq_printf(m, "overlay=%d\n", vkmsdev->config->overlay);

	return 0;
}
/* debugfs files registered at device creation. */
static const struct drm_debugfs_info vkms_config_debugfs_list[] = {
	{ "vkms_config", vkms_config_show, 0 },
};
/* DRM driver description: atomic modesetting with shmem-backed GEM objects. */
static const struct drm_driver vkms_driver = {
	.driver_features	= DRIVER_MODESET | DRIVER_ATOMIC | DRIVER_GEM,
	.release		= vkms_release,
	.fops			= &vkms_driver_fops,
	DRM_GEM_SHMEM_DRIVER_OPS,

	.name			= DRIVER_NAME,
	.desc			= DRIVER_DESC,
	.date			= DRIVER_DATE,
	.major			= DRIVER_MAJOR,
	.minor			= DRIVER_MINOR,
};
/*
 * Atomic check: reject gamma LUTs with more entries than VKMS_LUT_SIZE
 * before handing off to the generic atomic checker.
 */
static int vkms_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_crtc_state;
	int i;

	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (!new_crtc_state->gamma_lut || !new_crtc_state->color_mgmt_changed)
			continue;

		/*
		 * The blob length is in bytes; divide by the size of one
		 * LUT entry (struct drm_color_lut), not the size of a
		 * pointer to it, to get the entry count.  This matches
		 * the computation in vkms_composer_worker().
		 */
		if (new_crtc_state->gamma_lut->length / sizeof(struct drm_color_lut)
		    > VKMS_LUT_SIZE)
			return -EINVAL;
	}

	return drm_atomic_helper_check(dev, state);
}
/* Mode-config ops: generic fb creation and commit, custom atomic check. */
static const struct drm_mode_config_funcs vkms_mode_funcs = {
	.fb_create = drm_gem_fb_create,
	.atomic_check = vkms_atomic_check,
	.atomic_commit = drm_atomic_helper_commit,
};

/* Install the custom commit tail (see vkms_atomic_commit_tail()). */
static const struct drm_mode_config_helper_funcs vkms_mode_config_helpers = {
	.atomic_commit_tail = vkms_atomic_commit_tail,
};
/*
 * Initialize mode configuration limits and hooks, then build the output
 * pipeline.  Returns 0 on success or a negative errno.
 */
static int vkms_modeset_init(struct vkms_device *vkmsdev)
{
	struct drm_device *dev = &vkmsdev->drm;
	int ret;

	ret = drmm_mode_config_init(dev);
	if (ret)
		return ret;

	dev->mode_config.funcs = &vkms_mode_funcs;
	dev->mode_config.min_width = XRES_MIN;
	dev->mode_config.min_height = YRES_MIN;
	dev->mode_config.max_width = XRES_MAX;
	dev->mode_config.max_height = YRES_MAX;
	dev->mode_config.cursor_width = 512;
	dev->mode_config.cursor_height = 512;
	/* FIXME: There's a confusion between bpp and depth between this and
	 * fbdev helpers. We have to go with 0, meaning "pick the default",
	 * which ix XRGB8888 in all cases. */
	dev->mode_config.preferred_depth = 0;
	dev->mode_config.helper_private = &vkms_mode_config_helpers;

	return vkms_output_init(vkmsdev, 0);
}
/*
 * Create the backing platform device and register the DRM device on it.
 *
 * All device-managed allocations are collected in an open devres group so
 * that a failure anywhere after devres_open_group() can be unwound with a
 * single devres_release_group() before the platform device itself is
 * unregistered.  On success the created device is linked into @config via
 * config->dev so vkms_destroy() can find it later.
 *
 * Returns 0 on success or a negative errno.
 */
static int vkms_create(struct vkms_config *config)
{
	int ret;
	struct platform_device *pdev;
	struct vkms_device *vkms_device;
	pdev = platform_device_register_simple(DRIVER_NAME, -1, NULL, 0);
	if (IS_ERR(pdev))
		return PTR_ERR(pdev);
	if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL)) {
		ret = -ENOMEM;
		goto out_unregister;
	}
	vkms_device = devm_drm_dev_alloc(&pdev->dev, &vkms_driver,
					 struct vkms_device, drm);
	if (IS_ERR(vkms_device)) {
		ret = PTR_ERR(vkms_device);
		goto out_devres;
	}
	vkms_device->platform = pdev;
	vkms_device->config = config;
	config->dev = vkms_device;
	ret = dma_coerce_mask_and_coherent(vkms_device->drm.dev,
					   DMA_BIT_MASK(64));
	if (ret) {
		DRM_ERROR("Could not initialize DMA support\n");
		goto out_devres;
	}
	/* One vblank-capable CRTC. */
	ret = drm_vblank_init(&vkms_device->drm, 1);
	if (ret) {
		DRM_ERROR("Failed to vblank\n");
		goto out_devres;
	}
	ret = vkms_modeset_init(vkms_device);
	if (ret)
		goto out_devres;
	drm_debugfs_add_files(&vkms_device->drm, vkms_config_debugfs_list,
			      ARRAY_SIZE(vkms_config_debugfs_list));
	ret = drm_dev_register(&vkms_device->drm, 0);
	if (ret)
		goto out_devres;
	drm_fbdev_generic_setup(&vkms_device->drm, 0);
	return 0;
out_devres:
	devres_release_group(&pdev->dev, NULL);
out_unregister:
	platform_device_unregister(pdev);
	return ret;
}
/* Module entry point: build the default configuration and create the device. */
static int __init vkms_init(void)
{
	struct vkms_config *config;
	int ret;

	config = kmalloc(sizeof(*config), GFP_KERNEL);
	if (!config)
		return -ENOMEM;

	config->cursor = enable_cursor;
	config->writeback = enable_writeback;
	config->overlay = enable_overlay;
	default_config = config;

	ret = vkms_create(config);
	if (ret)
		kfree(config);

	return ret;
}
/*
 * Tear down the device previously built by vkms_create(): unregister the
 * DRM device, shut down outstanding atomic state, then release the devres
 * group and the platform device.  Resets config->dev so a later call is a
 * no-op.
 */
static void vkms_destroy(struct vkms_config *config)
{
	struct platform_device *pdev;
	if (!config->dev) {
		DRM_INFO("vkms_device is NULL.\n");
		return;
	}
	pdev = config->dev->platform;
	drm_dev_unregister(&config->dev->drm);
	drm_atomic_helper_shutdown(&config->dev->drm);
	devres_release_group(&pdev->dev, NULL);
	platform_device_unregister(pdev);
	config->dev = NULL;
}
/* Module exit: destroy the device (if created) and free the default config. */
static void __exit vkms_exit(void)
{
	if (default_config->dev)
		vkms_destroy(default_config);
	kfree(default_config);
}
module_init(vkms_init);
module_exit(vkms_exit);
MODULE_AUTHOR("Haneen Mohammed <[email protected]>");
MODULE_AUTHOR("Rodrigo Siqueira <[email protected]>");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
| linux-master | drivers/gpu/drm/vkms/vkms_drv.c |
// SPDX-License-Identifier: GPL-2.0+
#include <linux/dma-fence.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
#include "vkms_drv.h"
/*
 * hrtimer callback that stands in for a hardware vblank interrupt.
 *
 * Advances the timer by one frame period, signals the vblank event to the
 * DRM core and, when CRC/composer output is enabled, records the frame
 * window and queues the composer worker.  The whole callback runs inside
 * a dma-fence signalling critical section annotation, since sending the
 * vblank event can signal fences.
 */
static enum hrtimer_restart vkms_vblank_simulate(struct hrtimer *timer)
{
	struct vkms_output *output = container_of(timer, struct vkms_output,
						  vblank_hrtimer);
	struct drm_crtc *crtc = &output->crtc;
	struct vkms_crtc_state *state;
	u64 ret_overrun;
	bool ret, fence_cookie;
	fence_cookie = dma_fence_begin_signalling();
	ret_overrun = hrtimer_forward_now(&output->vblank_hrtimer,
					  output->period_ns);
	/* More than one missed period means the timer fell behind. */
	if (ret_overrun != 1)
		pr_warn("%s: vblank timer overrun\n", __func__);
	/* output->lock serializes against the atomic commit (begin/flush). */
	spin_lock(&output->lock);
	ret = drm_crtc_handle_vblank(crtc);
	if (!ret)
		DRM_ERROR("vkms failure on handling vblank");
	state = output->composer_state;
	spin_unlock(&output->lock);
	if (state && output->composer_enabled) {
		u64 frame = drm_crtc_accurate_vblank_count(crtc);
		/* update frame_start only if a queued vkms_composer_worker()
		 * has read the data
		 */
		spin_lock(&output->composer_lock);
		if (!state->crc_pending)
			state->frame_start = frame;
		else
			DRM_DEBUG_DRIVER("crc worker falling behind, frame_start: %llu, frame_end: %llu\n",
					 state->frame_start, frame);
		state->frame_end = frame;
		state->crc_pending = true;
		spin_unlock(&output->composer_lock);
		ret = queue_work(output->composer_workq, &state->composer_work);
		if (!ret)
			DRM_DEBUG_DRIVER("Composer worker already queued\n");
	}
	dma_fence_end_signalling(fence_cookie);
	return HRTIMER_RESTART;
}
/* Arm the hrtimer that fakes vblank interrupts for this CRTC. */
static int vkms_enable_vblank(struct drm_crtc *crtc)
{
	struct vkms_output *output = drm_crtc_to_vkms_output(crtc);
	struct drm_vblank_crtc *vblank =
		&crtc->dev->vblank[drm_crtc_index(crtc)];

	drm_calc_timestamping_constants(crtc, &crtc->mode);

	hrtimer_init(&output->vblank_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	output->vblank_hrtimer.function = &vkms_vblank_simulate;
	output->period_ns = ktime_set(0, vblank->framedur_ns);
	hrtimer_start(&output->vblank_hrtimer, output->period_ns,
		      HRTIMER_MODE_REL);

	return 0;
}
/* Stop the simulated vblank by cancelling the hrtimer. */
static void vkms_disable_vblank(struct drm_crtc *crtc)
{
	struct vkms_output *out = drm_crtc_to_vkms_output(crtc);
	hrtimer_cancel(&out->vblank_hrtimer);
}
/*
 * Report the timestamp of the current/last vblank.
 *
 * With vblanks disabled just returns "now".  Otherwise the next hrtimer
 * expiry is used, corrected back by one period (see comment below).
 * Always returns true: the timestamp is considered valid.
 */
static bool vkms_get_vblank_timestamp(struct drm_crtc *crtc,
				      int *max_error, ktime_t *vblank_time,
				      bool in_vblank_irq)
{
	struct drm_device *dev = crtc->dev;
	unsigned int pipe = crtc->index;
	struct vkms_device *vkmsdev = drm_device_to_vkms_device(dev);
	struct vkms_output *output = &vkmsdev->output;
	struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
	if (!READ_ONCE(vblank->enabled)) {
		*vblank_time = ktime_get();
		return true;
	}
	*vblank_time = READ_ONCE(output->vblank_hrtimer.node.expires);
	if (WARN_ON(*vblank_time == vblank->time))
		return true;
	/*
	 * To prevent races we roll the hrtimer forward before we do any
	 * interrupt processing - this is how real hw works (the interrupt is
	 * only generated after all the vblank registers are updated) and what
	 * the vblank core expects. Therefore we need to always correct the
	 * timestamp by one frame.
	 */
	*vblank_time -= output->period_ns;
	return true;
}
/* Duplicate the CRTC state, giving the copy its own composer work item. */
static struct drm_crtc_state *
vkms_atomic_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct vkms_crtc_state *state;

	if (WARN_ON(!crtc->state))
		return NULL;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
	INIT_WORK(&state->composer_work, vkms_composer_worker);

	return &state->base;
}
/*
 * Free a CRTC state.  The composer work must no longer be pending at this
 * point (the commit tail flushes it), hence the WARN_ON.
 */
static void vkms_atomic_crtc_destroy_state(struct drm_crtc *crtc,
					   struct drm_crtc_state *state)
{
	struct vkms_crtc_state *vkms_state = to_vkms_crtc_state(state);
	__drm_atomic_helper_crtc_destroy_state(state);
	WARN_ON(work_pending(&vkms_state->composer_work));
	kfree(vkms_state->active_planes);
	kfree(vkms_state);
}
static void vkms_atomic_crtc_reset(struct drm_crtc *crtc)
{
struct vkms_crtc_state *vkms_state =
kzalloc(sizeof(*vkms_state), GFP_KERNEL);
if (crtc->state)
vkms_atomic_crtc_destroy_state(crtc, crtc->state);
__drm_atomic_helper_crtc_reset(crtc, &vkms_state->base);
if (vkms_state)
INIT_WORK(&vkms_state->composer_work, vkms_composer_worker);
}
/* CRTC vtable: atomic helpers plus vkms state, vblank and CRC hooks. */
static const struct drm_crtc_funcs vkms_crtc_funcs = {
	.set_config             = drm_atomic_helper_set_config,
	.page_flip              = drm_atomic_helper_page_flip,
	.reset                  = vkms_atomic_crtc_reset,
	.atomic_duplicate_state = vkms_atomic_crtc_duplicate_state,
	.atomic_destroy_state   = vkms_atomic_crtc_destroy_state,
	.enable_vblank          = vkms_enable_vblank,
	.disable_vblank         = vkms_disable_vblank,
	.get_vblank_timestamp   = vkms_get_vblank_timestamp,
	.get_crc_sources        = vkms_get_crc_sources,
	.set_crc_source         = vkms_set_crc_source,
	.verify_crc_source      = vkms_verify_crc_source,
};
/*
 * Build the list of visible planes for this CRTC state.
 *
 * Two passes over the plane mask: the first counts visible planes, the
 * second fills the freshly allocated active_planes array.  If the state
 * already has an active_planes array it was checked before and nothing
 * needs doing.  Returns 0 or a negative errno.
 */
static int vkms_crtc_atomic_check(struct drm_crtc *crtc,
				  struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
									  crtc);
	struct vkms_crtc_state *vkms_state = to_vkms_crtc_state(crtc_state);
	struct drm_plane *plane;
	struct drm_plane_state *plane_state;
	int i = 0, ret;
	if (vkms_state->active_planes)
		return 0;
	ret = drm_atomic_add_affected_planes(crtc_state->state, crtc);
	if (ret < 0)
		return ret;
	/* First pass: count visible planes. */
	drm_for_each_plane_mask(plane, crtc->dev, crtc_state->plane_mask) {
		plane_state = drm_atomic_get_existing_plane_state(crtc_state->state,
								  plane);
		WARN_ON(!plane_state);
		if (!plane_state->visible)
			continue;
		i++;
	}
	vkms_state->active_planes = kcalloc(i, sizeof(plane), GFP_KERNEL);
	if (!vkms_state->active_planes)
		return -ENOMEM;
	vkms_state->num_active_planes = i;
	/* Second pass: record the visible plane states in order. */
	i = 0;
	drm_for_each_plane_mask(plane, crtc->dev, crtc_state->plane_mask) {
		plane_state = drm_atomic_get_existing_plane_state(crtc_state->state,
								  plane);
		if (!plane_state->visible)
			continue;
		vkms_state->active_planes[i++] =
			to_vkms_plane_state(plane_state);
	}
	return 0;
}
/* Enable the CRTC: start delivering (simulated) vblanks. */
static void vkms_crtc_atomic_enable(struct drm_crtc *crtc,
				    struct drm_atomic_state *state)
{
	drm_crtc_vblank_on(crtc);
}
/* Disable the CRTC: stop vblank delivery. */
static void vkms_crtc_atomic_disable(struct drm_crtc *crtc,
				     struct drm_atomic_state *state)
{
	drm_crtc_vblank_off(crtc);
}
/*
 * Begin a commit on this CRTC.  NOTE: the lock taken here is deliberately
 * held until vkms_crtc_atomic_flush() releases it.
 */
static void vkms_crtc_atomic_begin(struct drm_crtc *crtc,
				   struct drm_atomic_state *state)
{
	struct vkms_output *vkms_output = drm_crtc_to_vkms_output(crtc);
	/* This lock is held across the atomic commit to block vblank timer
	 * from scheduling vkms_composer_worker until the composer is updated
	 */
	spin_lock_irq(&vkms_output->lock);
}
/*
 * Finish a commit: send or arm the pageflip event, publish the new state
 * to the composer and drop the lock taken in vkms_crtc_atomic_begin().
 */
static void vkms_crtc_atomic_flush(struct drm_crtc *crtc,
				   struct drm_atomic_state *state)
{
	struct vkms_output *vkms_output = drm_crtc_to_vkms_output(crtc);
	if (crtc->state->event) {
		spin_lock(&crtc->dev->event_lock);
		/* If no vblank reference is available, send the event now. */
		if (drm_crtc_vblank_get(crtc) != 0)
			drm_crtc_send_vblank_event(crtc, crtc->state->event);
		else
			drm_crtc_arm_vblank_event(crtc, crtc->state->event);
		spin_unlock(&crtc->dev->event_lock);
		crtc->state->event = NULL;
	}
	vkms_output->composer_state = to_vkms_crtc_state(crtc->state);
	spin_unlock_irq(&vkms_output->lock);
}
/* Atomic helper hooks for the CRTC. */
static const struct drm_crtc_helper_funcs vkms_crtc_helper_funcs = {
	.atomic_check	= vkms_crtc_atomic_check,
	.atomic_begin	= vkms_crtc_atomic_begin,
	.atomic_flush	= vkms_crtc_atomic_flush,
	.atomic_enable	= vkms_crtc_atomic_enable,
	.atomic_disable	= vkms_crtc_atomic_disable,
};
/*
 * Initialize the CRTC with its primary/cursor planes, gamma LUT support
 * and the ordered workqueue used by the composer.  The CRTC itself is
 * managed (drmm_*), so only the workqueue needs explicit teardown on
 * driver release.  Returns 0 on success or a negative errno.
 */
int vkms_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
		   struct drm_plane *primary, struct drm_plane *cursor)
{
	struct vkms_output *vkms_out = drm_crtc_to_vkms_output(crtc);
	int ret;
	ret = drmm_crtc_init_with_planes(dev, crtc, primary, cursor,
					 &vkms_crtc_funcs, NULL);
	if (ret) {
		DRM_ERROR("Failed to init CRTC\n");
		return ret;
	}
	drm_crtc_helper_add(crtc, &vkms_crtc_helper_funcs);
	drm_mode_crtc_set_gamma_size(crtc, VKMS_LUT_SIZE);
	drm_crtc_enable_color_mgmt(crtc, 0, false, VKMS_LUT_SIZE);
	spin_lock_init(&vkms_out->lock);
	spin_lock_init(&vkms_out->composer_lock);
	/* Ordered workqueue: at most one composer job runs at a time. */
	vkms_out->composer_workq = alloc_ordered_workqueue("vkms_composer", 0);
	if (!vkms_out->composer_workq)
		return -ENOMEM;
	return ret;
}
| linux-master | drivers/gpu/drm/vkms/vkms_crtc.c |
// SPDX-License-Identifier: GPL-2.0+
#include <linux/iosys-map.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include "vkms_drv.h"
#include "vkms_formats.h"
/* Pixel formats supported by all vkms planes. */
static const u32 vkms_formats[] = {
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XRGB16161616,
	DRM_FORMAT_ARGB16161616,
	DRM_FORMAT_RGB565
};
/* Duplicate the plane state together with a fresh per-state frame_info. */
static struct drm_plane_state *
vkms_plane_duplicate_state(struct drm_plane *plane)
{
	struct vkms_plane_state *state = kzalloc(sizeof(*state), GFP_KERNEL);
	struct vkms_frame_info *info;

	if (!state)
		return NULL;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		DRM_DEBUG_KMS("Couldn't allocate frame_info\n");
		kfree(state);
		return NULL;
	}

	state->frame_info = info;
	__drm_gem_duplicate_shadow_plane_state(plane, &state->base);

	return &state->base.base;
}
/*
 * Destroy a plane state: drop the framebuffer reference taken when the
 * plane was updated, then free the frame_info and the state itself.
 */
static void vkms_plane_destroy_state(struct drm_plane *plane,
				     struct drm_plane_state *old_state)
{
	struct vkms_plane_state *vkms_state = to_vkms_plane_state(old_state);
	struct drm_crtc *crtc = vkms_state->base.base.crtc;
	if (crtc && vkms_state->frame_info->fb) {
		/* dropping the reference we acquired in
		 * vkms_primary_plane_update()
		 */
		if (drm_framebuffer_read_refcount(vkms_state->frame_info->fb))
			drm_framebuffer_put(vkms_state->frame_info->fb);
	}
	kfree(vkms_state->frame_info);
	vkms_state->frame_info = NULL;
	__drm_gem_destroy_shadow_plane_state(&vkms_state->base);
	kfree(vkms_state);
}
/*
 * Reset the plane to a fresh zeroed state.  On allocation failure the
 * plane is left with a NULL state after the old one is destroyed.
 */
static void vkms_plane_reset(struct drm_plane *plane)
{
	struct vkms_plane_state *vkms_state;
	if (plane->state) {
		vkms_plane_destroy_state(plane, plane->state);
		plane->state = NULL; /* must be set to NULL here */
	}
	vkms_state = kzalloc(sizeof(*vkms_state), GFP_KERNEL);
	if (!vkms_state) {
		DRM_ERROR("Cannot allocate vkms_plane_state\n");
		return;
	}
	__drm_gem_reset_shadow_plane(plane, &vkms_state->base);
}
/* Plane vtable: atomic helpers plus vkms state management. */
static const struct drm_plane_funcs vkms_plane_funcs = {
	.update_plane		= drm_atomic_helper_update_plane,
	.disable_plane		= drm_atomic_helper_disable_plane,
	.reset			= vkms_plane_reset,
	.atomic_duplicate_state = vkms_plane_duplicate_state,
	.atomic_destroy_state	= vkms_plane_destroy_state,
};
/*
 * Capture everything the composer needs into frame_info: source/dest
 * rectangles, rotated destination, framebuffer (with a reference held
 * until the state is destroyed), vmap'ed data, layout parameters and the
 * per-format pixel-read callback.
 */
static void vkms_plane_atomic_update(struct drm_plane *plane,
				     struct drm_atomic_state *state)
{
	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
									   plane);
	struct vkms_plane_state *vkms_plane_state;
	struct drm_shadow_plane_state *shadow_plane_state;
	struct drm_framebuffer *fb = new_state->fb;
	struct vkms_frame_info *frame_info;
	u32 fmt;
	if (!new_state->crtc || !fb)
		return;
	fmt = fb->format->format;
	vkms_plane_state = to_vkms_plane_state(new_state);
	shadow_plane_state = &vkms_plane_state->base;
	frame_info = vkms_plane_state->frame_info;
	memcpy(&frame_info->src, &new_state->src, sizeof(struct drm_rect));
	memcpy(&frame_info->dst, &new_state->dst, sizeof(struct drm_rect));
	memcpy(&frame_info->rotated, &new_state->dst, sizeof(struct drm_rect));
	frame_info->fb = fb;
	memcpy(&frame_info->map, &shadow_plane_state->data, sizeof(frame_info->map));
	/* Reference dropped in vkms_plane_destroy_state(). */
	drm_framebuffer_get(frame_info->fb);
	/* Reduce the requested rotation to the combinations we implement. */
	frame_info->rotation = drm_rotation_simplify(new_state->rotation, DRM_MODE_ROTATE_0 |
								  DRM_MODE_ROTATE_90 |
								  DRM_MODE_ROTATE_270 |
								  DRM_MODE_REFLECT_X |
								  DRM_MODE_REFLECT_Y);
	drm_rect_rotate(&frame_info->rotated, drm_rect_width(&frame_info->rotated),
			drm_rect_height(&frame_info->rotated), frame_info->rotation);
	frame_info->offset = fb->offsets[0];
	frame_info->pitch = fb->pitches[0];
	frame_info->cpp = fb->format->cpp[0];
	vkms_plane_state->pixel_read = get_pixel_conversion_function(fmt);
}
/*
 * Validate the plane placement against its CRTC.  Scaling is not
 * supported; full clipping against the CRTC is enforced.
 */
static int vkms_plane_atomic_check(struct drm_plane *plane,
				   struct drm_atomic_state *state)
{
	struct drm_plane_state *new_plane_state =
		drm_atomic_get_new_plane_state(state, plane);
	struct drm_crtc_state *crtc_state;

	if (!new_plane_state->fb || WARN_ON(!new_plane_state->crtc))
		return 0;

	crtc_state = drm_atomic_get_crtc_state(state, new_plane_state->crtc);
	if (IS_ERR(crtc_state))
		return PTR_ERR(crtc_state);

	return drm_atomic_helper_check_plane_state(new_plane_state, crtc_state,
						   DRM_PLANE_NO_SCALING,
						   DRM_PLANE_NO_SCALING,
						   true, true);
}
/*
 * Prepare the framebuffer for CPU access: run the generic GEM prepare
 * (fence setup) and vmap the buffer into the shadow-plane state.
 * Returns 0 or a negative errno.
 */
static int vkms_prepare_fb(struct drm_plane *plane,
			   struct drm_plane_state *state)
{
	struct drm_shadow_plane_state *shadow_plane_state;
	struct drm_framebuffer *fb = state->fb;
	int ret;
	if (!fb)
		return 0;
	shadow_plane_state = to_drm_shadow_plane_state(state);
	ret = drm_gem_plane_helper_prepare_fb(plane, state);
	if (ret)
		return ret;
	return drm_gem_fb_vmap(fb, shadow_plane_state->map, shadow_plane_state->data);
}
/* Undo vkms_prepare_fb(): drop the vmap of the framebuffer. */
static void vkms_cleanup_fb(struct drm_plane *plane,
			    struct drm_plane_state *state)
{
	struct drm_shadow_plane_state *shadow_plane_state;
	struct drm_framebuffer *fb = state->fb;
	if (!fb)
		return;
	shadow_plane_state = to_drm_shadow_plane_state(state);
	drm_gem_fb_vunmap(fb, shadow_plane_state->map);
}
/* Atomic helper hooks for the planes. */
static const struct drm_plane_helper_funcs vkms_plane_helper_funcs = {
	.atomic_update		= vkms_plane_atomic_update,
	.atomic_check		= vkms_plane_atomic_check,
	.prepare_fb		= vkms_prepare_fb,
	.cleanup_fb		= vkms_cleanup_fb,
};
/*
 * Allocate and register one vkms plane of @type, usable only on the CRTC
 * at @index, with a rotation property attached.  Returns the plane or an
 * ERR_PTR on allocation failure.
 */
struct vkms_plane *vkms_plane_init(struct vkms_device *vkmsdev,
				   enum drm_plane_type type, int index)
{
	struct vkms_plane *plane;

	plane = drmm_universal_plane_alloc(&vkmsdev->drm, struct vkms_plane,
					   base, 1 << index,
					   &vkms_plane_funcs,
					   vkms_formats, ARRAY_SIZE(vkms_formats),
					   NULL, type, NULL);
	if (IS_ERR(plane))
		return plane;

	drm_plane_helper_add(&plane->base, &vkms_plane_helper_funcs);
	drm_plane_create_rotation_property(&plane->base, DRM_MODE_ROTATE_0,
					   DRM_MODE_ROTATE_MASK |
					   DRM_MODE_REFLECT_MASK);

	return plane;
}
| linux-master | drivers/gpu/drm/vkms/vkms_plane.c |
/*
* Copyright (C) 2009 Francisco Jerez.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include <linux/module.h>
#include <drm/drm_crtc_helper.h>
#include "ch7006_priv.h"
/* DRM encoder functions */
static void ch7006_encoder_set_config(struct drm_encoder *encoder,
void *params)
{
struct ch7006_priv *priv = to_ch7006_priv(encoder);
priv->params = *(struct ch7006_encoder_params *)params;
}
/*
 * Tear down the slave encoder: destroy the driver-private scale property,
 * free the private state and hand the rest to the generic i2c encoder
 * destroy.
 */
static void ch7006_encoder_destroy(struct drm_encoder *encoder)
{
	struct ch7006_priv *priv = to_ch7006_priv(encoder);
	drm_property_destroy(encoder->dev, priv->scale_property);
	kfree(priv);
	to_encoder_slave(encoder)->slave_priv = NULL;
	drm_i2c_encoder_destroy(encoder);
}
/* Apply a DPMS power mode, skipping the register write if unchanged. */
static void ch7006_encoder_dpms(struct drm_encoder *encoder, int mode)
{
	struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
	struct ch7006_priv *priv = to_ch7006_priv(encoder);

	ch7006_dbg(client, "\n");

	if (priv->last_dpms == mode)
		return;
	priv->last_dpms = mode;

	ch7006_setup_power_state(encoder);
	ch7006_load_reg(client, &priv->state, CH7006_POWER);
}
/* Snapshot the chip registers into saved_state (used on restore/mode set). */
static void ch7006_encoder_save(struct drm_encoder *encoder)
{
	struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
	struct ch7006_priv *priv = to_ch7006_priv(encoder);
	ch7006_dbg(client, "\n");
	ch7006_state_save(client, &priv->saved_state);
}
/* Write the previously saved register snapshot back to the chip. */
static void ch7006_encoder_restore(struct drm_encoder *encoder)
{
	struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
	struct ch7006_priv *priv = to_ch7006_priv(encoder);
	ch7006_dbg(client, "\n");
	ch7006_state_load(client, &priv->saved_state);
}
static bool ch7006_encoder_mode_fixup(struct drm_encoder *encoder,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct ch7006_priv *priv = to_ch7006_priv(encoder);
/* The ch7006 is painfully picky with the input timings so no
* custom modes for now... */
priv->mode = ch7006_lookup_mode(encoder, mode);
return !!priv->mode;
}
/* Report whether @mode matches one of the supported CH7006 timings. */
static int ch7006_encoder_mode_valid(struct drm_encoder *encoder,
				     struct drm_display_mode *mode)
{
	return ch7006_lookup_mode(encoder, mode) ? MODE_OK : MODE_BAD;
}
/*
 * Program the chip for the mode chosen by mode_fixup(): build the full
 * register image in priv->state from the TV norm, the matched mode table
 * entry and the board parameters, then write it out over I2C.
 */
static void ch7006_encoder_mode_set(struct drm_encoder *encoder,
				    struct drm_display_mode *drm_mode,
				    struct drm_display_mode *adjusted_mode)
{
	struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
	struct ch7006_priv *priv = to_ch7006_priv(encoder);
	struct ch7006_encoder_params *params = &priv->params;
	struct ch7006_state *state = &priv->state;
	uint8_t *regs = state->regs;
	const struct ch7006_mode *mode = priv->mode;
	const struct ch7006_tv_norm_info *norm = &ch7006_tv_norms[priv->norm];
	int start_active;
	ch7006_dbg(client, "\n");
	regs[CH7006_DISPMODE] = norm->dispmode | mode->dispmode;
	regs[CH7006_BWIDTH] = 0;
	regs[CH7006_INPUT_FORMAT] = bitf(CH7006_INPUT_FORMAT_FORMAT,
					 params->input_format);
	regs[CH7006_CLKMODE] = CH7006_CLKMODE_SUBC_LOCK
		| bitf(CH7006_CLKMODE_XCM, params->xcm)
		| bitf(CH7006_CLKMODE_PCM, params->pcm);
	if (params->clock_mode)
		regs[CH7006_CLKMODE] |= CH7006_CLKMODE_MASTER;
	if (params->clock_edge)
		regs[CH7006_CLKMODE] |= CH7006_CLKMODE_POS_EDGE;
	/* Horizontal start of active video, in 8-pixel units (split across
	 * the POV and START_ACTIVE registers). */
	start_active = (drm_mode->htotal & ~0x7) - (drm_mode->hsync_start & ~0x7);
	regs[CH7006_POV] = bitf(CH7006_POV_START_ACTIVE_8, start_active);
	regs[CH7006_START_ACTIVE] = bitf(CH7006_START_ACTIVE_0, start_active);
	regs[CH7006_INPUT_SYNC] = 0;
	if (params->sync_direction)
		regs[CH7006_INPUT_SYNC] |= CH7006_INPUT_SYNC_OUTPUT;
	if (params->sync_encoding)
		regs[CH7006_INPUT_SYNC] |= CH7006_INPUT_SYNC_EMBEDDED;
	if (drm_mode->flags & DRM_MODE_FLAG_PVSYNC)
		regs[CH7006_INPUT_SYNC] |= CH7006_INPUT_SYNC_PVSYNC;
	if (drm_mode->flags & DRM_MODE_FLAG_PHSYNC)
		regs[CH7006_INPUT_SYNC] |= CH7006_INPUT_SYNC_PHSYNC;
	regs[CH7006_DETECT] = 0;
	regs[CH7006_BCLKOUT] = 0;
	regs[CH7006_SUBC_INC3] = 0;
	if (params->pout_level)
		regs[CH7006_SUBC_INC3] |= CH7006_SUBC_INC3_POUT_3_3V;
	regs[CH7006_SUBC_INC4] = 0;
	if (params->active_detect)
		regs[CH7006_SUBC_INC4] |= CH7006_SUBC_INC4_DS_INPUT;
	/* Keep the PLL control byte the firmware/BIOS left behind. */
	regs[CH7006_PLL_CONTROL] = priv->saved_state.regs[CH7006_PLL_CONTROL];
	ch7006_setup_levels(encoder);
	ch7006_setup_subcarrier(encoder);
	ch7006_setup_pll(encoder);
	ch7006_setup_power_state(encoder);
	ch7006_setup_properties(encoder);
	ch7006_state_load(client, state);
}
/*
 * Probe which outputs are attached by briefly powering the chip up with
 * sense enabled and reading back the DETECT register; the previous
 * register contents are restored afterwards.  A test bit reading 0 means
 * a load was sensed on that output.
 */
static enum drm_connector_status ch7006_encoder_detect(struct drm_encoder *encoder,
						       struct drm_connector *connector)
{
	struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
	struct ch7006_priv *priv = to_ch7006_priv(encoder);
	struct ch7006_state *state = &priv->state;
	int det;
	ch7006_dbg(client, "\n");
	ch7006_save_reg(client, state, CH7006_DETECT);
	ch7006_save_reg(client, state, CH7006_POWER);
	ch7006_save_reg(client, state, CH7006_CLKMODE);
	ch7006_write(client, CH7006_POWER, CH7006_POWER_RESET |
		     bitfs(CH7006_POWER_LEVEL, NORMAL));
	ch7006_write(client, CH7006_CLKMODE, CH7006_CLKMODE_MASTER);
	ch7006_write(client, CH7006_DETECT, CH7006_DETECT_SENSE);
	ch7006_write(client, CH7006_DETECT, 0);
	det = ch7006_read(client, CH7006_DETECT);
	ch7006_load_reg(client, state, CH7006_CLKMODE);
	ch7006_load_reg(client, state, CH7006_POWER);
	ch7006_load_reg(client, state, CH7006_DETECT);
	if ((det & (CH7006_DETECT_SVIDEO_Y_TEST|
		    CH7006_DETECT_SVIDEO_C_TEST|
		    CH7006_DETECT_CVBS_TEST)) == 0)
		priv->subconnector = DRM_MODE_SUBCONNECTOR_SCART;
	else if ((det & (CH7006_DETECT_SVIDEO_Y_TEST|
			 CH7006_DETECT_SVIDEO_C_TEST)) == 0)
		priv->subconnector = DRM_MODE_SUBCONNECTOR_SVIDEO;
	else if ((det & CH7006_DETECT_CVBS_TEST) == 0)
		priv->subconnector = DRM_MODE_SUBCONNECTOR_Composite;
	else
		priv->subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
	drm_object_property_set_value(&connector->base,
			encoder->dev->mode_config.tv_subconnector_property,
			priv->subconnector);
	return priv->subconnector ? connector_status_connected :
					connector_status_disconnected;
}
/*
 * Add every table mode that is valid for the currently selected scale and
 * TV norm to the connector's probed-mode list.  Returns the mode count.
 */
static int ch7006_encoder_get_modes(struct drm_encoder *encoder,
				    struct drm_connector *connector)
{
	struct ch7006_priv *priv = to_ch7006_priv(encoder);
	const struct ch7006_mode *mode;
	int count = 0;

	for (mode = ch7006_modes; mode->mode.clock; mode++) {
		if ((mode->valid_scales & 1 << priv->scale) &&
		    (mode->valid_norms & 1 << priv->norm)) {
			drm_mode_probed_add(connector,
					    drm_mode_duplicate(encoder->dev,
							       &mode->mode));
			count++;
		}
	}

	return count;
}
/*
 * Create the legacy TV properties plus the driver-private "scale" range
 * property and attach them all, with their current values, to the
 * connector.  Returns 0 or -ENOMEM if the scale property cannot be
 * created.
 */
static int ch7006_encoder_create_resources(struct drm_encoder *encoder,
					   struct drm_connector *connector)
{
	struct ch7006_priv *priv = to_ch7006_priv(encoder);
	struct drm_device *dev = encoder->dev;
	struct drm_mode_config *conf = &dev->mode_config;
	drm_mode_create_tv_properties_legacy(dev, NUM_TV_NORMS, ch7006_tv_norm_names);
	priv->scale_property = drm_property_create_range(dev, 0, "scale", 0, 2);
	if (!priv->scale_property)
		return -ENOMEM;
	drm_object_attach_property(&connector->base, conf->tv_select_subconnector_property,
				   priv->select_subconnector);
	drm_object_attach_property(&connector->base, conf->tv_subconnector_property,
				   priv->subconnector);
	drm_object_attach_property(&connector->base, conf->tv_left_margin_property,
				   priv->hmargin);
	drm_object_attach_property(&connector->base, conf->tv_bottom_margin_property,
				   priv->vmargin);
	drm_object_attach_property(&connector->base, conf->legacy_tv_mode_property,
				   priv->norm);
	drm_object_attach_property(&connector->base, conf->tv_brightness_property,
				   priv->brightness);
	drm_object_attach_property(&connector->base, conf->tv_contrast_property,
				   priv->contrast);
	drm_object_attach_property(&connector->base, conf->tv_flicker_reduction_property,
				   priv->flicker);
	drm_object_attach_property(&connector->base, priv->scale_property,
				   priv->scale);
	return 0;
}
/*
 * Handle a property write: store the new value, reprogram the affected
 * register(s), and for norm/scale changes (only legal while the connector
 * is off) re-probe the mode list and re-set the current mode.
 * Returns 0, or -EINVAL for unknown properties or disallowed changes.
 */
static int ch7006_encoder_set_property(struct drm_encoder *encoder,
				       struct drm_connector *connector,
				       struct drm_property *property,
				       uint64_t val)
{
	struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
	struct ch7006_priv *priv = to_ch7006_priv(encoder);
	struct ch7006_state *state = &priv->state;
	struct drm_mode_config *conf = &encoder->dev->mode_config;
	struct drm_crtc *crtc = encoder->crtc;
	bool modes_changed = false;
	ch7006_dbg(client, "\n");
	if (property == conf->tv_select_subconnector_property) {
		priv->select_subconnector = val;
		ch7006_setup_power_state(encoder);
		ch7006_load_reg(client, state, CH7006_POWER);
	} else if (property == conf->tv_left_margin_property) {
		priv->hmargin = val;
		ch7006_setup_properties(encoder);
		ch7006_load_reg(client, state, CH7006_POV);
		ch7006_load_reg(client, state, CH7006_HPOS);
	} else if (property == conf->tv_bottom_margin_property) {
		priv->vmargin = val;
		ch7006_setup_properties(encoder);
		ch7006_load_reg(client, state, CH7006_POV);
		ch7006_load_reg(client, state, CH7006_VPOS);
	} else if (property == conf->legacy_tv_mode_property) {
		if (connector->dpms != DRM_MODE_DPMS_OFF)
			return -EINVAL;
		priv->norm = val;
		modes_changed = true;
	} else if (property == conf->tv_brightness_property) {
		priv->brightness = val;
		ch7006_setup_levels(encoder);
		ch7006_load_reg(client, state, CH7006_BLACK_LEVEL);
	} else if (property == conf->tv_contrast_property) {
		priv->contrast = val;
		ch7006_setup_properties(encoder);
		ch7006_load_reg(client, state, CH7006_CONTRAST);
	} else if (property == conf->tv_flicker_reduction_property) {
		priv->flicker = val;
		ch7006_setup_properties(encoder);
		ch7006_load_reg(client, state, CH7006_FFILTER);
	} else if (property == priv->scale_property) {
		if (connector->dpms != DRM_MODE_DPMS_OFF)
			return -EINVAL;
		priv->scale = val;
		modes_changed = true;
	} else {
		return -EINVAL;
	}
	if (modes_changed) {
		drm_helper_probe_single_connector_modes(connector, 0, 0);
		if (crtc)
			drm_crtc_helper_set_mode(crtc, &crtc->mode,
						 crtc->x, crtc->y,
						 crtc->primary->fb);
	}
	return 0;
}
/* Slave encoder vtable wired into the DRM i2c encoder framework. */
static const struct drm_encoder_slave_funcs ch7006_encoder_funcs = {
	.set_config = ch7006_encoder_set_config,
	.destroy = ch7006_encoder_destroy,
	.dpms = ch7006_encoder_dpms,
	.save = ch7006_encoder_save,
	.restore = ch7006_encoder_restore,
	.mode_fixup = ch7006_encoder_mode_fixup,
	.mode_valid = ch7006_encoder_mode_valid,
	.mode_set = ch7006_encoder_mode_set,
	.detect = ch7006_encoder_detect,
	.get_modes = ch7006_encoder_get_modes,
	.create_resources = ch7006_encoder_create_resources,
	.set_property = ch7006_encoder_set_property,
};
/* I2C driver functions */
/*
 * I2C probe: read the version-ID register to confirm a CH7006 is present.
 * Returns 0 on success, -ENODEV if the chip does not respond.
 */
static int ch7006_probe(struct i2c_client *client)
{
	uint8_t addr = CH7006_VERSION_ID;
	uint8_t val;
	int ret;
	ch7006_dbg(client, "\n");
	ret = i2c_master_send(client, &addr, sizeof(addr));
	if (ret < 0)
		goto fail;
	ret = i2c_master_recv(client, &val, sizeof(val));
	if (ret < 0)
		goto fail;
	ch7006_info(client, "Detected version ID: %x\n", val);
	/* I don't know what this is for, but otherwise I get no
	 * signal.
	 */
	ch7006_write(client, 0x3d, 0x0);
	return 0;
fail:
	ch7006_err(client, "Error %d reading version ID\n", ret);
	return -ENODEV;
}
/* Nothing to tear down on removal; the encoder owns the real state. */
static void ch7006_remove(struct i2c_client *client)
{
	ch7006_dbg(client, "\n");
}
/* Re-apply the magic register write from probe after a system resume. */
static int ch7006_resume(struct device *dev)
{
	struct i2c_client *client = to_i2c_client(dev);
	ch7006_dbg(client, "\n");
	ch7006_write(client, 0x3d, 0x0);
	return 0;
}
/*
 * Allocate the private state, install the slave vtable and initialize
 * defaults, optionally overridden by the tv_norm/scale module parameters.
 * Returns 0 or -ENOMEM.
 */
static int ch7006_encoder_init(struct i2c_client *client,
			       struct drm_device *dev,
			       struct drm_encoder_slave *encoder)
{
	struct ch7006_priv *priv;
	int i;
	ch7006_dbg(client, "\n");
	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;
	encoder->slave_priv = priv;
	encoder->slave_funcs = &ch7006_encoder_funcs;
	/* Defaults; tv_norm/scale module parameters may override below. */
	priv->norm = TV_NORM_PAL;
	priv->select_subconnector = DRM_MODE_SUBCONNECTOR_Automatic;
	priv->subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
	priv->scale = 1;
	priv->contrast = 50;
	priv->brightness = 50;
	priv->flicker = 50;
	priv->hmargin = 50;
	priv->vmargin = 50;
	priv->last_dpms = -1;
	priv->chip_version = ch7006_read(client, CH7006_VERSION_ID);
	if (ch7006_tv_norm) {
		for (i = 0; i < NUM_TV_NORMS; i++) {
			if (!strcmp(ch7006_tv_norm_names[i], ch7006_tv_norm)) {
				priv->norm = i;
				break;
			}
		}
		if (i == NUM_TV_NORMS)
			ch7006_err(client, "Invalid TV norm setting \"%s\".\n",
				   ch7006_tv_norm);
	}
	if (ch7006_scale >= 0 && ch7006_scale <= 2)
		priv->scale = ch7006_scale;
	else
		ch7006_err(client, "Invalid scale setting \"%d\".\n",
			   ch7006_scale);
	return 0;
}
/* I2C device-ID table used for driver matching. */
static const struct i2c_device_id ch7006_ids[] = {
	{ "ch7006", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, ch7006_ids);
/* Only a resume hook is needed (re-applies the probe-time register write). */
static const struct dev_pm_ops ch7006_pm_ops = {
	.resume = ch7006_resume,
};
/* Combined i2c + slave-encoder driver description. */
static struct drm_i2c_encoder_driver ch7006_driver = {
	.i2c_driver = {
		.probe = ch7006_probe,
		.remove = ch7006_remove,
		.driver = {
			.name = "ch7006",
			.pm = &ch7006_pm_ops,
		},
		.id_table = ch7006_ids,
	},
	.encoder_init = ch7006_encoder_init,
};
/* Module initialization */
static int __init ch7006_init(void)
{
	return drm_i2c_encoder_register(THIS_MODULE, &ch7006_driver);
}
static void __exit ch7006_exit(void)
{
	drm_i2c_encoder_unregister(&ch7006_driver);
}
/* Module parameters: debug output, default TV norm and default scale. */
int ch7006_debug;
module_param_named(debug, ch7006_debug, int, 0600);
MODULE_PARM_DESC(debug, "Enable debug output.");
char *ch7006_tv_norm;
module_param_named(tv_norm, ch7006_tv_norm, charp, 0600);
MODULE_PARM_DESC(tv_norm, "Default TV norm.\n"
		 "\t\tSupported: PAL, PAL-M, PAL-N, PAL-Nc, PAL-60, NTSC-M, NTSC-J.\n"
		 "\t\tDefault: PAL");
int ch7006_scale = 1;
module_param_named(scale, ch7006_scale, int, 0600);
MODULE_PARM_DESC(scale, "Default scale.\n"
		 "\t\tSupported: 0 -> Select video modes with a higher blanking ratio.\n"
		 "\t\t\t1 -> Select default video modes.\n"
		 "\t\t\t2 -> Select video modes with a lower blanking ratio.");
MODULE_AUTHOR("Francisco Jerez <[email protected]>");
MODULE_DESCRIPTION("Chrontel ch7006 TV encoder driver");
MODULE_LICENSE("GPL and additional rights");
module_init(ch7006_init);
module_exit(ch7006_exit);
| linux-master | drivers/gpu/drm/i2c/ch7006_drv.c |
/*
* Copyright (C) 2010 Francisco Jerez.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include <linux/module.h>
#include <drm/drm_drv.h>
#include <drm/drm_encoder_slave.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/i2c/sil164.h>
/*
 * Driver-private state, attached to the DRM slave encoder.
 * The saved_* arrays shadow registers 0x8-0xe (see sil164_save_state);
 * 0x10 bytes are reserved so the register address can index directly.
 */
struct sil164_priv {
	struct sil164_encoder_params config;	/* from .set_config() */
	struct i2c_client *duallink_slave;	/* second-link chip, or NULL */
	uint8_t saved_state[0x10];		/* master register shadow */
	uint8_t saved_slave_state[0x10];	/* slave register shadow */
};
#define to_sil164_priv(x) \
((struct sil164_priv *)to_encoder_slave(x)->slave_priv)
#define sil164_dbg(client, format, ...) do { \
if (drm_debug_enabled(DRM_UT_KMS)) \
dev_printk(KERN_DEBUG, &client->dev, \
"%s: " format, __func__, ## __VA_ARGS__); \
} while (0)
#define sil164_info(client, format, ...) \
dev_info(&client->dev, format, __VA_ARGS__)
#define sil164_err(client, format, ...) \
dev_err(&client->dev, format, __VA_ARGS__)
#define SIL164_I2C_ADDR_MASTER 0x38
#define SIL164_I2C_ADDR_SLAVE 0x39
/* HW register definitions */
#define SIL164_VENDOR_LO 0x0
#define SIL164_VENDOR_HI 0x1
#define SIL164_DEVICE_LO 0x2
#define SIL164_DEVICE_HI 0x3
#define SIL164_REVISION 0x4
#define SIL164_FREQ_MIN 0x6
#define SIL164_FREQ_MAX 0x7
#define SIL164_CONTROL0 0x8
# define SIL164_CONTROL0_POWER_ON 0x01
# define SIL164_CONTROL0_EDGE_RISING 0x02
# define SIL164_CONTROL0_INPUT_24BIT 0x04
# define SIL164_CONTROL0_DUAL_EDGE 0x08
# define SIL164_CONTROL0_HSYNC_ON 0x10
# define SIL164_CONTROL0_VSYNC_ON 0x20
#define SIL164_DETECT 0x9
# define SIL164_DETECT_INTR_STAT 0x01
# define SIL164_DETECT_HOTPLUG_STAT 0x02
# define SIL164_DETECT_RECEIVER_STAT 0x04
# define SIL164_DETECT_INTR_MODE_RECEIVER 0x00
# define SIL164_DETECT_INTR_MODE_HOTPLUG 0x08
# define SIL164_DETECT_OUT_MODE_HIGH 0x00
# define SIL164_DETECT_OUT_MODE_INTR 0x10
# define SIL164_DETECT_OUT_MODE_RECEIVER 0x20
# define SIL164_DETECT_OUT_MODE_HOTPLUG 0x30
# define SIL164_DETECT_VSWING_STAT 0x80
#define SIL164_CONTROL1 0xa
# define SIL164_CONTROL1_DESKEW_ENABLE 0x10
# define SIL164_CONTROL1_DESKEW_INCR_SHIFT 5
#define SIL164_GPIO 0xb
#define SIL164_CONTROL2 0xc
# define SIL164_CONTROL2_FILTER_ENABLE 0x01
# define SIL164_CONTROL2_FILTER_SETTING_SHIFT 1
# define SIL164_CONTROL2_DUALLINK_MASTER 0x40
# define SIL164_CONTROL2_SYNC_CONT 0x80
#define SIL164_DUALLINK 0xd
# define SIL164_DUALLINK_ENABLE 0x10
# define SIL164_DUALLINK_SKEW_SHIFT 5
#define SIL164_PLLZONE 0xe
# define SIL164_PLLZONE_STAT 0x08
# define SIL164_PLLZONE_FORCE_ON 0x10
# define SIL164_PLLZONE_FORCE_HIGH 0x20
/* HW access functions */
static void
sil164_write(struct i2c_client *client, uint8_t addr, uint8_t val)
{
uint8_t buf[] = {addr, val};
int ret;
ret = i2c_master_send(client, buf, ARRAY_SIZE(buf));
if (ret < 0)
sil164_err(client, "Error %d writing to subaddress 0x%x\n",
ret, addr);
}
/*
 * Read one register over I2C (subaddress write, then one-byte read).
 * Returns the register value, or 0 after logging a bus error.
 */
static uint8_t
sil164_read(struct i2c_client *client, uint8_t addr)
{
	uint8_t val = 0;
	int err;

	err = i2c_master_send(client, &addr, sizeof(addr));
	if (err >= 0)
		err = i2c_master_recv(client, &val, sizeof(val));

	if (err < 0) {
		sil164_err(client, "Error %d reading from subaddress 0x%x\n",
			   err, addr);
		return 0;
	}

	return val;
}
/* Snapshot the control registers (0x8-0xe) into @state, indexed by address. */
static void
sil164_save_state(struct i2c_client *client, uint8_t *state)
{
	unsigned int reg;

	for (reg = SIL164_CONTROL0; reg <= SIL164_PLLZONE; reg++)
		state[reg] = sil164_read(client, reg);
}
/* Write a snapshot taken by sil164_save_state() back to the chip. */
static void
sil164_restore_state(struct i2c_client *client, uint8_t *state)
{
	unsigned int reg;

	for (reg = SIL164_CONTROL0; reg <= SIL164_PLLZONE; reg++)
		sil164_write(client, reg, state[reg]);
}
/* Toggle the POWER_ON bit in CONTROL0 via read-modify-write. */
static void
sil164_set_power_state(struct i2c_client *client, bool on)
{
	uint8_t control0 = sil164_read(client, SIL164_CONTROL0);

	control0 &= ~SIL164_CONTROL0_POWER_ON;
	if (on)
		control0 |= SIL164_CONTROL0_POWER_ON;

	sil164_write(client, SIL164_CONTROL0, control0);
}
/*
 * Program the transmitter according to @config, optionally enabling the
 * dual-link output. The register write order (CONTROL0, DETECT, CONTROL1,
 * CONTROL2, PLLZONE, DUALLINK) is preserved as-is since it drives real
 * hardware.
 */
static void
sil164_init_state(struct i2c_client *client,
		  struct sil164_encoder_params *config,
		  bool duallink)
{
	/* Syncs always on; clock edge, bus width and dual-edge from config. */
	sil164_write(client, SIL164_CONTROL0,
		     SIL164_CONTROL0_HSYNC_ON |
		     SIL164_CONTROL0_VSYNC_ON |
		     (config->input_edge ? SIL164_CONTROL0_EDGE_RISING : 0) |
		     (config->input_width ? SIL164_CONTROL0_INPUT_24BIT : 0) |
		     (config->input_dual ? SIL164_CONTROL0_DUAL_EDGE : 0));

	/* Clear any pending interrupt; MDI pin reports receiver status. */
	sil164_write(client, SIL164_DETECT,
		     SIL164_DETECT_INTR_STAT |
		     SIL164_DETECT_OUT_MODE_RECEIVER);

	/* Deskew: +4 biases the signed skew into the 3-bit register field. */
	sil164_write(client, SIL164_CONTROL1,
		     (config->input_skew ? SIL164_CONTROL1_DESKEW_ENABLE : 0) |
		     (((config->input_skew + 4) & 0x7)
		      << SIL164_CONTROL1_DESKEW_INCR_SHIFT));

	sil164_write(client, SIL164_CONTROL2,
		     SIL164_CONTROL2_SYNC_CONT |
		     (config->pll_filter ? 0 : SIL164_CONTROL2_FILTER_ENABLE) |
		     (4 << SIL164_CONTROL2_FILTER_SETTING_SHIFT));

	sil164_write(client, SIL164_PLLZONE, 0);

	/* Same +4 bias for the inter-link skew when dual-link is in use. */
	if (duallink)
		sil164_write(client, SIL164_DUALLINK,
			     SIL164_DUALLINK_ENABLE |
			     (((config->duallink_skew + 4) & 0x7)
			      << SIL164_DUALLINK_SKEW_SHIFT));
	else
		sil164_write(client, SIL164_DUALLINK, 0);
}
/* DRM encoder functions */
static void
sil164_encoder_set_config(struct drm_encoder *encoder, void *params)
{
struct sil164_priv *priv = to_sil164_priv(encoder);
priv->config = *(struct sil164_encoder_params *)params;
}
/*
 * DPMS hook: power the master (and, when present, the dual-link slave)
 * up or down.
 * NOTE(review): dereferences encoder->crtc unconditionally — the DRM core
 * appears to guarantee a bound crtc here; confirm before relying on it.
 */
static void
sil164_encoder_dpms(struct drm_encoder *encoder, int mode)
{
	struct sil164_priv *priv = to_sil164_priv(encoder);
	bool on = (mode == DRM_MODE_DPMS_ON);
	/* The slave only needs power above the 165 MHz single-link limit. */
	bool duallink = (on && encoder->crtc->mode.clock > 165000);

	sil164_set_power_state(drm_i2c_encoder_get_client(encoder), on);

	if (priv->duallink_slave)
		sil164_set_power_state(priv->duallink_slave, duallink);
}
/* Save master (and optional slave) register state for later restore. */
static void
sil164_encoder_save(struct drm_encoder *encoder)
{
	struct sil164_priv *priv = to_sil164_priv(encoder);
	struct i2c_client *master = drm_i2c_encoder_get_client(encoder);

	sil164_save_state(master, priv->saved_state);

	if (priv->duallink_slave)
		sil164_save_state(priv->duallink_slave,
				  priv->saved_slave_state);
}
/* Restore register state previously captured by sil164_encoder_save(). */
static void
sil164_encoder_restore(struct drm_encoder *encoder)
{
	struct sil164_priv *priv = to_sil164_priv(encoder);
	struct i2c_client *master = drm_i2c_encoder_get_client(encoder);

	sil164_restore_state(master, priv->saved_state);

	if (priv->duallink_slave)
		sil164_restore_state(priv->duallink_slave,
				     priv->saved_slave_state);
}
/*
 * Validate a mode against the transmitter's clock range: 32-165 MHz on a
 * single link, up to 330 MHz when a dual-link slave is present.
 */
static int
sil164_encoder_mode_valid(struct drm_encoder *encoder,
			  struct drm_display_mode *mode)
{
	struct sil164_priv *priv = to_sil164_priv(encoder);
	int clock = mode->clock;

	if (clock < 32000)
		return MODE_CLOCK_LOW;

	if (clock > 330000)
		return MODE_CLOCK_HIGH;

	if (clock > 165000 && !priv->duallink_slave)
		return MODE_CLOCK_HIGH;

	return MODE_OK;
}
/*
 * Mode-set hook: reprogram master (and optional slave) for the new mode,
 * then power the outputs back up via the DPMS path.
 */
static void
sil164_encoder_mode_set(struct drm_encoder *encoder,
			struct drm_display_mode *mode,
			struct drm_display_mode *adjusted_mode)
{
	struct sil164_priv *priv = to_sil164_priv(encoder);
	/* Above 165 MHz the single TMDS link is out of spec: go dual-link. */
	bool duallink = adjusted_mode->clock > 165000;

	sil164_init_state(drm_i2c_encoder_get_client(encoder),
			  &priv->config, duallink);

	if (priv->duallink_slave)
		sil164_init_state(priv->duallink_slave,
				  &priv->config, duallink);

	sil164_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
}
static enum drm_connector_status
sil164_encoder_detect(struct drm_encoder *encoder,
struct drm_connector *connector)
{
struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
if (sil164_read(client, SIL164_DETECT) & SIL164_DETECT_HOTPLUG_STAT)
return connector_status_connected;
else
return connector_status_disconnected;
}
/* This device contributes no modes of its own; always report zero. */
static int
sil164_encoder_get_modes(struct drm_encoder *encoder,
			 struct drm_connector *connector)
{
	return 0;
}
/* No connector properties to create; provided to satisfy the slave API. */
static int
sil164_encoder_create_resources(struct drm_encoder *encoder,
				struct drm_connector *connector)
{
	return 0;
}
/* No properties are supported; accept and ignore every request. */
static int
sil164_encoder_set_property(struct drm_encoder *encoder,
			    struct drm_connector *connector,
			    struct drm_property *property,
			    uint64_t val)
{
	return 0;
}
/*
 * Tear down: unregister the optional dual-link slave i2c device, free the
 * private state, then let the generic i2c encoder cleanup run last.
 */
static void
sil164_encoder_destroy(struct drm_encoder *encoder)
{
	struct sil164_priv *priv = to_sil164_priv(encoder);

	i2c_unregister_device(priv->duallink_slave);
	kfree(priv);
	drm_i2c_encoder_destroy(encoder);
}
static const struct drm_encoder_slave_funcs sil164_encoder_funcs = {
.set_config = sil164_encoder_set_config,
.destroy = sil164_encoder_destroy,
.dpms = sil164_encoder_dpms,
.save = sil164_encoder_save,
.restore = sil164_encoder_restore,
.mode_valid = sil164_encoder_mode_valid,
.mode_set = sil164_encoder_mode_set,
.detect = sil164_encoder_detect,
.get_modes = sil164_encoder_get_modes,
.create_resources = sil164_encoder_create_resources,
.set_property = sil164_encoder_set_property,
};
/* I2C driver functions */
/*
 * I2C probe: verify chip identity from the vendor/device ID registers.
 * Vendor 0x0001 / device 0x0006 identifies a sil164; anything else is
 * rejected with -ENODEV.
 */
static int
sil164_probe(struct i2c_client *client)
{
	int vendor = sil164_read(client, SIL164_VENDOR_HI) << 8 |
		sil164_read(client, SIL164_VENDOR_LO);
	int device = sil164_read(client, SIL164_DEVICE_HI) << 8 |
		sil164_read(client, SIL164_DEVICE_LO);
	int rev = sil164_read(client, SIL164_REVISION);

	if (vendor != 0x1 || device != 0x6) {
		sil164_dbg(client, "Unknown device %x:%x.%x\n",
			   vendor, device, rev);
		return -ENODEV;
	}

	sil164_info(client, "Detected device %x:%x.%x\n",
		    vendor, device, rev);

	return 0;
}
/*
 * Probe for a second sil164 at the fixed slave address (dual-link setups).
 *
 * Returns a new i2c client for the slave, NULL when no slave answers, or
 * an ERR_PTR from i2c_new_client_device() on registration failure.
 *
 * Fix: the debug message was missing its trailing newline, which leaves
 * the log line unterminated until the next printk.
 */
static struct i2c_client *
sil164_detect_slave(struct i2c_client *client)
{
	struct i2c_adapter *adap = client->adapter;
	/* Zero-length write: pure address probe, no payload needed. */
	struct i2c_msg msg = {
		.addr = SIL164_I2C_ADDR_SLAVE,
		.len = 0,
	};
	const struct i2c_board_info info = {
		I2C_BOARD_INFO("sil164", SIL164_I2C_ADDR_SLAVE)
	};

	if (i2c_transfer(adap, &msg, 1) != 1) {
		sil164_dbg(adap, "No dual-link slave found.\n");
		return NULL;
	}

	return i2c_new_client_device(adap, &info);
}
/*
 * Slave-encoder init: allocate private state, hook up the ops table and
 * record an optional dual-link slave.
 * sil164_detect_slave() may return NULL (no slave) or an ERR_PTR
 * (registration failed); both leave duallink_slave at its kzalloc'd
 * NULL-or-NULL-equivalent value, so single-link operation proceeds.
 */
static int
sil164_encoder_init(struct i2c_client *client,
		    struct drm_device *dev,
		    struct drm_encoder_slave *encoder)
{
	struct sil164_priv *priv;
	struct i2c_client *slave_client;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	encoder->slave_priv = priv;
	encoder->slave_funcs = &sil164_encoder_funcs;

	slave_client = sil164_detect_slave(client);
	if (!IS_ERR(slave_client))
		priv->duallink_slave = slave_client;

	return 0;
}
static const struct i2c_device_id sil164_ids[] = {
{ "sil164", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, sil164_ids);
static struct drm_i2c_encoder_driver sil164_driver = {
.i2c_driver = {
.probe = sil164_probe,
.driver = {
.name = "sil164",
},
.id_table = sil164_ids,
},
.encoder_init = sil164_encoder_init,
};
/* Module initialization */
static int __init
sil164_init(void)
{
return drm_i2c_encoder_register(THIS_MODULE, &sil164_driver);
}
static void __exit
sil164_exit(void)
{
drm_i2c_encoder_unregister(&sil164_driver);
}
MODULE_AUTHOR("Francisco Jerez <[email protected]>");
MODULE_DESCRIPTION("Silicon Image sil164 TMDS transmitter driver");
MODULE_LICENSE("GPL and additional rights");
module_init(sil164_init);
module_exit(sil164_exit);
| linux-master | drivers/gpu/drm/i2c/sil164_drv.c |
/*
* Copyright (C) 2009 Francisco Jerez.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "ch7006_priv.h"
/* Human-readable norm names, indexed by TV_NORM_* via designated inits. */
const char * const ch7006_tv_norm_names[] = {
	[TV_NORM_PAL] = "PAL",
	[TV_NORM_PAL_M] = "PAL-M",
	[TV_NORM_PAL_N] = "PAL-N",
	[TV_NORM_PAL_NC] = "PAL-Nc",
	[TV_NORM_PAL_60] = "PAL-60",
	[TV_NORM_NTSC_M] = "NTSC-M",
	[TV_NORM_NTSC_J] = "NTSC-J",
};
#define NTSC_LIKE_TIMINGS .vrefresh = 60 * fixed1/1.001, \
.vdisplay = 480, \
.vtotal = 525, \
.hvirtual = 660
#define PAL_LIKE_TIMINGS .vrefresh = 50 * fixed1, \
.vdisplay = 576, \
.vtotal = 625, \
.hvirtual = 810
/*
 * Per-norm parameters: vertical timings come from the NTSC/PAL_LIKE
 * macros above; black level and subcarrier frequency are fixed-point
 * (fixed1) values fed into the level/subcarrier setup below.
 */
const struct ch7006_tv_norm_info ch7006_tv_norms[] = {
	[TV_NORM_NTSC_M] = {
		NTSC_LIKE_TIMINGS,
		.black_level = 0.339 * fixed1,
		.subc_freq = 3579545 * fixed1,
		.dispmode = bitfs(CH7006_DISPMODE_OUTPUT_STD, NTSC),
		.voffset = 0,
	},
	[TV_NORM_NTSC_J] = {
		NTSC_LIKE_TIMINGS,
		.black_level = 0.286 * fixed1,
		.subc_freq = 3579545 * fixed1,
		.dispmode = bitfs(CH7006_DISPMODE_OUTPUT_STD, NTSC_J),
		.voffset = 0,
	},
	[TV_NORM_PAL] = {
		PAL_LIKE_TIMINGS,
		.black_level = 0.3 * fixed1,
		.subc_freq = 4433618.75 * fixed1,
		.dispmode = bitfs(CH7006_DISPMODE_OUTPUT_STD, PAL),
		.voffset = 0,
	},
	[TV_NORM_PAL_M] = {
		NTSC_LIKE_TIMINGS,
		.black_level = 0.339 * fixed1,
		.subc_freq = 3575611.433 * fixed1,
		.dispmode = bitfs(CH7006_DISPMODE_OUTPUT_STD, PAL_M),
		.voffset = 16,
	},

	/* The following modes seem to work right but they're
	 * undocumented */

	[TV_NORM_PAL_N] = {
		PAL_LIKE_TIMINGS,
		.black_level = 0.339 * fixed1,
		.subc_freq = 4433618.75 * fixed1,
		.dispmode = bitfs(CH7006_DISPMODE_OUTPUT_STD, PAL),
		.voffset = 0,
	},
	[TV_NORM_PAL_NC] = {
		PAL_LIKE_TIMINGS,
		.black_level = 0.3 * fixed1,
		.subc_freq = 3582056.25 * fixed1,
		.dispmode = bitfs(CH7006_DISPMODE_OUTPUT_STD, PAL),
		.voffset = 0,
	},
	[TV_NORM_PAL_60] = {
		NTSC_LIKE_TIMINGS,
		.black_level = 0.3 * fixed1,
		.subc_freq = 4433618.75 * fixed1,
		.dispmode = bitfs(CH7006_DISPMODE_OUTPUT_STD, PAL_M),
		.voffset = 16,
	},
};
#define __MODE(f, hd, vd, ht, vt, hsynp, vsynp, \
subc, scale, scale_mask, norm_mask, e_hd, e_vd) { \
.mode = { \
.name = #hd "x" #vd, \
.status = 0, \
.type = DRM_MODE_TYPE_DRIVER, \
.clock = f, \
.hdisplay = hd, \
.hsync_start = e_hd + 16, \
.hsync_end = e_hd + 80, \
.htotal = ht, \
.hskew = 0, \
.vdisplay = vd, \
.vsync_start = vd + 10, \
.vsync_end = vd + 26, \
.vtotal = vt, \
.vscan = 0, \
.flags = DRM_MODE_FLAG_##hsynp##HSYNC | \
DRM_MODE_FLAG_##vsynp##VSYNC, \
}, \
.enc_hdisp = e_hd, \
.enc_vdisp = e_vd, \
.subc_coeff = subc * fixed1, \
.dispmode = bitfs(CH7006_DISPMODE_SCALING_RATIO, scale) | \
bitfs(CH7006_DISPMODE_INPUT_RES, e_hd##x##e_vd), \
.valid_scales = scale_mask, \
.valid_norms = norm_mask \
}
#define MODE(f, hd, vd, ht, vt, hsynp, vsynp, \
subc, scale, scale_mask, norm_mask) \
__MODE(f, hd, vd, ht, vt, hsynp, vsynp, subc, scale, \
scale_mask, norm_mask, hd, vd)
#define NTSC_LIKE (1 << TV_NORM_NTSC_M | 1 << TV_NORM_NTSC_J | \
1 << TV_NORM_PAL_M | 1 << TV_NORM_PAL_60)
#define PAL_LIKE (1 << TV_NORM_PAL | 1 << TV_NORM_PAL_N | 1 << TV_NORM_PAL_NC)
/*
 * Supported video modes, terminated by an empty entry (clock == 0).
 * MODE() columns (see the macro above): clock[kHz], hdisplay, vdisplay,
 * htotal, vtotal, hsync polarity, vsync polarity, subcarrier coefficient,
 * scaling ratio, valid-scale mask, valid-norm mask.
 */
const struct ch7006_mode ch7006_modes[] = {
	MODE(21000, 512, 384, 840, 500, N, N, 181.797557582, 5_4, 0x6, PAL_LIKE),
	MODE(26250, 512, 384, 840, 625, N, N, 145.438046066, 1_1, 0x1, PAL_LIKE),
	MODE(20140, 512, 384, 800, 420, N, N, 213.257083791, 5_4, 0x4, NTSC_LIKE),
	MODE(24671, 512, 384, 784, 525, N, N, 174.0874153, 1_1, 0x3, NTSC_LIKE),
	MODE(28125, 720, 400, 1125, 500, N, N, 135.742176298, 5_4, 0x6, PAL_LIKE),
	MODE(34875, 720, 400, 1116, 625, N, N, 109.469496898, 1_1, 0x1, PAL_LIKE),
	MODE(23790, 720, 400, 945, 420, N, N, 160.475642016, 5_4, 0x4, NTSC_LIKE),
	MODE(29455, 720, 400, 936, 525, N, N, 129.614941843, 1_1, 0x3, NTSC_LIKE),
	MODE(25000, 640, 400, 1000, 500, N, N, 152.709948279, 5_4, 0x6, PAL_LIKE),
	MODE(31500, 640, 400, 1008, 625, N, N, 121.198371646, 1_1, 0x1, PAL_LIKE),
	MODE(21147, 640, 400, 840, 420, N, N, 180.535097338, 5_4, 0x4, NTSC_LIKE),
	MODE(26434, 640, 400, 840, 525, N, N, 144.42807787, 1_1, 0x2, NTSC_LIKE),
	MODE(30210, 640, 400, 840, 600, N, N, 126.374568276, 7_8, 0x1, NTSC_LIKE),
	MODE(21000, 640, 480, 840, 500, N, N, 181.797557582, 5_4, 0x4, PAL_LIKE),
	MODE(26250, 640, 480, 840, 625, N, N, 145.438046066, 1_1, 0x2, PAL_LIKE),
	MODE(31500, 640, 480, 840, 750, N, N, 121.198371646, 5_6, 0x1, PAL_LIKE),
	MODE(24671, 640, 480, 784, 525, N, N, 174.0874153, 1_1, 0x4, NTSC_LIKE),
	MODE(28196, 640, 480, 784, 600, N, N, 152.326488422, 7_8, 0x2, NTSC_LIKE),
	MODE(30210, 640, 480, 800, 630, N, N, 142.171389101, 5_6, 0x1, NTSC_LIKE),
	/* 720x576 uses an 800x600 encoder resolution (last two __MODE args). */
	__MODE(29500, 720, 576, 944, 625, P, P, 145.592111636, 1_1, 0x7, PAL_LIKE, 800, 600),
	MODE(36000, 800, 600, 960, 750, P, P, 119.304647022, 5_6, 0x6, PAL_LIKE),
	MODE(39000, 800, 600, 936, 836, P, P, 110.127366499, 3_4, 0x1, PAL_LIKE),
	MODE(39273, 800, 600, 1040, 630, P, P, 145.816809399, 5_6, 0x4, NTSC_LIKE),
	MODE(43636, 800, 600, 1040, 700, P, P, 131.235128487, 3_4, 0x2, NTSC_LIKE),
	MODE(47832, 800, 600, 1064, 750, P, P, 119.723275165, 7_10, 0x1, NTSC_LIKE),
	{}
};
/*
 * Find the internal mode entry matching @drm_mode and valid for the
 * currently selected TV norm. Returns NULL when nothing matches.
 */
const struct ch7006_mode *ch7006_lookup_mode(struct drm_encoder *encoder,
					     const struct drm_display_mode *drm_mode)
{
	struct ch7006_priv *priv = to_ch7006_priv(encoder);
	const struct ch7006_mode *mode;

	for (mode = ch7006_modes; mode->mode.clock; mode++) {
		if (!(mode->valid_norms & (1 << priv->norm)))
			continue;

		if (mode->mode.hdisplay == drm_mode->hdisplay &&
		    mode->mode.vdisplay == drm_mode->vdisplay &&
		    mode->mode.vtotal == drm_mode->vtotal &&
		    mode->mode.htotal == drm_mode->htotal &&
		    mode->mode.clock == drm_mode->clock)
			return mode;
	}

	return NULL;
}
/* Some common HW state calculation code */
/*
 * Compute the BLACK_LEVEL register from the norm's nominal black level
 * (fixed-point) and the user's brightness property, choosing the DAC
 * gain that keeps the value in range.
 */
void ch7006_setup_levels(struct drm_encoder *encoder)
{
	struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
	struct ch7006_priv *priv = to_ch7006_priv(encoder);
	uint8_t *regs = priv->state.regs;
	const struct ch7006_tv_norm_info *norm = &ch7006_tv_norms[priv->norm];
	int gain;
	int black_level;

	/* Set DAC_GAIN if the voltage drop between white and black is
	 * high enough. */
	if (norm->black_level < 339*fixed1/1000) {
		gain = 76;

		regs[CH7006_INPUT_FORMAT] |= CH7006_INPUT_FORMAT_DAC_GAIN;
	} else {
		gain = 71;

		regs[CH7006_INPUT_FORMAT] &= ~CH7006_INPUT_FORMAT_DAC_GAIN;
	}

	black_level = round_fixed(norm->black_level*26625)/gain;

	/* Correct it with the specified brightness. */
	black_level = interpolate(90, black_level, 208, priv->brightness);

	regs[CH7006_BLACK_LEVEL] = bitf(CH7006_BLACK_LEVEL_0, black_level);

	ch7006_dbg(client, "black level: %d\n", black_level);
}
/*
 * Compute the 32-bit color subcarrier increment from the mode's
 * coefficient and the norm's subcarrier frequency (both fixed-point),
 * then spread it across the eight 4-bit SUBC_INC registers, high
 * nibble first.
 */
void ch7006_setup_subcarrier(struct drm_encoder *encoder)
{
	struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
	struct ch7006_priv *priv = to_ch7006_priv(encoder);
	struct ch7006_state *state = &priv->state;
	const struct ch7006_tv_norm_info *norm = &ch7006_tv_norms[priv->norm];
	const struct ch7006_mode *mode = priv->mode;
	uint32_t subc_inc;

	/* Shifts rebalance the fixed-point scales before multiplying. */
	subc_inc = round_fixed((mode->subc_coeff >> 8)
			       * (norm->subc_freq >> 24));

	setbitf(state, CH7006_SUBC_INC0, 28, subc_inc);
	setbitf(state, CH7006_SUBC_INC1, 24, subc_inc);
	setbitf(state, CH7006_SUBC_INC2, 20, subc_inc);
	setbitf(state, CH7006_SUBC_INC3, 16, subc_inc);
	setbitf(state, CH7006_SUBC_INC4, 12, subc_inc);
	setbitf(state, CH7006_SUBC_INC5, 8, subc_inc);
	setbitf(state, CH7006_SUBC_INC6, 4, subc_inc);
	setbitf(state, CH7006_SUBC_INC7, 0, subc_inc);

	ch7006_dbg(client, "subcarrier inc: %u\n", subc_inc);
}
/*
 * Pick the PLL divider pair (n, m) whose output frequency
 * CH7006_FREQ0*(n+2)/(m+2) is closest to the mode's pixel clock, and
 * program the PLL registers accordingly.
 */
void ch7006_setup_pll(struct drm_encoder *encoder)
{
	struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
	struct ch7006_priv *priv = to_ch7006_priv(encoder);
	uint8_t *regs = priv->state.regs;
	const struct ch7006_mode *mode = priv->mode;
	int best_n = 0, best_m = 0, best_freq = 0;
	int n, m;

	/* Exhaustive search over all divider combinations. */
	for (n = 0; n < CH7006_MAXN; n++) {
		for (m = 0; m < CH7006_MAXM; m++) {
			int freq = CH7006_FREQ0*(n+2)/(m+2);

			if (abs(freq - mode->mode.clock) <
			    abs(best_freq - mode->mode.clock)) {
				best_freq = freq;
				best_n = n;
				best_m = m;
			}
		}
	}

	regs[CH7006_PLLOV] = bitf(CH7006_PLLOV_N_8, best_n) |
		bitf(CH7006_PLLOV_M_8, best_m);
	regs[CH7006_PLLM] = bitf(CH7006_PLLM_0, best_m);
	regs[CH7006_PLLN] = bitf(CH7006_PLLN_0, best_n);

	/* Loop-filter capacitor selection depends on the n divider. */
	if (best_n < 108)
		regs[CH7006_PLL_CONTROL] |= CH7006_PLL_CONTROL_CAPACITOR;
	else
		regs[CH7006_PLL_CONTROL] &= ~CH7006_PLL_CONTROL_CAPACITOR;

	ch7006_dbg(client, "n=%d m=%d f=%d c=%d\n",
		   best_n, best_m, best_freq, best_n < 108);
}
/*
 * Derive the POWER register from the DPMS state and the selected output
 * subconnector: with DPMS on, power only the DACs for the active output
 * (or enable SCART mode); otherwise pick the deepest power-off level the
 * chip revision supports.
 */
void ch7006_setup_power_state(struct drm_encoder *encoder)
{
	struct ch7006_priv *priv = to_ch7006_priv(encoder);
	uint8_t *power = &priv->state.regs[CH7006_POWER];
	int subconnector;

	/* An explicit user selection overrides the detected subconnector. */
	subconnector = priv->select_subconnector ? priv->select_subconnector :
		priv->subconnector;

	*power = CH7006_POWER_RESET;

	if (priv->last_dpms == DRM_MODE_DPMS_ON) {
		switch (subconnector) {
		case DRM_MODE_SUBCONNECTOR_SVIDEO:
			*power |= bitfs(CH7006_POWER_LEVEL, CVBS_OFF);
			break;
		case DRM_MODE_SUBCONNECTOR_Composite:
			*power |= bitfs(CH7006_POWER_LEVEL, SVIDEO_OFF);
			break;
		case DRM_MODE_SUBCONNECTOR_SCART:
			*power |= bitfs(CH7006_POWER_LEVEL, NORMAL) |
				CH7006_POWER_SCART;
			break;
		}

	} else {
		/* FULL_POWER_OFF only exists on chip revision >= 0x20. */
		if (priv->chip_version >= 0x20)
			*power |= bitfs(CH7006_POWER_LEVEL, FULL_POWER_OFF);
		else
			*power |= bitfs(CH7006_POWER_LEVEL, POWER_OFF);
	}
}
/*
 * Translate the user-visible properties (flicker filter, contrast,
 * h/v margins) into register values. hpos/vpos are derived with
 * fixed-point arithmetic from the norm's virtual timings scaled against
 * the current mode.
 */
void ch7006_setup_properties(struct drm_encoder *encoder)
{
	struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
	struct ch7006_priv *priv = to_ch7006_priv(encoder);
	struct ch7006_state *state = &priv->state;
	const struct ch7006_tv_norm_info *norm = &ch7006_tv_norms[priv->norm];
	const struct ch7006_mode *ch_mode = priv->mode;
	const struct drm_display_mode *mode = &ch_mode->mode;
	uint8_t *regs = state->regs;
	int flicker, contrast, hpos, vpos;
	uint64_t scale, aspect;

	/* Map the 0..max flicker property onto the chip's 0..2 range. */
	flicker = interpolate(0, 2, 3, priv->flicker);
	regs[CH7006_FFILTER] = bitf(CH7006_FFILTER_TEXT, flicker) |
		bitf(CH7006_FFILTER_LUMA, flicker) |
		bitf(CH7006_FFILTER_CHROMA, 1);

	contrast = interpolate(0, 5, 7, priv->contrast);
	regs[CH7006_CONTRAST] = bitf(CH7006_CONTRAST_0, contrast);

	/* Vertical scale factor between the norm and the current mode. */
	scale = norm->vtotal*fixed1;
	do_div(scale, mode->vtotal);

	/* Encoder-resolution aspect ratio, fixed-point. */
	aspect = ch_mode->enc_hdisp*fixed1;
	do_div(aspect, ch_mode->enc_vdisp);

	/* Horizontal position within the leftover blanking, per hmargin. */
	hpos = round_fixed((norm->hvirtual * aspect - mode->hdisplay * scale)
			   * priv->hmargin * mode->vtotal) / norm->vtotal / 100 / 4;

	setbitf(state, CH7006_POV, HPOS_8, hpos);
	setbitf(state, CH7006_HPOS, 0, hpos);

	/* Vertical position: leftover lines plus the norm's offset. */
	vpos = max(0, norm->vdisplay - round_fixed(mode->vdisplay*scale)
		   + norm->voffset) * priv->vmargin / 100 / 2;

	setbitf(state, CH7006_POV, VPOS_8, vpos);
	setbitf(state, CH7006_VPOS, 0, vpos);

	ch7006_dbg(client, "hpos: %d, vpos: %d\n", hpos, vpos);
}
/* HW access functions */
/* Write one register over I2C; log (but do not propagate) bus errors. */
void ch7006_write(struct i2c_client *client, uint8_t addr, uint8_t val)
{
	uint8_t msg[2] = { addr, val };
	int err = i2c_master_send(client, msg, sizeof(msg));

	if (err < 0)
		ch7006_err(client, "Error %d writing to subaddress 0x%x\n",
			   err, addr);
}
/*
 * Read one register over I2C (subaddress write, then one-byte read).
 * Returns the value, or 0 after logging a bus error.
 */
uint8_t ch7006_read(struct i2c_client *client, uint8_t addr)
{
	uint8_t val = 0;
	int err;

	err = i2c_master_send(client, &addr, sizeof(addr));
	if (err >= 0)
		err = i2c_master_recv(client, &val, sizeof(val));

	if (err < 0) {
		ch7006_err(client, "Error %d reading from subaddress 0x%x\n",
			   err, addr);
		return 0;
	}

	return val;
}
/* Write every register in the shadow @state out to the chip. */
void ch7006_state_load(struct i2c_client *client,
		       struct ch7006_state *state)
{
	ch7006_load_reg(client, state, CH7006_POWER);

	ch7006_load_reg(client, state, CH7006_DISPMODE);
	ch7006_load_reg(client, state, CH7006_FFILTER);
	ch7006_load_reg(client, state, CH7006_BWIDTH);
	ch7006_load_reg(client, state, CH7006_INPUT_FORMAT);
	ch7006_load_reg(client, state, CH7006_CLKMODE);
	ch7006_load_reg(client, state, CH7006_START_ACTIVE);
	ch7006_load_reg(client, state, CH7006_POV);
	ch7006_load_reg(client, state, CH7006_BLACK_LEVEL);
	ch7006_load_reg(client, state, CH7006_HPOS);
	ch7006_load_reg(client, state, CH7006_VPOS);
	ch7006_load_reg(client, state, CH7006_INPUT_SYNC);
	ch7006_load_reg(client, state, CH7006_DETECT);
	ch7006_load_reg(client, state, CH7006_CONTRAST);
	ch7006_load_reg(client, state, CH7006_PLLOV);
	ch7006_load_reg(client, state, CH7006_PLLM);
	ch7006_load_reg(client, state, CH7006_PLLN);
	ch7006_load_reg(client, state, CH7006_BCLKOUT);
	ch7006_load_reg(client, state, CH7006_SUBC_INC0);
	ch7006_load_reg(client, state, CH7006_SUBC_INC1);
	ch7006_load_reg(client, state, CH7006_SUBC_INC2);
	ch7006_load_reg(client, state, CH7006_SUBC_INC3);
	ch7006_load_reg(client, state, CH7006_SUBC_INC4);
	ch7006_load_reg(client, state, CH7006_SUBC_INC5);
	ch7006_load_reg(client, state, CH7006_SUBC_INC6);
	ch7006_load_reg(client, state, CH7006_SUBC_INC7);
	ch7006_load_reg(client, state, CH7006_PLL_CONTROL);
	ch7006_load_reg(client, state, CH7006_CALC_SUBC_INC0);
}
/* Read every chip register into the shadow @state. */
void ch7006_state_save(struct i2c_client *client,
		       struct ch7006_state *state)
{
	ch7006_save_reg(client, state, CH7006_POWER);

	ch7006_save_reg(client, state, CH7006_DISPMODE);
	ch7006_save_reg(client, state, CH7006_FFILTER);
	ch7006_save_reg(client, state, CH7006_BWIDTH);
	ch7006_save_reg(client, state, CH7006_INPUT_FORMAT);
	ch7006_save_reg(client, state, CH7006_CLKMODE);
	ch7006_save_reg(client, state, CH7006_START_ACTIVE);
	ch7006_save_reg(client, state, CH7006_POV);
	ch7006_save_reg(client, state, CH7006_BLACK_LEVEL);
	ch7006_save_reg(client, state, CH7006_HPOS);
	ch7006_save_reg(client, state, CH7006_VPOS);
	ch7006_save_reg(client, state, CH7006_INPUT_SYNC);
	ch7006_save_reg(client, state, CH7006_DETECT);
	ch7006_save_reg(client, state, CH7006_CONTRAST);
	ch7006_save_reg(client, state, CH7006_PLLOV);
	ch7006_save_reg(client, state, CH7006_PLLM);
	ch7006_save_reg(client, state, CH7006_PLLN);
	ch7006_save_reg(client, state, CH7006_BCLKOUT);
	ch7006_save_reg(client, state, CH7006_SUBC_INC0);
	ch7006_save_reg(client, state, CH7006_SUBC_INC1);
	ch7006_save_reg(client, state, CH7006_SUBC_INC2);
	ch7006_save_reg(client, state, CH7006_SUBC_INC3);
	ch7006_save_reg(client, state, CH7006_SUBC_INC4);
	ch7006_save_reg(client, state, CH7006_SUBC_INC5);
	ch7006_save_reg(client, state, CH7006_SUBC_INC6);
	ch7006_save_reg(client, state, CH7006_SUBC_INC7);
	ch7006_save_reg(client, state, CH7006_PLL_CONTROL);
	ch7006_save_reg(client, state, CH7006_CALC_SUBC_INC0);

	/* Swap the two low 2-bit fields of FFILTER — presumably the chip's
	 * readback layout differs from the layout written by
	 * ch7006_setup_properties(); TODO confirm against the datasheet. */
	state->regs[CH7006_FFILTER] = (state->regs[CH7006_FFILTER] & 0xf0) |
		(state->regs[CH7006_FFILTER] & 0x0c) >> 2 |
		(state->regs[CH7006_FFILTER] & 0x03) << 2;
}
| linux-master | drivers/gpu/drm/i2c/ch7006_mode.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* TDA9950 Consumer Electronics Control driver
*
* The NXP TDA9950 implements the HDMI Consumer Electronics Control
* interface. The host interface is similar to a mailbox: the data
* registers starting at REG_CDR0 are written to send a command to the
* internal CPU, and replies are read from these registers.
*
* As the data registers represent a mailbox, they must be accessed
* as a single I2C transaction. See the TDA9950 data sheet for details.
*/
#include <linux/delay.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_data/tda9950.h>
#include <linux/slab.h>
#include <drm/drm_edid.h>
#include <media/cec.h>
#include <media/cec-notifier.h>
enum {
REG_CSR = 0x00,
CSR_BUSY = BIT(7),
CSR_INT = BIT(6),
CSR_ERR = BIT(5),
REG_CER = 0x01,
REG_CVR = 0x02,
REG_CCR = 0x03,
CCR_RESET = BIT(7),
CCR_ON = BIT(6),
REG_ACKH = 0x04,
REG_ACKL = 0x05,
REG_CCONR = 0x06,
CCONR_ENABLE_ERROR = BIT(4),
CCONR_RETRY_MASK = 7,
REG_CDR0 = 0x07,
CDR1_REQ = 0x00,
CDR1_CNF = 0x01,
CDR1_IND = 0x81,
CDR1_ERR = 0x82,
CDR1_IER = 0x83,
CDR2_CNF_SUCCESS = 0x00,
CDR2_CNF_OFF_STATE = 0x80,
CDR2_CNF_BAD_REQ = 0x81,
CDR2_CNF_CEC_ACCESS = 0x82,
CDR2_CNF_ARB_ERROR = 0x83,
CDR2_CNF_BAD_TIMING = 0x84,
CDR2_CNF_NACK_ADDR = 0x85,
CDR2_CNF_NACK_DATA = 0x86,
};
/* Per-device state for the TDA9950 CEC controller. */
struct tda9950_priv {
	struct i2c_client *client;	/* mailbox register access */
	struct device *hdmi;		/* associated HDMI transmitter device */
	struct cec_adapter *adap;	/* CEC core adapter handle */
	struct tda9950_glue *glue;	/* optional TDA998x integration hooks */
	u16 addresses;			/* bitmask of claimed logical addresses */
	struct cec_msg rx_msg;		/* scratch buffer for received messages */
	struct cec_notifier *notify;
	bool open;			/* adapter enabled; gates the IRQ handler */
};
/*
 * Write @cnt bytes to the mailbox starting at register @addr as one I2C
 * transaction (required by the mailbox interface). Returns 0 or a
 * negative error code.
 */
static int tda9950_write_range(struct i2c_client *client, u8 addr, u8 *p, int cnt)
{
	u8 buf[CEC_MAX_MSG_SIZE + 3];
	struct i2c_msg msg = {
		.addr = client->addr,
		.flags = 0,
		.len = cnt + 1,
		.buf = buf,
	};
	int ret;

	if (WARN_ON(cnt > sizeof(buf) - 1))
		return -EINVAL;

	buf[0] = addr;
	memcpy(buf + 1, p, cnt);

	dev_dbg(&client->dev, "wr 0x%02x: %*ph\n", addr, cnt, p);

	ret = i2c_transfer(client->adapter, &msg, 1);
	if (ret < 0)
		dev_err(&client->dev, "Error %d writing to cec:0x%x\n", ret, addr);

	return ret < 0 ? ret : 0;
}
/* Convenience wrapper: write a single register, ignoring errors. */
static void tda9950_write(struct i2c_client *client, u8 addr, u8 val)
{
	tda9950_write_range(client, addr, &val, 1);
}
/*
 * Read @cnt bytes from the mailbox starting at register @addr, using a
 * combined write-then-read transfer. Returns the i2c_transfer() result.
 */
static int tda9950_read_range(struct i2c_client *client, u8 addr, u8 *p, int cnt)
{
	struct i2c_msg msg[2] = {
		{
			.addr = client->addr,
			.flags = 0,
			.len = 1,
			.buf = &addr,
		}, {
			.addr = client->addr,
			.flags = I2C_M_RD,
			.len = cnt,
			.buf = p,
		},
	};
	int ret;

	ret = i2c_transfer(client->adapter, msg, 2);
	if (ret < 0)
		dev_err(&client->dev, "Error %d reading from cec:0x%x\n", ret, addr);

	dev_dbg(&client->dev, "rd 0x%02x: %*ph\n", addr, cnt, p);

	return ret;
}
/* Read one register; a failed transfer reads back as 0. */
static u8 tda9950_read(struct i2c_client *client, u8 addr)
{
	u8 val;

	if (tda9950_read_range(client, addr, &val, 1) < 0)
		return 0;

	return val;
}
/*
 * Interrupt handler: drain one mailbox message and forward it to the
 * CEC core. Mailbox layout (read from REG_CDR0): buf[0] = total length,
 * buf[1] = service id (CDR1_*), buf[2..] = payload.
 */
static irqreturn_t tda9950_irq(int irq, void *data)
{
	struct tda9950_priv *priv = data;
	unsigned int tx_status;
	u8 csr, cconr, buf[19];
	u8 arb_lost_cnt, nack_cnt, err_cnt;

	/* The IRQ may be shared; ignore it while the adapter is disabled. */
	if (!priv->open)
		return IRQ_NONE;

	csr = tda9950_read(priv->client, REG_CSR);
	if (!(csr & CSR_INT))
		return IRQ_NONE;

	/* Remaining retry count, reported back to the CEC core below. */
	cconr = tda9950_read(priv->client, REG_CCONR) & CCONR_RETRY_MASK;

	tda9950_read_range(priv->client, REG_CDR0, buf, sizeof(buf));

	/*
	 * This should never happen: the data sheet says that there will
	 * always be a valid message if the interrupt line is asserted.
	 */
	if (buf[0] == 0) {
		dev_warn(&priv->client->dev, "interrupt pending, but no message?\n");
		return IRQ_NONE;
	}

	switch (buf[1]) {
	case CDR1_CNF: /* transmit result */
		arb_lost_cnt = nack_cnt = err_cnt = 0;
		switch (buf[2]) {
		case CDR2_CNF_SUCCESS:
			tx_status = CEC_TX_STATUS_OK;
			break;

		case CDR2_CNF_ARB_ERROR:
			tx_status = CEC_TX_STATUS_ARB_LOST;
			arb_lost_cnt = cconr;
			break;

		case CDR2_CNF_NACK_ADDR:
			tx_status = CEC_TX_STATUS_NACK;
			nack_cnt = cconr;
			break;

		default: /* some other error, refer to TDA9950 docs */
			dev_err(&priv->client->dev, "CNF reply error 0x%02x\n",
				buf[2]);
			tx_status = CEC_TX_STATUS_ERROR;
			err_cnt = cconr;
			break;
		}
		/* TDA9950 executes all retries for us */
		if (tx_status != CEC_TX_STATUS_OK)
			tx_status |= CEC_TX_STATUS_MAX_RETRIES;
		cec_transmit_done(priv->adap, tx_status, arb_lost_cnt,
				  nack_cnt, 0, err_cnt);
		break;

	case CDR1_IND:
		/* Payload length excludes the length and service-id bytes. */
		priv->rx_msg.len = buf[0] - 2;
		if (priv->rx_msg.len > CEC_MAX_MSG_SIZE)
			priv->rx_msg.len = CEC_MAX_MSG_SIZE;

		memcpy(priv->rx_msg.msg, buf + 2, priv->rx_msg.len);
		cec_received_msg(priv->adap, &priv->rx_msg);
		break;

	default: /* unknown */
		dev_err(&priv->client->dev, "unknown service id 0x%02x\n",
			buf[1]);
		break;
	}

	return IRQ_HANDLED;
}
/*
 * Queue a CEC transmit: mailbox frame is {total length, CDR1_REQ,
 * payload}. The retry count goes into CCONR first; the result arrives
 * later as a CDR1_CNF message via the interrupt handler.
 */
static int tda9950_cec_transmit(struct cec_adapter *adap, u8 attempts,
				u32 signal_free_time, struct cec_msg *msg)
{
	struct tda9950_priv *priv = adap->priv;
	u8 buf[CEC_MAX_MSG_SIZE + 2];

	buf[0] = 2 + msg->len;
	buf[1] = CDR1_REQ;
	memcpy(buf + 2, msg->msg, msg->len);

	/* Clamp to 5 retries — presumably the firmware maximum; TODO
	 * confirm against the TDA9950 data sheet. */
	if (attempts > 5)
		attempts = 5;

	tda9950_write(priv->client, REG_CCONR, attempts);

	return tda9950_write_range(priv->client, REG_CDR0, buf, 2 + msg->len);
}
/*
 * Update the set of claimed logical addresses. CEC_LOG_ADDR_INVALID
 * clears the whole set; otherwise the address bit is added. The mask is
 * then written to the ACKH/ACKL register pair.
 */
static int tda9950_cec_adap_log_addr(struct cec_adapter *adap, u8 addr)
{
	struct tda9950_priv *priv = adap->priv;
	u16 addresses;
	u8 buf[2];

	if (addr == CEC_LOG_ADDR_INVALID)
		priv->addresses = 0;
	else
		priv->addresses |= BIT(addr);

	/* TDA9950 doesn't want address 15 set */
	addresses = priv->addresses & 0x7fff;

	buf[0] = addresses >> 8;
	buf[1] = addresses;

	return tda9950_write_range(priv->client, REG_ACKH, buf, 2);
}
/*
* When operating as part of the TDA998x, we need additional handling
* to initialise and shut down the TDA9950 part of the device. These
* two hooks are provided to allow the TDA998x code to perform those
* activities.
*/
static int tda9950_glue_open(struct tda9950_priv *priv)
{
int ret = 0;
if (priv->glue && priv->glue->open)
ret = priv->glue->open(priv->glue->data);
priv->open = true;
return ret;
}
static void tda9950_glue_release(struct tda9950_priv *priv)
{
priv->open = false;
if (priv->glue && priv->glue->release)
priv->glue->release(priv->glue->data);
}
/*
 * Bring the CEC controller up: run the glue hook, hard-reset the chip,
 * clear any stale logical addresses, then start its command processor.
 */
static int tda9950_open(struct tda9950_priv *priv)
{
	struct i2c_client *client = priv->client;
	int ret;

	ret = tda9950_glue_open(priv);
	if (ret)
		return ret;

	/* Reset the TDA9950, and wait 250ms for it to recover */
	tda9950_write(client, REG_CCR, CCR_RESET);
	msleep(250);

	tda9950_cec_adap_log_addr(priv->adap, CEC_LOG_ADDR_INVALID);

	/* Start the command processor */
	tda9950_write(client, REG_CCR, CCR_ON);

	return 0;
}
/*
 * Shut the CEC controller down: stop the command processor, poll up to
 * 50 * 10ms for the busy flag to clear, then run the glue release hook.
 */
static void tda9950_release(struct tda9950_priv *priv)
{
	struct i2c_client *client = priv->client;
	int timeout = 50;
	u8 csr;

	/* Stop the command processor */
	tda9950_write(client, REG_CCR, 0);

	/* Wait up to .5s for it to signal non-busy */
	do {
		csr = tda9950_read(client, REG_CSR);
		if (!(csr & CSR_BUSY) || !--timeout)
			break;
		msleep(10);
	} while (1);

	/* Warn the user that their IRQ may die if it's shared. */
	if (csr & CSR_BUSY)
		dev_warn(&client->dev, "command processor failed to stop, irq%d may die (csr=0x%02x)\n",
			 client->irq, csr);

	tda9950_glue_release(priv);
}
/* CEC core callback: power the adapter up or down. */
static int tda9950_cec_adap_enable(struct cec_adapter *adap, bool enable)
{
	struct tda9950_priv *priv = adap->priv;
	if (enable)
		return tda9950_open(priv);
	tda9950_release(priv);
	return 0;
}
/* Low-level operations handed to the CEC core for this adapter. */
static const struct cec_adap_ops tda9950_cec_ops = {
	.adap_enable = tda9950_cec_adap_enable,
	.adap_log_addr = tda9950_cec_adap_log_addr,
	.adap_transmit = tda9950_cec_transmit,
};
/*
* When operating as part of the TDA998x, we need to claim additional
* resources. These two hooks permit the management of those resources.
*/
/* devm teardown action: run the optional glue exit hook. */
static void tda9950_devm_glue_exit(void *data)
{
	struct tda9950_glue *glue = data;
	if (!glue)
		return;
	if (glue->exit)
		glue->exit(glue->data);
}
/*
 * Run the optional glue init hook and register the matching exit hook
 * as a devm action so it runs automatically on driver detach.  If the
 * action cannot be registered, the exit hook is invoked immediately
 * to undo the init.
 */
static int tda9950_devm_glue_init(struct device *dev, struct tda9950_glue *glue)
{
	int ret;
	if (glue && glue->init) {
		ret = glue->init(glue->data);
		if (ret)
			return ret;
	}
	ret = devm_add_action(dev, tda9950_devm_glue_exit, glue);
	if (ret)
		tda9950_devm_glue_exit(glue);
	return ret;
}
/* devm action: free an allocated-but-unregistered CEC adapter. */
static void tda9950_cec_del(void *data)
{
	struct tda9950_priv *priv = data;
	cec_delete_adapter(priv->adap);
}
/*
 * Probe the TDA9950: validate the I2C adapter and interrupt, allocate
 * the CEC adapter, initialise the glue layer, read the hardware
 * version, request the (possibly shared) threaded IRQ and register
 * with the CEC core.  Resource teardown ordering is delicate - see the
 * comments around cec_register_adapter() below.
 */
static int tda9950_probe(struct i2c_client *client)
{
	struct tda9950_glue *glue = client->dev.platform_data;
	struct device *dev = &client->dev;
	struct tda9950_priv *priv;
	unsigned long irqflags;
	int ret;
	u8 cvr;
	/*
	 * We must have I2C functionality: our multi-byte accesses
	 * must be performed as a single contiguous transaction.
	 */
	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
		dev_err(&client->dev,
			"adapter does not support I2C functionality\n");
		return -ENXIO;
	}
	/* We must have an interrupt to be functional. */
	if (client->irq <= 0) {
		dev_err(&client->dev, "driver requires an interrupt\n");
		return -ENXIO;
	}
	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;
	priv->client = client;
	priv->glue = glue;
	i2c_set_clientdata(client, priv);
	/*
	 * If we're part of a TDA998x, we want the class devices to be
	 * associated with the HDMI Tx so we have a tight relationship
	 * between the HDMI interface and the CEC interface.
	 */
	priv->hdmi = dev;
	if (glue && glue->parent)
		priv->hdmi = glue->parent;
	priv->adap = cec_allocate_adapter(&tda9950_cec_ops, priv, "tda9950",
					  CEC_CAP_DEFAULTS |
					  CEC_CAP_CONNECTOR_INFO,
					  CEC_MAX_LOG_ADDRS);
	if (IS_ERR(priv->adap))
		return PTR_ERR(priv->adap);
	/* Until registration succeeds, the adapter must be freed manually */
	ret = devm_add_action(dev, tda9950_cec_del, priv);
	if (ret) {
		cec_delete_adapter(priv->adap);
		return ret;
	}
	ret = tda9950_devm_glue_init(dev, glue);
	if (ret)
		return ret;
	/* Briefly open the device just to read the hardware version */
	ret = tda9950_glue_open(priv);
	if (ret)
		return ret;
	cvr = tda9950_read(client, REG_CVR);
	dev_info(&client->dev,
		 "TDA9950 CEC interface, hardware version %u.%u\n",
		 cvr >> 4, cvr & 15);
	tda9950_glue_release(priv);
	irqflags = IRQF_TRIGGER_FALLING;
	if (glue)
		irqflags = glue->irq_flags;
	ret = devm_request_threaded_irq(dev, client->irq, NULL, tda9950_irq,
					irqflags | IRQF_SHARED | IRQF_ONESHOT,
					dev_name(&client->dev), priv);
	if (ret < 0)
		return ret;
	priv->notify = cec_notifier_cec_adap_register(priv->hdmi, NULL,
						      priv->adap);
	if (!priv->notify)
		return -ENOMEM;
	ret = cec_register_adapter(priv->adap, priv->hdmi);
	if (ret < 0) {
		cec_notifier_cec_adap_unregister(priv->notify, priv->adap);
		return ret;
	}
	/*
	 * CEC documentation says we must not call cec_delete_adapter
	 * after a successful call to cec_register_adapter().
	 */
	devm_remove_action(dev, tda9950_cec_del, priv);
	return 0;
}
/* Unregister the CEC notifier and adapter; devm handles the rest. */
static void tda9950_remove(struct i2c_client *client)
{
	struct tda9950_priv *priv = i2c_get_clientdata(client);
	cec_notifier_cec_adap_unregister(priv->notify, priv->adap);
	cec_unregister_adapter(priv->adap);
}
/* I2C device id table for bus matching and module autoloading. */
static struct i2c_device_id tda9950_ids[] = {
	{ "tda9950", 0 },
	{ },
};
MODULE_DEVICE_TABLE(i2c, tda9950_ids);
static struct i2c_driver tda9950_driver = {
	.probe = tda9950_probe,
	.remove = tda9950_remove,
	.driver = {
		.name = "tda9950",
	},
	.id_table = tda9950_ids,
};
module_i2c_driver(tda9950_driver);
MODULE_AUTHOR("Russell King <[email protected]>");
MODULE_DESCRIPTION("TDA9950/TDA998x Consumer Electronics Control Driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/gpu/drm/i2c/tda9950.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2012 Texas Instruments
* Author: Rob Clark <[email protected]>
*/
#include <linux/component.h>
#include <linux/gpio/consumer.h>
#include <linux/hdmi.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/platform_data/tda9950.h>
#include <linux/irq.h>
#include <sound/asoundef.h>
#include <sound/hdmi-codec.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_edid.h>
#include <drm/drm_of.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
#include <drm/i2c/tda998x.h>
#include <media/cec-notifier.h>
#define DBG(fmt, ...) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
/* Selectable audio input routes through the TDA998x. */
enum {
	AUDIO_ROUTE_I2S,
	AUDIO_ROUTE_SPDIF,
	AUDIO_ROUTE_NUM
};
/* Register values that select one audio route. */
struct tda998x_audio_route {
	u8 ena_aclk;	/* REG_ENA_ACLK value */
	u8 mux_ap;	/* REG_MUX_AP value */
	u8 aip_clksel;	/* REG_AIP_CLKSEL value */
};
/* Full audio configuration derived for the current stream. */
struct tda998x_audio_settings {
	const struct tda998x_audio_route *route;
	struct hdmi_audio_infoframe cea;
	unsigned int sample_rate;
	u8 status[5];	/* channel status bytes */
	u8 ena_ap;	/* REG_ENA_AP audio port enable */
	u8 i2s_format;
	u8 cts_n;	/* REG_CTS_N value */
};
/* Per-device driver state. */
struct tda998x_priv {
	struct i2c_client *cec;		/* CEC core I2C client */
	struct i2c_client *hdmi;	/* HDMI core I2C client */
	struct mutex mutex;		/* serialises paged register access */
	u16 rev;			/* device version */
	u8 cec_addr;
	u8 current_page;		/* cached CURPAGE to avoid rewrites */
	bool is_on;
	bool supports_infoframes;
	bool sink_has_audio;
	enum hdmi_quantization_range rgb_quant_range;
	u8 vip_cntrl_0;
	u8 vip_cntrl_1;
	u8 vip_cntrl_2;
	unsigned long tmds_clock;	/* in kHz - see tda998x_get_adiv() */
	struct tda998x_audio_settings audio;
	struct platform_device *audio_pdev;
	struct mutex audio_mutex;
	struct mutex edid_mutex;
	wait_queue_head_t wq_edid;	/* EDID block-read completion */
	volatile int wq_edid_wait;
	struct work_struct detect_work;
	struct timer_list edid_delay_timer;	/* post-HPD EDID read delay */
	wait_queue_head_t edid_delay_waitq;
	bool edid_delay_active;
	struct drm_encoder encoder;
	struct drm_bridge bridge;
	struct drm_connector connector;
	u8 audio_port_enable[AUDIO_ROUTE_NUM];
	struct tda9950_glue cec_glue;
	struct gpio_desc *calib;	/* CEC oscillator calibration GPIO */
	struct cec_notifier *cec_notify;
};
#define conn_to_tda998x_priv(x) \
	container_of(x, struct tda998x_priv, connector)
#define enc_to_tda998x_priv(x) \
	container_of(x, struct tda998x_priv, encoder)
#define bridge_to_tda998x_priv(x) \
	container_of(x, struct tda998x_priv, bridge)
/* The TDA9988 series of devices use a paged register scheme.. to simplify
* things we encode the page # in upper bits of the register #. To read/
* write a given register, we need to make sure CURPAGE register is set
* appropriately. Which implies reads/writes are not atomic. Fun!
*/
#define REG(page, addr) (((page) << 8) | (addr))
#define REG2ADDR(reg) ((reg) & 0xff)
#define REG2PAGE(reg) (((reg) >> 8) & 0xff)
#define REG_CURPAGE 0xff /* write */
/* Page 00h: General Control */
#define REG_VERSION_LSB REG(0x00, 0x00) /* read */
#define REG_MAIN_CNTRL0 REG(0x00, 0x01) /* read/write */
# define MAIN_CNTRL0_SR (1 << 0)
# define MAIN_CNTRL0_DECS (1 << 1)
# define MAIN_CNTRL0_DEHS (1 << 2)
# define MAIN_CNTRL0_CECS (1 << 3)
# define MAIN_CNTRL0_CEHS (1 << 4)
# define MAIN_CNTRL0_SCALER (1 << 7)
#define REG_VERSION_MSB REG(0x00, 0x02) /* read */
#define REG_SOFTRESET REG(0x00, 0x0a) /* write */
# define SOFTRESET_AUDIO (1 << 0)
# define SOFTRESET_I2C_MASTER (1 << 1)
#define REG_DDC_DISABLE REG(0x00, 0x0b) /* read/write */
#define REG_CCLK_ON REG(0x00, 0x0c) /* read/write */
#define REG_I2C_MASTER REG(0x00, 0x0d) /* read/write */
# define I2C_MASTER_DIS_MM (1 << 0)
# define I2C_MASTER_DIS_FILT (1 << 1)
# define I2C_MASTER_APP_STRT_LAT (1 << 2)
#define REG_FEAT_POWERDOWN REG(0x00, 0x0e) /* read/write */
# define FEAT_POWERDOWN_PREFILT BIT(0)
# define FEAT_POWERDOWN_CSC BIT(1)
# define FEAT_POWERDOWN_SPDIF (1 << 3)
#define REG_INT_FLAGS_0 REG(0x00, 0x0f) /* read/write */
#define REG_INT_FLAGS_1 REG(0x00, 0x10) /* read/write */
#define REG_INT_FLAGS_2 REG(0x00, 0x11) /* read/write */
# define INT_FLAGS_2_EDID_BLK_RD (1 << 1)
#define REG_ENA_ACLK REG(0x00, 0x16) /* read/write */
#define REG_ENA_VP_0 REG(0x00, 0x18) /* read/write */
#define REG_ENA_VP_1 REG(0x00, 0x19) /* read/write */
#define REG_ENA_VP_2 REG(0x00, 0x1a) /* read/write */
#define REG_ENA_AP REG(0x00, 0x1e) /* read/write */
#define REG_VIP_CNTRL_0 REG(0x00, 0x20) /* write */
# define VIP_CNTRL_0_MIRR_A (1 << 7)
# define VIP_CNTRL_0_SWAP_A(x) (((x) & 7) << 4)
# define VIP_CNTRL_0_MIRR_B (1 << 3)
# define VIP_CNTRL_0_SWAP_B(x) (((x) & 7) << 0)
#define REG_VIP_CNTRL_1 REG(0x00, 0x21) /* write */
# define VIP_CNTRL_1_MIRR_C (1 << 7)
# define VIP_CNTRL_1_SWAP_C(x) (((x) & 7) << 4)
# define VIP_CNTRL_1_MIRR_D (1 << 3)
# define VIP_CNTRL_1_SWAP_D(x) (((x) & 7) << 0)
#define REG_VIP_CNTRL_2 REG(0x00, 0x22) /* write */
# define VIP_CNTRL_2_MIRR_E (1 << 7)
# define VIP_CNTRL_2_SWAP_E(x) (((x) & 7) << 4)
# define VIP_CNTRL_2_MIRR_F (1 << 3)
# define VIP_CNTRL_2_SWAP_F(x) (((x) & 7) << 0)
#define REG_VIP_CNTRL_3 REG(0x00, 0x23) /* write */
# define VIP_CNTRL_3_X_TGL (1 << 0)
# define VIP_CNTRL_3_H_TGL (1 << 1)
# define VIP_CNTRL_3_V_TGL (1 << 2)
# define VIP_CNTRL_3_EMB (1 << 3)
# define VIP_CNTRL_3_SYNC_DE (1 << 4)
# define VIP_CNTRL_3_SYNC_HS (1 << 5)
# define VIP_CNTRL_3_DE_INT (1 << 6)
# define VIP_CNTRL_3_EDGE (1 << 7)
#define REG_VIP_CNTRL_4 REG(0x00, 0x24) /* write */
# define VIP_CNTRL_4_BLC(x) (((x) & 3) << 0)
# define VIP_CNTRL_4_BLANKIT(x) (((x) & 3) << 2)
# define VIP_CNTRL_4_CCIR656 (1 << 4)
# define VIP_CNTRL_4_656_ALT (1 << 5)
# define VIP_CNTRL_4_TST_656 (1 << 6)
# define VIP_CNTRL_4_TST_PAT (1 << 7)
#define REG_VIP_CNTRL_5 REG(0x00, 0x25) /* write */
# define VIP_CNTRL_5_CKCASE (1 << 0)
# define VIP_CNTRL_5_SP_CNT(x) (((x) & 3) << 1)
#define REG_MUX_AP REG(0x00, 0x26) /* read/write */
# define MUX_AP_SELECT_I2S 0x64
# define MUX_AP_SELECT_SPDIF 0x40
#define REG_MUX_VP_VIP_OUT REG(0x00, 0x27) /* read/write */
#define REG_MAT_CONTRL REG(0x00, 0x80) /* write */
# define MAT_CONTRL_MAT_SC(x) (((x) & 3) << 0)
# define MAT_CONTRL_MAT_BP (1 << 2)
#define REG_VIDFORMAT REG(0x00, 0xa0) /* write */
#define REG_REFPIX_MSB REG(0x00, 0xa1) /* write */
#define REG_REFPIX_LSB REG(0x00, 0xa2) /* write */
#define REG_REFLINE_MSB REG(0x00, 0xa3) /* write */
#define REG_REFLINE_LSB REG(0x00, 0xa4) /* write */
#define REG_NPIX_MSB REG(0x00, 0xa5) /* write */
#define REG_NPIX_LSB REG(0x00, 0xa6) /* write */
#define REG_NLINE_MSB REG(0x00, 0xa7) /* write */
#define REG_NLINE_LSB REG(0x00, 0xa8) /* write */
#define REG_VS_LINE_STRT_1_MSB REG(0x00, 0xa9) /* write */
#define REG_VS_LINE_STRT_1_LSB REG(0x00, 0xaa) /* write */
#define REG_VS_PIX_STRT_1_MSB REG(0x00, 0xab) /* write */
#define REG_VS_PIX_STRT_1_LSB REG(0x00, 0xac) /* write */
#define REG_VS_LINE_END_1_MSB REG(0x00, 0xad) /* write */
#define REG_VS_LINE_END_1_LSB REG(0x00, 0xae) /* write */
#define REG_VS_PIX_END_1_MSB REG(0x00, 0xaf) /* write */
#define REG_VS_PIX_END_1_LSB REG(0x00, 0xb0) /* write */
#define REG_VS_LINE_STRT_2_MSB REG(0x00, 0xb1) /* write */
#define REG_VS_LINE_STRT_2_LSB REG(0x00, 0xb2) /* write */
#define REG_VS_PIX_STRT_2_MSB REG(0x00, 0xb3) /* write */
#define REG_VS_PIX_STRT_2_LSB REG(0x00, 0xb4) /* write */
#define REG_VS_LINE_END_2_MSB REG(0x00, 0xb5) /* write */
#define REG_VS_LINE_END_2_LSB REG(0x00, 0xb6) /* write */
#define REG_VS_PIX_END_2_MSB REG(0x00, 0xb7) /* write */
#define REG_VS_PIX_END_2_LSB REG(0x00, 0xb8) /* write */
#define REG_HS_PIX_START_MSB REG(0x00, 0xb9) /* write */
#define REG_HS_PIX_START_LSB REG(0x00, 0xba) /* write */
#define REG_HS_PIX_STOP_MSB REG(0x00, 0xbb) /* write */
#define REG_HS_PIX_STOP_LSB REG(0x00, 0xbc) /* write */
#define REG_VWIN_START_1_MSB REG(0x00, 0xbd) /* write */
#define REG_VWIN_START_1_LSB REG(0x00, 0xbe) /* write */
#define REG_VWIN_END_1_MSB REG(0x00, 0xbf) /* write */
#define REG_VWIN_END_1_LSB REG(0x00, 0xc0) /* write */
#define REG_VWIN_START_2_MSB REG(0x00, 0xc1) /* write */
#define REG_VWIN_START_2_LSB REG(0x00, 0xc2) /* write */
#define REG_VWIN_END_2_MSB REG(0x00, 0xc3) /* write */
#define REG_VWIN_END_2_LSB REG(0x00, 0xc4) /* write */
#define REG_DE_START_MSB REG(0x00, 0xc5) /* write */
#define REG_DE_START_LSB REG(0x00, 0xc6) /* write */
#define REG_DE_STOP_MSB REG(0x00, 0xc7) /* write */
#define REG_DE_STOP_LSB REG(0x00, 0xc8) /* write */
#define REG_TBG_CNTRL_0 REG(0x00, 0xca) /* write */
# define TBG_CNTRL_0_TOP_TGL (1 << 0)
# define TBG_CNTRL_0_TOP_SEL (1 << 1)
# define TBG_CNTRL_0_DE_EXT (1 << 2)
# define TBG_CNTRL_0_TOP_EXT (1 << 3)
# define TBG_CNTRL_0_FRAME_DIS (1 << 5)
# define TBG_CNTRL_0_SYNC_MTHD (1 << 6)
# define TBG_CNTRL_0_SYNC_ONCE (1 << 7)
#define REG_TBG_CNTRL_1 REG(0x00, 0xcb) /* write */
# define TBG_CNTRL_1_H_TGL (1 << 0)
# define TBG_CNTRL_1_V_TGL (1 << 1)
# define TBG_CNTRL_1_TGL_EN (1 << 2)
# define TBG_CNTRL_1_X_EXT (1 << 3)
# define TBG_CNTRL_1_H_EXT (1 << 4)
# define TBG_CNTRL_1_V_EXT (1 << 5)
# define TBG_CNTRL_1_DWIN_DIS (1 << 6)
#define REG_ENABLE_SPACE REG(0x00, 0xd6) /* write */
#define REG_HVF_CNTRL_0 REG(0x00, 0xe4) /* write */
# define HVF_CNTRL_0_SM (1 << 7)
# define HVF_CNTRL_0_RWB (1 << 6)
# define HVF_CNTRL_0_PREFIL(x) (((x) & 3) << 2)
# define HVF_CNTRL_0_INTPOL(x) (((x) & 3) << 0)
#define REG_HVF_CNTRL_1 REG(0x00, 0xe5) /* write */
# define HVF_CNTRL_1_FOR (1 << 0)
# define HVF_CNTRL_1_YUVBLK (1 << 1)
# define HVF_CNTRL_1_VQR(x) (((x) & 3) << 2)
# define HVF_CNTRL_1_PAD(x) (((x) & 3) << 4)
# define HVF_CNTRL_1_SEMI_PLANAR (1 << 6)
#define REG_RPT_CNTRL REG(0x00, 0xf0) /* write */
# define RPT_CNTRL_REPEAT(x) ((x) & 15)
#define REG_I2S_FORMAT REG(0x00, 0xfc) /* read/write */
# define I2S_FORMAT_PHILIPS (0 << 0)
# define I2S_FORMAT_LEFT_J (2 << 0)
# define I2S_FORMAT_RIGHT_J (3 << 0)
#define REG_AIP_CLKSEL REG(0x00, 0xfd) /* write */
# define AIP_CLKSEL_AIP_SPDIF (0 << 3)
# define AIP_CLKSEL_AIP_I2S (1 << 3)
# define AIP_CLKSEL_FS_ACLK (0 << 0)
# define AIP_CLKSEL_FS_MCLK (1 << 0)
# define AIP_CLKSEL_FS_FS64SPDIF (2 << 0)
/* Page 02h: PLL settings */
#define REG_PLL_SERIAL_1 REG(0x02, 0x00) /* read/write */
# define PLL_SERIAL_1_SRL_FDN (1 << 0)
# define PLL_SERIAL_1_SRL_IZ(x) (((x) & 3) << 1)
# define PLL_SERIAL_1_SRL_MAN_IZ (1 << 6)
#define REG_PLL_SERIAL_2 REG(0x02, 0x01) /* read/write */
# define PLL_SERIAL_2_SRL_NOSC(x) ((x) << 0)
# define PLL_SERIAL_2_SRL_PR(x) (((x) & 0xf) << 4)
#define REG_PLL_SERIAL_3 REG(0x02, 0x02) /* read/write */
# define PLL_SERIAL_3_SRL_CCIR (1 << 0)
# define PLL_SERIAL_3_SRL_DE (1 << 2)
# define PLL_SERIAL_3_SRL_PXIN_SEL (1 << 4)
#define REG_SERIALIZER REG(0x02, 0x03) /* read/write */
#define REG_BUFFER_OUT REG(0x02, 0x04) /* read/write */
#define REG_PLL_SCG1 REG(0x02, 0x05) /* read/write */
#define REG_PLL_SCG2 REG(0x02, 0x06) /* read/write */
#define REG_PLL_SCGN1 REG(0x02, 0x07) /* read/write */
#define REG_PLL_SCGN2 REG(0x02, 0x08) /* read/write */
#define REG_PLL_SCGR1 REG(0x02, 0x09) /* read/write */
#define REG_PLL_SCGR2 REG(0x02, 0x0a) /* read/write */
#define REG_AUDIO_DIV REG(0x02, 0x0e) /* read/write */
# define AUDIO_DIV_SERCLK_1 0
# define AUDIO_DIV_SERCLK_2 1
# define AUDIO_DIV_SERCLK_4 2
# define AUDIO_DIV_SERCLK_8 3
# define AUDIO_DIV_SERCLK_16 4
# define AUDIO_DIV_SERCLK_32 5
#define REG_SEL_CLK REG(0x02, 0x11) /* read/write */
# define SEL_CLK_SEL_CLK1 (1 << 0)
# define SEL_CLK_SEL_VRF_CLK(x) (((x) & 3) << 1)
# define SEL_CLK_ENA_SC_CLK (1 << 3)
#define REG_ANA_GENERAL REG(0x02, 0x12) /* read/write */
/* Page 09h: EDID Control */
#define REG_EDID_DATA_0 REG(0x09, 0x00) /* read */
/* next 127 successive registers are the EDID block */
#define REG_EDID_CTRL REG(0x09, 0xfa) /* read/write */
#define REG_DDC_ADDR REG(0x09, 0xfb) /* read/write */
#define REG_DDC_OFFS REG(0x09, 0xfc) /* read/write */
#define REG_DDC_SEGM_ADDR REG(0x09, 0xfd) /* read/write */
#define REG_DDC_SEGM REG(0x09, 0xfe) /* read/write */
/* Page 10h: information frames and packets */
#define REG_IF1_HB0 REG(0x10, 0x20) /* read/write */
#define REG_IF2_HB0 REG(0x10, 0x40) /* read/write */
#define REG_IF3_HB0 REG(0x10, 0x60) /* read/write */
#define REG_IF4_HB0 REG(0x10, 0x80) /* read/write */
#define REG_IF5_HB0 REG(0x10, 0xa0) /* read/write */
/* Page 11h: audio settings and content info packets */
#define REG_AIP_CNTRL_0 REG(0x11, 0x00) /* read/write */
# define AIP_CNTRL_0_RST_FIFO (1 << 0)
# define AIP_CNTRL_0_SWAP (1 << 1)
# define AIP_CNTRL_0_LAYOUT (1 << 2)
# define AIP_CNTRL_0_ACR_MAN (1 << 5)
# define AIP_CNTRL_0_RST_CTS (1 << 6)
#define REG_CA_I2S REG(0x11, 0x01) /* read/write */
# define CA_I2S_CA_I2S(x) (((x) & 31) << 0)
# define CA_I2S_HBR_CHSTAT (1 << 6)
#define REG_LATENCY_RD REG(0x11, 0x04) /* read/write */
#define REG_ACR_CTS_0 REG(0x11, 0x05) /* read/write */
#define REG_ACR_CTS_1 REG(0x11, 0x06) /* read/write */
#define REG_ACR_CTS_2 REG(0x11, 0x07) /* read/write */
#define REG_ACR_N_0 REG(0x11, 0x08) /* read/write */
#define REG_ACR_N_1 REG(0x11, 0x09) /* read/write */
#define REG_ACR_N_2 REG(0x11, 0x0a) /* read/write */
#define REG_CTS_N REG(0x11, 0x0c) /* read/write */
# define CTS_N_K(x) (((x) & 7) << 0)
# define CTS_N_M(x) (((x) & 3) << 4)
#define REG_ENC_CNTRL REG(0x11, 0x0d) /* read/write */
# define ENC_CNTRL_RST_ENC (1 << 0)
# define ENC_CNTRL_RST_SEL (1 << 1)
# define ENC_CNTRL_CTL_CODE(x) (((x) & 3) << 2)
#define REG_DIP_FLAGS REG(0x11, 0x0e) /* read/write */
# define DIP_FLAGS_ACR (1 << 0)
# define DIP_FLAGS_GC (1 << 1)
#define REG_DIP_IF_FLAGS REG(0x11, 0x0f) /* read/write */
# define DIP_IF_FLAGS_IF1 (1 << 1)
# define DIP_IF_FLAGS_IF2 (1 << 2)
# define DIP_IF_FLAGS_IF3 (1 << 3)
# define DIP_IF_FLAGS_IF4 (1 << 4)
# define DIP_IF_FLAGS_IF5 (1 << 5)
#define REG_CH_STAT_B(x) REG(0x11, 0x14 + (x)) /* read/write */
/* Page 12h: HDCP and OTP */
#define REG_TX3 REG(0x12, 0x9a) /* read/write */
#define REG_TX4 REG(0x12, 0x9b) /* read/write */
# define TX4_PD_RAM (1 << 1)
#define REG_TX33 REG(0x12, 0xb8) /* read/write */
# define TX33_HDMI (1 << 1)
/* Page 13h: Gamut related metadata packets */
/* CEC registers: (not paged)
*/
#define REG_CEC_INTSTATUS 0xee /* read */
# define CEC_INTSTATUS_CEC (1 << 0)
# define CEC_INTSTATUS_HDMI (1 << 1)
#define REG_CEC_CAL_XOSC_CTRL1 0xf2
# define CEC_CAL_XOSC_CTRL1_ENA_CAL BIT(0)
#define REG_CEC_DES_FREQ2 0xf5
# define CEC_DES_FREQ2_DIS_AUTOCAL BIT(7)
#define REG_CEC_CLK 0xf6
# define CEC_CLK_FRO 0x11
#define REG_CEC_FRO_IM_CLK_CTRL 0xfb /* read/write */
# define CEC_FRO_IM_CLK_CTRL_GHOST_DIS (1 << 7)
# define CEC_FRO_IM_CLK_CTRL_ENA_OTP (1 << 6)
# define CEC_FRO_IM_CLK_CTRL_IMCLK_SEL (1 << 1)
# define CEC_FRO_IM_CLK_CTRL_FRO_DIV (1 << 0)
#define REG_CEC_RXSHPDINTENA 0xfc /* read/write */
#define REG_CEC_RXSHPDINT 0xfd /* read */
# define CEC_RXSHPDINT_RXSENS BIT(0)
# define CEC_RXSHPDINT_HPD BIT(1)
#define REG_CEC_RXSHPDLEV 0xfe /* read */
# define CEC_RXSHPDLEV_RXSENS (1 << 0)
# define CEC_RXSHPDLEV_HPD (1 << 1)
#define REG_CEC_ENAMODS 0xff /* read/write */
# define CEC_ENAMODS_EN_CEC_CLK (1 << 7)
# define CEC_ENAMODS_DIS_FRO (1 << 6)
# define CEC_ENAMODS_DIS_CCLK (1 << 5)
# define CEC_ENAMODS_EN_RXSENS (1 << 2)
# define CEC_ENAMODS_EN_HDMI (1 << 1)
# define CEC_ENAMODS_EN_CEC (1 << 0)
/* Device versions: */
#define TDA9989N2 0x0101
#define TDA19989 0x0201
#define TDA19989N2 0x0202
#define TDA19988 0x0301
/*
 * Write a single byte to a CEC-side register.  The CEC registers are
 * unpaged and live behind a separate I2C address (cec_addr), but are
 * reached through the HDMI client's adapter.
 */
static void
cec_write(struct tda998x_priv *priv, u16 addr, u8 val)
{
	u8 buf[] = {addr, val};
	struct i2c_msg msg = {
		.addr = priv->cec_addr,
		.len = 2,
		.buf = buf,
	};
	int ret;
	ret = i2c_transfer(priv->hdmi->adapter, &msg, 1);
	if (ret < 0)
		dev_err(&priv->hdmi->dev, "Error %d writing to cec:0x%x\n",
			ret, addr);
}
/*
 * Read a single byte from a CEC-side register via a write-address /
 * read-byte I2C transaction pair.  On I2C failure the error is logged
 * and 0 is returned - callers cannot distinguish an error from a real
 * zero value.
 */
static u8
cec_read(struct tda998x_priv *priv, u8 addr)
{
	u8 val;
	struct i2c_msg msg[2] = {
		{
			.addr = priv->cec_addr,
			.len = 1,
			.buf = &addr,
		}, {
			.addr = priv->cec_addr,
			.flags = I2C_M_RD,
			.len = 1,
			.buf = &val,
		},
	};
	int ret;
	ret = i2c_transfer(priv->hdmi->adapter, msg, ARRAY_SIZE(msg));
	if (ret < 0) {
		dev_err(&priv->hdmi->dev, "Error %d reading from cec:0x%x\n",
			ret, addr);
		val = 0;
	}
	return val;
}
/* Set or clear bits in the CEC ENAMODS register (read-modify-write). */
static void cec_enamods(struct tda998x_priv *priv, u8 mods, bool enable)
{
	int val = cec_read(priv, REG_CEC_ENAMODS);
	/*
	 * NOTE(review): cec_read() returns u8 and reports errors as 0,
	 * so val can never be negative here - this check is dead code
	 * and a failed read silently writes back "mods only".
	 */
	if (val < 0)
		return;
	if (enable)
		val |= mods;
	else
		val &= ~mods;
	cec_write(priv, REG_CEC_ENAMODS, val);
}
/*
 * Enter or leave the CEC clock calibration mode: program the
 * (undocumented) 0xf3/0xf4 registers, enable automatic calibration
 * and switch the CEC clock to the free running oscillator.
 */
static void tda998x_cec_set_calibration(struct tda998x_priv *priv, bool enable)
{
	if (enable) {
		u8 val;
		/* assumed magic calibration setup - values from vendor code */
		cec_write(priv, 0xf3, 0xc0);
		cec_write(priv, 0xf4, 0xd4);
		/* Enable automatic calibration mode */
		val = cec_read(priv, REG_CEC_DES_FREQ2);
		val &= ~CEC_DES_FREQ2_DIS_AUTOCAL;
		cec_write(priv, REG_CEC_DES_FREQ2, val);
		/* Enable free running oscillator */
		cec_write(priv, REG_CEC_CLK, CEC_CLK_FRO);
		cec_enamods(priv, CEC_ENAMODS_DIS_FRO, false);
		cec_write(priv, REG_CEC_CAL_XOSC_CTRL1,
			  CEC_CAL_XOSC_CTRL1_ENA_CAL);
	} else {
		cec_write(priv, REG_CEC_CAL_XOSC_CTRL1, 0);
	}
}
/*
* Calibration for the internal oscillator: we need to set calibration mode,
* and then pulse the IRQ line low for a 10ms ± 1% period.
*/
static void tda998x_cec_calibration(struct tda998x_priv *priv)
{
struct gpio_desc *calib = priv->calib;
mutex_lock(&priv->edid_mutex);
if (priv->hdmi->irq > 0)
disable_irq(priv->hdmi->irq);
gpiod_direction_output(calib, 1);
tda998x_cec_set_calibration(priv, true);
local_irq_disable();
gpiod_set_value(calib, 0);
mdelay(10);
gpiod_set_value(calib, 1);
local_irq_enable();
tda998x_cec_set_calibration(priv, false);
gpiod_direction_input(calib);
if (priv->hdmi->irq > 0)
enable_irq(priv->hdmi->irq);
mutex_unlock(&priv->edid_mutex);
}
/*
 * TDA9950 glue init hook: claim the GPIO wired to the interrupt line,
 * which is needed to pulse the line during oscillator calibration.
 */
static int tda998x_cec_hook_init(void *data)
{
	struct tda998x_priv *priv = data;
	struct gpio_desc *calib;
	calib = gpiod_get(&priv->hdmi->dev, "nxp,calib", GPIOD_ASIS);
	if (IS_ERR(calib)) {
		dev_warn(&priv->hdmi->dev, "failed to get calibration gpio: %ld\n",
			 PTR_ERR(calib));
		return PTR_ERR(calib);
	}
	priv->calib = calib;
	return 0;
}
/* Glue exit hook: drop the calibration GPIO claimed at init time. */
static void tda998x_cec_hook_exit(void *data)
{
	struct tda998x_priv *priv = data;
	struct gpio_desc *calib = priv->calib;
	priv->calib = NULL;
	gpiod_put(calib);
}
/* Glue open hook: enable the CEC block and calibrate its clock. */
static int tda998x_cec_hook_open(void *data)
{
	struct tda998x_priv *priv = data;
	cec_enamods(priv, CEC_ENAMODS_EN_CEC_CLK | CEC_ENAMODS_EN_CEC, true);
	tda998x_cec_calibration(priv);
	return 0;
}
/* Glue release hook: disable the CEC block and its clock. */
static void tda998x_cec_hook_release(void *data)
{
	struct tda998x_priv *priv = data;
	cec_enamods(priv, CEC_ENAMODS_EN_CEC_CLK | CEC_ENAMODS_EN_CEC, false);
}
/*
 * Select the register page for a paged-register access, writing
 * CURPAGE only when the cached page differs.  Callers must hold
 * priv->mutex so the cached page matches the hardware.
 */
static int
set_page(struct tda998x_priv *priv, u16 reg)
{
	if (REG2PAGE(reg) != priv->current_page) {
		struct i2c_client *client = priv->hdmi;
		u8 buf[] = {
			REG_CURPAGE, REG2PAGE(reg)
		};
		int ret = i2c_master_send(client, buf, sizeof(buf));
		if (ret < 0) {
			dev_err(&client->dev, "%s %04x err %d\n", __func__,
				reg, ret);
			return ret;
		}
		priv->current_page = REG2PAGE(reg);
	}
	return 0;
}
/*
 * Read cnt bytes starting at a paged register into buf.  Takes
 * priv->mutex to make the page-select + address + read sequence
 * atomic with respect to other register accesses.  Returns the
 * i2c_master_recv() result (bytes read) or a negative error code.
 */
static int
reg_read_range(struct tda998x_priv *priv, u16 reg, char *buf, int cnt)
{
	struct i2c_client *client = priv->hdmi;
	u8 addr = REG2ADDR(reg);
	int ret;
	mutex_lock(&priv->mutex);
	ret = set_page(priv, reg);
	if (ret < 0)
		goto out;
	ret = i2c_master_send(client, &addr, sizeof(addr));
	if (ret < 0)
		goto fail;
	ret = i2c_master_recv(client, buf, cnt);
	if (ret < 0)
		goto fail;
	goto out;
fail:
	dev_err(&client->dev, "Error %d reading from 0x%x\n", ret, reg);
out:
	mutex_unlock(&priv->mutex);
	return ret;
}
#define MAX_WRITE_RANGE_BUF 32
/*
 * Write cnt bytes to consecutive paged registers starting at reg.
 * The payload is limited to MAX_WRITE_RANGE_BUF bytes because it is
 * staged in a fixed stack buffer behind the register address byte.
 * Errors are logged but not returned.
 */
static void
reg_write_range(struct tda998x_priv *priv, u16 reg, u8 *p, int cnt)
{
	struct i2c_client *client = priv->hdmi;
	/* This is the maximum size of the buffer passed in */
	u8 buf[MAX_WRITE_RANGE_BUF + 1];
	int ret;
	if (cnt > MAX_WRITE_RANGE_BUF) {
		dev_err(&client->dev, "Fixed write buffer too small (%d)\n",
			MAX_WRITE_RANGE_BUF);
		return;
	}
	buf[0] = REG2ADDR(reg);
	memcpy(&buf[1], p, cnt);
	mutex_lock(&priv->mutex);
	ret = set_page(priv, reg);
	if (ret < 0)
		goto out;
	ret = i2c_master_send(client, buf, cnt + 1);
	if (ret < 0)
		dev_err(&client->dev, "Error %d writing to 0x%x\n", ret, reg);
out:
	mutex_unlock(&priv->mutex);
}
/* Read one paged register; returns the byte value or a negative errno. */
static int
reg_read(struct tda998x_priv *priv, u16 reg)
{
	u8 byte = 0;
	int err = reg_read_range(priv, reg, &byte, sizeof(byte));
	return (err < 0) ? err : byte;
}
/*
 * Write one byte to a paged register under priv->mutex; errors are
 * logged but not returned.
 */
static void
reg_write(struct tda998x_priv *priv, u16 reg, u8 val)
{
	struct i2c_client *client = priv->hdmi;
	u8 buf[] = {REG2ADDR(reg), val};
	int ret;
	mutex_lock(&priv->mutex);
	ret = set_page(priv, reg);
	if (ret < 0)
		goto out;
	ret = i2c_master_send(client, buf, sizeof(buf));
	if (ret < 0)
		dev_err(&client->dev, "Error %d writing to 0x%x\n", ret, reg);
out:
	mutex_unlock(&priv->mutex);
}
/*
 * Write a 16-bit value, MSB first, to two consecutive paged registers
 * in a single I2C transfer; errors are logged but not returned.
 */
static void
reg_write16(struct tda998x_priv *priv, u16 reg, u16 val)
{
	struct i2c_client *client = priv->hdmi;
	u8 buf[] = {REG2ADDR(reg), val >> 8, val};
	int ret;
	mutex_lock(&priv->mutex);
	ret = set_page(priv, reg);
	if (ret < 0)
		goto out;
	ret = i2c_master_send(client, buf, sizeof(buf));
	if (ret < 0)
		dev_err(&client->dev, "Error %d writing to 0x%x\n", ret, reg);
out:
	mutex_unlock(&priv->mutex);
}
/* Read-modify-write: set bits in a paged register (no-op on read error). */
static void
reg_set(struct tda998x_priv *priv, u16 reg, u8 val)
{
	int cur = reg_read(priv, reg);
	if (cur < 0)
		return;
	reg_write(priv, reg, cur | val);
}
/* Read-modify-write: clear bits in a paged register (no-op on read error). */
static void
reg_clear(struct tda998x_priv *priv, u16 reg, u8 val)
{
	int cur = reg_read(priv, reg);
	if (cur < 0)
		return;
	reg_write(priv, reg, cur & ~val);
}
/*
 * Hard-reset the transmitter and load the common PLL/serializer
 * defaults.  The exact register values and ordering follow the
 * vendor-recommended bring-up sequence - do not reorder.
 */
static void
tda998x_reset(struct tda998x_priv *priv)
{
	/* reset audio and i2c master: */
	reg_write(priv, REG_SOFTRESET, SOFTRESET_AUDIO | SOFTRESET_I2C_MASTER);
	msleep(50);
	reg_write(priv, REG_SOFTRESET, 0);
	msleep(50);
	/* reset transmitter: */
	reg_set(priv, REG_MAIN_CNTRL0, MAIN_CNTRL0_SR);
	reg_clear(priv, REG_MAIN_CNTRL0, MAIN_CNTRL0_SR);
	/* PLL registers common configuration */
	reg_write(priv, REG_PLL_SERIAL_1, 0x00);
	reg_write(priv, REG_PLL_SERIAL_2, PLL_SERIAL_2_SRL_NOSC(1));
	reg_write(priv, REG_PLL_SERIAL_3, 0x00);
	reg_write(priv, REG_SERIALIZER, 0x00);
	reg_write(priv, REG_BUFFER_OUT, 0x00);
	reg_write(priv, REG_PLL_SCG1, 0x00);
	reg_write(priv, REG_AUDIO_DIV, AUDIO_DIV_SERCLK_8);
	reg_write(priv, REG_SEL_CLK, SEL_CLK_SEL_CLK1 | SEL_CLK_ENA_SC_CLK);
	reg_write(priv, REG_PLL_SCGN1, 0xfa);
	reg_write(priv, REG_PLL_SCGN2, 0x00);
	reg_write(priv, REG_PLL_SCGR1, 0x5b);
	reg_write(priv, REG_PLL_SCGR2, 0x00);
	reg_write(priv, REG_PLL_SCG2, 0x10);
	/* Write the default value MUX register */
	reg_write(priv, REG_MUX_VP_VIP_OUT, 0x24);
}
/*
* The TDA998x has a problem when trying to read the EDID close to a
* HPD assertion: it needs a delay of 100ms to avoid timing out while
* trying to read EDID data.
*
* However, tda998x_connector_get_modes() may be called at any moment
* after tda998x_connector_detect() indicates that we are connected, so
* we need to delay probing modes in tda998x_connector_get_modes() after
* we have seen a HPD inactive->active transition. This code implements
* that delay.
*/
/*
 * Timer callback: the post-HPD EDID settling delay has elapsed.
 * Wake anyone waiting in tda998x_edid_delay_wait() and kick the
 * hotplug work so userspace re-probes modes.
 */
static void tda998x_edid_delay_done(struct timer_list *t)
{
	struct tda998x_priv *priv = from_timer(priv, t, edid_delay_timer);
	priv->edid_delay_active = false;
	wake_up(&priv->edid_delay_waitq);
	schedule_work(&priv->detect_work);
}
/* Arm the 100ms (HZ/10) EDID settling delay after a HPD assertion. */
static void tda998x_edid_delay_start(struct tda998x_priv *priv)
{
	priv->edid_delay_active = true;
	mod_timer(&priv->edid_delay_timer, jiffies + HZ/10);
}
/* Sleep (killably) until the EDID settling delay has expired. */
static int tda998x_edid_delay_wait(struct tda998x_priv *priv)
{
	return wait_event_killable(priv->edid_delay_waitq, !priv->edid_delay_active);
}
/*
* We need to run the KMS hotplug event helper outside of our threaded
* interrupt routine as this can call back into our get_modes method,
* which will want to make use of interrupts.
*/
/*
 * Deferred hotplug notification; runs in process context because the
 * KMS helper may call back into get_modes, which needs interrupts.
 */
static void tda998x_detect_work(struct work_struct *work)
{
	struct tda998x_priv *priv =
		container_of(work, struct tda998x_priv, detect_work);
	struct drm_device *dev = priv->connector.dev;
	/* connector may not be attached to a DRM device yet */
	if (dev)
		drm_kms_helper_hotplug_event(dev);
}
/*
* only 2 interrupts may occur: screen plug/unplug and EDID read
*/
static irqreturn_t tda998x_irq_thread(int irq, void *data)
{
	struct tda998x_priv *priv = data;
	u8 sta, cec, lvl, flag0, flag1, flag2;
	bool handled = false;
	sta = cec_read(priv, REG_CEC_INTSTATUS);
	if (sta & CEC_INTSTATUS_HDMI) {
		/* Reading these registers also clears the interrupt flags */
		cec = cec_read(priv, REG_CEC_RXSHPDINT);
		lvl = cec_read(priv, REG_CEC_RXSHPDLEV);
		flag0 = reg_read(priv, REG_INT_FLAGS_0);
		flag1 = reg_read(priv, REG_INT_FLAGS_1);
		flag2 = reg_read(priv, REG_INT_FLAGS_2);
		DRM_DEBUG_DRIVER(
			"tda irq sta %02x cec %02x lvl %02x f0 %02x f1 %02x f2 %02x\n",
			sta, cec, lvl, flag0, flag1, flag2);
		if (cec & CEC_RXSHPDINT_HPD) {
			/* HPD asserted: delay EDID reads; deasserted: notify */
			if (lvl & CEC_RXSHPDLEV_HPD) {
				tda998x_edid_delay_start(priv);
			} else {
				schedule_work(&priv->detect_work);
				cec_notifier_phys_addr_invalidate(
						priv->cec_notify);
			}
			handled = true;
		}
		/* EDID block read complete: wake the reader */
		if ((flag2 & INT_FLAGS_2_EDID_BLK_RD) && priv->wq_edid_wait) {
			priv->wq_edid_wait = 0;
			wake_up(&priv->wq_edid);
			handled = true;
		}
	}
	return IRQ_RETVAL(handled);
}
/*
 * Pack an infoframe and load it into one of the chip's infoframe
 * buffers.  The corresponding DIP_IF_FLAGS bit is cleared while the
 * buffer is updated and set again to (re)enable transmission.
 */
static void
tda998x_write_if(struct tda998x_priv *priv, u8 bit, u16 addr,
		 union hdmi_infoframe *frame)
{
	u8 buf[MAX_WRITE_RANGE_BUF];
	ssize_t len;
	len = hdmi_infoframe_pack(frame, buf, sizeof(buf));
	if (len < 0) {
		dev_err(&priv->hdmi->dev,
			"hdmi_infoframe_pack() type=0x%02x failed: %zd\n",
			frame->any.type, len);
		return;
	}
	reg_clear(priv, REG_DIP_IF_FLAGS, bit);
	reg_write_range(priv, addr, buf, len);
	reg_set(priv, REG_DIP_IF_FLAGS, bit);
}
/* Send the audio infoframe via infoframe buffer 4. */
static void tda998x_write_aif(struct tda998x_priv *priv,
			      const struct hdmi_audio_infoframe *cea)
{
	union hdmi_infoframe frame;
	frame.audio = *cea;
	tda998x_write_if(priv, DIP_IF_FLAGS_IF4, REG_IF4_HB0, &frame);
}
/*
 * Build and send the AVI infoframe for the given mode.  The
 * quantization range is first forced to FULL and then adjusted by
 * drm_hdmi_avi_infoframe_quant_range() according to the configured
 * rgb_quant_range and the sink's capabilities.
 */
static void
tda998x_write_avi(struct tda998x_priv *priv, const struct drm_display_mode *mode)
{
	union hdmi_infoframe frame;
	drm_hdmi_avi_infoframe_from_display_mode(&frame.avi,
						 &priv->connector, mode);
	frame.avi.quantization_range = HDMI_QUANTIZATION_RANGE_FULL;
	drm_hdmi_avi_infoframe_quant_range(&frame.avi, &priv->connector, mode,
					   priv->rgb_quant_range);
	tda998x_write_if(priv, DIP_IF_FLAGS_IF2, REG_IF2_HB0, &frame);
}
/*
 * Send the HDMI vendor-specific infoframe for the given mode, or
 * disable its buffer when the mode has no vendor infoframe (e.g. not
 * a 4k/3D mode).
 */
static void tda998x_write_vsi(struct tda998x_priv *priv,
			      const struct drm_display_mode *mode)
{
	union hdmi_infoframe frame;
	if (drm_hdmi_vendor_infoframe_from_display_mode(&frame.vendor.hdmi,
							&priv->connector,
							mode))
		reg_clear(priv, REG_DIP_IF_FLAGS, DIP_IF_FLAGS_IF1);
	else
		tda998x_write_if(priv, DIP_IF_FLAGS_IF1, REG_IF1_HB0, &frame);
}
/* Audio support */
/* Register settings selecting each of the two audio input routes. */
static const struct tda998x_audio_route tda998x_audio_route[AUDIO_ROUTE_NUM] = {
	[AUDIO_ROUTE_I2S] = {
		.ena_aclk = 1,
		.mux_ap = MUX_AP_SELECT_I2S,
		.aip_clksel = AIP_CLKSEL_AIP_I2S | AIP_CLKSEL_FS_ACLK,
	},
	[AUDIO_ROUTE_SPDIF] = {
		.ena_aclk = 0,
		.mux_ap = MUX_AP_SELECT_SPDIF,
		.aip_clksel = AIP_CLKSEL_AIP_SPDIF | AIP_CLKSEL_FS_FS64SPDIF,
	},
};
/* Configure the TDA998x audio data and clock routing. */
/* Configure the TDA998x audio data and clock routing. */
static int tda998x_derive_routing(struct tda998x_priv *priv,
				  struct tda998x_audio_settings *s,
				  unsigned int route)
{
	s->route = &tda998x_audio_route[route];
	/* Audio port enable value comes from platform/DT configuration */
	s->ena_ap = priv->audio_port_enable[route];
	if (s->ena_ap == 0) {
		dev_err(&priv->hdmi->dev, "no audio configuration found\n");
		return -EINVAL;
	}
	return 0;
}
/*
* The audio clock divisor register controls a divider producing Audio_Clk_Out
* from SERclk by dividing it by 2^n where 0 <= n <= 5. We don't know what
* Audio_Clk_Out or SERclk are. We guess SERclk is the same as TMDS clock.
*
* It seems that Audio_Clk_Out must be the smallest value that is greater
* than 128*fs, otherwise audio does not function. There is some suggestion
* that 126*fs is a better value.
*/
/*
 * Pick the audio clock divider exponent: the largest adiv (up to 5)
 * for which SERclk / 2^adiv still exceeds 128*fs.  If no exponent
 * satisfies this, the loop falls through to AUDIO_DIV_SERCLK_1.
 * priv->tmds_clock is in kHz, hence the * 1000.
 */
static u8 tda998x_get_adiv(struct tda998x_priv *priv, unsigned int fs)
{
	unsigned long min_audio_clk = fs * 128;
	unsigned long ser_clk = priv->tmds_clock * 1000;
	u8 adiv;
	for (adiv = AUDIO_DIV_SERCLK_32; adiv != AUDIO_DIV_SERCLK_1; adiv--)
		if (ser_clk > min_audio_clk << adiv)
			break;
	dev_dbg(&priv->hdmi->dev,
		"ser_clk=%luHz fs=%uHz min_aclk=%luHz adiv=%d\n",
		ser_clk, fs, min_audio_clk, adiv);
	return adiv;
}
/*
* In auto-CTS mode, the TDA998x uses a "measured time stamp" counter to
* generate the CTS value. It appears that the "measured time stamp" is
* the number of TDMS clock cycles within a number of audio input clock
* cycles defined by the k and N parameters defined below, in a similar
* way to that which is set out in the CTS generation in the HDMI spec.
*
* tmdsclk ----> mts -> /m ---> CTS
* ^
* sclk -> /k -> /N
*
* CTS = mts / m, where m is 2^M.
* /k is a divider based on the K value below, K+1 for K < 4, or 8 for K >= 4
* /N is a divider based on the HDMI specified N value.
*
* This produces the following equation:
* CTS = tmds_clock * k * N / (sclk * m)
*
* When combined with the sink-side equation, and realising that sclk is
* bclk_ratio * fs, we end up with:
* k = m * bclk_ratio / 128.
*
* Note: S/PDIF always uses a bclk_ratio of 64.
*/
/*
 * Derive the CTS_N register value (the M and K divider fields) from the
 * bit-clock ratio, per the derivation in the comment above. Supported
 * ratios are 16/32/48/64 (M=3, K=ratio/16-1) and 128 (M=0, K=0).
 * Returns 0 on success, -EINVAL for an unsupported ratio.
 */
static int tda998x_derive_cts_n(struct tda998x_priv *priv,
				struct tda998x_audio_settings *settings,
				unsigned int ratio)
{
	if (ratio == 128) {
		settings->cts_n = CTS_N_M(0) | CTS_N_K(0);
		return 0;
	}
	if (ratio == 16 || ratio == 32 || ratio == 48 || ratio == 64) {
		/* K encodes ratio/16 - 1: 16->0, 32->1, 48->2, 64->3 */
		settings->cts_n = CTS_N_M(3) | CTS_N_K(ratio / 16 - 1);
		return 0;
	}
	dev_err(&priv->hdmi->dev, "unsupported bclk ratio %ufs\n",
		ratio);
	return -EINVAL;
}
/*
 * Mute (@on = true) or unmute the audio path. Muting pulses the audio
 * soft-reset and then holds the audio input FIFO in reset; unmuting
 * just releases the FIFO reset.
 */
static void tda998x_audio_mute(struct tda998x_priv *priv, bool on)
{
	if (on) {
		reg_set(priv, REG_SOFTRESET, SOFTRESET_AUDIO);
		reg_clear(priv, REG_SOFTRESET, SOFTRESET_AUDIO);
		reg_set(priv, REG_AIP_CNTRL_0, AIP_CNTRL_0_RST_FIFO);
	} else {
		reg_clear(priv, REG_AIP_CNTRL_0, AIP_CNTRL_0_RST_FIFO);
	}
}
/*
 * Program the audio path from priv->audio: port enables, routing mux,
 * I2S format, clock divisor, auto-CTS parameters, channel status, and
 * finally the audio infoframe. Caller must hold priv->audio_mutex.
 * No-op when no audio configuration has been set up (ena_ap == 0).
 */
static void tda998x_configure_audio(struct tda998x_priv *priv)
{
	const struct tda998x_audio_settings *settings = &priv->audio;
	u8 buf[6], adiv;
	u32 n;
	/* If audio is not configured, there is nothing to do. */
	if (settings->ena_ap == 0)
		return;
	adiv = tda998x_get_adiv(priv, settings->sample_rate);
	/* Enable audio ports */
	reg_write(priv, REG_ENA_AP, settings->ena_ap);
	reg_write(priv, REG_ENA_ACLK, settings->route->ena_aclk);
	reg_write(priv, REG_MUX_AP, settings->route->mux_ap);
	reg_write(priv, REG_I2S_FORMAT, settings->i2s_format);
	reg_write(priv, REG_AIP_CLKSEL, settings->route->aip_clksel);
	reg_clear(priv, REG_AIP_CNTRL_0, AIP_CNTRL_0_LAYOUT |
					AIP_CNTRL_0_ACR_MAN);	/* auto CTS */
	reg_write(priv, REG_CTS_N, settings->cts_n);
	reg_write(priv, REG_AUDIO_DIV, adiv);
	/*
	 * This is the approximate value of N, which happens to be
	 * the recommended values for non-coherent clocks.
	 */
	n = 128 * settings->sample_rate / 1000;
	/* Write the CTS and N values */
	buf[0] = 0x44;
	buf[1] = 0x42;
	buf[2] = 0x01;
	buf[3] = n;
	buf[4] = n >> 8;
	buf[5] = n >> 16;
	reg_write_range(priv, REG_ACR_CTS_0, buf, 6);
	/* Reset CTS generator */
	reg_set(priv, REG_AIP_CNTRL_0, AIP_CNTRL_0_RST_CTS);
	reg_clear(priv, REG_AIP_CNTRL_0, AIP_CNTRL_0_RST_CTS);
	/* Write the channel status
	 * The REG_CH_STAT_B-registers skip IEC958 AES2 byte, because
	 * there is a separate register for each I2S wire.
	 */
	buf[0] = settings->status[0];
	buf[1] = settings->status[1];
	buf[2] = settings->status[3];
	buf[3] = settings->status[4];
	reg_write_range(priv, REG_CH_STAT_B(0), buf, 4);
	/* Mute, let the FIFO settle, then unmute and send the infoframe */
	tda998x_audio_mute(priv, true);
	msleep(20);
	tda998x_audio_mute(priv, false);
	tda998x_write_aif(priv, &settings->cea);
}
/*
 * hdmi-codec hw_params callback: validate the DAI format, derive the
 * routing and CTS/N settings into a local tda998x_audio_settings, then
 * commit it under audio_mutex. The hardware is only touched when the
 * mode-set has established infoframe support and the sink has audio;
 * otherwise the settings are applied later by the mode_set hook.
 * Returns 0 on success or -EINVAL for unsupported formats/flags/ratios.
 */
static int tda998x_audio_hw_params(struct device *dev, void *data,
				   struct hdmi_codec_daifmt *daifmt,
				   struct hdmi_codec_params *params)
{
	struct tda998x_priv *priv = dev_get_drvdata(dev);
	unsigned int bclk_ratio;
	bool spdif = daifmt->fmt == HDMI_SPDIF;
	int ret;
	struct tda998x_audio_settings audio = {
		.sample_rate = params->sample_rate,
		.cea = params->cea,
	};
	memcpy(audio.status, params->iec.status,
	       min(sizeof(audio.status), sizeof(params->iec.status)));
	switch (daifmt->fmt) {
	case HDMI_I2S:
		audio.i2s_format = I2S_FORMAT_PHILIPS;
		break;
	case HDMI_LEFT_J:
		audio.i2s_format = I2S_FORMAT_LEFT_J;
		break;
	case HDMI_RIGHT_J:
		audio.i2s_format = I2S_FORMAT_RIGHT_J;
		break;
	case HDMI_SPDIF:
		audio.i2s_format = 0;
		break;
	default:
		dev_err(dev, "%s: Invalid format %d\n", __func__, daifmt->fmt);
		return -EINVAL;
	}
	/* The TDA998x can only be a clock/frame consumer with normal polarity */
	if (!spdif &&
	    (daifmt->bit_clk_inv || daifmt->frame_clk_inv ||
	     daifmt->bit_clk_provider || daifmt->frame_clk_provider)) {
		dev_err(dev, "%s: Bad flags %d %d %d %d\n", __func__,
			daifmt->bit_clk_inv, daifmt->frame_clk_inv,
			daifmt->bit_clk_provider,
			daifmt->frame_clk_provider);
		return -EINVAL;
	}
	ret = tda998x_derive_routing(priv, &audio, AUDIO_ROUTE_I2S + spdif);
	if (ret < 0)
		return ret;
	/* S/PDIF always uses a 64fs bit clock; I2S uses 2 * sample width */
	bclk_ratio = spdif ? 64 : params->sample_width * 2;
	ret = tda998x_derive_cts_n(priv, &audio, bclk_ratio);
	if (ret < 0)
		return ret;
	mutex_lock(&priv->audio_mutex);
	priv->audio = audio;
	if (priv->supports_infoframes && priv->sink_has_audio)
		tda998x_configure_audio(priv);
	mutex_unlock(&priv->audio_mutex);
	return 0;
}
/*
 * hdmi-codec shutdown callback: disable all audio ports and mark the
 * stored configuration inactive (ena_ap = 0) so later mode-sets don't
 * re-enable audio until hw_params runs again.
 */
static void tda998x_audio_shutdown(struct device *dev, void *data)
{
	struct tda998x_priv *priv = dev_get_drvdata(dev);
	mutex_lock(&priv->audio_mutex);
	reg_write(priv, REG_ENA_AP, 0);
	priv->audio.ena_ap = 0;
	mutex_unlock(&priv->audio_mutex);
}
/*
 * hdmi-codec mute callback (playback only; capture is disabled via
 * no_capture_mute in audio_codec_ops). Always succeeds.
 */
static int tda998x_audio_mute_stream(struct device *dev, void *data,
				     bool enable, int direction)
{
	struct tda998x_priv *priv = dev_get_drvdata(dev);
	mutex_lock(&priv->audio_mutex);
	tda998x_audio_mute(priv, enable);
	mutex_unlock(&priv->audio_mutex);
	return 0;
}
/*
 * hdmi-codec get_eld callback: copy the connector's cached ELD into
 * @buf, truncated to min(eld size, @len). audio_mutex guards against a
 * concurrent EDID update from get_modes.
 */
static int tda998x_audio_get_eld(struct device *dev, void *data,
				 uint8_t *buf, size_t len)
{
	struct tda998x_priv *priv = dev_get_drvdata(dev);
	mutex_lock(&priv->audio_mutex);
	memcpy(buf, priv->connector.eld,
	       min(sizeof(priv->connector.eld), len));
	mutex_unlock(&priv->audio_mutex);
	return 0;
}
/* Callbacks handed to the hdmi-codec platform device (playback only). */
static const struct hdmi_codec_ops audio_codec_ops = {
	.hw_params = tda998x_audio_hw_params,
	.audio_shutdown = tda998x_audio_shutdown,
	.mute_stream = tda998x_audio_mute_stream,
	.get_eld = tda998x_audio_get_eld,
	.no_capture_mute = 1,
};
/*
 * Register the hdmi-codec platform device that exposes the TDA998x as
 * an ASoC codec. I2S/SPDIF capability flags mirror which audio ports
 * were configured in DT. Returns 0 or a registration error.
 */
static int tda998x_audio_codec_init(struct tda998x_priv *priv,
				    struct device *dev)
{
	struct hdmi_codec_pdata codec_data = {
		.ops = &audio_codec_ops,
		.max_i2s_channels = 2,
		.no_i2s_capture = 1,
		.no_spdif_capture = 1,
	};
	if (priv->audio_port_enable[AUDIO_ROUTE_I2S])
		codec_data.i2s = 1;
	if (priv->audio_port_enable[AUDIO_ROUTE_SPDIF])
		codec_data.spdif = 1;
	priv->audio_pdev = platform_device_register_data(
		dev, HDMI_CODEC_DRV_NAME, PLATFORM_DEVID_AUTO,
		&codec_data, sizeof(codec_data));
	return PTR_ERR_OR_ZERO(priv->audio_pdev);
}
/* DRM connector functions */
/* DRM connector functions */
/* Report connector status from the CEC block's hot-plug level bit. */
static enum drm_connector_status
tda998x_connector_detect(struct drm_connector *connector, bool force)
{
	struct tda998x_priv *priv = conn_to_tda998x_priv(connector);
	u8 val = cec_read(priv, REG_CEC_RXSHPDLEV);
	return (val & CEC_RXSHPDLEV_HPD) ? connector_status_connected :
			connector_status_disconnected;
}
/* Connector .destroy: just release the DRM connector resources. */
static void tda998x_connector_destroy(struct drm_connector *connector)
{
	drm_connector_cleanup(connector);
}
/* Atomic-helper based connector funcs; detect uses the HPD level bit. */
static const struct drm_connector_funcs tda998x_connector_funcs = {
	.reset = drm_atomic_helper_connector_reset,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.detect = tda998x_connector_detect,
	.destroy = tda998x_connector_destroy,
	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
/*
 * drm_do_get_edid() block-read callback: fetch one 128-byte EDID block
 * using the TDA998x's built-in DDC master. Completion is signalled
 * either by the EDID interrupt (when an IRQ line is wired up) or by
 * polling INT_FLAGS_2. Returns 0 on success or a negative errno.
 */
static int read_edid_block(void *data, u8 *buf, unsigned int blk, size_t length)
{
	struct tda998x_priv *priv = data;
	u8 offset, segptr;
	int ret, i;
	/* EDID addressing: two 128-byte blocks per 256-byte segment */
	offset = (blk & 1) ? 128 : 0;
	segptr = blk / 2;
	mutex_lock(&priv->edid_mutex);
	reg_write(priv, REG_DDC_ADDR, 0xa0);
	reg_write(priv, REG_DDC_OFFS, offset);
	reg_write(priv, REG_DDC_SEGM_ADDR, 0x60);
	reg_write(priv, REG_DDC_SEGM, segptr);
	/* enable reading EDID: */
	priv->wq_edid_wait = 1;
	reg_write(priv, REG_EDID_CTRL, 0x1);
	/* flag must be cleared by sw: */
	reg_write(priv, REG_EDID_CTRL, 0x0);
	/* wait for block read to complete: */
	if (priv->hdmi->irq) {
		i = wait_event_timeout(priv->wq_edid,
					!priv->wq_edid_wait,
					msecs_to_jiffies(100));
		if (i < 0) {
			dev_err(&priv->hdmi->dev, "read edid wait err %d\n", i);
			ret = i;
			goto failed;
		}
	} else {
		/* no IRQ: poll the EDID-block-read flag for up to ~100ms */
		for (i = 100; i > 0; i--) {
			msleep(1);
			ret = reg_read(priv, REG_INT_FLAGS_2);
			if (ret < 0)
				goto failed;
			if (ret & INT_FLAGS_2_EDID_BLK_RD)
				break;
		}
	}
	/* i == 0 in either branch means we ran out of time */
	if (i == 0) {
		dev_err(&priv->hdmi->dev, "read edid timeout\n");
		ret = -ETIMEDOUT;
		goto failed;
	}
	ret = reg_read_range(priv, REG_EDID_DATA_0, buf, length);
	if (ret != length) {
		dev_err(&priv->hdmi->dev, "failed to read edid block %d: %d\n",
			blk, ret);
		goto failed;
	}
	ret = 0;
failed:
	mutex_unlock(&priv->edid_mutex);
	return ret;
}
/*
 * Connector .get_modes: read the EDID (powering the TDA19988's EDID
 * RAM around the read), publish it to the EDID property and the CEC
 * notifier, and add the resulting modes. sink_has_audio is updated
 * under audio_mutex because the audio path reads it. Returns the
 * number of modes added (0 on failure or if interrupted).
 */
static int tda998x_connector_get_modes(struct drm_connector *connector)
{
	struct tda998x_priv *priv = conn_to_tda998x_priv(connector);
	struct edid *edid;
	int n;
	/*
	 * If we get killed while waiting for the HPD timeout, return
	 * no modes found: we are not in a restartable path, so we
	 * can't handle signals gracefully.
	 */
	if (tda998x_edid_delay_wait(priv))
		return 0;
	if (priv->rev == TDA19988)
		reg_clear(priv, REG_TX4, TX4_PD_RAM);
	edid = drm_do_get_edid(connector, read_edid_block, priv);
	if (priv->rev == TDA19988)
		reg_set(priv, REG_TX4, TX4_PD_RAM);
	if (!edid) {
		dev_warn(&priv->hdmi->dev, "failed to read EDID\n");
		return 0;
	}
	drm_connector_update_edid_property(connector, edid);
	cec_notifier_set_phys_addr_from_edid(priv->cec_notify, edid);
	mutex_lock(&priv->audio_mutex);
	n = drm_add_edid_modes(connector, edid);
	priv->sink_has_audio = drm_detect_monitor_audio(edid);
	mutex_unlock(&priv->audio_mutex);
	kfree(edid);
	return n;
}
/* Connector .best_encoder: the encoder our bridge is attached to. */
static struct drm_encoder *
tda998x_connector_best_encoder(struct drm_connector *connector)
{
	struct tda998x_priv *priv = conn_to_tda998x_priv(connector);
	return priv->bridge.encoder;
}
/* Probe-helper callbacks for the connector. */
static
const struct drm_connector_helper_funcs tda998x_connector_helper_funcs = {
	.get_modes = tda998x_connector_get_modes,
	.best_encoder = tda998x_connector_best_encoder,
};
/*
 * Create and register the HDMI-A connector and attach it to the
 * bridge's encoder. With an IRQ we rely on hardware HPD; without one
 * the connector falls back to periodic connect/disconnect polling.
 * Returns 0 or a drm_connector_init() error.
 */
static int tda998x_connector_init(struct tda998x_priv *priv,
				  struct drm_device *drm)
{
	struct drm_connector *connector = &priv->connector;
	int ret;
	connector->interlace_allowed = 1;
	if (priv->hdmi->irq)
		connector->polled = DRM_CONNECTOR_POLL_HPD;
	else
		connector->polled = DRM_CONNECTOR_POLL_CONNECT |
			DRM_CONNECTOR_POLL_DISCONNECT;
	drm_connector_helper_add(connector, &tda998x_connector_helper_funcs);
	ret = drm_connector_init(drm, connector, &tda998x_connector_funcs,
				 DRM_MODE_CONNECTOR_HDMIA);
	if (ret)
		return ret;
	drm_connector_attach_encoder(&priv->connector,
				     priv->bridge.encoder);
	return 0;
}
/* DRM bridge functions */
/* DRM bridge functions */
/*
 * Bridge .attach: this driver still creates its own connector, so it
 * refuses DRM_BRIDGE_ATTACH_NO_CONNECTOR and otherwise registers the
 * connector on the bridge's DRM device.
 */
static int tda998x_bridge_attach(struct drm_bridge *bridge,
				 enum drm_bridge_attach_flags flags)
{
	struct tda998x_priv *priv = bridge_to_tda998x_priv(bridge);
	if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
		DRM_ERROR("Fix bridge driver to make connector optional!");
		return -EINVAL;
	}
	return tda998x_connector_init(priv, bridge->dev);
}
/* Bridge .detach: tear down the connector created in .attach. */
static void tda998x_bridge_detach(struct drm_bridge *bridge)
{
	struct tda998x_priv *priv = bridge_to_tda998x_priv(bridge);
	drm_connector_cleanup(&priv->connector);
}
/*
 * Bridge .mode_valid: reject modes whose dotclock exceeds the chip
 * limit (165MHz on TDA19988, 150MHz otherwise) or whose totals do not
 * fit the 13-bit horizontal / 11-bit vertical timing registers.
 */
static enum drm_mode_status tda998x_bridge_mode_valid(struct drm_bridge *bridge,
				     const struct drm_display_info *info,
				     const struct drm_display_mode *mode)
{
	struct tda998x_priv *priv = bridge_to_tda998x_priv(bridge);
	int max_clock = 150000;

	/* TDA19988 dotclock can go up to 165MHz */
	if (priv->rev == TDA19988)
		max_clock = 165000;
	if (mode->clock > max_clock)
		return MODE_CLOCK_HIGH;
	if (mode->htotal >= BIT(13))
		return MODE_BAD_HVALUE;
	if (mode->vtotal >= BIT(11))
		return MODE_BAD_VVALUE;
	return MODE_OK;
}
/*
 * Bridge .enable: enable the three video input ports and program the
 * input muxing (vip_cntrl_*). Audio is enabled separately by the audio
 * path. Idempotent via priv->is_on.
 */
static void tda998x_bridge_enable(struct drm_bridge *bridge)
{
	struct tda998x_priv *priv = bridge_to_tda998x_priv(bridge);
	if (!priv->is_on) {
		/* enable video ports, audio will be enabled later */
		reg_write(priv, REG_ENA_VP_0, 0xff);
		reg_write(priv, REG_ENA_VP_1, 0xff);
		reg_write(priv, REG_ENA_VP_2, 0xff);
		/* set muxing after enabling ports: */
		reg_write(priv, REG_VIP_CNTRL_0, priv->vip_cntrl_0);
		reg_write(priv, REG_VIP_CNTRL_1, priv->vip_cntrl_1);
		reg_write(priv, REG_VIP_CNTRL_2, priv->vip_cntrl_2);
		priv->is_on = true;
	}
}
/* Bridge .disable: shut off the video input ports (inverse of enable). */
static void tda998x_bridge_disable(struct drm_bridge *bridge)
{
	struct tda998x_priv *priv = bridge_to_tda998x_priv(bridge);
	if (priv->is_on) {
		/* disable video ports */
		reg_write(priv, REG_ENA_VP_0, 0x00);
		reg_write(priv, REG_ENA_VP_1, 0x00);
		reg_write(priv, REG_ENA_VP_2, 0x00);
		priv->is_on = false;
	}
}
/*
 * Bridge .mode_set: convert the VESA-style mode timings into the
 * chip's BT.656-style sync window registers, program the serializer
 * PLL and colour matrix, and (if the sink advertises CEA rev >= 3)
 * enable HDMI mode with AVI/VSI infoframes and audio. The register
 * write order below is deliberate; do not reorder.
 */
static void tda998x_bridge_mode_set(struct drm_bridge *bridge,
				    const struct drm_display_mode *mode,
				    const struct drm_display_mode *adjusted_mode)
{
	struct tda998x_priv *priv = bridge_to_tda998x_priv(bridge);
	unsigned long tmds_clock;
	u16 ref_pix, ref_line, n_pix, n_line;
	u16 hs_pix_s, hs_pix_e;
	u16 vs1_pix_s, vs1_pix_e, vs1_line_s, vs1_line_e;
	u16 vs2_pix_s, vs2_pix_e, vs2_line_s, vs2_line_e;
	u16 vwin1_line_s, vwin1_line_e;
	u16 vwin2_line_s, vwin2_line_e;
	u16 de_pix_s, de_pix_e;
	u8 reg, div, rep, sel_clk;
	/*
	 * Since we are "computer" like, our source invariably produces
	 * full-range RGB. If the monitor supports full-range, then use
	 * it, otherwise reduce to limited-range.
	 */
	priv->rgb_quant_range =
		priv->connector.display_info.rgb_quant_range_selectable ?
		HDMI_QUANTIZATION_RANGE_FULL :
		drm_default_rgb_quant_range(adjusted_mode);
	/*
	 * Internally TDA998x is using ITU-R BT.656 style sync but
	 * we get VESA style sync. TDA998x is using a reference pixel
	 * relative to ITU to sync to the input frame and for output
	 * sync generation. Currently, we are using reference detection
	 * from HS/VS, i.e. REFPIX/REFLINE denote frame start sync point
	 * which is position of rising VS with coincident rising HS.
	 *
	 * Now there is some issues to take care of:
	 * - HDMI data islands require sync-before-active
	 * - TDA998x register values must be > 0 to be enabled
	 * - REFLINE needs an additional offset of +1
	 * - REFPIX needs an addtional offset of +1 for UYUV and +3 for RGB
	 *
	 * So we add +1 to all horizontal and vertical register values,
	 * plus an additional +3 for REFPIX as we are using RGB input only.
	 */
	n_pix = mode->htotal;
	n_line = mode->vtotal;
	hs_pix_e = mode->hsync_end - mode->hdisplay;
	hs_pix_s = mode->hsync_start - mode->hdisplay;
	de_pix_e = mode->htotal;
	de_pix_s = mode->htotal - mode->hdisplay;
	ref_pix = 3 + hs_pix_s;
	/*
	 * Attached LCD controllers may generate broken sync. Allow
	 * those to adjust the position of the rising VS edge by adding
	 * HSKEW to ref_pix.
	 */
	if (adjusted_mode->flags & DRM_MODE_FLAG_HSKEW)
		ref_pix += adjusted_mode->hskew;
	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) == 0) {
		ref_line = 1 + mode->vsync_start - mode->vdisplay;
		vwin1_line_s = mode->vtotal - mode->vdisplay - 1;
		vwin1_line_e = vwin1_line_s + mode->vdisplay;
		vs1_pix_s = vs1_pix_e = hs_pix_s;
		vs1_line_s = mode->vsync_start - mode->vdisplay;
		vs1_line_e = vs1_line_s +
			mode->vsync_end - mode->vsync_start;
		/* progressive: second field window/sync unused */
		vwin2_line_s = vwin2_line_e = 0;
		vs2_pix_s = vs2_pix_e = 0;
		vs2_line_s = vs2_line_e = 0;
	} else {
		/* interlaced: halve vertical timings, program both fields */
		ref_line = 1 + (mode->vsync_start - mode->vdisplay)/2;
		vwin1_line_s = (mode->vtotal - mode->vdisplay)/2;
		vwin1_line_e = vwin1_line_s + mode->vdisplay/2;
		vs1_pix_s = vs1_pix_e = hs_pix_s;
		vs1_line_s = (mode->vsync_start - mode->vdisplay)/2;
		vs1_line_e = vs1_line_s +
			(mode->vsync_end - mode->vsync_start)/2;
		vwin2_line_s = vwin1_line_s + mode->vtotal/2;
		vwin2_line_e = vwin2_line_s + mode->vdisplay/2;
		vs2_pix_s = vs2_pix_e = hs_pix_s + mode->htotal/2;
		vs2_line_s = vs1_line_s + mode->vtotal/2 ;
		vs2_line_e = vs2_line_s +
			(mode->vsync_end - mode->vsync_start)/2;
	}
	/*
	 * Select pixel repeat depending on the double-clock flag
	 * (which means we have to repeat each pixel once.)
	 */
	rep = mode->flags & DRM_MODE_FLAG_DBLCLK ? 1 : 0;
	sel_clk = SEL_CLK_ENA_SC_CLK | SEL_CLK_SEL_CLK1 |
		  SEL_CLK_SEL_VRF_CLK(rep ? 2 : 0);
	/* the TMDS clock is scaled up by the pixel repeat */
	tmds_clock = mode->clock * (1 + rep);
	/*
	 * The divisor is power-of-2. The TDA9983B datasheet gives
	 * this as ranges of Msample/s, which is 10x the TMDS clock:
	 *   0 - 800 to 1500 Msample/s
	 *   1 - 400 to 800 Msample/s
	 *   2 - 200 to 400 Msample/s
	 *   3 - as 2 above
	 */
	for (div = 0; div < 3; div++)
		if (80000 >> div <= tmds_clock)
			break;
	mutex_lock(&priv->audio_mutex);
	priv->tmds_clock = tmds_clock;
	/* mute the audio FIFO: */
	reg_set(priv, REG_AIP_CNTRL_0, AIP_CNTRL_0_RST_FIFO);
	/* set HDMI HDCP mode off: */
	reg_write(priv, REG_TBG_CNTRL_1, TBG_CNTRL_1_DWIN_DIS);
	reg_clear(priv, REG_TX33, TX33_HDMI);
	reg_write(priv, REG_ENC_CNTRL, ENC_CNTRL_CTL_CODE(0));
	/* no pre-filter or interpolator: */
	reg_write(priv, REG_HVF_CNTRL_0, HVF_CNTRL_0_PREFIL(0) |
			HVF_CNTRL_0_INTPOL(0));
	reg_set(priv, REG_FEAT_POWERDOWN, FEAT_POWERDOWN_PREFILT);
	reg_write(priv, REG_VIP_CNTRL_5, VIP_CNTRL_5_SP_CNT(0));
	reg_write(priv, REG_VIP_CNTRL_4, VIP_CNTRL_4_BLANKIT(0) |
			VIP_CNTRL_4_BLC(0));
	reg_clear(priv, REG_PLL_SERIAL_1, PLL_SERIAL_1_SRL_MAN_IZ);
	reg_clear(priv, REG_PLL_SERIAL_3, PLL_SERIAL_3_SRL_CCIR |
					  PLL_SERIAL_3_SRL_DE);
	reg_write(priv, REG_SERIALIZER, 0);
	reg_write(priv, REG_HVF_CNTRL_1, HVF_CNTRL_1_VQR(0));
	reg_write(priv, REG_RPT_CNTRL, RPT_CNTRL_REPEAT(rep));
	reg_write(priv, REG_SEL_CLK, sel_clk);
	reg_write(priv, REG_PLL_SERIAL_2, PLL_SERIAL_2_SRL_NOSC(div) |
			PLL_SERIAL_2_SRL_PR(rep));
	/* set color matrix according to output rgb quant range */
	if (priv->rgb_quant_range == HDMI_QUANTIZATION_RANGE_LIMITED) {
		static u8 tda998x_full_to_limited_range[] = {
			MAT_CONTRL_MAT_SC(2),
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x03, 0x6f, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x03, 0x6f, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x03, 0x6f,
			0x00, 0x40, 0x00, 0x40, 0x00, 0x40
		};
		reg_clear(priv, REG_FEAT_POWERDOWN, FEAT_POWERDOWN_CSC);
		reg_write_range(priv, REG_MAT_CONTRL,
				tda998x_full_to_limited_range,
				sizeof(tda998x_full_to_limited_range));
	} else {
		/* full range: bypass the colour space converter */
		reg_write(priv, REG_MAT_CONTRL, MAT_CONTRL_MAT_BP |
					MAT_CONTRL_MAT_SC(1));
		reg_set(priv, REG_FEAT_POWERDOWN, FEAT_POWERDOWN_CSC);
	}
	/* set BIAS tmds value: */
	reg_write(priv, REG_ANA_GENERAL, 0x09);
	/*
	 * Sync on rising HSYNC/VSYNC
	 */
	reg = VIP_CNTRL_3_SYNC_HS;
	/*
	 * TDA19988 requires high-active sync at input stage,
	 * so invert low-active sync provided by master encoder here
	 */
	if (mode->flags & DRM_MODE_FLAG_NHSYNC)
		reg |= VIP_CNTRL_3_H_TGL;
	if (mode->flags & DRM_MODE_FLAG_NVSYNC)
		reg |= VIP_CNTRL_3_V_TGL;
	reg_write(priv, REG_VIP_CNTRL_3, reg);
	reg_write(priv, REG_VIDFORMAT, 0x00);
	reg_write16(priv, REG_REFPIX_MSB, ref_pix);
	reg_write16(priv, REG_REFLINE_MSB, ref_line);
	reg_write16(priv, REG_NPIX_MSB, n_pix);
	reg_write16(priv, REG_NLINE_MSB, n_line);
	reg_write16(priv, REG_VS_LINE_STRT_1_MSB, vs1_line_s);
	reg_write16(priv, REG_VS_PIX_STRT_1_MSB, vs1_pix_s);
	reg_write16(priv, REG_VS_LINE_END_1_MSB, vs1_line_e);
	reg_write16(priv, REG_VS_PIX_END_1_MSB, vs1_pix_e);
	reg_write16(priv, REG_VS_LINE_STRT_2_MSB, vs2_line_s);
	reg_write16(priv, REG_VS_PIX_STRT_2_MSB, vs2_pix_s);
	reg_write16(priv, REG_VS_LINE_END_2_MSB, vs2_line_e);
	reg_write16(priv, REG_VS_PIX_END_2_MSB, vs2_pix_e);
	reg_write16(priv, REG_HS_PIX_START_MSB, hs_pix_s);
	reg_write16(priv, REG_HS_PIX_STOP_MSB, hs_pix_e);
	reg_write16(priv, REG_VWIN_START_1_MSB, vwin1_line_s);
	reg_write16(priv, REG_VWIN_END_1_MSB, vwin1_line_e);
	reg_write16(priv, REG_VWIN_START_2_MSB, vwin2_line_s);
	reg_write16(priv, REG_VWIN_END_2_MSB, vwin2_line_e);
	reg_write16(priv, REG_DE_START_MSB, de_pix_s);
	reg_write16(priv, REG_DE_STOP_MSB, de_pix_e);
	if (priv->rev == TDA19988) {
		/* let incoming pixels fill the active space (if any) */
		reg_write(priv, REG_ENABLE_SPACE, 0x00);
	}
	/*
	 * Always generate sync polarity relative to input sync and
	 * revert input stage toggled sync at output stage
	 */
	reg = TBG_CNTRL_1_DWIN_DIS | TBG_CNTRL_1_TGL_EN;
	if (mode->flags & DRM_MODE_FLAG_NHSYNC)
		reg |= TBG_CNTRL_1_H_TGL;
	if (mode->flags & DRM_MODE_FLAG_NVSYNC)
		reg |= TBG_CNTRL_1_V_TGL;
	reg_write(priv, REG_TBG_CNTRL_1, reg);
	/* must be last register set: */
	reg_write(priv, REG_TBG_CNTRL_0, 0);
	/* CEA-861B section 6 says that:
	 * CEA version 1 (CEA-861) has no support for infoframes.
	 * CEA version 2 (CEA-861A) supports version 1 AVI infoframes,
	 * and optional basic audio.
	 * CEA version 3 (CEA-861B) supports version 1 and 2 AVI infoframes,
	 * and optional digital audio, with audio infoframes.
	 *
	 * Since we only support generation of version 2 AVI infoframes,
	 * ignore CEA version 2 and below (iow, behave as if we're a
	 * CEA-861 source.)
	 */
	priv->supports_infoframes = priv->connector.display_info.cea_rev >= 3;
	if (priv->supports_infoframes) {
		/* We need to turn HDMI HDCP stuff on to get audio through */
		reg &= ~TBG_CNTRL_1_DWIN_DIS;
		reg_write(priv, REG_TBG_CNTRL_1, reg);
		reg_write(priv, REG_ENC_CNTRL, ENC_CNTRL_CTL_CODE(1));
		reg_set(priv, REG_TX33, TX33_HDMI);
		tda998x_write_avi(priv, adjusted_mode);
		tda998x_write_vsi(priv, adjusted_mode);
		if (priv->sink_has_audio)
			tda998x_configure_audio(priv);
	}
	mutex_unlock(&priv->audio_mutex);
}
/* Bridge operations registered with the DRM bridge framework. */
static const struct drm_bridge_funcs tda998x_bridge_funcs = {
	.attach = tda998x_bridge_attach,
	.detach = tda998x_bridge_detach,
	.mode_valid = tda998x_bridge_mode_valid,
	.disable = tda998x_bridge_disable,
	.mode_set = tda998x_bridge_mode_set,
	.enable = tda998x_bridge_enable,
};
/* I2C driver functions */
/* I2C driver functions */
/*
 * Parse the "audio-ports" DT property: pairs of (format, port-enable)
 * u32 cells, format being AFMT_I2S or AFMT_SPDIF. Fills in
 * priv->audio_port_enable[] for each route. Returns 0 when the
 * property is absent or parsed successfully, -EINVAL on malformed data
 * (odd/oversized cell count, unknown format, or duplicate route).
 */
static int tda998x_get_audio_ports(struct tda998x_priv *priv,
				   struct device_node *np)
{
	const u32 *port_data;
	u32 size;
	int i;
	port_data = of_get_property(np, "audio-ports", &size);
	if (!port_data)
		return 0;
	size /= sizeof(u32);
	if (size > 2 * ARRAY_SIZE(priv->audio_port_enable) || size % 2 != 0) {
		dev_err(&priv->hdmi->dev,
			"Bad number of elements in audio-ports dt-property\n");
		return -EINVAL;
	}
	size /= 2;
	for (i = 0; i < size; i++) {
		unsigned int route;
		u8 afmt = be32_to_cpup(&port_data[2*i]);
		u8 ena_ap = be32_to_cpup(&port_data[2*i+1]);
		switch (afmt) {
		case AFMT_I2S:
			route = AUDIO_ROUTE_I2S;
			break;
		case AFMT_SPDIF:
			route = AUDIO_ROUTE_SPDIF;
			break;
		default:
			dev_err(&priv->hdmi->dev,
				"Bad audio format %u\n", afmt);
			return -EINVAL;
		}
		/* a zero enable mask is complained about but tolerated */
		if (!ena_ap) {
			dev_err(&priv->hdmi->dev, "invalid zero port config\n");
			continue;
		}
		if (priv->audio_port_enable[route]) {
			dev_err(&priv->hdmi->dev,
				"%s format already configured\n",
				route == AUDIO_ROUTE_SPDIF ? "SPDIF" : "I2S");
			return -EINVAL;
		}
		priv->audio_port_enable[route] = ena_ap;
	}
	return 0;
}
/*
 * Apply legacy (non-DT) platform data: video port swap/mirror muxing
 * and, if audio parameters were supplied, the fixed audio settings
 * (I2S Philips format is assumed for the I2S route). Returns 0 or the
 * tda998x_derive_cts_n() error for an unsupported bit-clock ratio.
 */
static int tda998x_set_config(struct tda998x_priv *priv,
			      const struct tda998x_encoder_params *p)
{
	priv->vip_cntrl_0 = VIP_CNTRL_0_SWAP_A(p->swap_a) |
			    (p->mirr_a ? VIP_CNTRL_0_MIRR_A : 0) |
			    VIP_CNTRL_0_SWAP_B(p->swap_b) |
			    (p->mirr_b ? VIP_CNTRL_0_MIRR_B : 0);
	priv->vip_cntrl_1 = VIP_CNTRL_1_SWAP_C(p->swap_c) |
			    (p->mirr_c ? VIP_CNTRL_1_MIRR_C : 0) |
			    VIP_CNTRL_1_SWAP_D(p->swap_d) |
			    (p->mirr_d ? VIP_CNTRL_1_MIRR_D : 0);
	priv->vip_cntrl_2 = VIP_CNTRL_2_SWAP_E(p->swap_e) |
			    (p->mirr_e ? VIP_CNTRL_2_MIRR_E : 0) |
			    VIP_CNTRL_2_SWAP_F(p->swap_f) |
			    (p->mirr_f ? VIP_CNTRL_2_MIRR_F : 0);
	if (p->audio_params.format != AFMT_UNUSED) {
		unsigned int ratio, route;
		bool spdif = p->audio_params.format == AFMT_SPDIF;
		route = AUDIO_ROUTE_I2S + spdif;
		priv->audio.route = &tda998x_audio_route[route];
		priv->audio.cea = p->audio_params.cea;
		priv->audio.sample_rate = p->audio_params.sample_rate;
		memcpy(priv->audio.status, p->audio_params.status,
		       min(sizeof(priv->audio.status),
			   sizeof(p->audio_params.status)));
		priv->audio.ena_ap = p->audio_params.config;
		priv->audio.i2s_format = I2S_FORMAT_PHILIPS;
		ratio = spdif ? 64 : p->audio_params.sample_width * 2;
		return tda998x_derive_cts_n(priv, &priv->audio, ratio);
	}
	return 0;
}
/*
 * Tear down everything tda998x_create() set up: unregister the bridge,
 * disable interrupts, remove the audio codec device, free the IRQ,
 * stop the EDID-delay timer and detect work, and drop the CEC client
 * and notifier. Also used on the create() error path.
 */
static void tda998x_destroy(struct device *dev)
{
	struct tda998x_priv *priv = dev_get_drvdata(dev);
	drm_bridge_remove(&priv->bridge);
	/* disable all IRQs and free the IRQ handler */
	cec_write(priv, REG_CEC_RXSHPDINTENA, 0);
	reg_clear(priv, REG_INT_FLAGS_2, INT_FLAGS_2_EDID_BLK_RD);
	if (priv->audio_pdev)
		platform_device_unregister(priv->audio_pdev);
	if (priv->hdmi->irq)
		free_irq(priv->hdmi->irq, priv);
	del_timer_sync(&priv->edid_delay_timer);
	cancel_work_sync(&priv->detect_work);
	i2c_unregister_device(priv->cec);
	cec_notifier_conn_unregister(priv->cec_notify);
}
/*
 * One-time device bring-up from probe: allocate state, wake and reset
 * the chip, identify the silicon revision, set up DDC/CEC, request the
 * optional IRQ, register the CEC notifier and the companion TDA9950
 * CEC I2C device, apply DT or platform configuration, and finally add
 * the DRM bridge. On failure everything is unwound via
 * tda998x_destroy() (except a failed IRQ request, which has nothing
 * extra to undo). Returns 0 or a negative errno.
 */
static int tda998x_create(struct device *dev)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct device_node *np = client->dev.of_node;
	struct i2c_board_info cec_info;
	struct tda998x_priv *priv;
	u32 video;
	int rev_lo, rev_hi, ret;
	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;
	dev_set_drvdata(dev, priv);
	mutex_init(&priv->mutex);	/* protect the page access */
	mutex_init(&priv->audio_mutex); /* protect access from audio thread */
	mutex_init(&priv->edid_mutex);
	INIT_LIST_HEAD(&priv->bridge.list);
	init_waitqueue_head(&priv->edid_delay_waitq);
	timer_setup(&priv->edid_delay_timer, tda998x_edid_delay_done, 0);
	INIT_WORK(&priv->detect_work, tda998x_detect_work);
	/* default video port muxing until DT/platform data overrides it */
	priv->vip_cntrl_0 = VIP_CNTRL_0_SWAP_A(2) | VIP_CNTRL_0_SWAP_B(3);
	priv->vip_cntrl_1 = VIP_CNTRL_1_SWAP_C(0) | VIP_CNTRL_1_SWAP_D(1);
	priv->vip_cntrl_2 = VIP_CNTRL_2_SWAP_E(4) | VIP_CNTRL_2_SWAP_F(5);
	/* CEC I2C address bound to TDA998x I2C addr by configuration pins */
	priv->cec_addr = 0x34 + (client->addr & 0x03);
	priv->current_page = 0xff;
	priv->hdmi = client;
	/* wake up the device: */
	cec_write(priv, REG_CEC_ENAMODS,
		  CEC_ENAMODS_EN_RXSENS | CEC_ENAMODS_EN_HDMI);
	tda998x_reset(priv);
	/* read version: */
	rev_lo = reg_read(priv, REG_VERSION_LSB);
	if (rev_lo < 0) {
		dev_err(dev, "failed to read version: %d\n", rev_lo);
		return rev_lo;
	}
	rev_hi = reg_read(priv, REG_VERSION_MSB);
	if (rev_hi < 0) {
		dev_err(dev, "failed to read version: %d\n", rev_hi);
		return rev_hi;
	}
	priv->rev = rev_lo | rev_hi << 8;
	/* mask off feature bits: */
	priv->rev &= ~0x30; /* not-hdcp and not-scalar bit */
	switch (priv->rev) {
	case TDA9989N2:
		dev_info(dev, "found TDA9989 n2");
		break;
	case TDA19989:
		dev_info(dev, "found TDA19989");
		break;
	case TDA19989N2:
		dev_info(dev, "found TDA19989 n2");
		break;
	case TDA19988:
		dev_info(dev, "found TDA19988");
		break;
	default:
		dev_err(dev, "found unsupported device: %04x\n", priv->rev);
		return -ENXIO;
	}
	/* after reset, enable DDC: */
	reg_write(priv, REG_DDC_DISABLE, 0x00);
	/* set clock on DDC channel: */
	reg_write(priv, REG_TX3, 39);
	/* if necessary, disable multi-master: */
	if (priv->rev == TDA19989)
		reg_set(priv, REG_I2C_MASTER, I2C_MASTER_DIS_MM);
	cec_write(priv, REG_CEC_FRO_IM_CLK_CTRL,
			CEC_FRO_IM_CLK_CTRL_GHOST_DIS | CEC_FRO_IM_CLK_CTRL_IMCLK_SEL);
	/* ensure interrupts are disabled */
	cec_write(priv, REG_CEC_RXSHPDINTENA, 0);
	/* clear pending interrupts */
	cec_read(priv, REG_CEC_RXSHPDINT);
	reg_read(priv, REG_INT_FLAGS_0);
	reg_read(priv, REG_INT_FLAGS_1);
	reg_read(priv, REG_INT_FLAGS_2);
	/* initialize the optional IRQ */
	if (client->irq) {
		unsigned long irq_flags;
		/* init read EDID waitqueue and HDP work */
		init_waitqueue_head(&priv->wq_edid);
		/* reuse the board's trigger type for the shared CEC IRQ */
		irq_flags =
			irqd_get_trigger_type(irq_get_irq_data(client->irq));
		priv->cec_glue.irq_flags = irq_flags;
		irq_flags |= IRQF_SHARED | IRQF_ONESHOT;
		ret = request_threaded_irq(client->irq, NULL,
					   tda998x_irq_thread, irq_flags,
					   "tda998x", priv);
		if (ret) {
			dev_err(dev, "failed to request IRQ#%u: %d\n",
				client->irq, ret);
			goto err_irq;
		}
		/* enable HPD irq */
		cec_write(priv, REG_CEC_RXSHPDINTENA, CEC_RXSHPDLEV_HPD);
	}
	priv->cec_notify = cec_notifier_conn_register(dev, NULL, NULL);
	if (!priv->cec_notify) {
		ret = -ENOMEM;
		goto fail;
	}
	priv->cec_glue.parent = dev;
	priv->cec_glue.data = priv;
	priv->cec_glue.init = tda998x_cec_hook_init;
	priv->cec_glue.exit = tda998x_cec_hook_exit;
	priv->cec_glue.open = tda998x_cec_hook_open;
	priv->cec_glue.release = tda998x_cec_hook_release;
	/*
	 * Some TDA998x are actually two I2C devices merged onto one piece
	 * of silicon: TDA9989 and TDA19989 combine the HDMI transmitter
	 * with a slightly modified TDA9950 CEC device. The CEC device
	 * is at the TDA9950 address, with the address pins strapped across
	 * to the TDA998x address pins. Hence, it always has the same
	 * offset.
	 */
	memset(&cec_info, 0, sizeof(cec_info));
	strscpy(cec_info.type, "tda9950", sizeof(cec_info.type));
	cec_info.addr = priv->cec_addr;
	cec_info.platform_data = &priv->cec_glue;
	cec_info.irq = client->irq;
	priv->cec = i2c_new_client_device(client->adapter, &cec_info);
	if (IS_ERR(priv->cec)) {
		ret = PTR_ERR(priv->cec);
		goto fail;
	}
	/* enable EDID read irq: */
	reg_set(priv, REG_INT_FLAGS_2, INT_FLAGS_2_EDID_BLK_RD);
	if (np) {
		/* get the device tree parameters */
		ret = of_property_read_u32(np, "video-ports", &video);
		if (ret == 0) {
			priv->vip_cntrl_0 = video >> 16;
			priv->vip_cntrl_1 = video >> 8;
			priv->vip_cntrl_2 = video;
		}
		ret = tda998x_get_audio_ports(priv, np);
		if (ret)
			goto fail;
		if (priv->audio_port_enable[AUDIO_ROUTE_I2S] ||
		    priv->audio_port_enable[AUDIO_ROUTE_SPDIF])
			tda998x_audio_codec_init(priv, &client->dev);
	} else if (dev->platform_data) {
		ret = tda998x_set_config(priv, dev->platform_data);
		if (ret)
			goto fail;
	}
	priv->bridge.funcs = &tda998x_bridge_funcs;
#ifdef CONFIG_OF
	priv->bridge.of_node = dev->of_node;
#endif
	drm_bridge_add(&priv->bridge);
	return 0;
fail:
	tda998x_destroy(dev);
err_irq:
	return ret;
}
/* DRM encoder functions */
/* DRM encoder functions */
/*
 * Create the TMDS encoder for the component-bind path and attach our
 * bridge to it. Possible CRTCs come from the device tree; if none are
 * found, fall back to CRTC 0 for backwards compatibility.
 */
static int tda998x_encoder_init(struct device *dev, struct drm_device *drm)
{
	struct tda998x_priv *priv = dev_get_drvdata(dev);
	u32 crtcs = 0;
	int ret;
	if (dev->of_node)
		crtcs = drm_of_find_possible_crtcs(drm, dev->of_node);
	/* If no CRTCs were found, fall back to our old behaviour */
	if (crtcs == 0) {
		dev_warn(dev, "Falling back to first CRTC\n");
		crtcs = 1 << 0;
	}
	priv->encoder.possible_crtcs = crtcs;
	ret = drm_simple_encoder_init(drm, &priv->encoder,
				      DRM_MODE_ENCODER_TMDS);
	if (ret)
		goto err_encoder;
	ret = drm_bridge_attach(&priv->encoder, &priv->bridge, NULL, 0);
	if (ret)
		goto err_bridge;
	return 0;
err_bridge:
	drm_encoder_cleanup(&priv->encoder);
err_encoder:
	return ret;
}
/* Component .bind: create the encoder on the master's DRM device. */
static int tda998x_bind(struct device *dev, struct device *master, void *data)
{
	struct drm_device *drm = data;
	return tda998x_encoder_init(dev, drm);
}
/* Component .unbind: destroy the encoder created in .bind. */
static void tda998x_unbind(struct device *dev, struct device *master,
			   void *data)
{
	struct tda998x_priv *priv = dev_get_drvdata(dev);
	drm_encoder_cleanup(&priv->encoder);
}
/* Component framework hooks used by the DRM master driver. */
static const struct component_ops tda998x_ops = {
	.bind = tda998x_bind,
	.unbind = tda998x_unbind,
};
/*
 * I2C probe: verify the adapter supports plain I2C transfers, bring up
 * the device, then register as a component; on component registration
 * failure the device bring-up is undone.
 */
static int
tda998x_probe(struct i2c_client *client)
{
	int ret;
	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
		dev_warn(&client->dev, "adapter does not support I2C\n");
		return -EIO;
	}
	ret = tda998x_create(&client->dev);
	if (ret)
		return ret;
	ret = component_add(&client->dev, &tda998x_ops);
	if (ret)
		tda998x_destroy(&client->dev);
	return ret;
}
/* I2C remove: unregister the component, then tear the device down. */
static void tda998x_remove(struct i2c_client *client)
{
	component_del(&client->dev, &tda998x_ops);
	tda998x_destroy(&client->dev);
}
#ifdef CONFIG_OF
/* Device tree match table (all TDA998x variants share one compatible). */
static const struct of_device_id tda998x_dt_ids[] = {
	{ .compatible = "nxp,tda998x", },
	{ }
};
MODULE_DEVICE_TABLE(of, tda998x_dt_ids);
#endif
/* Legacy (non-DT) I2C device id table. */
static const struct i2c_device_id tda998x_ids[] = {
	{ "tda998x", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, tda998x_ids);
static struct i2c_driver tda998x_driver = {
	.probe = tda998x_probe,
	.remove = tda998x_remove,
	.driver = {
		.name = "tda998x",
		.of_match_table = of_match_ptr(tda998x_dt_ids),
	},
	.id_table = tda998x_ids,
};
module_i2c_driver(tda998x_driver);
/* Fixed: MODULE_AUTHOR e-mail address was missing its closing '>' */
MODULE_AUTHOR("Rob Clark <[email protected]>");
MODULE_DESCRIPTION("NXP Semiconductors TDA998X HDMI Encoder");
MODULE_LICENSE("GPL");
| linux-master | drivers/gpu/drm/i2c/tda998x_drv.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Kunit test for drm_probe_helper functions
*/
#include <drm/drm_atomic_state_helper.h>
#include <drm/drm_connector.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_kunit_helpers.h>
#include <drm/drm_mode.h>
#include <drm/drm_modes.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_probe_helper.h>
#include <kunit/test.h>
/* Per-test fixture: a KUnit-managed device/DRM device plus a connector. */
struct drm_probe_helper_test_priv {
	struct drm_device *drm;	/* fake DRM device owned by the test */
	struct device *dev;	/* backing platform device */
	struct drm_connector connector;
};
/* Intentionally empty helper vtable; tests fill behavior per-case. */
static const struct drm_connector_helper_funcs drm_probe_helper_connector_helper_funcs = {
};
static const struct drm_connector_funcs drm_probe_helper_connector_funcs = {
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.reset = drm_atomic_helper_connector_reset,
};
/* Suite init: allocate a mock device + DRM device and register one connector. */
static int drm_probe_helper_test_init(struct kunit *test)
{
	struct drm_probe_helper_test_priv *priv;
	struct drm_connector *connector;
	int ret;

	priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, priv);
	test->priv = priv;

	priv->dev = drm_kunit_helper_alloc_device(test);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv->dev);

	priv->drm = __drm_kunit_helper_alloc_drm_device(test, priv->dev,
							sizeof(*priv->drm), 0,
							DRIVER_MODESET | DRIVER_ATOMIC);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv->drm);

	/* Connector type is irrelevant here; only the TV-mode property is exercised. */
	connector = &priv->connector;
	ret = drmm_connector_init(priv->drm, connector,
				  &drm_probe_helper_connector_funcs,
				  DRM_MODE_CONNECTOR_Unknown,
				  NULL);
	KUNIT_ASSERT_EQ(test, ret, 0);

	drm_connector_helper_add(connector, &drm_probe_helper_connector_helper_funcs);

	return 0;
}
/* Factory returning the drm_display_mode a test case expects to be probed. */
typedef struct drm_display_mode *(*expected_mode_func_t)(struct drm_device *);

struct drm_connector_helper_tv_get_modes_test {
	const char *name;			/* human-readable case name */
	unsigned int supported_tv_modes;	/* bitmask of BIT(DRM_MODE_TV_MODE_*) */
	enum drm_connector_tv_mode default_mode;	/* initial tv_mode property value */
	bool cmdline;				/* simulate a command-line TV mode? */
	enum drm_connector_tv_mode cmdline_mode;	/* the mode forced on the command line */
	expected_mode_func_t *expected_modes;	/* expected probed modes, in order */
	unsigned int num_expected_modes;
};

/*
 * Build one test case; the trailing varargs are the expected modes in probe
 * order, and their count is computed at compile time from the array literal.
 */
#define _TV_MODE_TEST(_name, _supported, _default, _cmdline, _cmdline_mode, ...)		\
	{											\
		.name = _name,									\
		.supported_tv_modes = _supported,						\
		.default_mode = _default,							\
		.cmdline = _cmdline,								\
		.cmdline_mode = _cmdline_mode,							\
		.expected_modes = (expected_mode_func_t[]) { __VA_ARGS__ },			\
		.num_expected_modes = sizeof((expected_mode_func_t[]) { __VA_ARGS__ }) /	\
				      (sizeof(expected_mode_func_t)),				\
	}

/* Case without a simulated command line. */
#define TV_MODE_TEST(_name, _supported, _default, ...)			\
	_TV_MODE_TEST(_name, _supported, _default, false, 0, __VA_ARGS__)

/* Case with a simulated "video=" command-line TV mode override. */
#define TV_MODE_TEST_CMDLINE(_name, _supported, _default, _cmdline, ...) \
	_TV_MODE_TEST(_name, _supported, _default, true, _cmdline, __VA_ARGS__)
/* Exercise drm_connector_helper_tv_get_modes() against one parameter set. */
static void
drm_test_connector_helper_tv_get_modes_check(struct kunit *test)
{
	const struct drm_connector_helper_tv_get_modes_test *params = test->param_value;
	struct drm_probe_helper_test_priv *priv = test->priv;
	struct drm_connector *connector = &priv->connector;
	struct drm_cmdline_mode *cmdline = &connector->cmdline_mode;
	struct drm_display_mode *mode;
	const struct drm_display_mode *expected;
	size_t len;
	int ret;

	/* Optionally pretend the user forced a TV mode on the kernel command line. */
	if (params->cmdline) {
		cmdline->tv_mode_specified = true;
		cmdline->tv_mode = params->cmdline_mode;
	}

	ret = drm_mode_create_tv_properties(priv->drm, params->supported_tv_modes);
	KUNIT_ASSERT_EQ(test, ret, 0);

	drm_object_attach_property(&connector->base,
				   priv->drm->mode_config.tv_mode_property,
				   params->default_mode);

	/* The probe helper expects to be called under mode_config.mutex. */
	mutex_lock(&priv->drm->mode_config.mutex);

	ret = drm_connector_helper_tv_get_modes(connector);
	KUNIT_EXPECT_EQ(test, ret, params->num_expected_modes);

	/* The return value must equal the number of modes actually listed. */
	len = 0;
	list_for_each_entry(mode, &connector->probed_modes, head)
		len++;
	KUNIT_EXPECT_EQ(test, len, params->num_expected_modes);

	/* First probed mode must match and carry the PREFERRED flag. */
	if (params->num_expected_modes >= 1) {
		mode = list_first_entry_or_null(&connector->probed_modes,
						struct drm_display_mode, head);
		KUNIT_ASSERT_NOT_NULL(test, mode);

		expected = params->expected_modes[0](priv->drm);
		KUNIT_ASSERT_NOT_NULL(test, expected);

		KUNIT_EXPECT_TRUE(test, drm_mode_equal(mode, expected));
		KUNIT_EXPECT_TRUE(test, mode->type & DRM_MODE_TYPE_PREFERRED);
	}

	/* Second probed mode must match and must NOT be marked preferred. */
	if (params->num_expected_modes >= 2) {
		mode = list_next_entry(mode, head);
		KUNIT_ASSERT_NOT_NULL(test, mode);

		expected = params->expected_modes[1](priv->drm);
		KUNIT_ASSERT_NOT_NULL(test, expected);

		KUNIT_EXPECT_TRUE(test, drm_mode_equal(mode, expected));
		KUNIT_EXPECT_FALSE(test, mode->type & DRM_MODE_TYPE_PREFERRED);
	}

	mutex_unlock(&priv->drm->mode_config.mutex);
}
/*
 * Parameter table: each entry names the supported TV modes, the default
 * property value, an optional command-line override, and the expected
 * probed modes (first one must be preferred).
 */
static const
struct drm_connector_helper_tv_get_modes_test drm_connector_helper_tv_get_modes_tests[] = {
	{ .name = "None" },
	TV_MODE_TEST("PAL",
		     BIT(DRM_MODE_TV_MODE_PAL),
		     DRM_MODE_TV_MODE_PAL,
		     drm_mode_analog_pal_576i),
	TV_MODE_TEST("NTSC",
		     BIT(DRM_MODE_TV_MODE_NTSC),
		     DRM_MODE_TV_MODE_NTSC,
		     drm_mode_analog_ntsc_480i),
	TV_MODE_TEST("Both, NTSC Default",
		     BIT(DRM_MODE_TV_MODE_NTSC) | BIT(DRM_MODE_TV_MODE_PAL),
		     DRM_MODE_TV_MODE_NTSC,
		     drm_mode_analog_ntsc_480i, drm_mode_analog_pal_576i),
	TV_MODE_TEST("Both, PAL Default",
		     BIT(DRM_MODE_TV_MODE_NTSC) | BIT(DRM_MODE_TV_MODE_PAL),
		     DRM_MODE_TV_MODE_PAL,
		     drm_mode_analog_pal_576i, drm_mode_analog_ntsc_480i),
	TV_MODE_TEST_CMDLINE("Both, NTSC Default, with PAL on command-line",
			     BIT(DRM_MODE_TV_MODE_NTSC) | BIT(DRM_MODE_TV_MODE_PAL),
			     DRM_MODE_TV_MODE_NTSC,
			     DRM_MODE_TV_MODE_PAL,
			     drm_mode_analog_pal_576i, drm_mode_analog_ntsc_480i),
	TV_MODE_TEST_CMDLINE("Both, PAL Default, with NTSC on command-line",
			     BIT(DRM_MODE_TV_MODE_NTSC) | BIT(DRM_MODE_TV_MODE_PAL),
			     DRM_MODE_TV_MODE_PAL,
			     DRM_MODE_TV_MODE_NTSC,
			     drm_mode_analog_ntsc_480i, drm_mode_analog_pal_576i),
};
static void
drm_connector_helper_tv_get_modes_desc(const struct drm_connector_helper_tv_get_modes_test *t,
char *desc)
{
sprintf(desc, "%s", t->name);
}
KUNIT_ARRAY_PARAM(drm_connector_helper_tv_get_modes,
drm_connector_helper_tv_get_modes_tests,
drm_connector_helper_tv_get_modes_desc);
/* One parameterized case drives every entry of the table above. */
static struct kunit_case drm_test_connector_helper_tv_get_modes_tests[] = {
	KUNIT_CASE_PARAM(drm_test_connector_helper_tv_get_modes_check,
			 drm_connector_helper_tv_get_modes_gen_params),
	{ }
};

static struct kunit_suite drm_test_connector_helper_tv_get_modes_suite = {
	.name = "drm_connector_helper_tv_get_modes",
	.init = drm_probe_helper_test_init,
	.test_cases = drm_test_connector_helper_tv_get_modes_tests,
};

kunit_test_suite(drm_test_connector_helper_tv_get_modes_suite);

MODULE_AUTHOR("Maxime Ripard <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/gpu/drm/tests/drm_probe_helper_test.c |
// SPDX-License-Identifier: GPL-2.0
#include <drm/drm_atomic.h>
#include <drm/drm_drv.h>
#include <drm/drm_kunit_helpers.h>
#include <drm/drm_managed.h>
#include <kunit/resource.h>
#include <linux/device.h>
#include <linux/platform_device.h>
/* Name shared by the fake platform driver and the mock devices it binds. */
#define KUNIT_DEVICE_NAME	"drm-kunit-mock-device"

/* Empty funcs: KUnit tests never go through the full modesetting paths. */
static const struct drm_mode_config_funcs drm_mode_config_funcs = {
};

/* Probe stub: the mock device only needs a bound driver, not behaviour. */
static int fake_probe(struct platform_device *pdev)
{
	return 0;
}

static struct platform_driver fake_platform_driver = {
	.probe	= fake_probe,
	.driver = {
		.name = KUNIT_DEVICE_NAME,
	},
};
/*
 * KUnit deferred-action trampolines: adapt the kernel teardown calls to
 * the void-pointer context signature kunit_add_action() expects.
 */
static void kunit_action_platform_driver_unregister(void *ptr)
{
	struct platform_driver *drv = ptr;

	platform_driver_unregister(drv);
}

static void kunit_action_platform_device_put(void *ptr)
{
	struct platform_device *pdev = ptr;

	platform_device_put(pdev);
}

static void kunit_action_platform_device_del(void *ptr)
{
	struct platform_device *pdev = ptr;

	platform_device_del(pdev);
}
/**
 * drm_kunit_helper_alloc_device - Allocate a mock device for a KUnit test
 * @test: The test context object
 *
 * This allocates a fake struct &device to create a mock for a KUnit
 * test. The device will also be bound to a fake driver. It will thus be
 * able to leverage the usual infrastructure and most notably the
 * device-managed resources just like a "real" device.
 *
 * Resources will be cleaned up automatically, but the removal can be
 * forced using @drm_kunit_helper_free_device.
 *
 * Returns:
 * A pointer to the new device, or an ERR_PTR() otherwise.
 */
struct device *drm_kunit_helper_alloc_device(struct kunit *test)
{
	struct platform_device *pdev;
	int ret;

	ret = platform_driver_register(&fake_platform_driver);
	KUNIT_ASSERT_EQ(test, ret, 0);

	/* Register a teardown action after each successful step so the
	 * test cleans up automatically even if it aborts early. */
	ret = kunit_add_action_or_reset(test,
					kunit_action_platform_driver_unregister,
					&fake_platform_driver);
	KUNIT_ASSERT_EQ(test, ret, 0);

	pdev = platform_device_alloc(KUNIT_DEVICE_NAME, PLATFORM_DEVID_NONE);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, pdev);

	ret = kunit_add_action_or_reset(test,
					kunit_action_platform_device_put,
					pdev);
	KUNIT_ASSERT_EQ(test, ret, 0);

	/* Adding the device lets the platform bus bind it to the fake
	 * driver (they share KUNIT_DEVICE_NAME). */
	ret = platform_device_add(pdev);
	KUNIT_ASSERT_EQ(test, ret, 0);

	ret = kunit_add_action_or_reset(test,
					kunit_action_platform_device_del,
					pdev);
	KUNIT_ASSERT_EQ(test, ret, 0);

	return &pdev->dev;
}
EXPORT_SYMBOL_GPL(drm_kunit_helper_alloc_device);
/**
* drm_kunit_helper_free_device - Frees a mock device
* @test: The test context object
* @dev: The device to free
*
* Frees a device allocated with drm_kunit_helper_alloc_device().
*/
void drm_kunit_helper_free_device(struct kunit *test, struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
kunit_release_action(test,
kunit_action_platform_device_del,
pdev);
kunit_release_action(test,
kunit_action_platform_device_put,
pdev);
kunit_release_action(test,
kunit_action_platform_driver_unregister,
pdev);
}
EXPORT_SYMBOL_GPL(drm_kunit_helper_free_device);
/*
 * Allocate a managed DRM device (with surrounding container of @size bytes,
 * the drm_device living at @offset) tied to @dev, using @driver, and run a
 * minimal mode-config init. Returns the drm_device or an ERR_PTR().
 */
struct drm_device *
__drm_kunit_helper_alloc_drm_device_with_driver(struct kunit *test,
						struct device *dev,
						size_t size, size_t offset,
						const struct drm_driver *driver)
{
	struct drm_device *drm;
	void *container;
	int ret;

	container = __devm_drm_dev_alloc(dev, driver, size, offset);
	if (IS_ERR(container))
		return ERR_CAST(container);

	drm = container + offset;
	drm->mode_config.funcs = &drm_mode_config_funcs;

	ret = drmm_mode_config_init(drm);
	if (ret)
		return ERR_PTR(ret);

	return drm;
}
EXPORT_SYMBOL_GPL(__drm_kunit_helper_alloc_drm_device_with_driver);

/* Deferred action: drop any held modeset locks, then finish the context. */
static void action_drm_release_context(void *ptr)
{
	struct drm_modeset_acquire_ctx *ctx = ptr;

	drm_modeset_drop_locks(ctx);
	drm_modeset_acquire_fini(ctx);
}
/**
 * drm_kunit_helper_acquire_ctx_alloc - Allocates an acquire context
 * @test: The test context object
 *
 * Allocates and initializes a modeset acquire context.
 *
 * The context is tied to the kunit test context, so we must not call
 * drm_modeset_acquire_fini() on it, it will be done so automatically.
 *
 * Returns:
 * An ERR_PTR on error, a pointer to the newly allocated context otherwise
 */
struct drm_modeset_acquire_ctx *
drm_kunit_helper_acquire_ctx_alloc(struct kunit *test)
{
	struct drm_modeset_acquire_ctx *ctx;
	int ret;

	ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, ctx);

	drm_modeset_acquire_init(ctx, 0);

	/* Release (drop locks + fini) automatically at test teardown. */
	ret = kunit_add_action_or_reset(test,
					action_drm_release_context,
					ctx);
	if (ret)
		return ERR_PTR(ret);

	return ctx;
}
EXPORT_SYMBOL_GPL(drm_kunit_helper_acquire_ctx_alloc);

/* Deferred action: drop the final reference on an atomic state. */
static void kunit_action_drm_atomic_state_put(void *ptr)
{
	struct drm_atomic_state *state = ptr;

	drm_atomic_state_put(state);
}
/**
 * drm_kunit_helper_atomic_state_alloc - Allocates an atomic state
 * @test: The test context object
 * @drm: The device to alloc the state for
 * @ctx: Locking context for that atomic update
 *
 * Allocates an empty atomic state.
 *
 * The state is tied to the kunit test context, so we must not call
 * drm_atomic_state_put() on it, it will be done so automatically.
 *
 * Returns:
 * An ERR_PTR on error, a pointer to the newly allocated state otherwise
 */
struct drm_atomic_state *
drm_kunit_helper_atomic_state_alloc(struct kunit *test,
				    struct drm_device *drm,
				    struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_atomic_state *state;
	int ret;

	state = drm_atomic_state_alloc(drm);
	if (!state)
		return ERR_PTR(-ENOMEM);

	/* Drop the state's reference automatically at test teardown. */
	ret = kunit_add_action_or_reset(test,
					kunit_action_drm_atomic_state_put,
					state);
	if (ret)
		return ERR_PTR(ret);

	state->acquire_ctx = ctx;

	return state;
}
EXPORT_SYMBOL_GPL(drm_kunit_helper_atomic_state_alloc);

MODULE_AUTHOR("Maxime Ripard <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/gpu/drm/tests/drm_kunit_helpers.c |
// SPDX-License-Identifier: MIT
/*
* Copyright 2022 Advanced Micro Devices, Inc.
*/
#define pr_fmt(fmt) "drm_exec: " fmt
#include <kunit/test.h>
#include <linux/module.h>
#include <linux/prime_numbers.h>
#include <drm/drm_exec.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem.h>
#include <drm/drm_kunit_helpers.h>
#include "../lib/drm_random.h"
/* Per-test state: a mock device and the DRM device hanging off it. */
struct drm_exec_priv {
	struct device *dev;
	struct drm_device *drm;
};

/* Suite init: give every test a fresh mock DRM device. */
static int drm_exec_test_init(struct kunit *test)
{
	struct drm_exec_priv *priv;

	priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv);

	test->priv = priv;

	priv->dev = drm_kunit_helper_alloc_device(test);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv->dev);

	priv->drm = __drm_kunit_helper_alloc_drm_device(test, priv->dev, sizeof(*priv->drm), 0,
							DRIVER_MODESET);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv->drm);

	return 0;
}
/* Smoke test: init/fini of a drm_exec context with no objects locked. */
static void sanitycheck(struct kunit *test)
{
	struct drm_exec exec;

	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
	drm_exec_fini(&exec);
	KUNIT_SUCCEED(test);
}
/* Lock a single GEM object inside the retry loop. */
static void test_lock(struct kunit *test)
{
	struct drm_exec_priv *priv = test->priv;
	struct drm_gem_object gobj = { };
	struct drm_exec exec;
	int ret;

	drm_gem_private_object_init(priv->drm, &gobj, PAGE_SIZE);

	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
	/* The loop restarts on contention, so order here is significant. */
	drm_exec_until_all_locked(&exec) {
		ret = drm_exec_lock_obj(&exec, &gobj);
		drm_exec_retry_on_contention(&exec);
		KUNIT_EXPECT_EQ(test, ret, 0);
		if (ret)
			break;
	}
	drm_exec_fini(&exec);
	/* NOTE(review): no drm_gem_private_object_fini() here, unlike
	 * test_prepare() below — possibly intentional, worth confirming. */
}
/* Lock, explicitly unlock, then re-lock the same object in one loop. */
static void test_lock_unlock(struct kunit *test)
{
	struct drm_exec_priv *priv = test->priv;
	struct drm_gem_object gobj = { };
	struct drm_exec exec;
	int ret;

	drm_gem_private_object_init(priv->drm, &gobj, PAGE_SIZE);

	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
	drm_exec_until_all_locked(&exec) {
		ret = drm_exec_lock_obj(&exec, &gobj);
		drm_exec_retry_on_contention(&exec);
		KUNIT_EXPECT_EQ(test, ret, 0);
		if (ret)
			break;

		/* Unlock mid-loop, then take the lock again. */
		drm_exec_unlock_obj(&exec, &gobj);
		ret = drm_exec_lock_obj(&exec, &gobj);
		drm_exec_retry_on_contention(&exec);
		KUNIT_EXPECT_EQ(test, ret, 0);
		if (ret)
			break;
	}
	drm_exec_fini(&exec);
}
/* Locking the same object twice must succeed with DRM_EXEC_IGNORE_DUPLICATES. */
static void test_duplicates(struct kunit *test)
{
	struct drm_exec_priv *priv = test->priv;
	struct drm_gem_object gobj = { };
	struct drm_exec exec;
	int ret;

	drm_gem_private_object_init(priv->drm, &gobj, PAGE_SIZE);

	drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES);
	drm_exec_until_all_locked(&exec) {
		ret = drm_exec_lock_obj(&exec, &gobj);
		drm_exec_retry_on_contention(&exec);
		KUNIT_EXPECT_EQ(test, ret, 0);
		if (ret)
			break;

		/* Second lock of the same object: expected to succeed. */
		ret = drm_exec_lock_obj(&exec, &gobj);
		drm_exec_retry_on_contention(&exec);
		KUNIT_EXPECT_EQ(test, ret, 0);
		if (ret)
			break;
	}
	/* One explicit unlock before fini; presumably the duplicate is
	 * only tracked once — confirm against drm_exec semantics. */
	drm_exec_unlock_obj(&exec, &gobj);
	drm_exec_fini(&exec);
}
/* Prepare (lock + reserve one fence slot) a single GEM object. */
static void test_prepare(struct kunit *test)
{
	struct drm_exec_priv *priv = test->priv;
	struct drm_gem_object gobj = { };
	struct drm_exec exec;
	int ret;

	drm_gem_private_object_init(priv->drm, &gobj, PAGE_SIZE);

	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
	drm_exec_until_all_locked(&exec) {
		ret = drm_exec_prepare_obj(&exec, &gobj, 1);
		drm_exec_retry_on_contention(&exec);
		KUNIT_EXPECT_EQ(test, ret, 0);
		if (ret)
			break;
	}
	drm_exec_fini(&exec);

	drm_gem_private_object_fini(&gobj);
}
/* Prepare an array of two GEM objects in one call. */
static void test_prepare_array(struct kunit *test)
{
	struct drm_exec_priv *priv = test->priv;
	struct drm_gem_object gobj1 = { };
	struct drm_gem_object gobj2 = { };
	struct drm_gem_object *array[] = { &gobj1, &gobj2 };
	struct drm_exec exec;
	int ret;

	drm_gem_private_object_init(priv->drm, &gobj1, PAGE_SIZE);
	drm_gem_private_object_init(priv->drm, &gobj2, PAGE_SIZE);

	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
	drm_exec_until_all_locked(&exec)
		ret = drm_exec_prepare_array(&exec, array, ARRAY_SIZE(array),
					     1);
	KUNIT_EXPECT_EQ(test, ret, 0);
	drm_exec_fini(&exec);

	drm_gem_private_object_fini(&gobj1);
	drm_gem_private_object_fini(&gobj2);
}
/* A drm_exec context must survive two consecutive init/loop/fini cycles. */
static void test_multiple_loops(struct kunit *test)
{
	struct drm_exec exec;

	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
	drm_exec_until_all_locked(&exec)
	{
		break;
	}
	drm_exec_fini(&exec);

	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
	drm_exec_until_all_locked(&exec)
	{
		break;
	}
	drm_exec_fini(&exec);
	KUNIT_SUCCEED(test);
}
/* Test registration: plain (non-parameterized) cases. */
static struct kunit_case drm_exec_tests[] = {
	KUNIT_CASE(sanitycheck),
	KUNIT_CASE(test_lock),
	KUNIT_CASE(test_lock_unlock),
	KUNIT_CASE(test_duplicates),
	KUNIT_CASE(test_prepare),
	KUNIT_CASE(test_prepare_array),
	KUNIT_CASE(test_multiple_loops),
	{}
};

static struct kunit_suite drm_exec_test_suite = {
	.name = "drm_exec",
	.init = drm_exec_test_init,
	.test_cases = drm_exec_tests,
};

kunit_test_suite(drm_exec_test_suite);

MODULE_AUTHOR("AMD");
MODULE_LICENSE("GPL and additional rights");
| linux-master | drivers/gpu/drm/tests/drm_exec_test.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Test cases for the drm_rect functions
*
* Copyright (c) 2022 Maíra Canal <[email protected]>
*/
#include <kunit/test.h>
#include <drm/drm_rect.h>
#include <drm/drm_mode.h>
#include <linux/string_helpers.h>
#include <linux/errno.h>
/* Assert two rectangles match in position (x1/y1) and size (width/height). */
static void drm_rect_compare(struct kunit *test, const struct drm_rect *r,
			     const struct drm_rect *expected)
{
	KUNIT_EXPECT_EQ(test, r->x1, expected->x1);
	KUNIT_EXPECT_EQ(test, r->y1, expected->y1);
	KUNIT_EXPECT_EQ(test, drm_rect_width(r), drm_rect_width(expected));
	KUNIT_EXPECT_EQ(test, drm_rect_height(r), drm_rect_height(expected));
}
/* Regression test: zero-sized destination must not cause a division by zero. */
static void drm_test_rect_clip_scaled_div_by_zero(struct kunit *test)
{
	struct drm_rect src, dst, clip;
	bool visible;

	/*
	 * Make sure we don't divide by zero when dst
	 * width/height is zero and dst and clip do not intersect.
	 */
	drm_rect_init(&src, 0, 0, 0, 0);
	drm_rect_init(&dst, 0, 0, 0, 0);
	drm_rect_init(&clip, 1, 1, 1, 1);
	visible = drm_rect_clip_scaled(&src, &dst, &clip);

	/* Message fixed ("Destination should not be visible") to be
	 * grammatical and consistent with the other tests in this file. */
	KUNIT_EXPECT_FALSE_MSG(test, visible, "Destination should not be visible\n");
	KUNIT_EXPECT_FALSE_MSG(test, drm_rect_visible(&src), "Source should not be visible\n");

	drm_rect_init(&src, 0, 0, 0, 0);
	drm_rect_init(&dst, 3, 3, 0, 0);
	drm_rect_init(&clip, 1, 1, 1, 1);
	visible = drm_rect_clip_scaled(&src, &dst, &clip);

	KUNIT_EXPECT_FALSE_MSG(test, visible, "Destination should not be visible\n");
	KUNIT_EXPECT_FALSE_MSG(test, drm_rect_visible(&src), "Source should not be visible\n");
}
/* Clip rect fully covers dst: neither src nor dst should be modified. */
static void drm_test_rect_clip_scaled_not_clipped(struct kunit *test)
{
	struct drm_rect src, dst, clip;
	bool visible;

	/* 1:1 scaling */
	drm_rect_init(&src, 0, 0, 1 << 16, 1 << 16);
	drm_rect_init(&dst, 0, 0, 1, 1);
	drm_rect_init(&clip, 0, 0, 1, 1);

	visible = drm_rect_clip_scaled(&src, &dst, &clip);

	KUNIT_EXPECT_FALSE_MSG(test, src.x1 != 0 || src.x2 != 1 << 16 ||
			       src.y1 != 0 || src.y2 != 1 << 16, "Source badly clipped\n");
	KUNIT_EXPECT_FALSE_MSG(test, dst.x1 != 0 || dst.x2 != 1 ||
			       dst.y1 != 0 || dst.y2 != 1, "Destination badly clipped\n");
	KUNIT_EXPECT_TRUE_MSG(test, visible, "Destination should be visible\n");
	KUNIT_EXPECT_TRUE_MSG(test, drm_rect_visible(&src), "Source should be visible\n");

	/* 2:1 scaling */
	drm_rect_init(&src, 0, 0, 2 << 16, 2 << 16);
	drm_rect_init(&dst, 0, 0, 1, 1);
	drm_rect_init(&clip, 0, 0, 1, 1);

	visible = drm_rect_clip_scaled(&src, &dst, &clip);

	KUNIT_EXPECT_FALSE_MSG(test, src.x1 != 0 || src.x2 != 2 << 16 ||
			       src.y1 != 0 || src.y2 != 2 << 16, "Source badly clipped\n");
	KUNIT_EXPECT_FALSE_MSG(test, dst.x1 != 0 || dst.x2 != 1 ||
			       dst.y1 != 0 || dst.y2 != 1, "Destination badly clipped\n");
	KUNIT_EXPECT_TRUE_MSG(test, visible, "Destination should be visible\n");
	KUNIT_EXPECT_TRUE_MSG(test, drm_rect_visible(&src), "Source should be visible\n");

	/* 1:2 scaling */
	drm_rect_init(&src, 0, 0, 1 << 16, 1 << 16);
	drm_rect_init(&dst, 0, 0, 2, 2);
	drm_rect_init(&clip, 0, 0, 2, 2);

	visible = drm_rect_clip_scaled(&src, &dst, &clip);

	KUNIT_EXPECT_FALSE_MSG(test, src.x1 != 0 || src.x2 != 1 << 16 ||
			       src.y1 != 0 || src.y2 != 1 << 16, "Source badly clipped\n");
	KUNIT_EXPECT_FALSE_MSG(test, dst.x1 != 0 || dst.x2 != 2 ||
			       dst.y1 != 0 || dst.y2 != 2, "Destination badly clipped\n");
	KUNIT_EXPECT_TRUE_MSG(test, visible, "Destination should be visible\n");
	KUNIT_EXPECT_TRUE_MSG(test, drm_rect_visible(&src), "Source should be visible\n");
}
/*
 * Clip rect covers only part of dst: the source must be clipped by the
 * matching (scaled) amount. Source coordinates are 16.16 fixed point.
 */
static void drm_test_rect_clip_scaled_clipped(struct kunit *test)
{
	struct drm_rect src, dst, clip;
	bool visible;

	/* 1:1 scaling top/left clip */
	drm_rect_init(&src, 0, 0, 2 << 16, 2 << 16);
	drm_rect_init(&dst, 0, 0, 2, 2);
	drm_rect_init(&clip, 0, 0, 1, 1);

	visible = drm_rect_clip_scaled(&src, &dst, &clip);

	KUNIT_EXPECT_FALSE_MSG(test, src.x1 != 0 || src.x2 != 1 << 16 ||
			       src.y1 != 0 || src.y2 != 1 << 16, "Source badly clipped\n");
	KUNIT_EXPECT_FALSE_MSG(test, dst.x1 != 0 || dst.x2 != 1 ||
			       dst.y1 != 0 || dst.y2 != 1, "Destination badly clipped\n");
	KUNIT_EXPECT_TRUE_MSG(test, visible, "Destination should be visible\n");
	KUNIT_EXPECT_TRUE_MSG(test, drm_rect_visible(&src), "Source should be visible\n");

	/* 1:1 scaling bottom/right clip */
	drm_rect_init(&src, 0, 0, 2 << 16, 2 << 16);
	drm_rect_init(&dst, 0, 0, 2, 2);
	drm_rect_init(&clip, 1, 1, 1, 1);

	visible = drm_rect_clip_scaled(&src, &dst, &clip);

	KUNIT_EXPECT_FALSE_MSG(test, src.x1 != 1 << 16 || src.x2 != 2 << 16 ||
			       src.y1 != 1 << 16 || src.y2 != 2 << 16, "Source badly clipped\n");
	KUNIT_EXPECT_FALSE_MSG(test, dst.x1 != 1 || dst.x2 != 2 || dst.y1 != 1 ||
			       dst.y2 != 2, "Destination badly clipped\n");
	KUNIT_EXPECT_TRUE_MSG(test, visible, "Destination should be visible\n");
	KUNIT_EXPECT_TRUE_MSG(test, drm_rect_visible(&src), "Source should be visible\n");

	/* 2:1 scaling top/left clip */
	drm_rect_init(&src, 0, 0, 4 << 16, 4 << 16);
	drm_rect_init(&dst, 0, 0, 2, 2);
	drm_rect_init(&clip, 0, 0, 1, 1);

	visible = drm_rect_clip_scaled(&src, &dst, &clip);

	KUNIT_EXPECT_FALSE_MSG(test, src.x1 != 0 || src.x2 != 2 << 16 ||
			       src.y1 != 0 || src.y2 != 2 << 16, "Source badly clipped\n");
	KUNIT_EXPECT_FALSE_MSG(test, dst.x1 != 0 || dst.x2 != 1 || dst.y1 != 0 ||
			       dst.y2 != 1, "Destination badly clipped\n");
	KUNIT_EXPECT_TRUE_MSG(test, visible, "Destination should be visible\n");
	KUNIT_EXPECT_TRUE_MSG(test, drm_rect_visible(&src), "Source should be visible\n");

	/* 2:1 scaling bottom/right clip */
	drm_rect_init(&src, 0, 0, 4 << 16, 4 << 16);
	drm_rect_init(&dst, 0, 0, 2, 2);
	drm_rect_init(&clip, 1, 1, 1, 1);

	visible = drm_rect_clip_scaled(&src, &dst, &clip);

	KUNIT_EXPECT_FALSE_MSG(test, src.x1 != 2 << 16 || src.x2 != 4 << 16 ||
			       src.y1 != 2 << 16 || src.y2 != 4 << 16, "Source badly clipped\n");
	KUNIT_EXPECT_FALSE_MSG(test, dst.x1 != 1 || dst.x2 != 2 || dst.y1 != 1 ||
			       dst.y2 != 2, "Destination badly clipped\n");
	KUNIT_EXPECT_TRUE_MSG(test, visible, "Destination should be visible\n");
	KUNIT_EXPECT_TRUE_MSG(test, drm_rect_visible(&src), "Source should be visible\n");

	/* 1:2 scaling top/left clip */
	drm_rect_init(&src, 0, 0, 2 << 16, 2 << 16);
	drm_rect_init(&dst, 0, 0, 4, 4);
	drm_rect_init(&clip, 0, 0, 2, 2);

	visible = drm_rect_clip_scaled(&src, &dst, &clip);

	KUNIT_EXPECT_FALSE_MSG(test, src.x1 != 0 || src.x2 != 1 << 16 ||
			       src.y1 != 0 || src.y2 != 1 << 16, "Source badly clipped\n");
	KUNIT_EXPECT_FALSE_MSG(test, dst.x1 != 0 || dst.x2 != 2 || dst.y1 != 0 ||
			       dst.y2 != 2, "Destination badly clipped\n");
	KUNIT_EXPECT_TRUE_MSG(test, visible, "Destination should be visible\n");
	KUNIT_EXPECT_TRUE_MSG(test, drm_rect_visible(&src), "Source should be visible\n");

	/* 1:2 scaling bottom/right clip */
	drm_rect_init(&src, 0, 0, 2 << 16, 2 << 16);
	drm_rect_init(&dst, 0, 0, 4, 4);
	drm_rect_init(&clip, 2, 2, 2, 2);

	visible = drm_rect_clip_scaled(&src, &dst, &clip);

	KUNIT_EXPECT_FALSE_MSG(test, src.x1 != 1 << 16 || src.x2 != 2 << 16 ||
			       src.y1 != 1 << 16 || src.y2 != 2 << 16, "Source badly clipped\n");
	KUNIT_EXPECT_FALSE_MSG(test, dst.x1 != 2 || dst.x2 != 4 || dst.y1 != 2 ||
			       dst.y2 != 4, "Destination badly clipped\n");
	KUNIT_EXPECT_TRUE_MSG(test, visible, "Destination should be visible\n");
	KUNIT_EXPECT_TRUE_MSG(test, drm_rect_visible(&src), "Source should be visible\n");
}
/* Regression test for a signed/unsigned mix-up in the clipping math. */
static void drm_test_rect_clip_scaled_signed_vs_unsigned(struct kunit *test)
{
	struct drm_rect src, dst, clip;
	bool visible;

	/*
	 * 'clip.x2 - dst.x1 >= dst width' could result a negative
	 * src rectangle width which is no longer expected by the
	 * code as it's using unsigned types. This could lead to
	 * the clipped source rectangle appearing visible when it
	 * should have been fully clipped. Make sure both rectangles
	 * end up invisible.
	 */
	drm_rect_init(&src, 0, 0, INT_MAX, INT_MAX);
	drm_rect_init(&dst, 0, 0, 2, 2);
	drm_rect_init(&clip, 3, 3, 1, 1);

	visible = drm_rect_clip_scaled(&src, &dst, &clip);

	KUNIT_EXPECT_FALSE_MSG(test, visible, "Destination should not be visible\n");
	KUNIT_EXPECT_FALSE_MSG(test, drm_rect_visible(&src), "Source should not be visible\n");
}
/* One intersection scenario: two input rects, visibility, expected clip of r1. */
struct drm_rect_intersect_case {
	const char *description;
	struct drm_rect r1, r2;
	bool should_be_visible;	/* whether the intersection has positive area */
	struct drm_rect expected_intersection;
};

/*
 * Intersection table. Note: non-intersecting cases still record the
 * (possibly degenerate, even negative-sized) rectangle drm_rect_intersect()
 * leaves behind in r1.
 */
static const struct drm_rect_intersect_case drm_rect_intersect_cases[] = {
	{
		.description = "top-left x bottom-right",
		.r1 = DRM_RECT_INIT(1, 1, 2, 2),
		.r2 = DRM_RECT_INIT(0, 0, 2, 2),
		.should_be_visible = true,
		.expected_intersection = DRM_RECT_INIT(1, 1, 1, 1),
	},
	{
		.description = "top-right x bottom-left",
		.r1 = DRM_RECT_INIT(0, 0, 2, 2),
		.r2 = DRM_RECT_INIT(1, -1, 2, 2),
		.should_be_visible = true,
		.expected_intersection = DRM_RECT_INIT(1, 0, 1, 1),
	},
	{
		.description = "bottom-left x top-right",
		.r1 = DRM_RECT_INIT(1, -1, 2, 2),
		.r2 = DRM_RECT_INIT(0, 0, 2, 2),
		.should_be_visible = true,
		.expected_intersection = DRM_RECT_INIT(1, 0, 1, 1),
	},
	{
		.description = "bottom-right x top-left",
		.r1 = DRM_RECT_INIT(0, 0, 2, 2),
		.r2 = DRM_RECT_INIT(1, 1, 2, 2),
		.should_be_visible = true,
		.expected_intersection = DRM_RECT_INIT(1, 1, 1, 1),
	},
	{
		.description = "right x left",
		.r1 = DRM_RECT_INIT(0, 0, 2, 1),
		.r2 = DRM_RECT_INIT(1, 0, 3, 1),
		.should_be_visible = true,
		.expected_intersection = DRM_RECT_INIT(1, 0, 1, 1),
	},
	{
		.description = "left x right",
		.r1 = DRM_RECT_INIT(1, 0, 3, 1),
		.r2 = DRM_RECT_INIT(0, 0, 2, 1),
		.should_be_visible = true,
		.expected_intersection = DRM_RECT_INIT(1, 0, 1, 1),
	},
	{
		.description = "up x bottom",
		.r1 = DRM_RECT_INIT(0, 0, 1, 2),
		.r2 = DRM_RECT_INIT(0, -1, 1, 3),
		.should_be_visible = true,
		.expected_intersection = DRM_RECT_INIT(0, 0, 1, 2),
	},
	{
		.description = "bottom x up",
		.r1 = DRM_RECT_INIT(0, -1, 1, 3),
		.r2 = DRM_RECT_INIT(0, 0, 1, 2),
		.should_be_visible = true,
		.expected_intersection = DRM_RECT_INIT(0, 0, 1, 2),
	},
	{
		.description = "touching corner",
		.r1 = DRM_RECT_INIT(0, 0, 1, 1),
		.r2 = DRM_RECT_INIT(1, 1, 2, 2),
		.should_be_visible = false,
		.expected_intersection = DRM_RECT_INIT(1, 1, 0, 0),
	},
	{
		.description = "touching side",
		.r1 = DRM_RECT_INIT(0, 0, 1, 1),
		.r2 = DRM_RECT_INIT(1, 0, 1, 1),
		.should_be_visible = false,
		.expected_intersection = DRM_RECT_INIT(1, 0, 0, 1),
	},
	{
		.description = "equal rects",
		.r1 = DRM_RECT_INIT(0, 0, 2, 2),
		.r2 = DRM_RECT_INIT(0, 0, 2, 2),
		.should_be_visible = true,
		.expected_intersection = DRM_RECT_INIT(0, 0, 2, 2),
	},
	{
		.description = "inside another",
		.r1 = DRM_RECT_INIT(0, 0, 2, 2),
		.r2 = DRM_RECT_INIT(1, 1, 1, 1),
		.should_be_visible = true,
		.expected_intersection = DRM_RECT_INIT(1, 1, 1, 1),
	},
	{
		.description = "far away",
		.r1 = DRM_RECT_INIT(0, 0, 1, 1),
		.r2 = DRM_RECT_INIT(3, 6, 1, 1),
		.should_be_visible = false,
		.expected_intersection = DRM_RECT_INIT(3, 6, -2, -5),
	},
	{
		.description = "points intersecting",
		.r1 = DRM_RECT_INIT(5, 10, 0, 0),
		.r2 = DRM_RECT_INIT(5, 10, 0, 0),
		.should_be_visible = false,
		.expected_intersection = DRM_RECT_INIT(5, 10, 0, 0),
	},
	{
		.description = "points not intersecting",
		.r1 = DRM_RECT_INIT(0, 0, 0, 0),
		.r2 = DRM_RECT_INIT(5, 10, 0, 0),
		.should_be_visible = false,
		.expected_intersection = DRM_RECT_INIT(5, 10, -5, -10),
	},
};

/* Parameter description: case name plus both input rectangles. */
static void drm_rect_intersect_case_desc(const struct drm_rect_intersect_case *t, char *desc)
{
	snprintf(desc, KUNIT_PARAM_DESC_SIZE,
		 "%s: " DRM_RECT_FMT " x " DRM_RECT_FMT,
		 t->description, DRM_RECT_ARG(&t->r1), DRM_RECT_ARG(&t->r2));
}

KUNIT_ARRAY_PARAM(drm_rect_intersect, drm_rect_intersect_cases, drm_rect_intersect_case_desc);
static void drm_test_rect_intersect(struct kunit *test)
{
const struct drm_rect_intersect_case *params = test->param_value;
struct drm_rect r1_aux = params->r1;
bool visible;
visible = drm_rect_intersect(&r1_aux, ¶ms->r2);
KUNIT_EXPECT_EQ(test, visible, params->should_be_visible);
drm_rect_compare(test, &r1_aux, ¶ms->expected_intersection);
}
/* One scaling scenario: src/dst rects, allowed range, expected factor or errno. */
struct drm_rect_scale_case {
	const char *name;
	struct drm_rect src, dst;
	int min_range, max_range;	/* accepted scaling-factor range */
	int expected_scaling_factor;	/* factor, or negative errno */
};

/* Source rects use 16.16 fixed point; destination rects are integer pixels. */
static const struct drm_rect_scale_case drm_rect_scale_cases[] = {
	{
		.name = "normal use",
		.src = DRM_RECT_INIT(0, 0, 2 << 16, 2 << 16),
		.dst = DRM_RECT_INIT(0, 0, 1 << 16, 1 << 16),
		.min_range = 0, .max_range = INT_MAX,
		.expected_scaling_factor = 2,
	},
	{
		.name = "out of max range",
		.src = DRM_RECT_INIT(0, 0, 10 << 16, 10 << 16),
		.dst = DRM_RECT_INIT(0, 0, 1 << 16, 1 << 16),
		.min_range = 3, .max_range = 5,
		.expected_scaling_factor = -ERANGE,
	},
	{
		.name = "out of min range",
		.src = DRM_RECT_INIT(0, 0, 2 << 16, 2 << 16),
		.dst = DRM_RECT_INIT(0, 0, 1 << 16, 1 << 16),
		.min_range = 3, .max_range = 5,
		.expected_scaling_factor = -ERANGE,
	},
	{
		.name = "zero dst",
		.src = DRM_RECT_INIT(0, 0, 2 << 16, 2 << 16),
		.dst = DRM_RECT_INIT(0, 0, 0 << 16, 0 << 16),
		.min_range = 0, .max_range = INT_MAX,
		.expected_scaling_factor = 0,
	},
	{
		.name = "negative src",
		.src = DRM_RECT_INIT(0, 0, -(1 << 16), -(1 << 16)),
		.dst = DRM_RECT_INIT(0, 0, 1 << 16, 1 << 16),
		.min_range = 0, .max_range = INT_MAX,
		.expected_scaling_factor = -EINVAL,
	},
	{
		.name = "negative dst",
		.src = DRM_RECT_INIT(0, 0, 1 << 16, 1 << 16),
		.dst = DRM_RECT_INIT(0, 0, -(1 << 16), -(1 << 16)),
		.min_range = 0, .max_range = INT_MAX,
		.expected_scaling_factor = -EINVAL,
	},
};

/* Parameter description is just the case name, bounded to the KUnit buffer. */
static void drm_rect_scale_case_desc(const struct drm_rect_scale_case *t, char *desc)
{
	strscpy(desc, t->name, KUNIT_PARAM_DESC_SIZE);
}

KUNIT_ARRAY_PARAM(drm_rect_scale, drm_rect_scale_cases, drm_rect_scale_case_desc);
/* Check the horizontal scaling factor (or errno) for one parameter set. */
static void drm_test_rect_calc_hscale(struct kunit *test)
{
	const struct drm_rect_scale_case *params = test->param_value;
	int hscale;

	hscale = drm_rect_calc_hscale(&params->src, &params->dst,
				      params->min_range, params->max_range);

	KUNIT_EXPECT_EQ(test, hscale, params->expected_scaling_factor);
}
/* Check the vertical scaling factor (or errno) for one parameter set. */
static void drm_test_rect_calc_vscale(struct kunit *test)
{
	const struct drm_rect_scale_case *params = test->param_value;
	int vscale;

	vscale = drm_rect_calc_vscale(&params->src, &params->dst,
				      params->min_range, params->max_range);

	KUNIT_EXPECT_EQ(test, vscale, params->expected_scaling_factor);
}
/* One rotation scenario: input rect, plane size, rotation, expected result. */
struct drm_rect_rotate_case {
	const char *name;
	unsigned int rotation;	/* DRM_MODE_ROTATE_* / DRM_MODE_REFLECT_* */
	struct drm_rect rect;
	int width, height;	/* dimensions of the area being rotated */
	struct drm_rect expected;
};

static const struct drm_rect_rotate_case drm_rect_rotate_cases[] = {
	{
		.name = "reflect-x",
		.rotation = DRM_MODE_REFLECT_X,
		.rect = DRM_RECT_INIT(0, 0, 5, 5),
		.width = 5, .height = 10,
		.expected = DRM_RECT_INIT(0, 0, 5, 5),
	},
	{
		.name = "reflect-y",
		.rotation = DRM_MODE_REFLECT_Y,
		.rect = DRM_RECT_INIT(2, 0, 5, 5),
		.width = 5, .height = 10,
		.expected = DRM_RECT_INIT(2, 5, 5, 5),
	},
	{
		.name = "rotate-0",
		.rotation = DRM_MODE_ROTATE_0,
		.rect = DRM_RECT_INIT(0, 2, 5, 5),
		.width = 5, .height = 10,
		.expected = DRM_RECT_INIT(0, 2, 5, 5),
	},
	{
		.name = "rotate-90",
		.rotation = DRM_MODE_ROTATE_90,
		.rect = DRM_RECT_INIT(0, 0, 5, 10),
		.width = 5, .height = 10,
		.expected = DRM_RECT_INIT(0, 0, 10, 5),
	},
	{
		.name = "rotate-180",
		.rotation = DRM_MODE_ROTATE_180,
		.rect = DRM_RECT_INIT(11, 3, 5, 10),
		.width = 5, .height = 10,
		.expected = DRM_RECT_INIT(-11, -3, 5, 10),
	},
	{
		.name = "rotate-270",
		.rotation = DRM_MODE_ROTATE_270,
		.rect = DRM_RECT_INIT(6, 3, 5, 10),
		.width = 5, .height = 10,
		.expected = DRM_RECT_INIT(-3, 6, 10, 5),
	},
};

/* Parameter description is just the case name, bounded to the KUnit buffer. */
static void drm_rect_rotate_case_desc(const struct drm_rect_rotate_case *t, char *desc)
{
	strscpy(desc, t->name, KUNIT_PARAM_DESC_SIZE);
}

KUNIT_ARRAY_PARAM(drm_rect_rotate, drm_rect_rotate_cases, drm_rect_rotate_case_desc);
static void drm_test_rect_rotate(struct kunit *test)
{
const struct drm_rect_rotate_case *params = test->param_value;
struct drm_rect r = params->rect;
drm_rect_rotate(&r, params->width, params->height, params->rotation);
drm_rect_compare(test, &r, ¶ms->expected);
}
static void drm_test_rect_rotate_inv(struct kunit *test)
{
const struct drm_rect_rotate_case *params = test->param_value;
struct drm_rect r = params->expected;
drm_rect_rotate_inv(&r, params->width, params->height, params->rotation);
drm_rect_compare(test, &r, ¶ms->rect);
}
/* All drm_rect test cases; NULL-terminated as required by KUnit. */
static struct kunit_case drm_rect_tests[] = {
	KUNIT_CASE(drm_test_rect_clip_scaled_div_by_zero),
	KUNIT_CASE(drm_test_rect_clip_scaled_not_clipped),
	KUNIT_CASE(drm_test_rect_clip_scaled_clipped),
	KUNIT_CASE(drm_test_rect_clip_scaled_signed_vs_unsigned),
	KUNIT_CASE_PARAM(drm_test_rect_intersect, drm_rect_intersect_gen_params),
	KUNIT_CASE_PARAM(drm_test_rect_calc_hscale, drm_rect_scale_gen_params),
	KUNIT_CASE_PARAM(drm_test_rect_calc_vscale, drm_rect_scale_gen_params),
	KUNIT_CASE_PARAM(drm_test_rect_rotate, drm_rect_rotate_gen_params),
	KUNIT_CASE_PARAM(drm_test_rect_rotate_inv, drm_rect_rotate_gen_params),
	{ }
};
/* KUnit suite covering the drm_rect clipping, scaling and rotation helpers. */
static struct kunit_suite drm_rect_test_suite = {
	.name = "drm_rect",
	.test_cases = drm_rect_tests,
};
kunit_test_suite(drm_rect_test_suite);
MODULE_LICENSE("GPL");
| linux-master | drivers/gpu/drm/tests/drm_rect_test.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Kunit test for drm_modes functions
*/
#include <drm/drm_drv.h>
#include <drm/drm_kunit_helpers.h>
#include <drm/drm_modes.h>
#include <kunit/test.h>
#include <linux/units.h>
/* Per-test state: a KUnit-managed platform device and its DRM device. */
struct drm_test_modes_priv {
	struct drm_device *drm;	/* mode-setting-only DRM device for mode allocation */
	struct device *dev;	/* backing device allocated by the KUnit helper */
};
/*
 * Suite init: allocate a device and a minimal DRIVER_MODESET DRM device
 * and stash them in test->priv. All allocations are test-managed and are
 * torn down automatically by KUnit.
 */
static int drm_test_modes_init(struct kunit *test)
{
	struct drm_test_modes_priv *priv;
	priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, priv);
	priv->dev = drm_kunit_helper_alloc_device(test);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv->dev);
	priv->drm = __drm_kunit_helper_alloc_drm_device(test, priv->dev,
							sizeof(*priv->drm), 0,
							DRIVER_MODESET);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv->drm);
	test->priv = priv;
	return 0;
}
/*
 * Check that drm_analog_tv_mode() produces the canonical BT.601 timings
 * for interlaced NTSC at a 13.5 MHz pixel clock.
 */
static void drm_test_modes_analog_tv_ntsc_480i(struct kunit *test)
{
	struct drm_test_modes_priv *priv = test->priv;
	struct drm_display_mode *mode;
	mode = drm_analog_tv_mode(priv->drm,
				  DRM_MODE_TV_MODE_NTSC,
				  13500 * HZ_PER_KHZ, 720, 480,
				  true);
	KUNIT_ASSERT_NOT_NULL(test, mode);
	KUNIT_EXPECT_EQ(test, drm_mode_vrefresh(mode), 60);
	KUNIT_EXPECT_EQ(test, mode->hdisplay, 720);
	/* BT.601 defines hsync_start at 736 for 480i */
	KUNIT_EXPECT_EQ(test, mode->hsync_start, 736);
	/*
	 * The NTSC standard expects a line to take 63.556us. With a
	 * pixel clock of 13.5 MHz, a pixel takes around 74ns, so we
	 * need to have 63556ns / 74ns = 858.
	 *
	 * This is also mandated by BT.601.
	 */
	KUNIT_EXPECT_EQ(test, mode->htotal, 858);
	KUNIT_EXPECT_EQ(test, mode->vdisplay, 480);
	KUNIT_EXPECT_EQ(test, mode->vtotal, 525);
}
/*
 * Check that the drm_mode_analog_ntsc_480i() convenience wrapper produces
 * exactly the same mode as a direct drm_analog_tv_mode() call with the
 * NTSC parameters.
 */
static void drm_test_modes_analog_tv_ntsc_480i_inlined(struct kunit *test)
{
	struct drm_test_modes_priv *priv = test->priv;
	struct drm_display_mode *reference, *inlined;

	reference = drm_analog_tv_mode(priv->drm,
				       DRM_MODE_TV_MODE_NTSC,
				       13500 * HZ_PER_KHZ, 720, 480,
				       true);
	KUNIT_ASSERT_NOT_NULL(test, reference);

	inlined = drm_mode_analog_ntsc_480i(priv->drm);
	KUNIT_ASSERT_NOT_NULL(test, inlined);

	KUNIT_EXPECT_TRUE(test, drm_mode_equal(reference, inlined));
}
/*
 * Check that drm_analog_tv_mode() produces the canonical BT.601 timings
 * for interlaced PAL at a 13.5 MHz pixel clock.
 */
static void drm_test_modes_analog_tv_pal_576i(struct kunit *test)
{
	struct drm_test_modes_priv *priv = test->priv;
	struct drm_display_mode *mode;
	mode = drm_analog_tv_mode(priv->drm,
				  DRM_MODE_TV_MODE_PAL,
				  13500 * HZ_PER_KHZ, 720, 576,
				  true);
	KUNIT_ASSERT_NOT_NULL(test, mode);
	KUNIT_EXPECT_EQ(test, drm_mode_vrefresh(mode), 50);
	KUNIT_EXPECT_EQ(test, mode->hdisplay, 720);
	/* BT.601 defines hsync_start at 732 for 576i */
	KUNIT_EXPECT_EQ(test, mode->hsync_start, 732);
	/*
	 * The PAL standard expects a line to take 64us. With a pixel
	 * clock of 13.5 MHz, a pixel takes around 74ns, so we need to
	 * have 64000ns / 74ns = 864.
	 *
	 * This is also mandated by BT.601.
	 */
	KUNIT_EXPECT_EQ(test, mode->htotal, 864);
	KUNIT_EXPECT_EQ(test, mode->vdisplay, 576);
	KUNIT_EXPECT_EQ(test, mode->vtotal, 625);
}
/*
 * Check that the drm_mode_analog_pal_576i() convenience wrapper produces
 * exactly the same mode as a direct drm_analog_tv_mode() call with the
 * PAL parameters.
 */
static void drm_test_modes_analog_tv_pal_576i_inlined(struct kunit *test)
{
	struct drm_test_modes_priv *priv = test->priv;
	struct drm_display_mode *reference, *inlined;

	reference = drm_analog_tv_mode(priv->drm,
				       DRM_MODE_TV_MODE_PAL,
				       13500 * HZ_PER_KHZ, 720, 576,
				       true);
	KUNIT_ASSERT_NOT_NULL(test, reference);

	inlined = drm_mode_analog_pal_576i(priv->drm);
	KUNIT_ASSERT_NOT_NULL(test, inlined);

	KUNIT_EXPECT_TRUE(test, drm_mode_equal(reference, inlined));
}
/* All analog-TV mode test cases; NULL-terminated as required by KUnit. */
static struct kunit_case drm_modes_analog_tv_tests[] = {
	KUNIT_CASE(drm_test_modes_analog_tv_ntsc_480i),
	KUNIT_CASE(drm_test_modes_analog_tv_ntsc_480i_inlined),
	KUNIT_CASE(drm_test_modes_analog_tv_pal_576i),
	KUNIT_CASE(drm_test_modes_analog_tv_pal_576i_inlined),
	{ }
};
/* KUnit suite for the analog-TV mode generation helpers in drm_modes.c. */
static struct kunit_suite drm_modes_analog_tv_test_suite = {
	.name = "drm_modes_analog_tv",
	.init = drm_test_modes_init,
	.test_cases = drm_modes_analog_tv_tests,
};
kunit_test_suite(drm_modes_analog_tv_test_suite);
MODULE_AUTHOR("Maxime Ripard <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/gpu/drm/tests/drm_modes_test.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Test case for drm_damage_helper functions
*
* Copyright (c) 2022 Maíra Canal <[email protected]>
*/
#include <kunit/test.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_plane.h>
#include <drm/drm_drv.h>
/*
 * Minimal mock of the DRM object graph needed to exercise the damage
 * iterator: a device, a plane with the fb-damage-clips property enabled,
 * a framebuffer, and old/new plane states.
 */
struct drm_damage_mock {
	struct drm_driver driver;
	struct drm_device device;
	struct drm_object_properties obj_props;
	struct drm_plane plane;
	struct drm_property prop;		/* stands in for FB_DAMAGE_CLIPS */
	struct drm_framebuffer fb;
	struct drm_plane_state state;		/* new state under test */
	struct drm_plane_state old_state;	/* previous state for the iterator */
};
/*
 * Suite init: build the mock object graph with a 2048x2048 framebuffer,
 * wire up just enough of the device/plane/property plumbing for
 * drm_plane_enable_fb_damage_clips() to succeed, and stash the mock in
 * test->priv.
 */
static int drm_damage_helper_init(struct kunit *test)
{
	struct drm_damage_mock *mock;
	mock = kunit_kzalloc(test, sizeof(*mock), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, mock);
	mock->fb.width = 2048;
	mock->fb.height = 2048;
	/* non-NULL crtc so the iterator considers the plane enabled */
	mock->state.crtc = ZERO_SIZE_PTR;
	mock->state.fb = &mock->fb;
	mock->state.visible = true;
	mock->old_state.plane = &mock->plane;
	mock->state.plane = &mock->plane;
	/* just enough so that drm_plane_enable_fb_damage_clips() works */
	mock->device.driver = &mock->driver;
	mock->device.mode_config.prop_fb_damage_clips = &mock->prop;
	mock->plane.dev = &mock->device;
	mock->obj_props.count = 0;
	mock->plane.base.properties = &mock->obj_props;
	mock->prop.base.id = 1; /* 0 is an invalid id */
	mock->prop.dev = &mock->device;
	drm_plane_enable_fb_damage_clips(&mock->plane);
	test->priv = mock;
	return 0;
}
/*
 * Set both representations of the plane source rectangle (the x/y/w/h
 * fields and the drm_rect) from 16.16 fixed-point corner coordinates.
 */
static void set_plane_src(struct drm_plane_state *state, int x1, int y1, int x2,
			  int y2)
{
	state->src.x1 = x1;
	state->src.y1 = y1;
	state->src.x2 = x2;
	state->src.y2 = y2;

	state->src_x = x1;
	state->src_y = y1;
	state->src_w = x2 - x1;
	state->src_h = y2 - y1;
}
static void set_damage_clip(struct drm_mode_rect *r, int x1, int y1, int x2,
int y2)
{
r->x1 = x1;
r->y1 = y1;
r->x2 = x2;
r->y2 = y2;
}
/* Point a property blob at an array of damage rectangles of @size bytes. */
static void set_damage_blob(struct drm_property_blob *damage_blob,
			    struct drm_mode_rect *r, u32 size)
{
	damage_blob->data = r;
	damage_blob->length = size;
}
/* Attach a damage-clips blob to the plane state under test. */
static void set_plane_damage(struct drm_plane_state *state,
			     struct drm_property_blob *damage_blob)
{
	state->fb_damage_clips = damage_blob;
}
/*
 * Verify that the damage clip @r returned by the iterator equals the
 * expected whole-pixel rectangle (x1, y1)-(x2, y2), and sanity-check
 * that the expectation itself is non-empty and lies within the plane
 * source rounded out to whole pixels.
 */
static void check_damage_clip(struct kunit *test, struct drm_rect *r,
			      int x1, int y1, int x2, int y2)
{
	struct drm_damage_mock *mock = test->priv;
	struct drm_plane_state state = mock->state;
	/*
	 * Round down x1/y1 and round up x2/y2. This is because damage is not in
	 * 16.16 fixed point so to catch all pixels.
	 */
	int src_x1 = state.src.x1 >> 16;
	int src_y1 = state.src.y1 >> 16;
	int src_x2 = (state.src.x2 >> 16) + !!(state.src.x2 & 0xFFFF);
	int src_y2 = (state.src.y2 >> 16) + !!(state.src.y2 & 0xFFFF);
	if (x1 >= x2 || y1 >= y2)
		KUNIT_FAIL(test, "Cannot have damage clip with no dimension.");
	if (x1 < src_x1 || y1 < src_y1 || x2 > src_x2 || y2 > src_y2)
		KUNIT_FAIL(test, "Damage cannot be outside rounded plane src.");
	if (r->x1 != x1 || r->y1 != y1 || r->x2 != x2 || r->y2 != y2)
		KUNIT_FAIL(test, "Damage = %d %d %d %d, want = %d %d %d %d",
			   r->x1, r->y1, r->x2, r->y2, x1, y1, x2, y2);
}
/* No damage blob set: the iterator must yield the full plane src once. */
static void drm_test_damage_iter_no_damage(struct kunit *test)
{
	struct drm_damage_mock *mock = test->priv;
	struct drm_atomic_helper_damage_iter iter;
	struct drm_rect clip;
	u32 num_hits = 0;
	/* Plane src same as fb size. */
	set_plane_src(&mock->old_state, 0, 0, mock->fb.width << 16, mock->fb.height << 16);
	set_plane_src(&mock->state, 0, 0, mock->fb.width << 16, mock->fb.height << 16);
	drm_atomic_helper_damage_iter_init(&iter, &mock->old_state, &mock->state);
	drm_atomic_for_each_plane_damage(&iter, &clip)
		num_hits++;
	KUNIT_EXPECT_EQ_MSG(test, num_hits, 1, "Should return plane src as damage.");
	check_damage_clip(test, &clip, 0, 0, 2048, 2048);
}
/*
 * No damage blob, fractional plane src: the iterator must yield the
 * plane src rounded out to whole pixels.
 */
static void drm_test_damage_iter_no_damage_fractional_src(struct kunit *test)
{
	struct drm_damage_mock *mock = test->priv;
	struct drm_atomic_helper_damage_iter iter;
	struct drm_rect clip;
	u32 num_hits = 0;
	/* Plane src has fractional part. */
	set_plane_src(&mock->old_state, 0x3fffe, 0x3fffe,
		      0x3fffe + (1024 << 16), 0x3fffe + (768 << 16));
	set_plane_src(&mock->state, 0x3fffe, 0x3fffe,
		      0x3fffe + (1024 << 16), 0x3fffe + (768 << 16));
	drm_atomic_helper_damage_iter_init(&iter, &mock->old_state, &mock->state);
	drm_atomic_for_each_plane_damage(&iter, &clip)
		num_hits++;
	KUNIT_EXPECT_EQ_MSG(test, num_hits, 1,
			    "Should return rounded off plane src as damage.");
	check_damage_clip(test, &clip, 3, 3, 1028, 772);
}
/*
 * No damage blob and the plane src moved: full-plane damage must be
 * reported since the whole visible content changed.
 */
static void drm_test_damage_iter_no_damage_src_moved(struct kunit *test)
{
	struct drm_damage_mock *mock = test->priv;
	struct drm_atomic_helper_damage_iter iter;
	struct drm_rect clip;
	u32 num_hits = 0;
	/* Plane src moved since old plane state. */
	set_plane_src(&mock->old_state, 0, 0, 1024 << 16, 768 << 16);
	set_plane_src(&mock->state, 10 << 16, 10 << 16,
		      (10 + 1024) << 16, (10 + 768) << 16);
	drm_atomic_helper_damage_iter_init(&iter, &mock->old_state, &mock->state);
	drm_atomic_for_each_plane_damage(&iter, &clip)
		num_hits++;
	KUNIT_EXPECT_EQ_MSG(test, num_hits, 1, "Should return plane src as damage.");
	check_damage_clip(test, &clip, 10, 10, 1034, 778);
}
/*
 * No damage blob, fractional plane src that moved: full rounded-off
 * plane src must be reported as damage.
 */
static void drm_test_damage_iter_no_damage_fractional_src_moved(struct kunit *test)
{
	struct drm_damage_mock *mock = test->priv;
	struct drm_atomic_helper_damage_iter iter;
	struct drm_rect clip;
	u32 num_hits = 0;
	/* Plane src has fractional part and it moved since old plane state. */
	set_plane_src(&mock->old_state, 0x3fffe, 0x3fffe,
		      0x3fffe + (1024 << 16), 0x3fffe + (768 << 16));
	set_plane_src(&mock->state, 0x40002, 0x40002,
		      0x40002 + (1024 << 16), 0x40002 + (768 << 16));
	drm_atomic_helper_damage_iter_init(&iter, &mock->old_state, &mock->state);
	drm_atomic_for_each_plane_damage(&iter, &clip)
		num_hits++;
	KUNIT_EXPECT_EQ_MSG(test, num_hits, 1, "Should return plane src as damage.");
	check_damage_clip(test, &clip, 4, 4, 1029, 773);
}
/* Plane not visible: the iterator must yield nothing. */
static void drm_test_damage_iter_no_damage_not_visible(struct kunit *test)
{
	struct drm_damage_mock *mock = test->priv;
	struct drm_atomic_helper_damage_iter iter;
	struct drm_rect clip;
	u32 num_hits = 0;
	mock->state.visible = false;
	set_plane_src(&mock->old_state, 0, 0, 1024 << 16, 768 << 16);
	set_plane_src(&mock->state, 0, 0, 1024 << 16, 768 << 16);
	drm_atomic_helper_damage_iter_init(&iter, &mock->old_state, &mock->state);
	drm_atomic_for_each_plane_damage(&iter, &clip)
		num_hits++;
	KUNIT_EXPECT_EQ_MSG(test, num_hits, 0, "Should have no damage.");
}
/* Plane not attached to a CRTC: the iterator must yield nothing. */
static void drm_test_damage_iter_no_damage_no_crtc(struct kunit *test)
{
	struct drm_damage_mock *mock = test->priv;
	struct drm_atomic_helper_damage_iter iter;
	struct drm_rect clip;
	u32 num_hits = 0;
	mock->state.crtc = NULL;
	set_plane_src(&mock->old_state, 0, 0, 1024 << 16, 768 << 16);
	set_plane_src(&mock->state, 0, 0, 1024 << 16, 768 << 16);
	drm_atomic_helper_damage_iter_init(&iter, &mock->old_state, &mock->state);
	drm_atomic_for_each_plane_damage(&iter, &clip)
		num_hits++;
	KUNIT_EXPECT_EQ_MSG(test, num_hits, 0, "Should have no damage.");
}
/* Plane has no framebuffer: the iterator must yield nothing. */
static void drm_test_damage_iter_no_damage_no_fb(struct kunit *test)
{
	struct drm_damage_mock *mock = test->priv;
	struct drm_atomic_helper_damage_iter iter;
	struct drm_rect clip;
	u32 num_hits = 0;
	mock->state.fb = NULL;
	set_plane_src(&mock->old_state, 0, 0, 1024 << 16, 768 << 16);
	set_plane_src(&mock->state, 0, 0, 1024 << 16, 768 << 16);
	drm_atomic_helper_damage_iter_init(&iter, &mock->old_state, &mock->state);
	drm_atomic_for_each_plane_damage(&iter, &clip)
		num_hits++;
	KUNIT_EXPECT_EQ_MSG(test, num_hits, 0, "Should have no damage.");
}
/* One damage clip equal to the whole plane src: yielded back unchanged. */
static void drm_test_damage_iter_simple_damage(struct kunit *test)
{
	struct drm_damage_mock *mock = test->priv;
	struct drm_atomic_helper_damage_iter iter;
	struct drm_property_blob damage_blob;
	struct drm_mode_rect damage;
	struct drm_rect clip;
	u32 num_hits = 0;
	set_plane_src(&mock->old_state, 0, 0, 1024 << 16, 768 << 16);
	set_plane_src(&mock->state, 0, 0, 1024 << 16, 768 << 16);
	/* Damage set to plane src */
	set_damage_clip(&damage, 0, 0, 1024, 768);
	set_damage_blob(&damage_blob, &damage, sizeof(damage));
	set_plane_damage(&mock->state, &damage_blob);
	drm_atomic_helper_damage_iter_init(&iter, &mock->old_state, &mock->state);
	drm_atomic_for_each_plane_damage(&iter, &clip)
		num_hits++;
	KUNIT_EXPECT_EQ_MSG(test, num_hits, 1, "Should return damage when set.");
	check_damage_clip(test, &clip, 0, 0, 1024, 768);
}
/* One damage clip strictly inside the plane src: yielded back unchanged. */
static void drm_test_damage_iter_single_damage(struct kunit *test)
{
	struct drm_damage_mock *mock = test->priv;
	struct drm_atomic_helper_damage_iter iter;
	struct drm_property_blob damage_blob;
	struct drm_mode_rect damage;
	struct drm_rect clip;
	u32 num_hits = 0;
	set_plane_src(&mock->old_state, 0, 0, 1024 << 16, 768 << 16);
	set_plane_src(&mock->state, 0, 0, 1024 << 16, 768 << 16);
	set_damage_clip(&damage, 256, 192, 768, 576);
	set_damage_blob(&damage_blob, &damage, sizeof(damage));
	set_plane_damage(&mock->state, &damage_blob);
	drm_atomic_helper_damage_iter_init(&iter, &mock->old_state, &mock->state);
	drm_atomic_for_each_plane_damage(&iter, &clip)
		num_hits++;
	KUNIT_EXPECT_EQ_MSG(test, num_hits, 1, "Should return damage when set.");
	check_damage_clip(test, &clip, 256, 192, 768, 576);
}
/* One damage clip overlapping the plane src edge: clipped to the src. */
static void drm_test_damage_iter_single_damage_intersect_src(struct kunit *test)
{
	struct drm_damage_mock *mock = test->priv;
	struct drm_atomic_helper_damage_iter iter;
	struct drm_property_blob damage_blob;
	struct drm_mode_rect damage;
	struct drm_rect clip;
	u32 num_hits = 0;
	set_plane_src(&mock->old_state, 0, 0, 1024 << 16, 768 << 16);
	set_plane_src(&mock->state, 0, 0, 1024 << 16, 768 << 16);
	/* Damage intersect with plane src. */
	set_damage_clip(&damage, 256, 192, 1360, 768);
	set_damage_blob(&damage_blob, &damage, sizeof(damage));
	set_plane_damage(&mock->state, &damage_blob);
	drm_atomic_helper_damage_iter_init(&iter, &mock->old_state, &mock->state);
	drm_atomic_for_each_plane_damage(&iter, &clip)
		num_hits++;
	KUNIT_EXPECT_EQ_MSG(test, num_hits, 1, "Should return damage clipped to src.");
	check_damage_clip(test, &clip, 256, 192, 1024, 768);
}
/* One damage clip entirely outside the plane src: nothing is yielded. */
static void drm_test_damage_iter_single_damage_outside_src(struct kunit *test)
{
	struct drm_damage_mock *mock = test->priv;
	struct drm_atomic_helper_damage_iter iter;
	struct drm_property_blob damage_blob;
	struct drm_mode_rect damage;
	struct drm_rect clip;
	u32 num_hits = 0;
	set_plane_src(&mock->old_state, 0, 0, 1024 << 16, 768 << 16);
	set_plane_src(&mock->state, 0, 0, 1024 << 16, 768 << 16);
	/* Damage clip outside plane src */
	set_damage_clip(&damage, 1360, 1360, 1380, 1380);
	set_damage_blob(&damage_blob, &damage, sizeof(damage));
	set_plane_damage(&mock->state, &damage_blob);
	drm_atomic_helper_damage_iter_init(&iter, &mock->old_state, &mock->state);
	drm_atomic_for_each_plane_damage(&iter, &clip)
		num_hits++;
	KUNIT_EXPECT_EQ_MSG(test, num_hits, 0, "Should have no damage.");
}
/*
 * One damage clip inside a fractional plane src: yielded back unchanged
 * since the clip already lies within the rounded src.
 */
static void drm_test_damage_iter_single_damage_fractional_src(struct kunit *test)
{
	struct drm_damage_mock *mock = test->priv;
	struct drm_atomic_helper_damage_iter iter;
	struct drm_property_blob damage_blob;
	struct drm_mode_rect damage;
	struct drm_rect clip;
	u32 num_hits = 0;
	/* Plane src has fractional part. */
	set_plane_src(&mock->old_state, 0x40002, 0x40002,
		      0x40002 + (1024 << 16), 0x40002 + (768 << 16));
	set_plane_src(&mock->state, 0x40002, 0x40002,
		      0x40002 + (1024 << 16), 0x40002 + (768 << 16));
	set_damage_clip(&damage, 10, 10, 256, 330);
	set_damage_blob(&damage_blob, &damage, sizeof(damage));
	set_plane_damage(&mock->state, &damage_blob);
	drm_atomic_helper_damage_iter_init(&iter, &mock->old_state, &mock->state);
	drm_atomic_for_each_plane_damage(&iter, &clip)
		num_hits++;
	KUNIT_EXPECT_EQ_MSG(test, num_hits, 1, "Should return damage when set.");
	check_damage_clip(test, &clip, 10, 10, 256, 330);
}
/*
 * One damage clip overlapping a fractional plane src: clipped to the
 * src rounded out to whole pixels.
 */
static void drm_test_damage_iter_single_damage_intersect_fractional_src(struct kunit *test)
{
	struct drm_damage_mock *mock = test->priv;
	struct drm_atomic_helper_damage_iter iter;
	struct drm_property_blob damage_blob;
	struct drm_mode_rect damage;
	struct drm_rect clip;
	u32 num_hits = 0;
	/* Plane src has fractional part. */
	set_plane_src(&mock->old_state, 0x40002, 0x40002,
		      0x40002 + (1024 << 16), 0x40002 + (768 << 16));
	set_plane_src(&mock->state, 0x40002, 0x40002,
		      0x40002 + (1024 << 16), 0x40002 + (768 << 16));
	/* Damage intersect with plane src. */
	set_damage_clip(&damage, 10, 1, 1360, 330);
	set_damage_blob(&damage_blob, &damage, sizeof(damage));
	set_plane_damage(&mock->state, &damage_blob);
	drm_atomic_helper_damage_iter_init(&iter, &mock->old_state, &mock->state);
	drm_atomic_for_each_plane_damage(&iter, &clip)
		num_hits++;
	KUNIT_EXPECT_EQ_MSG(test, num_hits, 1,
			    "Should return damage clipped to rounded off src.");
	check_damage_clip(test, &clip, 10, 4, 1029, 330);
}
/* One damage clip outside a fractional plane src: nothing is yielded. */
static void drm_test_damage_iter_single_damage_outside_fractional_src(struct kunit *test)
{
	struct drm_damage_mock *mock = test->priv;
	struct drm_atomic_helper_damage_iter iter;
	struct drm_property_blob damage_blob;
	struct drm_mode_rect damage;
	struct drm_rect clip;
	u32 num_hits = 0;
	/* Plane src has fractional part. */
	set_plane_src(&mock->old_state, 0x40002, 0x40002,
		      0x40002 + (1024 << 16), 0x40002 + (768 << 16));
	set_plane_src(&mock->state, 0x40002, 0x40002,
		      0x40002 + (1024 << 16), 0x40002 + (768 << 16));
	/* Damage clip outside plane src */
	set_damage_clip(&damage, 1360, 1360, 1380, 1380);
	set_damage_blob(&damage_blob, &damage, sizeof(damage));
	set_plane_damage(&mock->state, &damage_blob);
	drm_atomic_helper_damage_iter_init(&iter, &mock->old_state, &mock->state);
	drm_atomic_for_each_plane_damage(&iter, &clip)
		num_hits++;
	KUNIT_EXPECT_EQ_MSG(test, num_hits, 0, "Should have no damage.");
}
/*
 * Damage blob set but the plane src moved: the damage clips are ignored
 * and the whole new plane src is reported as damage.
 */
static void drm_test_damage_iter_single_damage_src_moved(struct kunit *test)
{
	struct drm_damage_mock *mock = test->priv;
	struct drm_atomic_helper_damage_iter iter;
	struct drm_property_blob damage_blob;
	struct drm_mode_rect damage;
	struct drm_rect clip;
	u32 num_hits = 0;
	/* Plane src moved since old plane state. */
	set_plane_src(&mock->old_state, 0, 0, 1024 << 16, 768 << 16);
	set_plane_src(&mock->state, 10 << 16, 10 << 16,
		      (10 + 1024) << 16, (10 + 768) << 16);
	set_damage_clip(&damage, 20, 30, 256, 256);
	set_damage_blob(&damage_blob, &damage, sizeof(damage));
	set_plane_damage(&mock->state, &damage_blob);
	drm_atomic_helper_damage_iter_init(&iter, &mock->old_state, &mock->state);
	drm_atomic_for_each_plane_damage(&iter, &clip)
		num_hits++;
	KUNIT_EXPECT_EQ_MSG(test, num_hits, 1,
			    "Should return plane src as damage.");
	check_damage_clip(test, &clip, 10, 10, 1034, 778);
}
/*
 * Damage blob set but a fractional plane src moved: the damage clips
 * are ignored and the rounded-off new plane src is reported as damage.
 */
static void drm_test_damage_iter_single_damage_fractional_src_moved(struct kunit *test)
{
	struct drm_damage_mock *mock = test->priv;
	struct drm_atomic_helper_damage_iter iter;
	struct drm_property_blob damage_blob;
	struct drm_mode_rect damage;
	struct drm_rect clip;
	u32 num_hits = 0;
	/* Plane src with fractional part moved since old plane state. */
	set_plane_src(&mock->old_state, 0x3fffe, 0x3fffe,
		      0x3fffe + (1024 << 16), 0x3fffe + (768 << 16));
	set_plane_src(&mock->state, 0x40002, 0x40002,
		      0x40002 + (1024 << 16), 0x40002 + (768 << 16));
	/* Damage intersect with plane src. */
	set_damage_clip(&damage, 20, 30, 1360, 256);
	set_damage_blob(&damage_blob, &damage, sizeof(damage));
	set_plane_damage(&mock->state, &damage_blob);
	drm_atomic_helper_damage_iter_init(&iter, &mock->old_state, &mock->state);
	drm_atomic_for_each_plane_damage(&iter, &clip)
		num_hits++;
	KUNIT_EXPECT_EQ_MSG(test, num_hits, 1,
			    "Should return rounded off plane as damage.");
	check_damage_clip(test, &clip, 4, 4, 1029, 773);
}
/* Two disjoint damage clips inside the src: both yielded, in order. */
static void drm_test_damage_iter_damage(struct kunit *test)
{
	struct drm_damage_mock *mock = test->priv;
	struct drm_atomic_helper_damage_iter iter;
	struct drm_property_blob damage_blob;
	struct drm_mode_rect damage[2];
	struct drm_rect clip;
	u32 num_hits = 0;
	set_plane_src(&mock->old_state, 0, 0, 1024 << 16, 768 << 16);
	set_plane_src(&mock->state, 0, 0, 1024 << 16, 768 << 16);
	/* 2 damage clips. */
	set_damage_clip(&damage[0], 20, 30, 200, 180);
	set_damage_clip(&damage[1], 240, 200, 280, 250);
	set_damage_blob(&damage_blob, &damage[0], sizeof(damage));
	set_plane_damage(&mock->state, &damage_blob);
	drm_atomic_helper_damage_iter_init(&iter, &mock->old_state, &mock->state);
	drm_atomic_for_each_plane_damage(&iter, &clip) {
		if (num_hits == 0)
			check_damage_clip(test, &clip, 20, 30, 200, 180);
		if (num_hits == 1)
			check_damage_clip(test, &clip, 240, 200, 280, 250);
		num_hits++;
	}
	KUNIT_EXPECT_EQ_MSG(test, num_hits, 2, "Should return damage when set.");
}
/*
 * Two damage clips, one extending past a fractional src: the first is
 * yielded unchanged, the second clipped to the rounded src.
 */
static void drm_test_damage_iter_damage_one_intersect(struct kunit *test)
{
	struct drm_damage_mock *mock = test->priv;
	struct drm_atomic_helper_damage_iter iter;
	struct drm_property_blob damage_blob;
	struct drm_mode_rect damage[2];
	struct drm_rect clip;
	u32 num_hits = 0;
	set_plane_src(&mock->old_state, 0x40002, 0x40002,
		      0x40002 + (1024 << 16), 0x40002 + (768 << 16));
	set_plane_src(&mock->state, 0x40002, 0x40002,
		      0x40002 + (1024 << 16), 0x40002 + (768 << 16));
	/* 2 damage clips, one intersect plane src. */
	set_damage_clip(&damage[0], 20, 30, 200, 180);
	set_damage_clip(&damage[1], 2, 2, 1360, 1360);
	set_damage_blob(&damage_blob, &damage[0], sizeof(damage));
	set_plane_damage(&mock->state, &damage_blob);
	drm_atomic_helper_damage_iter_init(&iter, &mock->old_state, &mock->state);
	drm_atomic_for_each_plane_damage(&iter, &clip) {
		if (num_hits == 0)
			check_damage_clip(test, &clip, 20, 30, 200, 180);
		if (num_hits == 1)
			check_damage_clip(test, &clip, 4, 4, 1029, 773);
		num_hits++;
	}
	KUNIT_EXPECT_EQ_MSG(test, num_hits, 2, "Should return damage when set.");
}
/*
 * Two damage clips, one fully outside the src: only the in-bounds clip
 * is yielded.
 */
static void drm_test_damage_iter_damage_one_outside(struct kunit *test)
{
	struct drm_damage_mock *mock = test->priv;
	struct drm_atomic_helper_damage_iter iter;
	struct drm_property_blob damage_blob;
	struct drm_mode_rect damage[2];
	struct drm_rect clip;
	u32 num_hits = 0;
	set_plane_src(&mock->old_state, 0, 0, 1024 << 16, 768 << 16);
	set_plane_src(&mock->state, 0, 0, 1024 << 16, 768 << 16);
	/* 2 damage clips, one outside plane src. */
	set_damage_clip(&damage[0], 1360, 1360, 1380, 1380);
	set_damage_clip(&damage[1], 240, 200, 280, 250);
	set_damage_blob(&damage_blob, &damage[0], sizeof(damage));
	set_plane_damage(&mock->state, &damage_blob);
	drm_atomic_helper_damage_iter_init(&iter, &mock->old_state, &mock->state);
	drm_atomic_for_each_plane_damage(&iter, &clip)
		num_hits++;
	KUNIT_EXPECT_EQ_MSG(test, num_hits, 1, "Should return damage when set.");
	check_damage_clip(test, &clip, 240, 200, 280, 250);
}
/*
 * Two damage clips but the fractional src moved: the clips are ignored
 * and the whole rounded-off new plane src is reported as damage.
 */
static void drm_test_damage_iter_damage_src_moved(struct kunit *test)
{
	struct drm_damage_mock *mock = test->priv;
	struct drm_atomic_helper_damage_iter iter;
	struct drm_property_blob damage_blob;
	struct drm_mode_rect damage[2];
	struct drm_rect clip;
	u32 num_hits = 0;
	set_plane_src(&mock->old_state, 0x40002, 0x40002,
		      0x40002 + (1024 << 16), 0x40002 + (768 << 16));
	set_plane_src(&mock->state, 0x3fffe, 0x3fffe,
		      0x3fffe + (1024 << 16), 0x3fffe + (768 << 16));
	/* 2 damage clips, one outside plane src. */
	set_damage_clip(&damage[0], 1360, 1360, 1380, 1380);
	set_damage_clip(&damage[1], 240, 200, 280, 250);
	set_damage_blob(&damage_blob, &damage[0], sizeof(damage));
	set_plane_damage(&mock->state, &damage_blob);
	drm_atomic_helper_damage_iter_init(&iter, &mock->old_state, &mock->state);
	drm_atomic_for_each_plane_damage(&iter, &clip)
		num_hits++;
	KUNIT_EXPECT_EQ_MSG(test, num_hits, 1,
			    "Should return round off plane src as damage.");
	check_damage_clip(test, &clip, 3, 3, 1028, 772);
}
/* Damage clips set but plane not visible: nothing is yielded. */
static void drm_test_damage_iter_damage_not_visible(struct kunit *test)
{
	struct drm_damage_mock *mock = test->priv;
	struct drm_atomic_helper_damage_iter iter;
	struct drm_property_blob damage_blob;
	struct drm_mode_rect damage[2];
	struct drm_rect clip;
	u32 num_hits = 0;
	mock->state.visible = false;
	set_plane_src(&mock->old_state, 0x40002, 0x40002,
		      0x40002 + (1024 << 16), 0x40002 + (768 << 16));
	set_plane_src(&mock->state, 0x3fffe, 0x3fffe,
		      0x3fffe + (1024 << 16), 0x3fffe + (768 << 16));
	/* 2 damage clips, one outside plane src. */
	set_damage_clip(&damage[0], 1360, 1360, 1380, 1380);
	set_damage_clip(&damage[1], 240, 200, 280, 250);
	set_damage_blob(&damage_blob, &damage[0], sizeof(damage));
	set_plane_damage(&mock->state, &damage_blob);
	drm_atomic_helper_damage_iter_init(&iter, &mock->old_state, &mock->state);
	drm_atomic_for_each_plane_damage(&iter, &clip)
		num_hits++;
	KUNIT_EXPECT_EQ_MSG(test, num_hits, 0, "Should not return any damage.");
}
/* All damage-iterator test cases; NULL-terminated as required by KUnit. */
static struct kunit_case drm_damage_helper_tests[] = {
	KUNIT_CASE(drm_test_damage_iter_no_damage),
	KUNIT_CASE(drm_test_damage_iter_no_damage_fractional_src),
	KUNIT_CASE(drm_test_damage_iter_no_damage_src_moved),
	KUNIT_CASE(drm_test_damage_iter_no_damage_fractional_src_moved),
	KUNIT_CASE(drm_test_damage_iter_no_damage_not_visible),
	KUNIT_CASE(drm_test_damage_iter_no_damage_no_crtc),
	KUNIT_CASE(drm_test_damage_iter_no_damage_no_fb),
	KUNIT_CASE(drm_test_damage_iter_simple_damage),
	KUNIT_CASE(drm_test_damage_iter_single_damage),
	KUNIT_CASE(drm_test_damage_iter_single_damage_intersect_src),
	KUNIT_CASE(drm_test_damage_iter_single_damage_outside_src),
	KUNIT_CASE(drm_test_damage_iter_single_damage_fractional_src),
	KUNIT_CASE(drm_test_damage_iter_single_damage_intersect_fractional_src),
	KUNIT_CASE(drm_test_damage_iter_single_damage_outside_fractional_src),
	KUNIT_CASE(drm_test_damage_iter_single_damage_src_moved),
	KUNIT_CASE(drm_test_damage_iter_single_damage_fractional_src_moved),
	KUNIT_CASE(drm_test_damage_iter_damage),
	KUNIT_CASE(drm_test_damage_iter_damage_one_intersect),
	KUNIT_CASE(drm_test_damage_iter_damage_one_outside),
	KUNIT_CASE(drm_test_damage_iter_damage_src_moved),
	KUNIT_CASE(drm_test_damage_iter_damage_not_visible),
	{ }
};
/* KUnit suite for the atomic damage-clip iterator helpers. */
static struct kunit_suite drm_damage_helper_test_suite = {
	.name = "drm_damage_helper",
	.init = drm_damage_helper_init,
	.test_cases = drm_damage_helper_tests,
};
kunit_test_suite(drm_damage_helper_test_suite);
MODULE_LICENSE("GPL");
| linux-master | drivers/gpu/drm/tests/drm_damage_helper_test.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Test cases for the drm_plane_helper functions
*
* Copyright (c) 2022 Maíra Canal <[email protected]>
*/
#include <kunit/test.h>

#include <linux/string.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_modes.h>
#include <drm/drm_rect.h>
/*
 * Fixed CRTC state used by every case: an active 1024x768 mode, so the
 * plane clipping window is always (0, 0)-(1024, 768).
 */
static const struct drm_crtc_state crtc_state = {
	.crtc = ZERO_SIZE_PTR,	/* non-NULL so the plane counts as enabled */
	.enable = true,
	.active = true,
	.mode = {
		DRM_MODE("1024x768", 0, 65000, 1024, 1048,
			 1184, 1344, 0, 768, 771, 777, 806, 0,
			 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC)
	},
};
/*
 * One parameterized case for drm_atomic_helper_check_plane_state():
 * the input src/crtc rectangles and scaling constraints, plus the
 * src/crtc rectangles expected after clipping (unused for the
 * invalid-state cases, which only expect a negative return).
 */
struct drm_check_plane_state_test {
	const char *name;	/* case name, used as the KUnit parameter description */
	const char *msg;	/* failure message for the check itself */
	struct {
		unsigned int x;
		unsigned int y;
		unsigned int w;
		unsigned int h;
	} src, src_expected;	/* source rectangles, 16.16 fixed point */
	struct {
		int x;
		int y;
		unsigned int w;
		unsigned int h;
	} crtc, crtc_expected;	/* destination rectangles, whole pixels */
	unsigned int rotation;	/* DRM_MODE_ROTATE_x / DRM_MODE_REFLECT_x bitmask */
	int min_scale;		/* minimum allowed scaling, 16.16 fixed point */
	int max_scale;		/* maximum allowed scaling, 16.16 fixed point */
	bool can_position;	/* whether the plane may cover only part of the CRTC */
};
/*
 * Per-case init: build a mock plane state over a 2048x2048 framebuffer
 * using the current parameter's src/crtc rectangles and rotation, and
 * stash it in test->priv.
 */
static int drm_plane_helper_init(struct kunit *test)
{
	const struct drm_check_plane_state_test *params = test->param_value;
	struct drm_plane *plane;
	struct drm_framebuffer *fb;
	struct drm_plane_state *mock;
	plane = kunit_kzalloc(test, sizeof(*plane), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, plane);
	fb = kunit_kzalloc(test, sizeof(*fb), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, fb);
	fb->width = 2048;
	fb->height = 2048;
	mock = kunit_kzalloc(test, sizeof(*mock), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, mock);
	mock->plane = plane;
	/* non-NULL crtc so the helper treats the plane as enabled */
	mock->crtc = ZERO_SIZE_PTR;
	mock->fb = fb;
	mock->rotation = params->rotation;
	mock->src_x = params->src.x;
	mock->src_y = params->src.y;
	mock->src_w = params->src.w;
	mock->src_h = params->src.h;
	mock->crtc_x = params->crtc.x;
	mock->crtc_y = params->crtc.y;
	mock->crtc_w = params->crtc.w;
	mock->crtc_h = params->crtc.h;
	test->priv = mock;
	return 0;
}
/*
 * Assert the clipped source rectangle is non-negative and equals the
 * expected 16.16 fixed-point rectangle given as x/y/w/h.
 */
static void check_src_eq(struct kunit *test, struct drm_plane_state *plane_state,
			 unsigned int src_x, unsigned int src_y,
			 unsigned int src_w, unsigned int src_h)
{
	struct drm_rect expected = DRM_RECT_INIT(src_x, src_y, src_w, src_h);
	KUNIT_ASSERT_GE_MSG(test, plane_state->src.x1, 0,
			    "src x coordinate %x should never be below 0, src: " DRM_RECT_FP_FMT,
			    plane_state->src.x1, DRM_RECT_FP_ARG(&plane_state->src));
	KUNIT_ASSERT_GE_MSG(test, plane_state->src.y1, 0,
			    "src y coordinate %x should never be below 0, src: " DRM_RECT_FP_FMT,
			    plane_state->src.y1, DRM_RECT_FP_ARG(&plane_state->src));
	KUNIT_EXPECT_TRUE_MSG(test, drm_rect_equals(&plane_state->src, &expected),
			      "dst: " DRM_RECT_FP_FMT ", expected: " DRM_RECT_FP_FMT,
			      DRM_RECT_FP_ARG(&plane_state->src), DRM_RECT_FP_ARG(&expected));
}
/*
 * Assert the clipped destination rectangle equals the expected
 * whole-pixel rectangle given as x/y/w/h.
 */
static void check_crtc_eq(struct kunit *test, struct drm_plane_state *plane_state,
			  int crtc_x, int crtc_y,
			  unsigned int crtc_w, unsigned int crtc_h)
{
	struct drm_rect expected = DRM_RECT_INIT(crtc_x, crtc_y, crtc_w, crtc_h);
	KUNIT_EXPECT_TRUE_MSG(test, drm_rect_equals(&plane_state->dst, &expected),
			      "dst: " DRM_RECT_FMT ", expected: " DRM_RECT_FMT,
			      DRM_RECT_ARG(&plane_state->dst), DRM_RECT_ARG(&expected));
}
/*
 * Valid-state cases: the check must succeed, mark the plane visible,
 * and clip src/dst to the rectangles the case expects.
 */
static void drm_test_check_plane_state(struct kunit *test)
{
	const struct drm_check_plane_state_test *params = test->param_value;
	struct drm_plane_state *plane_state = test->priv;
	KUNIT_ASSERT_EQ_MSG(test,
			    drm_atomic_helper_check_plane_state(plane_state, &crtc_state,
								params->min_scale,
								params->max_scale,
								params->can_position, false),
			    0, params->msg);
	KUNIT_EXPECT_TRUE(test, plane_state->visible);
	check_src_eq(test, plane_state, params->src_expected.x, params->src_expected.y,
		     params->src_expected.w, params->src_expected.h);
	check_crtc_eq(test, plane_state, params->crtc_expected.x, params->crtc_expected.y,
		      params->crtc_expected.w, params->crtc_expected.h);
}
static void drm_check_plane_state_desc(const struct drm_check_plane_state_test *t,
char *desc)
{
sprintf(desc, "%s", t->name);
}
/*
 * Valid cases: clipping, rotation, positioning, up/down-scaling at the
 * exact limits, and fixed-point rounding at the clip boundaries.
 */
static const struct drm_check_plane_state_test drm_check_plane_state_tests[] = {
	{
		.name = "clipping_simple",
		.msg = "Simple clipping check should pass",
		.src = { 0, 0,
			 2048 << 16,
			 2048 << 16 },
		.crtc = { 0, 0, 2048, 2048 },
		.rotation = DRM_MODE_ROTATE_0,
		.min_scale = DRM_PLANE_NO_SCALING,
		.max_scale = DRM_PLANE_NO_SCALING,
		.can_position = false,
		.src_expected = { 0, 0, 1024 << 16, 768 << 16 },
		.crtc_expected = { 0, 0, 1024, 768 },
	},
	{
		.name = "clipping_rotate_reflect",
		.msg = "Rotated clipping check should pass",
		.src = { 0, 0,
			 2048 << 16,
			 2048 << 16 },
		.crtc = { 0, 0, 2048, 2048 },
		.rotation = DRM_MODE_ROTATE_90 | DRM_MODE_REFLECT_X,
		.min_scale = DRM_PLANE_NO_SCALING,
		.max_scale = DRM_PLANE_NO_SCALING,
		.can_position = false,
		.src_expected = { 0, 0, 768 << 16, 1024 << 16 },
		.crtc_expected = { 0, 0, 1024, 768 },
	},
	{
		.name = "positioning_simple",
		.msg = "Simple positioning should work",
		.src = { 0, 0, 1023 << 16, 767 << 16 },
		.crtc = { 0, 0, 1023, 767 },
		.rotation = DRM_MODE_ROTATE_0,
		.min_scale = DRM_PLANE_NO_SCALING,
		.max_scale = DRM_PLANE_NO_SCALING,
		.can_position = true,
		.src_expected = { 0, 0, 1023 << 16, 767 << 16 },
		.crtc_expected = { 0, 0, 1023, 767 },
	},
	{
		.name = "upscaling",
		.msg = "Upscaling exactly 2x should work",
		.src = { 0, 0, 512 << 16, 384 << 16 },
		.crtc = { 0, 0, 1024, 768 },
		.rotation = DRM_MODE_ROTATE_0,
		.min_scale = 0x8000,
		.max_scale = DRM_PLANE_NO_SCALING,
		.can_position = false,
		.src_expected = { 0, 0, 512 << 16, 384 << 16 },
		.crtc_expected = { 0, 0, 1024, 768 },
	},
	{
		.name = "downscaling",
		.msg = "Should succeed with exact scaling limit",
		.src = { 0, 0, 2048 << 16, 1536 << 16 },
		.crtc = { 0, 0, 1024, 768 },
		.rotation = DRM_MODE_ROTATE_0,
		.min_scale = DRM_PLANE_NO_SCALING,
		.max_scale = 0x20000,
		.can_position = false,
		.src_expected = { 0, 0, 2048 << 16, 1536 << 16 },
		.crtc_expected = { 0, 0, 1024, 768 },
	},
	{
		.name = "rounding1",
		.msg = "Should succeed by clipping to exact multiple",
		.src = { 0, 0, 0x40001, 0x40001 },
		.crtc = { 1022, 766, 4, 4 },
		.rotation = DRM_MODE_ROTATE_0,
		.min_scale = DRM_PLANE_NO_SCALING,
		.max_scale = 0x10001,
		.can_position = true,
		.src_expected = { 0, 0, 2 << 16, 2 << 16 },
		.crtc_expected = { 1022, 766, 2, 2 },
	},
	{
		.name = "rounding2",
		.msg = "Should succeed by clipping to exact multiple",
		.src = { 0x20001, 0x20001, 0x4040001, 0x3040001 },
		.crtc = { -2, -2, 1028, 772 },
		.rotation = DRM_MODE_ROTATE_0,
		.min_scale = DRM_PLANE_NO_SCALING,
		.max_scale = 0x10001,
		.can_position = false,
		.src_expected = { 0x40002, 0x40002, 1024 << 16, 768 << 16 },
		.crtc_expected = { 0, 0, 1024, 768 },
	},
	{
		.name = "rounding3",
		.msg = "Should succeed by clipping to exact multiple",
		.src = { 0, 0, 0x3ffff, 0x3ffff },
		.crtc = { 1022, 766, 4, 4 },
		.rotation = DRM_MODE_ROTATE_0,
		.min_scale = 0xffff,
		.max_scale = DRM_PLANE_NO_SCALING,
		.can_position = true,
		/* Should not be rounded to 0x20001, which would be upscaling. */
		.src_expected = { 0, 0, 2 << 16, 2 << 16 },
		.crtc_expected = { 1022, 766, 2, 2 },
	},
	{
		.name = "rounding4",
		.msg = "Should succeed by clipping to exact multiple",
		.src = { 0x1ffff, 0x1ffff, 0x403ffff, 0x303ffff },
		.crtc = { -2, -2, 1028, 772 },
		.rotation = DRM_MODE_ROTATE_0,
		.min_scale = 0xffff,
		.max_scale = DRM_PLANE_NO_SCALING,
		.can_position = false,
		.src_expected = { 0x3fffe, 0x3fffe, 1024 << 16, 768 << 16 },
		.crtc_expected = { 0, 0, 1024, 768 },
	},
};
KUNIT_ARRAY_PARAM(drm_check_plane_state, drm_check_plane_state_tests, drm_check_plane_state_desc);
/*
 * Parameterized negative test: run drm_atomic_helper_check_plane_state() on a
 * plane configuration that is expected to be invalid and require a negative
 * error code. Uses the file-scope crtc_state and the plane state stashed in
 * test->priv (both set up earlier in this file by the suite's init hook).
 */
static void drm_test_check_invalid_plane_state(struct kunit *test)
{
	const struct drm_check_plane_state_test *params = test->param_value;
	struct drm_plane_state *plane_state = test->priv;

	/* A return value < 0 means the helper rejected the configuration. */
	KUNIT_ASSERT_LT_MSG(test,
			    drm_atomic_helper_check_plane_state(plane_state, &crtc_state,
								params->min_scale,
								params->max_scale,
								params->can_position, false),
			    0, params->msg);
}
/*
 * Negative parameter table for drm_test_check_invalid_plane_state(): each
 * entry describes a plane configuration the atomic helper must reject.
 * No .src_expected/.crtc_expected fields are needed since the call fails.
 */
static const struct drm_check_plane_state_test drm_check_invalid_plane_state_tests[] = {
	{
		.name = "positioning_invalid",
		.msg = "Should not be able to position on the crtc with can_position=false",
		.src = { 0, 0, 1023 << 16, 767 << 16 },
		.crtc = { 0, 0, 1023, 767 },
		.rotation = DRM_MODE_ROTATE_0,
		.min_scale = DRM_PLANE_NO_SCALING,
		.max_scale = DRM_PLANE_NO_SCALING,
		.can_position = false,
	},
	{
		.name = "upscaling_invalid",
		.msg = "Upscaling out of range should fail",
		.src = { 0, 0, 512 << 16, 384 << 16 },
		.crtc = { 0, 0, 1024, 768 },
		.rotation = DRM_MODE_ROTATE_0,
		.min_scale = 0x8001,
		.max_scale = DRM_PLANE_NO_SCALING,
		.can_position = false,
	},
	{
		.name = "downscaling_invalid",
		.msg = "Downscaling out of range should fail",
		.src = { 0, 0, 2048 << 16, 1536 << 16 },
		.crtc = { 0, 0, 1024, 768 },
		.rotation = DRM_MODE_ROTATE_0,
		.min_scale = DRM_PLANE_NO_SCALING,
		.max_scale = 0x1ffff,
		.can_position = false,
	},
};

KUNIT_ARRAY_PARAM(drm_check_invalid_plane_state, drm_check_invalid_plane_state_tests,
		  drm_check_plane_state_desc);

/* Register both the positive and the negative parameterized cases. */
static struct kunit_case drm_plane_helper_test[] = {
	KUNIT_CASE_PARAM(drm_test_check_plane_state, drm_check_plane_state_gen_params),
	KUNIT_CASE_PARAM(drm_test_check_invalid_plane_state,
			 drm_check_invalid_plane_state_gen_params),
	{}
};

static struct kunit_suite drm_plane_helper_test_suite = {
	.name = "drm_plane_helper",
	.init = drm_plane_helper_init,	/* defined earlier in this file */
	.test_cases = drm_plane_helper_test,
};

kunit_test_suite(drm_plane_helper_test_suite);

MODULE_LICENSE("GPL");
| linux-master | drivers/gpu/drm/tests/drm_plane_helper_test.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Test cases for the drm_format functions
*
* Copyright (c) 2022 Maíra Canal <[email protected]>
*/
#include <kunit/test.h>
#include <drm/drm_fourcc.h>
/* A NULL format info must yield block width 0 for any plane index. */
static void drm_test_format_block_width_invalid(struct kunit *test)
{
	const struct drm_format_info *info = NULL;

	KUNIT_EXPECT_EQ(test, drm_format_info_block_width(info, 0), 0);
	KUNIT_EXPECT_EQ(test, drm_format_info_block_width(info, -1), 0);
	KUNIT_EXPECT_EQ(test, drm_format_info_block_width(info, 1), 0);
}

/* Single-plane format: only plane 0 reports a block width. */
static void drm_test_format_block_width_one_plane(struct kunit *test)
{
	const struct drm_format_info *info = drm_format_info(DRM_FORMAT_XRGB4444);

	KUNIT_ASSERT_NOT_NULL(test, info);

	KUNIT_EXPECT_EQ(test, drm_format_info_block_width(info, 0), 1);
	KUNIT_EXPECT_EQ(test, drm_format_info_block_width(info, 1), 0);
	KUNIT_EXPECT_EQ(test, drm_format_info_block_width(info, -1), 0);
}

/* Two-plane format (NV12): planes 0 and 1 are valid, others are not. */
static void drm_test_format_block_width_two_plane(struct kunit *test)
{
	const struct drm_format_info *info = drm_format_info(DRM_FORMAT_NV12);

	KUNIT_ASSERT_NOT_NULL(test, info);

	KUNIT_EXPECT_EQ(test, drm_format_info_block_width(info, 0), 1);
	KUNIT_EXPECT_EQ(test, drm_format_info_block_width(info, 1), 1);
	KUNIT_EXPECT_EQ(test, drm_format_info_block_width(info, 2), 0);
	KUNIT_EXPECT_EQ(test, drm_format_info_block_width(info, -1), 0);
}

/* Three-plane format (YUV422): planes 0..2 are valid, others are not. */
static void drm_test_format_block_width_three_plane(struct kunit *test)
{
	const struct drm_format_info *info = drm_format_info(DRM_FORMAT_YUV422);

	KUNIT_ASSERT_NOT_NULL(test, info);

	KUNIT_EXPECT_EQ(test, drm_format_info_block_width(info, 0), 1);
	KUNIT_EXPECT_EQ(test, drm_format_info_block_width(info, 1), 1);
	KUNIT_EXPECT_EQ(test, drm_format_info_block_width(info, 2), 1);
	KUNIT_EXPECT_EQ(test, drm_format_info_block_width(info, 3), 0);
	KUNIT_EXPECT_EQ(test, drm_format_info_block_width(info, -1), 0);
}

/* Tiled format (X0L0): plane 0 reports a block width of 2 pixels. */
static void drm_test_format_block_width_tiled(struct kunit *test)
{
	const struct drm_format_info *info = drm_format_info(DRM_FORMAT_X0L0);

	KUNIT_ASSERT_NOT_NULL(test, info);

	KUNIT_EXPECT_EQ(test, drm_format_info_block_width(info, 0), 2);
	KUNIT_EXPECT_EQ(test, drm_format_info_block_width(info, 1), 0);
	KUNIT_EXPECT_EQ(test, drm_format_info_block_width(info, -1), 0);
}
/* A NULL format info must yield block height 0 for any plane index. */
static void drm_test_format_block_height_invalid(struct kunit *test)
{
	const struct drm_format_info *info = NULL;

	KUNIT_EXPECT_EQ(test, drm_format_info_block_height(info, 0), 0);
	KUNIT_EXPECT_EQ(test, drm_format_info_block_height(info, -1), 0);
	KUNIT_EXPECT_EQ(test, drm_format_info_block_height(info, 1), 0);
}

/* Single-plane format: only plane 0 reports a block height. */
static void drm_test_format_block_height_one_plane(struct kunit *test)
{
	const struct drm_format_info *info = drm_format_info(DRM_FORMAT_XRGB4444);

	KUNIT_ASSERT_NOT_NULL(test, info);

	KUNIT_EXPECT_EQ(test, drm_format_info_block_height(info, 0), 1);
	KUNIT_EXPECT_EQ(test, drm_format_info_block_height(info, -1), 0);
	KUNIT_EXPECT_EQ(test, drm_format_info_block_height(info, 1), 0);
}

/* Two-plane format (NV12): planes 0 and 1 are valid, others are not. */
static void drm_test_format_block_height_two_plane(struct kunit *test)
{
	const struct drm_format_info *info = drm_format_info(DRM_FORMAT_NV12);

	KUNIT_ASSERT_NOT_NULL(test, info);

	KUNIT_EXPECT_EQ(test, drm_format_info_block_height(info, 0), 1);
	KUNIT_EXPECT_EQ(test, drm_format_info_block_height(info, 1), 1);
	KUNIT_EXPECT_EQ(test, drm_format_info_block_height(info, 2), 0);
	KUNIT_EXPECT_EQ(test, drm_format_info_block_height(info, -1), 0);
}

/* Three-plane format (YUV422): planes 0..2 are valid, others are not. */
static void drm_test_format_block_height_three_plane(struct kunit *test)
{
	const struct drm_format_info *info = drm_format_info(DRM_FORMAT_YUV422);

	KUNIT_ASSERT_NOT_NULL(test, info);

	KUNIT_EXPECT_EQ(test, drm_format_info_block_height(info, 0), 1);
	KUNIT_EXPECT_EQ(test, drm_format_info_block_height(info, 1), 1);
	KUNIT_EXPECT_EQ(test, drm_format_info_block_height(info, 2), 1);
	KUNIT_EXPECT_EQ(test, drm_format_info_block_height(info, 3), 0);
	KUNIT_EXPECT_EQ(test, drm_format_info_block_height(info, -1), 0);
}

/* Tiled format (X0L0): plane 0 reports a block height of 2 pixels. */
static void drm_test_format_block_height_tiled(struct kunit *test)
{
	const struct drm_format_info *info = drm_format_info(DRM_FORMAT_X0L0);

	KUNIT_ASSERT_NOT_NULL(test, info);

	KUNIT_EXPECT_EQ(test, drm_format_info_block_height(info, 0), 2);
	KUNIT_EXPECT_EQ(test, drm_format_info_block_height(info, 1), 0);
	KUNIT_EXPECT_EQ(test, drm_format_info_block_height(info, -1), 0);
}
/* A NULL format info must yield a minimum pitch of 0. */
static void drm_test_format_min_pitch_invalid(struct kunit *test)
{
	const struct drm_format_info *info = NULL;

	KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 0), 0);
	KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, -1, 0), 0);
	KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 1, 0), 0);
}

/* 8 bpp single-plane format: pitch equals the buffer width in pixels. */
static void drm_test_format_min_pitch_one_plane_8bpp(struct kunit *test)
{
	const struct drm_format_info *info = drm_format_info(DRM_FORMAT_RGB332);

	KUNIT_ASSERT_NOT_NULL(test, info);

	/* Zero width and out-of-range plane indices yield 0. */
	KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 0), 0);
	KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, -1, 0), 0);
	KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 1, 0), 0);

	KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 1), 1);
	KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 2), 2);
	KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 640), 640);
	KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 1024), 1024);
	KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 1920), 1920);
	KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 4096), 4096);
	KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 671), 671);
	/* Extreme widths must not overflow the u64 result. */
	KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, UINT_MAX),
			(uint64_t)UINT_MAX);
	KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, (UINT_MAX - 1)),
			(uint64_t)(UINT_MAX - 1));
}

/* 16 bpp single-plane format: pitch is 2 bytes per pixel. */
static void drm_test_format_min_pitch_one_plane_16bpp(struct kunit *test)
{
	const struct drm_format_info *info = drm_format_info(DRM_FORMAT_XRGB4444);

	KUNIT_ASSERT_NOT_NULL(test, info);

	KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 0), 0);
	KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, -1, 0), 0);
	KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 1, 0), 0);

	KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 1), 2);
	KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 2), 4);
	KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 640), 1280);
	KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 1024), 2048);
	KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 1920), 3840);
	KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 4096), 8192);
	KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 671), 1342);
	KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, UINT_MAX),
			(uint64_t)UINT_MAX * 2);
	KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, (UINT_MAX - 1)),
			(uint64_t)(UINT_MAX - 1) * 2);
}

/* 24 bpp single-plane format: pitch is 3 bytes per pixel. */
static void drm_test_format_min_pitch_one_plane_24bpp(struct kunit *test)
{
	const struct drm_format_info *info = drm_format_info(DRM_FORMAT_RGB888);

	KUNIT_ASSERT_NOT_NULL(test, info);

	KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 0), 0);
	KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, -1, 0), 0);
	KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 1, 0), 0);

	KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 1), 3);
	KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 2), 6);
	KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 640), 1920);
	KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 1024), 3072);
	KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 1920), 5760);
	KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 4096), 12288);
	KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 671), 2013);
	KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, UINT_MAX),
			(uint64_t)UINT_MAX * 3);
	KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, UINT_MAX - 1),
			(uint64_t)(UINT_MAX - 1) * 3);
}

/* 32 bpp single-plane format: pitch is 4 bytes per pixel. */
static void drm_test_format_min_pitch_one_plane_32bpp(struct kunit *test)
{
	const struct drm_format_info *info = drm_format_info(DRM_FORMAT_ABGR8888);

	KUNIT_ASSERT_NOT_NULL(test, info);

	KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 0), 0);
	KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, -1, 0), 0);
	KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 1, 0), 0);

	KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 1), 4);
	KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 2), 8);
	KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 640), 2560);
	KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 1024), 4096);
	KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 1920), 7680);
	KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 4096), 16384);
	KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 671), 2684);
	KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, UINT_MAX),
			(uint64_t)UINT_MAX * 4);
	KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, UINT_MAX - 1),
			(uint64_t)(UINT_MAX - 1) * 4);
}
/*
 * Pitch checks for the two-plane NV12 layout. Plane 0 (Y) is 1 byte per
 * pixel; plane 1 stores interleaved CbCr at 2 bytes per (2x-subsampled)
 * pixel, so a plane-1 buffer width of w needs a pitch of 2 * w — the table
 * below pairs each plane-0 width with the matching plane-1 half-width.
 */
static void drm_test_format_min_pitch_two_plane(struct kunit *test)
{
	const struct drm_format_info *info = drm_format_info(DRM_FORMAT_NV12);

	KUNIT_ASSERT_NOT_NULL(test, info);

	/* Zero width and out-of-range plane indices yield 0. */
	KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 0), 0);
	KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 1, 0), 0);
	KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, -1, 0), 0);
	KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 2, 0), 0);

	KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 1), 1);
	KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 1, 1), 2);
	KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 2), 2);
	/*
	 * Was a verbatim duplicate of the (1, 1) check above; test plane 1 at
	 * width 2 instead so both planes cover the same set of widths.
	 */
	KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 1, 2), 4);
	KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 640), 640);
	KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 1, 320), 640);
	KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 1024), 1024);
	KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 1, 512), 1024);
	KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 1920), 1920);
	KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 1, 960), 1920);
	KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 4096), 4096);
	KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 1, 2048), 4096);
	KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 671), 671);
	KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 1, 336), 672);
	/* Extreme widths must not overflow the u64 result. */
	KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, UINT_MAX),
			(uint64_t)UINT_MAX);
	KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 1, UINT_MAX / 2 + 1),
			(uint64_t)UINT_MAX + 1);
	KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, (UINT_MAX - 1)),
			(uint64_t)(UINT_MAX - 1));
	KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 1, (UINT_MAX - 1) / 2),
			(uint64_t)(UINT_MAX - 1));
}
/*
 * Pitch checks for the three-plane YUV422 layout: every plane is 1 byte per
 * pixel, with the chroma planes (1 and 2) horizontally subsampled by 2.
 */
static void drm_test_format_min_pitch_three_plane_8bpp(struct kunit *test)
{
	const struct drm_format_info *info = drm_format_info(DRM_FORMAT_YUV422);

	KUNIT_ASSERT_NOT_NULL(test, info);

	/* Zero width and out-of-range plane indices yield 0. */
	KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 0), 0);
	KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 1, 0), 0);
	KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 2, 0), 0);
	KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, -1, 0), 0);
	KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 3, 0), 0);

	KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 1), 1);
	KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 1, 1), 1);
	KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 2, 1), 1);
	KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 2), 2);
	KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 1, 2), 2);
	KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 2, 2), 2);
	KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 640), 640);
	KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 1, 320), 320);
	KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 2, 320), 320);
	KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 1024), 1024);
	KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 1, 512), 512);
	KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 2, 512), 512);
	KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 1920), 1920);
	KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 1, 960), 960);
	KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 2, 960), 960);
	KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 4096), 4096);
	KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 1, 2048), 2048);
	KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 2, 2048), 2048);
	KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 671), 671);
	KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 1, 336), 336);
	KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 2, 336), 336);
	/* Extreme widths must not overflow the u64 result. */
	KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, UINT_MAX),
			(uint64_t)UINT_MAX);
	KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 1, UINT_MAX / 2 + 1),
			(uint64_t)UINT_MAX / 2 + 1);
	KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 2, UINT_MAX / 2 + 1),
			(uint64_t)UINT_MAX / 2 + 1);
	KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, (UINT_MAX - 1) / 2),
			(uint64_t)(UINT_MAX - 1) / 2);
	KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 1, (UINT_MAX - 1) / 2),
			(uint64_t)(UINT_MAX - 1) / 2);
	KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 2, (UINT_MAX - 1) / 2),
			(uint64_t)(UINT_MAX - 1) / 2);
}

/* Tiled format (X0L2): single plane, effectively 2 bytes per pixel. */
static void drm_test_format_min_pitch_tiled(struct kunit *test)
{
	const struct drm_format_info *info = drm_format_info(DRM_FORMAT_X0L2);

	KUNIT_ASSERT_NOT_NULL(test, info);

	KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 0), 0);
	KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, -1, 0), 0);
	KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 1, 0), 0);

	KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 1), 2);
	KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 2), 4);
	KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 640), 1280);
	KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 1024), 2048);
	KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 1920), 3840);
	KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 4096), 8192);
	KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 671), 1342);
	KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, UINT_MAX),
			(uint64_t)UINT_MAX * 2);
	KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, UINT_MAX - 1),
			(uint64_t)(UINT_MAX - 1) * 2);
}
/* KUnit registration for all drm_format_info accessor tests above. */
static struct kunit_case drm_format_tests[] = {
	KUNIT_CASE(drm_test_format_block_width_invalid),
	KUNIT_CASE(drm_test_format_block_width_one_plane),
	KUNIT_CASE(drm_test_format_block_width_two_plane),
	KUNIT_CASE(drm_test_format_block_width_three_plane),
	KUNIT_CASE(drm_test_format_block_width_tiled),
	KUNIT_CASE(drm_test_format_block_height_invalid),
	KUNIT_CASE(drm_test_format_block_height_one_plane),
	KUNIT_CASE(drm_test_format_block_height_two_plane),
	KUNIT_CASE(drm_test_format_block_height_three_plane),
	KUNIT_CASE(drm_test_format_block_height_tiled),
	KUNIT_CASE(drm_test_format_min_pitch_invalid),
	KUNIT_CASE(drm_test_format_min_pitch_one_plane_8bpp),
	KUNIT_CASE(drm_test_format_min_pitch_one_plane_16bpp),
	KUNIT_CASE(drm_test_format_min_pitch_one_plane_24bpp),
	KUNIT_CASE(drm_test_format_min_pitch_one_plane_32bpp),
	KUNIT_CASE(drm_test_format_min_pitch_two_plane),
	KUNIT_CASE(drm_test_format_min_pitch_three_plane_8bpp),
	KUNIT_CASE(drm_test_format_min_pitch_tiled),
	{}
};

static struct kunit_suite drm_format_test_suite = {
	.name = "drm_format",
	.test_cases = drm_format_tests,
};

kunit_test_suite(drm_format_test_suite);

MODULE_LICENSE("GPL");
| linux-master | drivers/gpu/drm/tests/drm_format_test.c |
// SPDX-License-Identifier: GPL-2.0
#include <drm/drm_drv.h>
#include <drm/drm_kunit_helpers.h>
#include <drm/drm_managed.h>
#include <kunit/resource.h>
#include <linux/device.h>
/* Ought to be enough for anybody */
#define TEST_TIMEOUT_MS	100

/* Shared between the test body and the drmm action callback. */
struct managed_test_priv {
	bool action_done;		/* set by drm_action() once it has run */
	wait_queue_head_t action_wq;	/* woken after action_done is set */
};

/* drmm-managed action: record that it ran and wake the waiting test. */
static void drm_action(struct drm_device *drm, void *ptr)
{
	struct managed_test_priv *priv = ptr;

	priv->action_done = true;
	wake_up_interruptible(&priv->action_wq);
}

/*
 * Register a drmm action, then unregister and free the device. The managed
 * action must fire as part of device teardown; the wait is placed after the
 * free on purpose, so a timeout means the action never executed.
 */
static void drm_test_managed_run_action(struct kunit *test)
{
	struct managed_test_priv *priv;
	struct drm_device *drm;
	struct device *dev;
	int ret;

	priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv);
	init_waitqueue_head(&priv->action_wq);

	dev = drm_kunit_helper_alloc_device(test);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);

	drm = __drm_kunit_helper_alloc_drm_device(test, dev, sizeof(*drm), 0, DRIVER_MODESET);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, drm);

	ret = drmm_add_action_or_reset(drm, drm_action, priv);
	KUNIT_EXPECT_EQ(test, ret, 0);

	ret = drm_dev_register(drm, 0);
	KUNIT_ASSERT_EQ(test, ret, 0);

	drm_dev_unregister(drm);
	drm_kunit_helper_free_device(test, dev);

	ret = wait_event_interruptible_timeout(priv->action_wq, priv->action_done,
					       msecs_to_jiffies(TEST_TIMEOUT_MS));
	KUNIT_EXPECT_GT(test, ret, 0);
}

static struct kunit_case drm_managed_tests[] = {
	KUNIT_CASE(drm_test_managed_run_action),
	{}
};

static struct kunit_suite drm_managed_test_suite = {
	.name = "drm-test-managed",
	.test_cases = drm_managed_tests
};

kunit_test_suite(drm_managed_test_suite);

MODULE_AUTHOR("Maxime Ripard <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/gpu/drm/tests/drm_managed_test.c |
// SPDX-License-Identifier: GPL-2.0+
#include <kunit/test.h>
#include <drm/drm_device.h>
#include <drm/drm_file.h>
#include <drm/drm_format_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_mode.h>
#include <drm/drm_print.h>
#include <drm/drm_rect.h>
#include "../drm_crtc_internal.h"
#define TEST_BUF_SIZE	50

/*
 * Per-destination-format expected results. dst_pitch is the pitch handed to
 * the conversion helper (0 selects the minimum pitch for the clip width);
 * expected[] holds the destination buffer contents after the conversion.
 */
struct convert_to_gray8_result {
	unsigned int dst_pitch;
	const u8 expected[TEST_BUF_SIZE];
};

struct convert_to_rgb332_result {
	unsigned int dst_pitch;
	const u8 expected[TEST_BUF_SIZE];
};

struct convert_to_rgb565_result {
	unsigned int dst_pitch;
	const u16 expected[TEST_BUF_SIZE];
	const u16 expected_swab[TEST_BUF_SIZE];	/* byte-swapped variant */
};

struct convert_to_xrgb1555_result {
	unsigned int dst_pitch;
	const u16 expected[TEST_BUF_SIZE];
};

struct convert_to_argb1555_result {
	unsigned int dst_pitch;
	const u16 expected[TEST_BUF_SIZE];
};

struct convert_to_rgba5551_result {
	unsigned int dst_pitch;
	const u16 expected[TEST_BUF_SIZE];
};

struct convert_to_rgb888_result {
	unsigned int dst_pitch;
	const u8 expected[TEST_BUF_SIZE];
};

struct convert_to_argb8888_result {
	unsigned int dst_pitch;
	const u32 expected[TEST_BUF_SIZE];
};

struct convert_to_xrgb2101010_result {
	unsigned int dst_pitch;
	const u32 expected[TEST_BUF_SIZE];
};

struct convert_to_argb2101010_result {
	unsigned int dst_pitch;
	const u32 expected[TEST_BUF_SIZE];
};

struct convert_to_mono_result {
	unsigned int dst_pitch;
	const u8 expected[TEST_BUF_SIZE];
};

/*
 * One parameterized test case: an XRGB8888 source buffer with its pitch and
 * clip rectangle, plus the expected output for every destination format.
 */
struct convert_xrgb8888_case {
	const char *name;
	unsigned int pitch;
	struct drm_rect clip;
	const u32 xrgb8888[TEST_BUF_SIZE];
	struct convert_to_gray8_result gray8_result;
	struct convert_to_rgb332_result rgb332_result;
	struct convert_to_rgb565_result rgb565_result;
	struct convert_to_xrgb1555_result xrgb1555_result;
	struct convert_to_argb1555_result argb1555_result;
	struct convert_to_rgba5551_result rgba5551_result;
	struct convert_to_rgb888_result rgb888_result;
	struct convert_to_argb8888_result argb8888_result;
	struct convert_to_xrgb2101010_result xrgb2101010_result;
	struct convert_to_argb2101010_result argb2101010_result;
	struct convert_to_mono_result mono_result;
};
static struct convert_xrgb8888_case convert_xrgb8888_cases[] = {
{
.name = "single_pixel_source_buffer",
.pitch = 1 * 4,
.clip = DRM_RECT_INIT(0, 0, 1, 1),
.xrgb8888 = { 0x01FF0000 },
.gray8_result = {
.dst_pitch = 0,
.expected = { 0x4C },
},
.rgb332_result = {
.dst_pitch = 0,
.expected = { 0xE0 },
},
.rgb565_result = {
.dst_pitch = 0,
.expected = { 0xF800 },
.expected_swab = { 0x00F8 },
},
.xrgb1555_result = {
.dst_pitch = 0,
.expected = { 0x7C00 },
},
.argb1555_result = {
.dst_pitch = 0,
.expected = { 0xFC00 },
},
.rgba5551_result = {
.dst_pitch = 0,
.expected = { 0xF801 },
},
.rgb888_result = {
.dst_pitch = 0,
.expected = { 0x00, 0x00, 0xFF },
},
.argb8888_result = {
.dst_pitch = 0,
.expected = { 0xFFFF0000 },
},
.xrgb2101010_result = {
.dst_pitch = 0,
.expected = { 0x3FF00000 },
},
.argb2101010_result = {
.dst_pitch = 0,
.expected = { 0xFFF00000 },
},
.mono_result = {
.dst_pitch = 0,
.expected = { 0b0 },
},
},
{
.name = "single_pixel_clip_rectangle",
.pitch = 2 * 4,
.clip = DRM_RECT_INIT(1, 1, 1, 1),
.xrgb8888 = {
0x00000000, 0x00000000,
0x00000000, 0x10FF0000,
},
.gray8_result = {
.dst_pitch = 0,
.expected = { 0x4C },
},
.rgb332_result = {
.dst_pitch = 0,
.expected = { 0xE0 },
},
.rgb565_result = {
.dst_pitch = 0,
.expected = { 0xF800 },
.expected_swab = { 0x00F8 },
},
.xrgb1555_result = {
.dst_pitch = 0,
.expected = { 0x7C00 },
},
.argb1555_result = {
.dst_pitch = 0,
.expected = { 0xFC00 },
},
.rgba5551_result = {
.dst_pitch = 0,
.expected = { 0xF801 },
},
.rgb888_result = {
.dst_pitch = 0,
.expected = { 0x00, 0x00, 0xFF },
},
.argb8888_result = {
.dst_pitch = 0,
.expected = { 0xFFFF0000 },
},
.xrgb2101010_result = {
.dst_pitch = 0,
.expected = { 0x3FF00000 },
},
.argb2101010_result = {
.dst_pitch = 0,
.expected = { 0xFFF00000 },
},
.mono_result = {
.dst_pitch = 0,
.expected = { 0b0 },
},
},
{
/* Well known colors: White, black, red, green, blue, magenta,
* yellow and cyan. Different values for the X in XRGB8888 to
* make sure it is ignored. Partial clip area.
*/
.name = "well_known_colors",
.pitch = 4 * 4,
.clip = DRM_RECT_INIT(1, 1, 2, 4),
.xrgb8888 = {
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x11FFFFFF, 0x22000000, 0x00000000,
0x00000000, 0x33FF0000, 0x4400FF00, 0x00000000,
0x00000000, 0x550000FF, 0x66FF00FF, 0x00000000,
0x00000000, 0x77FFFF00, 0x8800FFFF, 0x00000000,
},
.gray8_result = {
.dst_pitch = 0,
.expected = {
0xFF, 0x00,
0x4C, 0x99,
0x19, 0x66,
0xE5, 0xB2,
},
},
.rgb332_result = {
.dst_pitch = 0,
.expected = {
0xFF, 0x00,
0xE0, 0x1C,
0x03, 0xE3,
0xFC, 0x1F,
},
},
.rgb565_result = {
.dst_pitch = 0,
.expected = {
0xFFFF, 0x0000,
0xF800, 0x07E0,
0x001F, 0xF81F,
0xFFE0, 0x07FF,
},
.expected_swab = {
0xFFFF, 0x0000,
0x00F8, 0xE007,
0x1F00, 0x1FF8,
0xE0FF, 0xFF07,
},
},
.xrgb1555_result = {
.dst_pitch = 0,
.expected = {
0x7FFF, 0x0000,
0x7C00, 0x03E0,
0x001F, 0x7C1F,
0x7FE0, 0x03FF,
},
},
.argb1555_result = {
.dst_pitch = 0,
.expected = {
0xFFFF, 0x8000,
0xFC00, 0x83E0,
0x801F, 0xFC1F,
0xFFE0, 0x83FF,
},
},
.rgba5551_result = {
.dst_pitch = 0,
.expected = {
0xFFFF, 0x0001,
0xF801, 0x07C1,
0x003F, 0xF83F,
0xFFC1, 0x07FF,
},
},
.rgb888_result = {
.dst_pitch = 0,
.expected = {
0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00,
0x00, 0x00, 0xFF, 0x00, 0xFF, 0x00,
0xFF, 0x00, 0x00, 0xFF, 0x00, 0xFF,
0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0x00,
},
},
.argb8888_result = {
.dst_pitch = 0,
.expected = {
0xFFFFFFFF, 0xFF000000,
0xFFFF0000, 0xFF00FF00,
0xFF0000FF, 0xFFFF00FF,
0xFFFFFF00, 0xFF00FFFF,
},
},
.xrgb2101010_result = {
.dst_pitch = 0,
.expected = {
0x3FFFFFFF, 0x00000000,
0x3FF00000, 0x000FFC00,
0x000003FF, 0x3FF003FF,
0x3FFFFC00, 0x000FFFFF,
},
},
.argb2101010_result = {
.dst_pitch = 0,
.expected = {
0xFFFFFFFF, 0xC0000000,
0xFFF00000, 0xC00FFC00,
0xC00003FF, 0xFFF003FF,
0xFFFFFC00, 0xC00FFFFF,
},
},
.mono_result = {
.dst_pitch = 0,
.expected = {
0b01,
0b10,
0b00,
0b11,
},
},
},
{
/* Randomly picked colors. Full buffer within the clip area. */
.name = "destination_pitch",
.pitch = 3 * 4,
.clip = DRM_RECT_INIT(0, 0, 3, 3),
.xrgb8888 = {
0xA10E449C, 0xB1114D05, 0xC1A8F303,
0xD16CF073, 0xA20E449C, 0xB2114D05,
0xC2A80303, 0xD26CF073, 0xA30E449C,
},
.gray8_result = {
.dst_pitch = 5,
.expected = {
0x3C, 0x33, 0xC4, 0x00, 0x00,
0xBB, 0x3C, 0x33, 0x00, 0x00,
0x34, 0xBB, 0x3C, 0x00, 0x00,
},
},
.rgb332_result = {
.dst_pitch = 5,
.expected = {
0x0A, 0x08, 0xBC, 0x00, 0x00,
0x7D, 0x0A, 0x08, 0x00, 0x00,
0xA0, 0x7D, 0x0A, 0x00, 0x00,
},
},
.rgb565_result = {
.dst_pitch = 10,
.expected = {
0x0A33, 0x1260, 0xAF80, 0x0000, 0x0000,
0x6F8E, 0x0A33, 0x1260, 0x0000, 0x0000,
0xA800, 0x6F8E, 0x0A33, 0x0000, 0x0000,
},
.expected_swab = {
0x330A, 0x6012, 0x80AF, 0x0000, 0x0000,
0x8E6F, 0x330A, 0x6012, 0x0000, 0x0000,
0x00A8, 0x8E6F, 0x330A, 0x0000, 0x0000,
},
},
.xrgb1555_result = {
.dst_pitch = 10,
.expected = {
0x0513, 0x0920, 0x57C0, 0x0000, 0x0000,
0x37CE, 0x0513, 0x0920, 0x0000, 0x0000,
0x5400, 0x37CE, 0x0513, 0x0000, 0x0000,
},
},
.argb1555_result = {
.dst_pitch = 10,
.expected = {
0x8513, 0x8920, 0xD7C0, 0x0000, 0x0000,
0xB7CE, 0x8513, 0x8920, 0x0000, 0x0000,
0xD400, 0xB7CE, 0x8513, 0x0000, 0x0000,
},
},
.rgba5551_result = {
.dst_pitch = 10,
.expected = {
0x0A27, 0x1241, 0xAF81, 0x0000, 0x0000,
0x6F9D, 0x0A27, 0x1241, 0x0000, 0x0000,
0xA801, 0x6F9D, 0x0A27, 0x0000, 0x0000,
},
},
.rgb888_result = {
.dst_pitch = 15,
.expected = {
0x9C, 0x44, 0x0E, 0x05, 0x4D, 0x11, 0x03, 0xF3, 0xA8,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x73, 0xF0, 0x6C, 0x9C, 0x44, 0x0E, 0x05, 0x4D, 0x11,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x03, 0x03, 0xA8, 0x73, 0xF0, 0x6C, 0x9C, 0x44, 0x0E,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
},
},
.argb8888_result = {
.dst_pitch = 20,
.expected = {
0xFF0E449C, 0xFF114D05, 0xFFA8F303, 0x00000000, 0x00000000,
0xFF6CF073, 0xFF0E449C, 0xFF114D05, 0x00000000, 0x00000000,
0xFFA80303, 0xFF6CF073, 0xFF0E449C, 0x00000000, 0x00000000,
},
},
.xrgb2101010_result = {
.dst_pitch = 20,
.expected = {
0x03844672, 0x0444D414, 0x2A2F3C0C, 0x00000000, 0x00000000,
0x1B1F0DCD, 0x03844672, 0x0444D414, 0x00000000, 0x00000000,
0x2A20300C, 0x1B1F0DCD, 0x03844672, 0x00000000, 0x00000000,
},
},
.argb2101010_result = {
.dst_pitch = 20,
.expected = {
0xC3844672, 0xC444D414, 0xEA2F3C0C, 0x00000000, 0x00000000,
0xDB1F0DCD, 0xC3844672, 0xC444D414, 0x00000000, 0x00000000,
0xEA20300C, 0xDB1F0DCD, 0xC3844672, 0x00000000, 0x00000000,
},
},
.mono_result = {
.dst_pitch = 2,
.expected = {
0b100, 0b000,
0b001, 0b000,
0b010, 0b000,
},
},
},
};
/*
 * conversion_buf_size - Return the destination buffer size required to convert
 * between formats.
 * @dst_format: destination buffer pixel format (DRM_FORMAT_*)
 * @dst_pitch: Number of bytes between two consecutive scanlines within dst
 * @clip: Clip rectangle area to convert
 *
 * Returns:
 * The size of the destination buffer, or 0 on error.
 */
static size_t conversion_buf_size(u32 dst_format, unsigned int dst_pitch,
				  const struct drm_rect *clip)
{
	const struct drm_format_info *dst_fi = drm_format_info(dst_format);

	/*
	 * Returning -EINVAL from a size_t function wraps to a huge positive
	 * value, so the callers' KUNIT_ASSERT_GT(test, dst_size, 0) checks
	 * could never detect the error. Return 0 instead, which those
	 * assertions catch.
	 */
	if (!dst_fi)
		return 0;

	if (!dst_pitch)
		dst_pitch = drm_format_info_min_pitch(dst_fi, 0, drm_rect_width(clip));

	return dst_pitch * drm_rect_height(clip);
}
/*
 * Return a kunit-managed copy of @buf with each 16-bit word converted from
 * little-endian to CPU byte order, or NULL if the allocation fails.
 */
static u16 *le16buf_to_cpu(struct kunit *test, const __le16 *buf, size_t buf_size)
{
	u16 *cpu_buf;
	size_t i;

	cpu_buf = kunit_kzalloc(test, buf_size * sizeof(*cpu_buf), GFP_KERNEL);
	if (!cpu_buf)
		return NULL;

	for (i = 0; i < buf_size; i++)
		cpu_buf[i] = le16_to_cpu(buf[i]);

	return cpu_buf;
}

/*
 * Return a kunit-managed copy of @buf with each 32-bit word converted from
 * little-endian to CPU byte order, or NULL if the allocation fails.
 */
static u32 *le32buf_to_cpu(struct kunit *test, const __le32 *buf, size_t buf_size)
{
	u32 *cpu_buf;
	size_t i;

	cpu_buf = kunit_kzalloc(test, buf_size * sizeof(*cpu_buf), GFP_KERNEL);
	if (!cpu_buf)
		return NULL;

	for (i = 0; i < buf_size; i++)
		cpu_buf[i] = le32_to_cpu((__force __le32)buf[i]);

	return cpu_buf;
}

/*
 * Return a kunit-managed copy of @buf with each 32-bit word converted from
 * CPU byte order to little-endian, or NULL if the allocation fails.
 */
static __le32 *cpubuf_to_le32(struct kunit *test, const u32 *buf, size_t buf_size)
{
	__le32 *le_buf;
	size_t i;

	le_buf = kunit_kzalloc(test, buf_size * sizeof(*le_buf), GFP_KERNEL);
	if (!le_buf)
		return NULL;

	for (i = 0; i < buf_size; i++)
		le_buf[i] = cpu_to_le32(buf[i]);

	return le_buf;
}
/* Parameter-description callback: report the case name to the KUnit log. */
static void convert_xrgb8888_case_desc(struct convert_xrgb8888_case *t,
				       char *desc)
{
	strscpy(desc, t->name, KUNIT_PARAM_DESC_SIZE);
}

KUNIT_ARRAY_PARAM(convert_xrgb8888, convert_xrgb8888_cases,
		  convert_xrgb8888_case_desc);
static void drm_test_fb_xrgb8888_to_gray8(struct kunit *test)
{
const struct convert_xrgb8888_case *params = test->param_value;
const struct convert_to_gray8_result *result = ¶ms->gray8_result;
size_t dst_size;
u8 *buf = NULL;
__le32 *xrgb8888 = NULL;
struct iosys_map dst, src;
struct drm_framebuffer fb = {
.format = drm_format_info(DRM_FORMAT_XRGB8888),
.pitches = { params->pitch, 0, 0 },
};
dst_size = conversion_buf_size(DRM_FORMAT_R8, result->dst_pitch,
¶ms->clip);
KUNIT_ASSERT_GT(test, dst_size, 0);
buf = kunit_kzalloc(test, dst_size, GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf);
iosys_map_set_vaddr(&dst, buf);
xrgb8888 = cpubuf_to_le32(test, params->xrgb8888, TEST_BUF_SIZE);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xrgb8888);
iosys_map_set_vaddr(&src, xrgb8888);
drm_fb_xrgb8888_to_gray8(&dst, &result->dst_pitch, &src, &fb, ¶ms->clip);
KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
}
static void drm_test_fb_xrgb8888_to_rgb332(struct kunit *test)
{
const struct convert_xrgb8888_case *params = test->param_value;
const struct convert_to_rgb332_result *result = ¶ms->rgb332_result;
size_t dst_size;
u8 *buf = NULL;
__le32 *xrgb8888 = NULL;
struct iosys_map dst, src;
struct drm_framebuffer fb = {
.format = drm_format_info(DRM_FORMAT_XRGB8888),
.pitches = { params->pitch, 0, 0 },
};
dst_size = conversion_buf_size(DRM_FORMAT_RGB332, result->dst_pitch,
¶ms->clip);
KUNIT_ASSERT_GT(test, dst_size, 0);
buf = kunit_kzalloc(test, dst_size, GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf);
iosys_map_set_vaddr(&dst, buf);
xrgb8888 = cpubuf_to_le32(test, params->xrgb8888, TEST_BUF_SIZE);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xrgb8888);
iosys_map_set_vaddr(&src, xrgb8888);
drm_fb_xrgb8888_to_rgb332(&dst, &result->dst_pitch, &src, &fb, ¶ms->clip);
KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
}
static void drm_test_fb_xrgb8888_to_rgb565(struct kunit *test)
{
const struct convert_xrgb8888_case *params = test->param_value;
const struct convert_to_rgb565_result *result = ¶ms->rgb565_result;
size_t dst_size;
u16 *buf = NULL;
__le32 *xrgb8888 = NULL;
struct iosys_map dst, src;
struct drm_framebuffer fb = {
.format = drm_format_info(DRM_FORMAT_XRGB8888),
.pitches = { params->pitch, 0, 0 },
};
dst_size = conversion_buf_size(DRM_FORMAT_RGB565, result->dst_pitch,
¶ms->clip);
KUNIT_ASSERT_GT(test, dst_size, 0);
buf = kunit_kzalloc(test, dst_size, GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf);
iosys_map_set_vaddr(&dst, buf);
xrgb8888 = cpubuf_to_le32(test, params->xrgb8888, TEST_BUF_SIZE);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xrgb8888);
iosys_map_set_vaddr(&src, xrgb8888);
drm_fb_xrgb8888_to_rgb565(&dst, &result->dst_pitch, &src, &fb, ¶ms->clip, false);
buf = le16buf_to_cpu(test, (__force const __le16 *)buf, dst_size / sizeof(__le16));
KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
buf = dst.vaddr; /* restore original value of buf */
drm_fb_xrgb8888_to_rgb565(&dst, &result->dst_pitch, &src, &fb, ¶ms->clip, true);
buf = le16buf_to_cpu(test, (__force const __le16 *)buf, dst_size / sizeof(__le16));
KUNIT_EXPECT_MEMEQ(test, buf, result->expected_swab, dst_size);
}
/*
 * Convert the XRGB8888 test pattern to XRGB1555 and compare the
 * little-endian output (converted back to CPU order) against the
 * precomputed expectation.
 */
static void drm_test_fb_xrgb8888_to_xrgb1555(struct kunit *test)
{
	const struct convert_xrgb8888_case *params = test->param_value;
	const struct convert_to_xrgb1555_result *result = &params->xrgb1555_result;
	size_t dst_size;
	u16 *buf = NULL;
	__le32 *xrgb8888 = NULL;
	struct iosys_map dst, src;

	struct drm_framebuffer fb = {
		.format = drm_format_info(DRM_FORMAT_XRGB8888),
		.pitches = { params->pitch, 0, 0 },
	};

	dst_size = conversion_buf_size(DRM_FORMAT_XRGB1555, result->dst_pitch,
				       &params->clip);
	KUNIT_ASSERT_GT(test, dst_size, 0);

	buf = kunit_kzalloc(test, dst_size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf);
	iosys_map_set_vaddr(&dst, buf);

	xrgb8888 = cpubuf_to_le32(test, params->xrgb8888, TEST_BUF_SIZE);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xrgb8888);
	iosys_map_set_vaddr(&src, xrgb8888);

	drm_fb_xrgb8888_to_xrgb1555(&dst, &result->dst_pitch, &src, &fb, &params->clip);
	buf = le16buf_to_cpu(test, (__force const __le16 *)buf, dst_size / sizeof(__le16));
	KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
}
/*
 * Convert the XRGB8888 test pattern to ARGB1555 and compare the
 * little-endian output (converted back to CPU order) against the
 * precomputed expectation.
 */
static void drm_test_fb_xrgb8888_to_argb1555(struct kunit *test)
{
	const struct convert_xrgb8888_case *params = test->param_value;
	const struct convert_to_argb1555_result *result = &params->argb1555_result;
	size_t dst_size;
	u16 *buf = NULL;
	__le32 *xrgb8888 = NULL;
	struct iosys_map dst, src;

	struct drm_framebuffer fb = {
		.format = drm_format_info(DRM_FORMAT_XRGB8888),
		.pitches = { params->pitch, 0, 0 },
	};

	dst_size = conversion_buf_size(DRM_FORMAT_ARGB1555, result->dst_pitch,
				       &params->clip);
	KUNIT_ASSERT_GT(test, dst_size, 0);

	buf = kunit_kzalloc(test, dst_size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf);
	iosys_map_set_vaddr(&dst, buf);

	xrgb8888 = cpubuf_to_le32(test, params->xrgb8888, TEST_BUF_SIZE);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xrgb8888);
	iosys_map_set_vaddr(&src, xrgb8888);

	drm_fb_xrgb8888_to_argb1555(&dst, &result->dst_pitch, &src, &fb, &params->clip);
	buf = le16buf_to_cpu(test, (__force const __le16 *)buf, dst_size / sizeof(__le16));
	KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
}
/*
 * Convert the XRGB8888 test pattern to RGBA5551 and compare the
 * little-endian output (converted back to CPU order) against the
 * precomputed expectation.
 */
static void drm_test_fb_xrgb8888_to_rgba5551(struct kunit *test)
{
	const struct convert_xrgb8888_case *params = test->param_value;
	const struct convert_to_rgba5551_result *result = &params->rgba5551_result;
	size_t dst_size;
	u16 *buf = NULL;
	__le32 *xrgb8888 = NULL;
	struct iosys_map dst, src;

	struct drm_framebuffer fb = {
		.format = drm_format_info(DRM_FORMAT_XRGB8888),
		.pitches = { params->pitch, 0, 0 },
	};

	dst_size = conversion_buf_size(DRM_FORMAT_RGBA5551, result->dst_pitch,
				       &params->clip);
	KUNIT_ASSERT_GT(test, dst_size, 0);

	buf = kunit_kzalloc(test, dst_size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf);
	iosys_map_set_vaddr(&dst, buf);

	xrgb8888 = cpubuf_to_le32(test, params->xrgb8888, TEST_BUF_SIZE);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xrgb8888);
	iosys_map_set_vaddr(&src, xrgb8888);

	drm_fb_xrgb8888_to_rgba5551(&dst, &result->dst_pitch, &src, &fb, &params->clip);
	buf = le16buf_to_cpu(test, (__force const __le16 *)buf, dst_size / sizeof(__le16));
	KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
}
/*
 * Convert the XRGB8888 test pattern to 24-bit RGB888 and compare the
 * output against the precomputed expectation. RGB888 is byte-oriented,
 * so no endianness conversion of the result is required.
 */
static void drm_test_fb_xrgb8888_to_rgb888(struct kunit *test)
{
	const struct convert_xrgb8888_case *params = test->param_value;
	const struct convert_to_rgb888_result *result = &params->rgb888_result;
	size_t dst_size;
	u8 *buf = NULL;
	__le32 *xrgb8888 = NULL;
	struct iosys_map dst, src;

	struct drm_framebuffer fb = {
		.format = drm_format_info(DRM_FORMAT_XRGB8888),
		.pitches = { params->pitch, 0, 0 },
	};

	dst_size = conversion_buf_size(DRM_FORMAT_RGB888, result->dst_pitch,
				       &params->clip);
	KUNIT_ASSERT_GT(test, dst_size, 0);

	buf = kunit_kzalloc(test, dst_size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf);
	iosys_map_set_vaddr(&dst, buf);

	xrgb8888 = cpubuf_to_le32(test, params->xrgb8888, TEST_BUF_SIZE);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xrgb8888);
	iosys_map_set_vaddr(&src, xrgb8888);

	/*
	 * RGB888 expected results are already in little-endian
	 * order, so there's no need to convert the test output.
	 */
	drm_fb_xrgb8888_to_rgb888(&dst, &result->dst_pitch, &src, &fb, &params->clip);
	KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
}
/*
 * Convert the XRGB8888 test pattern to ARGB8888 and compare the
 * little-endian output (converted back to CPU order) against the
 * precomputed expectation.
 */
static void drm_test_fb_xrgb8888_to_argb8888(struct kunit *test)
{
	const struct convert_xrgb8888_case *params = test->param_value;
	const struct convert_to_argb8888_result *result = &params->argb8888_result;
	size_t dst_size;
	u32 *buf = NULL;
	__le32 *xrgb8888 = NULL;
	struct iosys_map dst, src;

	struct drm_framebuffer fb = {
		.format = drm_format_info(DRM_FORMAT_XRGB8888),
		.pitches = { params->pitch, 0, 0 },
	};

	dst_size = conversion_buf_size(DRM_FORMAT_ARGB8888,
				       result->dst_pitch, &params->clip);
	KUNIT_ASSERT_GT(test, dst_size, 0);

	buf = kunit_kzalloc(test, dst_size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf);
	iosys_map_set_vaddr(&dst, buf);

	xrgb8888 = cpubuf_to_le32(test, params->xrgb8888, TEST_BUF_SIZE);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xrgb8888);
	iosys_map_set_vaddr(&src, xrgb8888);

	drm_fb_xrgb8888_to_argb8888(&dst, &result->dst_pitch, &src, &fb, &params->clip);
	buf = le32buf_to_cpu(test, (__force const __le32 *)buf, dst_size / sizeof(u32));
	KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
}
static void drm_test_fb_xrgb8888_to_xrgb2101010(struct kunit *test)
{
const struct convert_xrgb8888_case *params = test->param_value;
const struct convert_to_xrgb2101010_result *result = ¶ms->xrgb2101010_result;
size_t dst_size;
u32 *buf = NULL;
__le32 *xrgb8888 = NULL;
struct iosys_map dst, src;
struct drm_framebuffer fb = {
.format = drm_format_info(DRM_FORMAT_XRGB8888),
.pitches = { params->pitch, 0, 0 },
};
dst_size = conversion_buf_size(DRM_FORMAT_XRGB2101010,
result->dst_pitch, ¶ms->clip);
KUNIT_ASSERT_GT(test, dst_size, 0);
buf = kunit_kzalloc(test, dst_size, GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf);
iosys_map_set_vaddr(&dst, buf);
xrgb8888 = cpubuf_to_le32(test, params->xrgb8888, TEST_BUF_SIZE);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xrgb8888);
iosys_map_set_vaddr(&src, xrgb8888);
drm_fb_xrgb8888_to_xrgb2101010(&dst, &result->dst_pitch, &src, &fb, ¶ms->clip);
buf = le32buf_to_cpu(test, buf, dst_size / sizeof(u32));
KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
}
/*
 * Convert the XRGB8888 test pattern to ARGB2101010 and compare the
 * little-endian output (converted back to CPU order) against the
 * precomputed expectation.
 */
static void drm_test_fb_xrgb8888_to_argb2101010(struct kunit *test)
{
	const struct convert_xrgb8888_case *params = test->param_value;
	const struct convert_to_argb2101010_result *result = &params->argb2101010_result;
	size_t dst_size;
	u32 *buf = NULL;
	__le32 *xrgb8888 = NULL;
	struct iosys_map dst, src;

	struct drm_framebuffer fb = {
		.format = drm_format_info(DRM_FORMAT_XRGB8888),
		.pitches = { params->pitch, 0, 0 },
	};

	dst_size = conversion_buf_size(DRM_FORMAT_ARGB2101010,
				       result->dst_pitch, &params->clip);
	KUNIT_ASSERT_GT(test, dst_size, 0);

	buf = kunit_kzalloc(test, dst_size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf);
	iosys_map_set_vaddr(&dst, buf);

	xrgb8888 = cpubuf_to_le32(test, params->xrgb8888, TEST_BUF_SIZE);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xrgb8888);
	iosys_map_set_vaddr(&src, xrgb8888);

	drm_fb_xrgb8888_to_argb2101010(&dst, &result->dst_pitch, &src, &fb, &params->clip);
	buf = le32buf_to_cpu(test, (__force const __le32 *)buf, dst_size / sizeof(u32));
	KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
}
/*
 * Convert the XRGB8888 test pattern to monochrome (DRM_FORMAT_C1,
 * one bit per pixel) and compare the packed output against the
 * precomputed expectation. No endianness conversion is needed for
 * a byte-packed bitmap.
 */
static void drm_test_fb_xrgb8888_to_mono(struct kunit *test)
{
	const struct convert_xrgb8888_case *params = test->param_value;
	const struct convert_to_mono_result *result = &params->mono_result;
	size_t dst_size;
	u8 *buf = NULL;
	__le32 *xrgb8888 = NULL;
	struct iosys_map dst, src;

	struct drm_framebuffer fb = {
		.format = drm_format_info(DRM_FORMAT_XRGB8888),
		.pitches = { params->pitch, 0, 0 },
	};

	dst_size = conversion_buf_size(DRM_FORMAT_C1, result->dst_pitch, &params->clip);
	KUNIT_ASSERT_GT(test, dst_size, 0);

	buf = kunit_kzalloc(test, dst_size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf);
	iosys_map_set_vaddr(&dst, buf);

	xrgb8888 = cpubuf_to_le32(test, params->xrgb8888, TEST_BUF_SIZE);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xrgb8888);
	iosys_map_set_vaddr(&src, xrgb8888);

	drm_fb_xrgb8888_to_mono(&dst, &result->dst_pitch, &src, &fb, &params->clip);
	KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
}
/*
 * One parameterized test per destination format; each case is run once
 * for every entry produced by convert_xrgb8888_gen_params().
 */
static struct kunit_case drm_format_helper_test_cases[] = {
	KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_gray8, convert_xrgb8888_gen_params),
	KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_rgb332, convert_xrgb8888_gen_params),
	KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_rgb565, convert_xrgb8888_gen_params),
	KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_xrgb1555, convert_xrgb8888_gen_params),
	KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_argb1555, convert_xrgb8888_gen_params),
	KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_rgba5551, convert_xrgb8888_gen_params),
	KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_rgb888, convert_xrgb8888_gen_params),
	KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_argb8888, convert_xrgb8888_gen_params),
	KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_xrgb2101010, convert_xrgb8888_gen_params),
	KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_argb2101010, convert_xrgb8888_gen_params),
	KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_mono, convert_xrgb8888_gen_params),
	{}	/* sentinel */
};
/* Suite registration and module boilerplate. */
static struct kunit_suite drm_format_helper_test_suite = {
	.name = "drm_format_helper_test",
	.test_cases = drm_format_helper_test_cases,
};

kunit_test_suite(drm_format_helper_test_suite);

MODULE_DESCRIPTION("KUnit tests for the drm_format_helper APIs");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("José Expósito <[email protected]>");
| linux-master | drivers/gpu/drm/tests/drm_format_helper_test.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Test cases for the drm_mm range manager
*
* Copyright (c) 2022 Arthur Grillo <[email protected]>
*/
#include <kunit/test.h>
#include <linux/prime_numbers.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/vmalloc.h>
#include <linux/ktime.h>
#include <drm/drm_mm.h>
#include "../lib/drm_random.h"
/* Seed for the pseudo-random orderings used throughout the suite. */
static unsigned int random_seed;
/* Upper bound on node counts so the tests stay reasonably fast. */
static unsigned int max_iterations = 8192;
/* Largest prime used when removing/reinserting batches of nodes. */
static unsigned int max_prime = 128;

/* Indices into insert_modes[] below. */
enum {
	BEST,
	BOTTOMUP,
	TOPDOWN,
	EVICT,
};

/*
 * Named drm_mm insertion strategies. Both tables are terminated by a
 * zeroed sentinel entry (NULL name) so callers can iterate with
 * "for (mode = ...; mode->name; mode++)".
 */
static const struct insert_mode {
	const char *name;
	enum drm_mm_insert_mode mode;
} insert_modes[] = {
	[BEST] = { "best", DRM_MM_INSERT_BEST },
	[BOTTOMUP] = { "bottom-up", DRM_MM_INSERT_LOW },
	[TOPDOWN] = { "top-down", DRM_MM_INSERT_HIGH },
	[EVICT] = { "evict", DRM_MM_INSERT_EVICT },
	{}
}, evict_modes[] = {
	{ "bottom-up", DRM_MM_INSERT_LOW },
	{ "top-down", DRM_MM_INSERT_HIGH },
	{}
};
/*
 * Check that @mm contains no holes at all: the hole iterator must visit
 * nothing and no node may be followed by free space. Returns true on
 * success, false (after reporting a KUNIT_FAIL) otherwise.
 */
static bool assert_no_holes(struct kunit *test, const struct drm_mm *mm)
{
	struct drm_mm_node *hole;
	u64 hole_start, __always_unused hole_end;
	unsigned long count;

	count = 0;
	drm_mm_for_each_hole(hole, mm, hole_start, hole_end)
		count++;
	if (count) {
		KUNIT_FAIL(test,
			   "Expected to find no holes (after reserve), found %lu instead\n", count);
		return false;
	}

	drm_mm_for_each_node(hole, mm) {
		if (drm_mm_hole_follows(hole)) {
			KUNIT_FAIL(test, "Hole follows node, expected none!\n");
			return false;
		}
	}

	return true;
}
/*
 * Check that @mm contains exactly one hole spanning [@start, @end).
 * Trivially succeeds for an empty/invalid range. Returns true on
 * success, false (after reporting a KUNIT_FAIL) otherwise.
 */
static bool assert_one_hole(struct kunit *test, const struct drm_mm *mm, u64 start, u64 end)
{
	struct drm_mm_node *hole;
	u64 hole_start, hole_end;
	unsigned long count;
	bool ok = true;

	if (end <= start)
		return true;

	count = 0;
	drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
		if (start != hole_start || end != hole_end) {
			if (ok)	/* only report the first mismatch */
				KUNIT_FAIL(test,
					   "empty mm has incorrect hole, found (%llx, %llx), expect (%llx, %llx)\n",
					   hole_start, hole_end, start, end);
			ok = false;
		}
		count++;
	}
	if (count != 1) {
		KUNIT_FAIL(test, "Expected to find one hole, found %lu instead\n", count);
		ok = false;
	}

	return ok;
}
/*
 * Check that @mm is completely filled with back-to-back nodes of @size:
 * no holes, nodes in address order starting at 0, and each node found by
 * a range lookup over its own interval. Returns true on success, false
 * (after reporting a KUNIT_FAIL) otherwise.
 */
static bool assert_continuous(struct kunit *test, const struct drm_mm *mm, u64 size)
{
	struct drm_mm_node *node, *check, *found;
	unsigned long n;
	u64 addr;

	if (!assert_no_holes(test, mm))
		return false;

	n = 0;
	addr = 0;
	drm_mm_for_each_node(node, mm) {
		if (node->start != addr) {
			KUNIT_FAIL(test, "node[%ld] list out of order, expected %llx found %llx\n",
				   n, addr, node->start);
			return false;
		}

		if (node->size != size) {
			KUNIT_FAIL(test, "node[%ld].size incorrect, expected %llx, found %llx\n",
				   n, size, node->size);
			return false;
		}

		if (drm_mm_hole_follows(node)) {
			KUNIT_FAIL(test, "node[%ld] is followed by a hole!\n", n);
			return false;
		}

		/* The interval search must return exactly this node. */
		found = NULL;
		drm_mm_for_each_node_in_range(check, mm, addr, addr + size) {
			if (node != check) {
				KUNIT_FAIL(test,
					   "lookup return wrong node, expected start %llx, found %llx\n",
					   node->start, check->start);
				return false;
			}
			found = check;
		}
		if (!found) {
			KUNIT_FAIL(test, "lookup failed for node %llx + %llx\n", addr, size);
			return false;
		}

		addr += size;
		n++;
	}

	return true;
}
/*
 * Return how far @node->start is from the previous multiple of
 * @alignment; an @alignment of zero means "no constraint" and yields 0.
 */
static u64 misalignment(struct drm_mm_node *node, u64 alignment)
{
	u64 remainder = 0;

	if (alignment)
		div64_u64_rem(node->start, alignment, &remainder);

	return remainder;
}
/*
 * Verify that @node is allocated from @mm and carries the expected
 * @size, @alignment and @color. All mismatches are reported; returns
 * true only if every check passed.
 */
static bool assert_node(struct kunit *test, struct drm_mm_node *node, struct drm_mm *mm,
			u64 size, u64 alignment, unsigned long color)
{
	bool ok = true;

	if (!drm_mm_node_allocated(node) || node->mm != mm) {
		KUNIT_FAIL(test, "node not allocated\n");
		ok = false;
	}

	if (node->size != size) {
		KUNIT_FAIL(test, "node has wrong size, found %llu, expected %llu\n",
			   node->size, size);
		ok = false;
	}

	if (misalignment(node, alignment)) {
		KUNIT_FAIL(test,
			   "node is misaligned, start %llx rem %llu, expected alignment %llu\n",
			   node->start, misalignment(node, alignment), alignment);
		ok = false;
	}

	if (node->color != color) {
		KUNIT_FAIL(test, "node has wrong color, found %lu, expected %lu\n",
			   node->color, color);
		ok = false;
	}

	return ok;
}
/*
 * Sanity-check drm_mm initialisation: a zeroed struct is not
 * "initialized", a freshly initialised range manager is clean and one
 * big hole, filling it removes the hole, and emptying it restores it.
 */
static void drm_test_mm_init(struct kunit *test)
{
	const unsigned int size = 4096;
	struct drm_mm mm;
	struct drm_mm_node tmp;

	/* Start with some simple checks on initialising the struct drm_mm */
	memset(&mm, 0, sizeof(mm));
	KUNIT_ASSERT_FALSE_MSG(test, drm_mm_initialized(&mm),
			       "zeroed mm claims to be initialized\n");

	/* Poison first to prove drm_mm_init() sets everything it needs. */
	memset(&mm, 0xff, sizeof(mm));
	drm_mm_init(&mm, 0, size);
	if (!drm_mm_initialized(&mm)) {
		KUNIT_FAIL(test, "mm claims not to be initialized\n");
		goto out;
	}

	if (!drm_mm_clean(&mm)) {
		KUNIT_FAIL(test, "mm not empty on creation\n");
		goto out;
	}

	/* After creation, it should all be one massive hole */
	if (!assert_one_hole(test, &mm, 0, size)) {
		KUNIT_FAIL(test, "");
		goto out;
	}

	memset(&tmp, 0, sizeof(tmp));
	tmp.start = 0;
	tmp.size = size;
	if (drm_mm_reserve_node(&mm, &tmp)) {
		KUNIT_FAIL(test, "failed to reserve whole drm_mm\n");
		goto out;
	}

	/* After filling the range entirely, there should be no holes */
	if (!assert_no_holes(test, &mm)) {
		KUNIT_FAIL(test, "");
		goto out;
	}

	/* And then after emptying it again, the massive hole should be back */
	drm_mm_remove_node(&tmp);
	if (!assert_one_hole(test, &mm, 0, size)) {
		KUNIT_FAIL(test, "");
		goto out;
	}

out:
	drm_mm_takedown(&mm);
}
/*
 * Build a tiny drm_mm with two reserved nodes and surrounding holes to
 * make sure the debug iterator copes with a trivial layout. The second
 * assertion previously reported "node[0]" with nodes[0]'s values on
 * failure; it now reports the node it actually reserved. The nodes and
 * the range manager are also released so nothing is left behind.
 */
static void drm_test_mm_debug(struct kunit *test)
{
	struct drm_mm mm;
	struct drm_mm_node nodes[2];

	/* Create a small drm_mm with a couple of nodes and a few holes, and
	 * check that the debug iterator doesn't explode over a trivial drm_mm.
	 */
	drm_mm_init(&mm, 0, 4096);

	memset(nodes, 0, sizeof(nodes));
	nodes[0].start = 512;
	nodes[0].size = 1024;
	KUNIT_ASSERT_FALSE_MSG(test, drm_mm_reserve_node(&mm, &nodes[0]),
			       "failed to reserve node[0] {start=%lld, size=%lld)\n",
			       nodes[0].start, nodes[0].size);

	nodes[1].size = 1024;
	nodes[1].start = 4096 - 512 - nodes[1].size;
	KUNIT_ASSERT_FALSE_MSG(test, drm_mm_reserve_node(&mm, &nodes[1]),
			       "failed to reserve node[1] {start=%lld, size=%lld)\n",
			       nodes[1].start, nodes[1].size);

	/* Clean up: drm_mm_takedown() warns if any nodes are still allocated. */
	drm_mm_remove_node(&nodes[0]);
	drm_mm_remove_node(&nodes[1]);
	drm_mm_takedown(&mm);
}
/*
 * Fill in @node's placement and return it, so callers can build a
 * throwaway node inline: expect_reserve_fail(..., set_node(&tmp, s, n)).
 */
static struct drm_mm_node *set_node(struct drm_mm_node *node,
				    u64 start, u64 size)
{
	node->start = start;
	node->size = size;
	return node;
}
/*
 * Attempt a reservation that must be rejected with -ENOSPC. Any other
 * outcome — success, or a different error code — is reported as a test
 * failure. Returns true iff the expected -ENOSPC was seen.
 */
static bool expect_reserve_fail(struct kunit *test, struct drm_mm *mm, struct drm_mm_node *node)
{
	int ret = drm_mm_reserve_node(mm, node);

	if (likely(ret == -ENOSPC))
		return true;

	if (ret) {
		KUNIT_FAIL(test,
			   "impossible reserve failed with wrong error %d [expected %d], node %llu + %llu\n",
			   ret, -ENOSPC, node->start, node->size);
	} else {
		KUNIT_FAIL(test, "impossible reserve succeeded, node %llu + %llu\n",
			   node->start, node->size);
		/* Undo the unexpected reservation so the mm stays consistent. */
		drm_mm_remove_node(node);
	}

	return false;
}
/*
 * Try a table of deliberately out-of-range / overflowing reservations
 * against an mm spanning [0, count * size) and require each to fail
 * with -ENOSPC. Negative values exercise u64 wrap-around. Returns true
 * iff every bogus reservation was rejected.
 */
static bool noinline_for_stack check_reserve_boundaries(struct kunit *test, struct drm_mm *mm,
							unsigned int count,
							u64 size)
{
	const struct boundary {
		u64 start, size;
		const char *name;
	} boundaries[] = {
#define B(st, sz) { (st), (sz), "{ " #st ", " #sz "}" }
		B(0, 0),
		B(-size, 0),
		B(size, 0),
		B(size * count, 0),
		B(-size, size),
		B(-size, -size),
		B(-size, 2 * size),
		B(0, -size),
		B(size, -size),
		B(count * size, size),
		B(count * size, -size),
		B(count * size, count * size),
		B(count * size, -count * size),
		B(count * size, -(count + 1) * size),
		B((count + 1) * size, size),
		B((count + 1) * size, -size),
		B((count + 1) * size, -2 * size),
#undef B
	};
	struct drm_mm_node tmp = {};
	int n;

	for (n = 0; n < ARRAY_SIZE(boundaries); n++) {
		if (!expect_reserve_fail(test, mm, set_node(&tmp, boundaries[n].start,
							    boundaries[n].size))) {
			KUNIT_FAIL(test, "boundary[%d:%s] failed, count=%u, size=%lld\n",
				   n, boundaries[n].name, count, size);
			return false;
		}
	}

	return true;
}
/*
 * Core drm_mm_reserve_node() exercise: fill an mm of @count slots of
 * @size in random order, then verify that out-of-range, duplicate and
 * overlapping reservations are rejected while remove/reinsert cycles
 * keep the tree intact. Returns 0 on success or a negative error code.
 *
 * Fix: the failure message in the remove/reinsert loop printed
 * nodes[n].start although the node being reinserted is
 * nodes[order[n]]; it now reports the correct node.
 */
static int __drm_test_mm_reserve(struct kunit *test, unsigned int count, u64 size)
{
	DRM_RND_STATE(prng, random_seed);
	struct drm_mm mm;
	struct drm_mm_node tmp, *nodes, *node, *next;
	unsigned int *order, n, m, o = 0;
	int ret, err;

	/* For exercising drm_mm_reserve_node(), we want to check that
	 * reservations outside of the drm_mm range are rejected, and to
	 * overlapping and otherwise already occupied ranges. Afterwards,
	 * the tree and nodes should be intact.
	 */

	DRM_MM_BUG_ON(!count);
	DRM_MM_BUG_ON(!size);

	ret = -ENOMEM;
	order = drm_random_order(count, &prng);
	if (!order)
		goto err;

	nodes = vzalloc(array_size(count, sizeof(*nodes)));
	KUNIT_ASSERT_TRUE(test, nodes);

	ret = -EINVAL;
	drm_mm_init(&mm, 0, count * size);

	if (!check_reserve_boundaries(test, &mm, count, size))
		goto out;

	/* Populate every slot in random order. */
	for (n = 0; n < count; n++) {
		nodes[n].start = order[n] * size;
		nodes[n].size = size;

		err = drm_mm_reserve_node(&mm, &nodes[n]);
		if (err) {
			KUNIT_FAIL(test, "reserve failed, step %d, start %llu\n",
				   n, nodes[n].start);
			ret = err;
			goto out;
		}

		if (!drm_mm_node_allocated(&nodes[n])) {
			KUNIT_FAIL(test, "reserved node not allocated! step %d, start %llu\n",
				   n, nodes[n].start);
			goto out;
		}

		/* Reserving the same node twice must fail. */
		if (!expect_reserve_fail(test, &mm, &nodes[n]))
			goto out;
	}

	/* After random insertion the nodes should be in order */
	if (!assert_continuous(test, &mm, size))
		goto out;

	/* Repeated use should then fail */
	drm_random_reorder(order, count, &prng);
	for (n = 0; n < count; n++) {
		if (!expect_reserve_fail(test, &mm, set_node(&tmp, order[n] * size, 1)))
			goto out;

		/* Remove and reinsert should work */
		drm_mm_remove_node(&nodes[order[n]]);
		err = drm_mm_reserve_node(&mm, &nodes[order[n]]);
		if (err) {
			KUNIT_FAIL(test, "reserve failed, step %d, start %llu\n",
				   n, nodes[order[n]].start);
			ret = err;
			goto out;
		}
	}

	if (!assert_continuous(test, &mm, size))
		goto out;

	/* Overlapping use should then fail */
	for (n = 0; n < count; n++) {
		if (!expect_reserve_fail(test, &mm, set_node(&tmp, 0, size * count)))
			goto out;
	}
	for (n = 0; n < count; n++) {
		if (!expect_reserve_fail(test, &mm, set_node(&tmp, size * n, size * (count - n))))
			goto out;
	}

	/* Remove several, reinsert, check full */
	for_each_prime_number(n, min(max_prime, count)) {
		for (m = 0; m < n; m++) {
			node = &nodes[order[(o + m) % count]];
			drm_mm_remove_node(node);
		}

		for (m = 0; m < n; m++) {
			node = &nodes[order[(o + m) % count]];
			err = drm_mm_reserve_node(&mm, node);
			if (err) {
				KUNIT_FAIL(test, "reserve failed, step %d/%d, start %llu\n",
					   m, n, node->start);
				ret = err;
				goto out;
			}
		}

		o += n;

		if (!assert_continuous(test, &mm, size))
			goto out;
	}

	ret = 0;
out:
	drm_mm_for_each_node_safe(node, next, &mm)
		drm_mm_remove_node(node);
	drm_mm_takedown(&mm);
	vfree(nodes);
	kfree(order);
err:
	return ret;
}
/*
 * Run __drm_test_mm_reserve() for power-of-two sizes (and their
 * neighbours, to probe off-by-one behaviour) across the u64 range.
 */
static void drm_test_mm_reserve(struct kunit *test)
{
	const unsigned int count = min_t(unsigned int, BIT(10), max_iterations);
	int n;

	for_each_prime_number_from(n, 1, 54) {
		u64 size = BIT_ULL(n);

		KUNIT_ASSERT_FALSE(test, __drm_test_mm_reserve(test, count, size - 1));
		KUNIT_ASSERT_FALSE(test, __drm_test_mm_reserve(test, count, size));
		KUNIT_ASSERT_FALSE(test, __drm_test_mm_reserve(test, count, size + 1));

		cond_resched();
	}
}
/*
 * Insert @node with the given parameters and verify the result with
 * assert_node(). On a failed verification the node is removed again.
 * Returns true iff the insertion succeeded and the node checks out.
 */
static bool expect_insert(struct kunit *test, struct drm_mm *mm,
			  struct drm_mm_node *node, u64 size, u64 alignment, unsigned long color,
			  const struct insert_mode *mode)
{
	int err;

	err = drm_mm_insert_node_generic(mm, node,
					 size, alignment, color,
					 mode->mode);
	if (err) {
		KUNIT_FAIL(test,
			   "insert (size=%llu, alignment=%llu, color=%lu, mode=%s) failed with err=%d\n",
			   size, alignment, color, mode->name, err);
		return false;
	}

	if (!assert_node(test, node, mm, size, alignment, color)) {
		drm_mm_remove_node(node);
		return false;
	}

	return true;
}
/*
 * Attempt an insertion that must be rejected with -ENOSPC. Success or
 * any other error is reported as a test failure; an unexpected success
 * is undone. Returns true iff -ENOSPC was seen.
 */
static bool expect_insert_fail(struct kunit *test, struct drm_mm *mm, u64 size)
{
	struct drm_mm_node tmp = {};
	int err;

	err = drm_mm_insert_node(mm, &tmp, size);
	if (likely(err == -ENOSPC))
		return true;

	if (!err) {
		KUNIT_FAIL(test, "impossible insert succeeded, node %llu + %llu\n",
			   tmp.start, tmp.size);
		drm_mm_remove_node(&tmp);
	} else {
		KUNIT_FAIL(test,
			   "impossible insert failed with wrong error %d [expected %d], size %llu\n",
			   err, -ENOSPC, size);
	}

	return false;
}
/*
 * Core drm_mm_insert_node_generic() exercise for every insertion mode:
 * fill an mm of @count slots of @size, then check refilling single and
 * multiple holes. With @replace set, each insertion goes through a
 * temporary node that is then swapped in via drm_mm_replace_node(),
 * verifying the replacement inherits placement and parameters.
 * Returns 0 on success or a negative error code.
 */
static int __drm_test_mm_insert(struct kunit *test, unsigned int count, u64 size, bool replace)
{
	DRM_RND_STATE(prng, random_seed);
	const struct insert_mode *mode;
	struct drm_mm mm;
	struct drm_mm_node *nodes, *node, *next;
	unsigned int *order, n, m, o = 0;
	int ret;

	/* Fill a range with lots of nodes, check it doesn't fail too early */

	DRM_MM_BUG_ON(!count);
	DRM_MM_BUG_ON(!size);

	ret = -ENOMEM;
	nodes = vmalloc(array_size(count, sizeof(*nodes)));
	KUNIT_ASSERT_TRUE(test, nodes);

	order = drm_random_order(count, &prng);
	if (!order)
		goto err_nodes;

	ret = -EINVAL;
	drm_mm_init(&mm, 0, count * size);

	for (mode = insert_modes; mode->name; mode++) {
		for (n = 0; n < count; n++) {
			struct drm_mm_node tmp;

			node = replace ? &tmp : &nodes[n];
			memset(node, 0, sizeof(*node));
			if (!expect_insert(test, &mm, node, size, 0, n, mode)) {
				KUNIT_FAIL(test, "%s insert failed, size %llu step %d\n",
					   mode->name, size, n);
				goto out;
			}

			if (replace) {
				drm_mm_replace_node(&tmp, &nodes[n]);
				if (drm_mm_node_allocated(&tmp)) {
					KUNIT_FAIL(test,
						   "replaced old-node still allocated! step %d\n",
						   n);
					goto out;
				}

				if (!assert_node(test, &nodes[n], &mm, size, 0, n)) {
					KUNIT_FAIL(test,
						   "replaced node did not inherit parameters, size %llu step %d\n",
						   size, n);
					goto out;
				}

				if (tmp.start != nodes[n].start) {
					KUNIT_FAIL(test,
						   "replaced node mismatch location expected [%llx + %llx], found [%llx + %llx]\n",
						   tmp.start, size, nodes[n].start, nodes[n].size);
					goto out;
				}
			}
		}

		/* After random insertion the nodes should be in order */
		if (!assert_continuous(test, &mm, size))
			goto out;

		/* Repeated use should then fail */
		if (!expect_insert_fail(test, &mm, size))
			goto out;

		/* Remove one and reinsert, as the only hole it should refill itself */
		for (n = 0; n < count; n++) {
			u64 addr = nodes[n].start;

			drm_mm_remove_node(&nodes[n]);
			if (!expect_insert(test, &mm, &nodes[n], size, 0, n, mode)) {
				KUNIT_FAIL(test, "%s reinsert failed, size %llu step %d\n",
					   mode->name, size, n);
				goto out;
			}

			if (nodes[n].start != addr) {
				KUNIT_FAIL(test,
					   "%s reinsert node moved, step %d, expected %llx, found %llx\n",
					   mode->name, n, addr, nodes[n].start);
				goto out;
			}

			if (!assert_continuous(test, &mm, size))
				goto out;
		}

		/* Remove several, reinsert, check full */
		for_each_prime_number(n, min(max_prime, count)) {
			for (m = 0; m < n; m++) {
				node = &nodes[order[(o + m) % count]];
				drm_mm_remove_node(node);
			}

			for (m = 0; m < n; m++) {
				node = &nodes[order[(o + m) % count]];
				if (!expect_insert(test, &mm, node, size, 0, n, mode)) {
					KUNIT_FAIL(test,
						   "%s multiple reinsert failed, size %llu step %d\n",
						   mode->name, size, n);
					goto out;
				}
			}

			o += n;

			if (!assert_continuous(test, &mm, size))
				goto out;

			if (!expect_insert_fail(test, &mm, size))
				goto out;
		}

		/* Reset the mm between modes. */
		drm_mm_for_each_node_safe(node, next, &mm)
			drm_mm_remove_node(node);
		DRM_MM_BUG_ON(!drm_mm_clean(&mm));

		cond_resched();
	}

	ret = 0;
out:
	drm_mm_for_each_node_safe(node, next, &mm)
		drm_mm_remove_node(node);
	drm_mm_takedown(&mm);
	kfree(order);
err_nodes:
	vfree(nodes);
	return ret;
}
/*
 * Run __drm_test_mm_insert() (no replacement) for power-of-two sizes
 * and their neighbours across the u64 range.
 */
static void drm_test_mm_insert(struct kunit *test)
{
	const unsigned int count = min_t(unsigned int, BIT(10), max_iterations);
	unsigned int n;

	for_each_prime_number_from(n, 1, 54) {
		u64 size = BIT_ULL(n);

		KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert(test, count, size - 1, false));
		KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert(test, count, size, false));
		KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert(test, count, size + 1, false));

		cond_resched();
	}
}
/*
 * Same sweep as drm_test_mm_insert(), but with replace=true so every
 * insertion is funnelled through drm_mm_replace_node().
 */
static void drm_test_mm_replace(struct kunit *test)
{
	const unsigned int count = min_t(unsigned int, BIT(10), max_iterations);
	unsigned int n;

	/* Reuse __drm_test_mm_insert to exercise replacement by inserting a dummy node,
	 * then replacing it with the intended node. We want to check that
	 * the tree is intact and all the information we need is carried
	 * across to the target node.
	 */

	for_each_prime_number_from(n, 1, 54) {
		u64 size = BIT_ULL(n);

		KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert(test, count, size - 1, true));
		KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert(test, count, size, true));
		KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert(test, count, size + 1, true));

		cond_resched();
	}
}
/*
 * Insert @node restricted to [@range_start, @range_end) and verify the
 * result with assert_node(); a failed verification removes the node
 * again. Returns true iff the insertion succeeded and checks out.
 *
 * Fix: the failure message read "nto range" — corrected to "into range".
 */
static bool expect_insert_in_range(struct kunit *test, struct drm_mm *mm, struct drm_mm_node *node,
				   u64 size, u64 alignment, unsigned long color,
				   u64 range_start, u64 range_end, const struct insert_mode *mode)
{
	int err;

	err = drm_mm_insert_node_in_range(mm, node,
					  size, alignment, color,
					  range_start, range_end,
					  mode->mode);
	if (err) {
		KUNIT_FAIL(test,
			   "insert (size=%llu, alignment=%llu, color=%lu, mode=%s) into range [%llx, %llx] failed with err=%d\n",
			   size, alignment, color, mode->name,
			   range_start, range_end, err);
		return false;
	}

	if (!assert_node(test, node, mm, size, alignment, color)) {
		drm_mm_remove_node(node);
		return false;
	}

	return true;
}
/*
 * Attempt a range-restricted insertion that must fail with -ENOSPC.
 * Any other outcome is reported; an unexpected success is undone.
 * Returns true iff -ENOSPC was seen.
 */
static bool expect_insert_in_range_fail(struct kunit *test, struct drm_mm *mm,
					u64 size, u64 range_start, u64 range_end)
{
	struct drm_mm_node tmp = {};
	int err;

	err = drm_mm_insert_node_in_range(mm, &tmp, size, 0, 0, range_start, range_end,
					  0);
	if (likely(err == -ENOSPC))
		return true;

	if (!err) {
		KUNIT_FAIL(test,
			   "impossible insert succeeded, node %llx + %llu, range [%llx, %llx]\n",
			   tmp.start, tmp.size, range_start, range_end);
		drm_mm_remove_node(&tmp);
	} else {
		KUNIT_FAIL(test,
			   "impossible insert failed with wrong error %d [expected %d], size %llu, range [%llx, %llx]\n",
			   err, -ENOSPC, size, range_start, range_end);
	}

	return false;
}
/*
 * Verify that [@start, @end) is completely packed with @size-sized
 * nodes: no further insertion fits, every node lies inside the range at
 * its expected slot, and no allocated node exists before @start or
 * after @end. Returns true on success, false after reporting a failure.
 */
static bool assert_contiguous_in_range(struct kunit *test, struct drm_mm *mm,
				       u64 size, u64 start, u64 end)
{
	struct drm_mm_node *node;
	unsigned int n;

	if (!expect_insert_in_range_fail(test, mm, size, start, end))
		return false;

	/* Index of the first slot at or after @start. */
	n = div64_u64(start + size - 1, size);
	drm_mm_for_each_node(node, mm) {
		if (node->start < start || node->start + node->size > end) {
			KUNIT_FAIL(test,
				   "node %d out of range, address [%llx + %llu], range [%llx, %llx]\n",
				   n, node->start, node->start + node->size, start, end);
			return false;
		}

		if (node->start != n * size) {
			KUNIT_FAIL(test, "node %d out of order, expected start %llx, found %llx\n",
				   n, n * size, node->start);
			return false;
		}

		if (node->size != size) {
			KUNIT_FAIL(test, "node %d has wrong size, expected size %llx, found %llx\n",
				   n, size, node->size);
			return false;
		}

		/* A hole is only acceptable if it extends past @end. */
		if (drm_mm_hole_follows(node) && drm_mm_hole_node_end(node) < end) {
			KUNIT_FAIL(test, "node %d is followed by a hole!\n", n);
			return false;
		}

		n++;
	}

	if (start > 0) {
		node = __drm_mm_interval_first(mm, 0, start - 1);
		if (drm_mm_node_allocated(node)) {
			KUNIT_FAIL(test, "node before start: node=%llx+%llu, start=%llx\n",
				   node->start, node->size, start);
			return false;
		}
	}

	if (end < U64_MAX) {
		node = __drm_mm_interval_first(mm, end, U64_MAX);
		if (drm_mm_node_allocated(node)) {
			KUNIT_FAIL(test, "node after end: node=%llx+%llu, end=%llx\n",
				   node->start, node->size, end);
			return false;
		}
	}

	return true;
}
/*
 * Range-restricted variant of __drm_test_mm_insert(): pack only the
 * sub-range [@start, @end) of an mm spanning count * size, for every
 * insertion mode, and check it fills and refills correctly.
 * Returns 0 on success or a negative error code.
 */
static int __drm_test_mm_insert_range(struct kunit *test, unsigned int count, u64 size,
				      u64 start, u64 end)
{
	const struct insert_mode *mode;
	struct drm_mm mm;
	struct drm_mm_node *nodes, *node, *next;
	unsigned int n, start_n, end_n;
	int ret;

	DRM_MM_BUG_ON(!count);
	DRM_MM_BUG_ON(!size);
	DRM_MM_BUG_ON(end <= start);

	/* Very similar to __drm_test_mm_insert(), but now instead of populating the
	 * full range of the drm_mm, we try to fill a small portion of it.
	 */

	ret = -ENOMEM;
	nodes = vzalloc(array_size(count, sizeof(*nodes)));
	KUNIT_ASSERT_TRUE(test, nodes);

	ret = -EINVAL;
	drm_mm_init(&mm, 0, count * size);

	/* First and last whole slots that fit inside [start, end). */
	start_n = div64_u64(start + size - 1, size);
	end_n = div64_u64(end - size, size);

	for (mode = insert_modes; mode->name; mode++) {
		for (n = start_n; n <= end_n; n++) {
			if (!expect_insert_in_range(test, &mm, &nodes[n], size, size, n,
						    start, end, mode)) {
				KUNIT_FAIL(test,
					   "%s insert failed, size %llu, step %d [%d, %d], range [%llx, %llx]\n",
					   mode->name, size, n, start_n, end_n, start, end);
				goto out;
			}
		}

		if (!assert_contiguous_in_range(test, &mm, size, start, end)) {
			KUNIT_FAIL(test,
				   "%s: range [%llx, %llx] not full after initialisation, size=%llu\n",
				   mode->name, start, end, size);
			goto out;
		}

		/* Remove one and reinsert, it should refill itself */
		for (n = start_n; n <= end_n; n++) {
			u64 addr = nodes[n].start;

			drm_mm_remove_node(&nodes[n]);
			if (!expect_insert_in_range(test, &mm, &nodes[n], size, size, n,
						    start, end, mode)) {
				KUNIT_FAIL(test, "%s reinsert failed, step %d\n", mode->name, n);
				goto out;
			}

			if (nodes[n].start != addr) {
				KUNIT_FAIL(test,
					   "%s reinsert node moved, step %d, expected %llx, found %llx\n",
					   mode->name, n, addr, nodes[n].start);
				goto out;
			}
		}

		if (!assert_contiguous_in_range(test, &mm, size, start, end)) {
			KUNIT_FAIL(test,
				   "%s: range [%llx, %llx] not full after reinsertion, size=%llu\n",
				   mode->name, start, end, size);
			goto out;
		}

		/* Reset the mm between modes. */
		drm_mm_for_each_node_safe(node, next, &mm)
			drm_mm_remove_node(node);
		DRM_MM_BUG_ON(!drm_mm_clean(&mm));

		cond_resched();
	}

	ret = 0;
out:
	drm_mm_for_each_node_safe(node, next, &mm)
		drm_mm_remove_node(node);
	drm_mm_takedown(&mm);
	vfree(nodes);
	return ret;
}
/*
 * Check that insertions entirely or partially outside the bounds of an
 * mm spanning [start, end) are rejected. Returns 0 on success or
 * -EINVAL on the first unexpected acceptance.
 *
 * Fix: the early error returns used to skip drm_mm_takedown(), leaking
 * the initialized range manager; all paths now funnel through cleanup.
 */
static int insert_outside_range(struct kunit *test)
{
	struct drm_mm mm;
	const unsigned int start = 1024;
	const unsigned int end = 2048;
	const unsigned int size = end - start;
	int ret = -EINVAL;

	drm_mm_init(&mm, start, size);

	/* Entirely below the managed range. */
	if (!expect_insert_in_range_fail(test, &mm, 1, 0, start))
		goto out;
	/* Straddling the lower boundary. */
	if (!expect_insert_in_range_fail(test, &mm, size,
					 start - size / 2, start + (size + 1) / 2))
		goto out;
	/* Straddling the upper boundary. */
	if (!expect_insert_in_range_fail(test, &mm, size,
					 end - (size + 1) / 2, end + size / 2))
		goto out;
	/* Entirely above the managed range. */
	if (!expect_insert_in_range_fail(test, &mm, 1, end, end + size))
		goto out;

	ret = 0;
out:
	drm_mm_takedown(&mm);
	return ret;
}
/*
 * Exercise range-restricted insertion: first reject out-of-bounds
 * requests, then sweep __drm_test_mm_insert_range() over assorted
 * sub-ranges (full, offset, halves, middle) for power-of-two sizes.
 */
static void drm_test_mm_insert_range(struct kunit *test)
{
	const unsigned int count = min_t(unsigned int, BIT(13), max_iterations);
	unsigned int n;

	/* Check that requests outside the bounds of drm_mm are rejected. */
	KUNIT_ASSERT_FALSE(test, insert_outside_range(test));

	for_each_prime_number_from(n, 1, 50) {
		const u64 size = BIT_ULL(n);
		const u64 max = count * size;

		KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert_range(test, count, size, 0, max));
		KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert_range(test, count, size, 1, max));
		KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert_range(test, count, size, 0, max - 1));
		KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert_range(test, count, size, 0, max / 2));
		KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert_range(test, count, size,
								    max / 2, max));
		KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert_range(test, count, size,
								    max / 4 + 1, 3 * max / 4 - 1));

		cond_resched();
	}
}
/*
 * Fill @mm with @num_insert 4 KiB nodes and then free every other one
 * to create a fragmented address space for the timing comparison in
 * drm_test_mm_frag(). Returns 0 on success, -EINVAL on insert failure.
 *
 * Fix: the insert check was written "if (!expect_insert(...) != 0)" —
 * a double negation that evaluates the same as "!expect_insert(...)"
 * but obscures the intent; simplified to the plain form.
 */
static int prepare_frag(struct kunit *test, struct drm_mm *mm, struct drm_mm_node *nodes,
			unsigned int num_insert, const struct insert_mode *mode)
{
	unsigned int size = 4096;
	unsigned int i;

	for (i = 0; i < num_insert; i++) {
		if (!expect_insert(test, mm, &nodes[i], size, 0, i, mode)) {
			KUNIT_FAIL(test, "%s insert failed\n", mode->name);
			return -EINVAL;
		}
	}

	/* introduce fragmentation by freeing every other node */
	for (i = 0; i < num_insert; i++) {
		if (i % 2 == 0)
			drm_mm_remove_node(&nodes[i]);
	}

	return 0;
}
/*
 * Insert @num_insert 8 KiB nodes into @mm and return the elapsed wall
 * time in nanoseconds, or 0 if any insertion failed (reported via
 * KUNIT_FAIL).
 *
 * Fix: same "!expect_insert(...) != 0" double negation as in
 * prepare_frag(); simplified to "!expect_insert(...)".
 */
static u64 get_insert_time(struct kunit *test, struct drm_mm *mm,
			   unsigned int num_insert, struct drm_mm_node *nodes,
			   const struct insert_mode *mode)
{
	unsigned int size = 8192;
	ktime_t start;
	unsigned int i;

	start = ktime_get();
	for (i = 0; i < num_insert; i++) {
		if (!expect_insert(test, mm, &nodes[i], size, 0, i, mode)) {
			KUNIT_FAIL(test, "%s insert failed\n", mode->name);
			return 0;
		}
	}

	return ktime_to_ns(ktime_sub(ktime_get(), start));
}
/* Timing check: after fragmenting the address space, doubling the number of
 * insertions must not blow up by more than ~scale_factor, i.e. insertion
 * into a fragmented mm must not degrade super-linearly.
 */
static void drm_test_mm_frag(struct kunit *test)
{
	struct drm_mm mm;
	const struct insert_mode *mode;
	struct drm_mm_node *nodes, *node, *next;
	unsigned int insert_size = 10000;
	unsigned int scale_factor = 4;

	/* We need 4 * insert_size nodes to hold intermediate allocated
	 * drm_mm nodes:
	 * 1x for prepare_frag(),
	 * 1x for the first get_insert_time() run,
	 * 2x for the second (double-sized) get_insert_time() run.
	 */
	nodes = vzalloc(array_size(insert_size * 4, sizeof(*nodes)));
	KUNIT_ASSERT_TRUE(test, nodes);

	/* For BOTTOMUP and TOPDOWN, we first fragment the
	 * address space using prepare_frag() and then try to verify
	 * that insertions scale quadratically from 10k to 20k insertions
	 */
	drm_mm_init(&mm, 1, U64_MAX - 2);
	for (mode = insert_modes; mode->name; mode++) {
		u64 insert_time1, insert_time2;

		/* Only the low/high strategies are sensitive to the hole
		 * layout created above; skip the others.
		 */
		if (mode->mode != DRM_MM_INSERT_LOW &&
		    mode->mode != DRM_MM_INSERT_HIGH)
			continue;

		if (prepare_frag(test, &mm, nodes, insert_size, mode))
			goto err;

		insert_time1 = get_insert_time(test, &mm, insert_size,
					       nodes + insert_size, mode);
		if (insert_time1 == 0)
			goto err;

		insert_time2 = get_insert_time(test, &mm, (insert_size * 2),
					       nodes + insert_size * 2, mode);
		if (insert_time2 == 0)
			goto err;

		kunit_info(test, "%s fragmented insert of %u and %u insertions took %llu and %llu nsecs\n",
			   mode->name, insert_size, insert_size * 2, insert_time1, insert_time2);

		if (insert_time2 > (scale_factor * insert_time1)) {
			KUNIT_FAIL(test, "%s fragmented insert took %llu nsecs more\n",
				   mode->name, insert_time2 - (scale_factor * insert_time1));
			goto err;
		}

		/* reset the mm before timing the next strategy */
		drm_mm_for_each_node_safe(node, next, &mm)
			drm_mm_remove_node(node);
	}

err:
	drm_mm_for_each_node_safe(node, next, &mm)
		drm_mm_remove_node(node);
	drm_mm_takedown(&mm);
	vfree(nodes);
}
/* Verify that every insertion mode honours arbitrary (prime) alignment
 * requests; expect_insert() checks the placement of each node.
 */
static void drm_test_mm_align(struct kunit *test)
{
	const struct insert_mode *mode;
	const unsigned int max_count = min(8192u, max_prime);
	struct drm_mm mm;
	struct drm_mm_node *nodes, *node, *next;
	unsigned int prime;

	/* For each of the possible insertion modes, we pick a few
	 * arbitrary alignments and check that the inserted node
	 * meets our requirements.
	 */
	nodes = vzalloc(array_size(max_count, sizeof(*nodes)));
	KUNIT_ASSERT_TRUE(test, nodes);

	drm_mm_init(&mm, 1, U64_MAX - 2);

	for (mode = insert_modes; mode->name; mode++) {
		unsigned int i = 0;

		/* size and alignment are consecutive primes, so mutually
		 * unrelated to each other and to the mm bounds
		 */
		for_each_prime_number_from(prime, 1, max_count) {
			u64 size = next_prime_number(prime);

			if (!expect_insert(test, &mm, &nodes[i], size, prime, i, mode)) {
				KUNIT_FAIL(test, "%s insert failed with alignment=%d",
					   mode->name, prime);
				goto out;
			}

			i++;
		}

		drm_mm_for_each_node_safe(node, next, &mm)
			drm_mm_remove_node(node);
		DRM_MM_BUG_ON(!drm_mm_clean(&mm));

		cond_resched();
	}

out:
	drm_mm_for_each_node_safe(node, next, &mm)
		drm_mm_remove_node(node);
	drm_mm_takedown(&mm);
	vfree(nodes);
}
/* Helper for the align32/align64 cases: walk power-of-two alignments from
 * BIT(max - 1) down to BIT(1) and check each insertion lands aligned.
 */
static void drm_test_mm_align_pot(struct kunit *test, int max)
{
	struct drm_mm mm;
	struct drm_mm_node *node, *next;
	int bit;

	/* Check that we can align to the full u64 address space */
	drm_mm_init(&mm, 1, U64_MAX - 2);

	for (bit = max - 1; bit; bit--) {
		u64 align, size;

		/* nodes are heap-allocated here (unlike most tests) so the
		 * cleanup loop below must kfree() them as well
		 */
		node = kzalloc(sizeof(*node), GFP_KERNEL);
		if (!node) {
			KUNIT_FAIL(test, "failed to allocate node");
			goto out;
		}

		align = BIT_ULL(bit);
		/* just over half the alignment, so nodes cannot pack tightly */
		size = BIT_ULL(bit - 1) + 1;
		if (!expect_insert(test, &mm, node, size, align, bit, &insert_modes[0])) {
			KUNIT_FAIL(test, "insert failed with alignment=%llx [%d]", align, bit);
			goto out;
		}

		cond_resched();
	}

out:
	drm_mm_for_each_node_safe(node, next, &mm) {
		drm_mm_remove_node(node);
		kfree(node);
	}
	drm_mm_takedown(&mm);
}
/* Power-of-two alignments up to BIT(31). */
static void drm_test_mm_align32(struct kunit *test)
{
	drm_test_mm_align_pot(test, 32);
}
/* Power-of-two alignments up to BIT(63), i.e. the full u64 space. */
static void drm_test_mm_align64(struct kunit *test)
{
	drm_test_mm_align_pot(test, 64);
}
/* Log the parameters and current hit window of an eviction scan (debug aid
 * used when an eviction test fails).
 */
static void show_scan(struct kunit *test, const struct drm_mm_scan *scan)
{
	kunit_info(test, "scan: hit [%llx, %llx], size=%lld, align=%lld, color=%ld\n",
		   scan->hit_start, scan->hit_end, scan->size, scan->alignment, scan->color);
}
/* Log up to @count holes in @mm, annotating each with its allocated
 * neighbours where present (debug aid for failed eviction tests).
 */
static void show_holes(struct kunit *test, const struct drm_mm *mm, int count)
{
	u64 hole_start, hole_end;
	struct drm_mm_node *hole;

	drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
		struct drm_mm_node *next = list_next_entry(hole, node_list);
		const char *node1 = NULL, *node2 = NULL;

		if (drm_mm_node_allocated(hole))
			node1 = kasprintf(GFP_KERNEL, "[%llx + %lld, color=%ld], ",
					  hole->start, hole->size, hole->color);

		if (drm_mm_node_allocated(next))
			node2 = kasprintf(GFP_KERNEL, ", [%llx + %lld, color=%ld]",
					  next->start, next->size, next->color);

		/* NOTE(review): node1/node2 stay NULL when the neighbour is
		 * unallocated (or kasprintf fails); presumably the kernel's
		 * "%s" handling of NULL ("(null)") is acceptable here.
		 */
		kunit_info(test, "%sHole [%llx - %llx, size %lld]%s\n", node1,
			   hole_start, hole_end, hole_end - hole_start, node2);

		kfree(node2);
		kfree(node1);

		if (!--count)
			break;
	}
}
/* A drm_mm_node plus a list link so eviction tests can collect the set of
 * victims chosen by a scan on a local list.
 */
struct evict_node {
	struct drm_mm_node node;
	struct list_head link;	/* membership on an evict_list */
};
/* Feed @nodes (optionally in @order) into the prepared @scan, keep on
 * @evict_list only those the scanner marks for eviction, and remove them
 * from the mm. With @use_color, colour-adjacent neighbours reported by
 * drm_mm_scan_color_evict() are evicted too; without it, any such report
 * is a failure. Returns true iff a non-empty eviction set was found.
 */
static bool evict_nodes(struct kunit *test, struct drm_mm_scan *scan,
			struct evict_node *nodes, unsigned int *order, unsigned int count,
			bool use_color, struct list_head *evict_list)
{
	struct evict_node *e, *en;
	unsigned int i;

	for (i = 0; i < count; i++) {
		e = &nodes[order ? order[i] : i];
		list_add(&e->link, evict_list);
		if (drm_mm_scan_add_block(scan, &e->node))
			break;
	}

	/* drop the candidates the scanner decided it does not need */
	list_for_each_entry_safe(e, en, evict_list, link) {
		if (!drm_mm_scan_remove_block(scan, &e->node))
			list_del(&e->link);
	}
	if (list_empty(evict_list)) {
		KUNIT_FAIL(test,
			   "Failed to find eviction: size=%lld [avail=%d], align=%lld (color=%lu)\n",
			   scan->size, count, scan->alignment, scan->color);
		return false;
	}

	list_for_each_entry(e, evict_list, link)
		drm_mm_remove_node(&e->node);

	if (use_color) {
		struct drm_mm_node *node;

		while ((node = drm_mm_scan_color_evict(scan))) {
			e = container_of(node, typeof(*e), node);
			drm_mm_remove_node(&e->node);
			list_add(&e->link, evict_list);
		}
	} else {
		if (drm_mm_scan_color_evict(scan)) {
			KUNIT_FAIL(test,
				   "drm_mm_scan_color_evict unexpectedly reported overlapping nodes!\n");
			return false;
		}
	}

	return true;
}
/* Run a full scan and then abort it (remove every block again), checking
 * that the mm is left exactly as it was: all nodes still allocated and
 * still linked into the mm's node list.
 */
static bool evict_nothing(struct kunit *test, struct drm_mm *mm,
			  unsigned int total_size, struct evict_node *nodes)
{
	struct drm_mm_scan scan;
	LIST_HEAD(evict_list);
	struct evict_node *e;
	struct drm_mm_node *node;
	unsigned int n;

	drm_mm_scan_init(&scan, mm, 1, 0, 0, 0);
	for (n = 0; n < total_size; n++) {
		e = &nodes[n];
		list_add(&e->link, &evict_list);
		drm_mm_scan_add_block(&scan, &e->node);
	}

	/* abort the scan: no node may actually be evicted */
	list_for_each_entry(e, &evict_list, link)
		drm_mm_scan_remove_block(&scan, &e->node);

	for (n = 0; n < total_size; n++) {
		e = &nodes[n];

		if (!drm_mm_node_allocated(&e->node)) {
			KUNIT_FAIL(test, "node[%d] no longer allocated!\n", n);
			return false;
		}

		/* repurpose link.next as a "seen in mm" marker below */
		e->link.next = NULL;
	}

	drm_mm_for_each_node(node, mm) {
		e = container_of(node, typeof(*e), node);
		e->link.next = &e->link;
	}

	for (n = 0; n < total_size; n++) {
		e = &nodes[n];

		if (!e->link.next) {
			KUNIT_FAIL(test, "node[%d] no longer connected!\n", n);
			return false;
		}
	}

	return assert_continuous(test, mm, nodes[0].node.size);
}
/* Scan for a hole the size of the whole mm, which forces every node to be
 * marked for eviction. Evict them all, verify one mm-wide hole remains,
 * then reinsert every node at its old position and re-check continuity.
 */
static bool evict_everything(struct kunit *test, struct drm_mm *mm,
			     unsigned int total_size, struct evict_node *nodes)
{
	struct drm_mm_scan scan;
	LIST_HEAD(evict_list);
	struct evict_node *e;
	unsigned int n;
	int err;

	drm_mm_scan_init(&scan, mm, total_size, 0, 0, 0);
	for (n = 0; n < total_size; n++) {
		e = &nodes[n];
		list_add(&e->link, &evict_list);
		if (drm_mm_scan_add_block(&scan, &e->node))
			break;
	}

	err = 0;
	list_for_each_entry(e, &evict_list, link) {
		if (!drm_mm_scan_remove_block(&scan, &e->node)) {
			if (!err) {
				KUNIT_FAIL(test, "Node %lld not marked for eviction!\n",
					   e->node.start);
				err = -EINVAL;
			}
		}
	}
	if (err)
		return false;

	list_for_each_entry(e, &evict_list, link)
		drm_mm_remove_node(&e->node);

	if (!assert_one_hole(test, mm, 0, total_size))
		return false;

	/* drm_mm_reserve_node() restores each node at its recorded start */
	list_for_each_entry(e, &evict_list, link) {
		err = drm_mm_reserve_node(mm, &e->node);
		if (err) {
			KUNIT_FAIL(test, "Failed to reinsert node after eviction: start=%llx\n",
				   e->node.start);
			return false;
		}
	}

	return assert_continuous(test, mm, nodes[0].node.size);
}
/* Core eviction check: scan [range_start, range_end) for a size/alignment
 * hole, evict the chosen victims, insert a fresh node with
 * DRM_MM_INSERT_EVICT, and verify it exactly fills the created hole
 * within the requested range. Finally the victims are reinserted at their
 * old positions so the caller's mm is unchanged. Returns 0 or -errno.
 */
static int evict_something(struct kunit *test, struct drm_mm *mm,
			   u64 range_start, u64 range_end, struct evict_node *nodes,
			   unsigned int *order, unsigned int count, unsigned int size,
			   unsigned int alignment, const struct insert_mode *mode)
{
	struct drm_mm_scan scan;
	LIST_HEAD(evict_list);
	struct evict_node *e;
	struct drm_mm_node tmp;
	int err;

	drm_mm_scan_init_with_range(&scan, mm, size, alignment, 0, range_start,
				    range_end, mode->mode);
	if (!evict_nodes(test, &scan, nodes, order, count, false, &evict_list))
		return -EINVAL;

	memset(&tmp, 0, sizeof(tmp));
	err = drm_mm_insert_node_generic(mm, &tmp, size, alignment, 0,
					 DRM_MM_INSERT_EVICT);
	if (err) {
		KUNIT_FAIL(test, "Failed to insert into eviction hole: size=%d, align=%d\n",
			   size, alignment);
		show_scan(test, &scan);
		show_holes(test, mm, 3);
		return err;
	}

	if (tmp.start < range_start || tmp.start + tmp.size > range_end) {
		KUNIT_FAIL(test,
			   "Inserted [address=%llu + %llu] did not fit into the request range [%llu, %llu]\n",
			   tmp.start, tmp.size, range_start, range_end);
		err = -EINVAL;
	}

	/* the node must fill the hole completely: no hole may follow it */
	if (!assert_node(test, &tmp, mm, size, alignment, 0) ||
	    drm_mm_hole_follows(&tmp)) {
		KUNIT_FAIL(test,
			   "Inserted did not fill the eviction hole: size=%lld [%d], align=%d [rem=%lld], start=%llx, hole-follows?=%d\n",
			   tmp.size, size, alignment, misalignment(&tmp, alignment),
			   tmp.start, drm_mm_hole_follows(&tmp));
		err = -EINVAL;
	}

	drm_mm_remove_node(&tmp);
	if (err)
		return err;

	/* restore the evicted nodes to their original positions */
	list_for_each_entry(e, &evict_list, link) {
		err = drm_mm_reserve_node(mm, &e->node);
		if (err) {
			KUNIT_FAIL(test, "Failed to reinsert node after eviction: start=%llx\n",
				   e->node.start);
			return err;
		}
	}

	if (!assert_continuous(test, mm, nodes[0].node.size)) {
		KUNIT_FAIL(test, "range is no longer continuous\n");
		return -EINVAL;
	}

	return 0;
}
/* Eviction over the full drm_mm: no-op and full-range scans first, then
 * evict_something() across sizes/alignments with randomized victim order.
 */
static void drm_test_mm_evict(struct kunit *test)
{
	DRM_RND_STATE(prng, random_seed);
	const unsigned int size = 8192;
	const struct insert_mode *mode;
	struct drm_mm mm;
	struct evict_node *nodes;
	struct drm_mm_node *node, *next;
	unsigned int *order, n;

	/* Here we populate a full drm_mm and then try and insert a new node
	 * by evicting other nodes in a random order. The drm_mm_scan should
	 * pick the first matching hole it finds from the random list. We
	 * repeat that for different allocation strategies, alignments and
	 * sizes to try and stress the hole finder.
	 */
	nodes = vzalloc(array_size(size, sizeof(*nodes)));
	KUNIT_ASSERT_TRUE(test, nodes);

	order = drm_random_order(size, &prng);
	if (!order)
		goto err_nodes;

	/* fill the mm completely with unit-sized nodes */
	drm_mm_init(&mm, 0, size);
	for (n = 0; n < size; n++) {
		if (drm_mm_insert_node(&mm, &nodes[n].node, 1)) {
			KUNIT_FAIL(test, "insert failed, step %d\n", n);
			goto out;
		}
	}

	/* First check that using the scanner doesn't break the mm */
	if (!evict_nothing(test, &mm, size, nodes)) {
		KUNIT_FAIL(test, "evict_nothing() failed\n");
		goto out;
	}
	if (!evict_everything(test, &mm, size, nodes)) {
		KUNIT_FAIL(test, "evict_everything() failed\n");
		goto out;
	}

	for (mode = evict_modes; mode->name; mode++) {
		/* power-of-two sizes, no alignment */
		for (n = 1; n <= size; n <<= 1) {
			drm_random_reorder(order, size, &prng);
			if (evict_something(test, &mm, 0, U64_MAX, nodes, order, size, n, 1,
					    mode)) {
				KUNIT_FAIL(test, "%s evict_something(size=%u) failed\n",
					   mode->name, n);
				goto out;
			}
		}

		/* fixed size, power-of-two alignments */
		for (n = 1; n < size; n <<= 1) {
			drm_random_reorder(order, size, &prng);
			if (evict_something(test, &mm, 0, U64_MAX, nodes, order, size,
					    size / 2, n, mode)) {
				KUNIT_FAIL(test,
					   "%s evict_something(size=%u, alignment=%u) failed\n",
					   mode->name, size / 2, n);
				goto out;
			}
		}

		/* prime alignments with complementary sizes */
		for_each_prime_number_from(n, 1, min(size, max_prime)) {
			unsigned int nsize = (size - n + 1) / 2;

			DRM_MM_BUG_ON(!nsize);

			drm_random_reorder(order, size, &prng);
			if (evict_something(test, &mm, 0, U64_MAX, nodes, order, size,
					    nsize, n, mode)) {
				KUNIT_FAIL(test,
					   "%s evict_something(size=%u, alignment=%u) failed\n",
					   mode->name, nsize, n);
				goto out;
			}
		}

		cond_resched();
	}

out:
	drm_mm_for_each_node_safe(node, next, &mm)
		drm_mm_remove_node(node);
	drm_mm_takedown(&mm);
	kfree(order);
err_nodes:
	vfree(nodes);
}
/* Range-restricted variant of drm_test_mm_evict(): evictions must stay
 * within the middle half of the mm.
 */
static void drm_test_mm_evict_range(struct kunit *test)
{
	DRM_RND_STATE(prng, random_seed);
	const unsigned int size = 8192;
	const unsigned int range_size = size / 2;
	const unsigned int range_start = size / 4;
	const unsigned int range_end = range_start + range_size;
	const struct insert_mode *mode;
	struct drm_mm mm;
	struct evict_node *nodes;
	struct drm_mm_node *node, *next;
	unsigned int *order, n;

	/* Like drm_test_mm_evict() but now we are limiting the search to a
	 * small portion of the full drm_mm.
	 */
	nodes = vzalloc(array_size(size, sizeof(*nodes)));
	KUNIT_ASSERT_TRUE(test, nodes);

	order = drm_random_order(size, &prng);
	if (!order)
		goto err_nodes;

	drm_mm_init(&mm, 0, size);
	for (n = 0; n < size; n++) {
		if (drm_mm_insert_node(&mm, &nodes[n].node, 1)) {
			KUNIT_FAIL(test, "insert failed, step %d\n", n);
			goto out;
		}
	}

	for (mode = evict_modes; mode->name; mode++) {
		for (n = 1; n <= range_size; n <<= 1) {
			drm_random_reorder(order, size, &prng);
			if (evict_something(test, &mm, range_start, range_end, nodes,
					    order, size, n, 1, mode)) {
				KUNIT_FAIL(test,
					   "%s evict_something(size=%u) failed with range [%u, %u]\n",
					   mode->name, n, range_start, range_end);
				goto out;
			}
		}

		for (n = 1; n <= range_size; n <<= 1) {
			drm_random_reorder(order, size, &prng);
			if (evict_something(test, &mm, range_start, range_end, nodes,
					    order, size, range_size / 2, n, mode)) {
				KUNIT_FAIL(test,
					   "%s evict_something(size=%u, alignment=%u) failed with range [%u, %u]\n",
					   mode->name, range_size / 2, n, range_start, range_end);
				goto out;
			}
		}

		for_each_prime_number_from(n, 1, min(range_size, max_prime)) {
			unsigned int nsize = (range_size - n + 1) / 2;

			DRM_MM_BUG_ON(!nsize);

			drm_random_reorder(order, size, &prng);
			if (evict_something(test, &mm, range_start, range_end, nodes,
					    order, size, nsize, n, mode)) {
				KUNIT_FAIL(test,
					   "%s evict_something(size=%u, alignment=%u) failed with range [%u, %u]\n",
					   mode->name, nsize, n, range_start, range_end);
				goto out;
			}
		}

		cond_resched();
	}

out:
	drm_mm_for_each_node_safe(node, next, &mm)
		drm_mm_remove_node(node);
	drm_mm_takedown(&mm);
	kfree(order);
err_nodes:
	vfree(nodes);
}
static unsigned int node_index(const struct drm_mm_node *node)
{
return div64_u64(node->start, node->size);
}
/* Verify DRM_MM_INSERT_HIGH: every allocation must land in the highest
 * available slot, tracked with a bitmap of freed indices. Also fixes a
 * misplaced newline that used to sit mid-string in one failure message.
 */
static void drm_test_mm_topdown(struct kunit *test)
{
	const struct insert_mode *topdown = &insert_modes[TOPDOWN];
	DRM_RND_STATE(prng, random_seed);
	const unsigned int count = 8192;
	unsigned int size;
	unsigned long *bitmap;
	struct drm_mm mm;
	struct drm_mm_node *nodes, *node, *next;
	unsigned int *order, n, m, o = 0;

	/* When allocating top-down, we expect to be returned a node
	 * from a suitable hole at the top of the drm_mm. We check that
	 * the returned node does match the highest available slot.
	 */
	nodes = vzalloc(array_size(count, sizeof(*nodes)));
	KUNIT_ASSERT_TRUE(test, nodes);

	bitmap = bitmap_zalloc(count, GFP_KERNEL);
	if (!bitmap)
		goto err_nodes;

	order = drm_random_order(count, &prng);
	if (!order)
		goto err_bitmap;

	for (size = 1; size <= 64; size <<= 1) {
		drm_mm_init(&mm, 0, size * count);
		for (n = 0; n < count; n++) {
			if (!expect_insert(test, &mm, &nodes[n], size, 0, n, topdown)) {
				KUNIT_FAIL(test, "insert failed, size %u step %d\n", size, n);
				goto out;
			}

			if (drm_mm_hole_follows(&nodes[n])) {
				/* newline moved to the end of the format string */
				KUNIT_FAIL(test,
					   "hole after topdown insert %d, start=%llx, size=%u\n",
					   n, nodes[n].start, size);
				goto out;
			}

			if (!assert_one_hole(test, &mm, 0, size * (count - n - 1)))
				goto out;
		}

		if (!assert_continuous(test, &mm, size))
			goto out;

		drm_random_reorder(order, count, &prng);
		for_each_prime_number_from(n, 1, min(count, max_prime)) {
			/* free n random slots, recording them in the bitmap */
			for (m = 0; m < n; m++) {
				node = &nodes[order[(o + m) % count]];
				drm_mm_remove_node(node);
				__set_bit(node_index(node), bitmap);
			}

			/* refill: each insert must take the topmost free slot */
			for (m = 0; m < n; m++) {
				unsigned int last;

				node = &nodes[order[(o + m) % count]];
				if (!expect_insert(test, &mm, node, size, 0, 0, topdown)) {
					KUNIT_FAIL(test, "insert failed, step %d/%d\n", m, n);
					goto out;
				}

				if (drm_mm_hole_follows(node)) {
					KUNIT_FAIL(test,
						   "hole after topdown insert %d/%d, start=%llx\n",
						   m, n, node->start);
					goto out;
				}

				last = find_last_bit(bitmap, count);
				if (node_index(node) != last) {
					KUNIT_FAIL(test,
						   "node %d/%d, size %d, not inserted into upmost hole, expected %d, found %d\n",
						   m, n, size, last, node_index(node));
					goto out;
				}

				__clear_bit(last, bitmap);
			}

			DRM_MM_BUG_ON(find_first_bit(bitmap, count) != count);

			o += n;
		}

		drm_mm_for_each_node_safe(node, next, &mm)
			drm_mm_remove_node(node);
		DRM_MM_BUG_ON(!drm_mm_clean(&mm));

		cond_resched();
	}

out:
	drm_mm_for_each_node_safe(node, next, &mm)
		drm_mm_remove_node(node);
	drm_mm_takedown(&mm);
	kfree(order);
err_bitmap:
	bitmap_free(bitmap);
err_nodes:
	vfree(nodes);
}
/* Verify DRM_MM_INSERT_LOW: every allocation must land in the lowest
 * available slot, tracked with a bitmap of freed indices.
 */
static void drm_test_mm_bottomup(struct kunit *test)
{
	const struct insert_mode *bottomup = &insert_modes[BOTTOMUP];
	DRM_RND_STATE(prng, random_seed);
	const unsigned int count = 8192;
	unsigned int size;
	unsigned long *bitmap;
	struct drm_mm mm;
	struct drm_mm_node *nodes, *node, *next;
	unsigned int *order, n, m, o = 0;

	/* Like drm_test_mm_topdown, but instead of searching for the last hole,
	 * we search for the first.
	 */
	nodes = vzalloc(array_size(count, sizeof(*nodes)));
	KUNIT_ASSERT_TRUE(test, nodes);

	bitmap = bitmap_zalloc(count, GFP_KERNEL);
	if (!bitmap)
		goto err_nodes;

	order = drm_random_order(count, &prng);
	if (!order)
		goto err_bitmap;

	for (size = 1; size <= 64; size <<= 1) {
		drm_mm_init(&mm, 0, size * count);
		for (n = 0; n < count; n++) {
			if (!expect_insert(test, &mm, &nodes[n], size, 0, n, bottomup)) {
				KUNIT_FAIL(test,
					   "bottomup insert failed, size %u step %d\n", size, n);
				goto out;
			}

			if (!assert_one_hole(test, &mm, size * (n + 1), size * count))
				goto out;
		}

		if (!assert_continuous(test, &mm, size))
			goto out;

		drm_random_reorder(order, count, &prng);
		for_each_prime_number_from(n, 1, min(count, max_prime)) {
			/* free n random slots, recording them in the bitmap */
			for (m = 0; m < n; m++) {
				node = &nodes[order[(o + m) % count]];
				drm_mm_remove_node(node);
				__set_bit(node_index(node), bitmap);
			}

			/* refill: each insert must take the lowest free slot */
			for (m = 0; m < n; m++) {
				unsigned int first;

				node = &nodes[order[(o + m) % count]];
				if (!expect_insert(test, &mm, node, size, 0, 0, bottomup)) {
					KUNIT_FAIL(test, "insert failed, step %d/%d\n", m, n);
					goto out;
				}

				first = find_first_bit(bitmap, count);
				if (node_index(node) != first) {
					KUNIT_FAIL(test,
						   "node %d/%d not inserted into bottom hole, expected %d, found %d\n",
						   m, n, first, node_index(node));
					goto out;
				}

				__clear_bit(first, bitmap);
			}

			DRM_MM_BUG_ON(find_first_bit(bitmap, count) != count);

			o += n;
		}

		drm_mm_for_each_node_safe(node, next, &mm)
			drm_mm_remove_node(node);
		DRM_MM_BUG_ON(!drm_mm_clean(&mm));

		cond_resched();
	}

out:
	drm_mm_for_each_node_safe(node, next, &mm)
		drm_mm_remove_node(node);
	drm_mm_takedown(&mm);
	kfree(order);
err_bitmap:
	bitmap_free(bitmap);
err_nodes:
	vfree(nodes);
}
/* Reserve two single-unit nodes with holes around them in a tiny mm, then
 * check that a 2-unit insertion with the given @mode lands in one of the
 * remaining holes.
 */
static void drm_test_mm_once(struct kunit *test, unsigned int mode)
{
	struct drm_mm mm;
	struct drm_mm_node rsvd_lo, rsvd_hi, node;

	drm_mm_init(&mm, 0, 7);

	memset(&rsvd_lo, 0, sizeof(rsvd_lo));
	rsvd_lo.start = 1;
	rsvd_lo.size = 1;
	if (drm_mm_reserve_node(&mm, &rsvd_lo)) {
		KUNIT_FAIL(test, "Could not reserve low node\n");
		goto err;
	}

	memset(&rsvd_hi, 0, sizeof(rsvd_hi));
	rsvd_hi.start = 5;
	rsvd_hi.size = 1;
	if (drm_mm_reserve_node(&mm, &rsvd_hi)) {
		/* fixed copy-pasted message: this is the high node */
		KUNIT_FAIL(test, "Could not reserve high node\n");
		goto err_lo;
	}

	if (!drm_mm_hole_follows(&rsvd_lo) || !drm_mm_hole_follows(&rsvd_hi)) {
		KUNIT_FAIL(test, "Expected a hole after lo and high nodes!\n");
		goto err_hi;
	}

	memset(&node, 0, sizeof(node));
	if (drm_mm_insert_node_generic(&mm, &node, 2, 0, 0, mode)) {
		KUNIT_FAIL(test, "Could not insert the node into the available hole!\n");
		goto err_hi;
	}

	drm_mm_remove_node(&node);
err_hi:
	drm_mm_remove_node(&rsvd_hi);
err_lo:
	drm_mm_remove_node(&rsvd_lo);
err:
	drm_mm_takedown(&mm);
}
/* DRM_MM_INSERT_LOW must pick the hole after the low reserved node. */
static void drm_test_mm_lowest(struct kunit *test)
{
	drm_test_mm_once(test, DRM_MM_INSERT_LOW);
}
/* DRM_MM_INSERT_HIGH must pick the hole after the high reserved node. */
static void drm_test_mm_highest(struct kunit *test)
{
	drm_test_mm_once(test, DRM_MM_INSERT_HIGH);
}
/* color_adjust callback: shrink the usable hole by one unit on each side
 * that borders an allocated node of a different colour, forcing a gap
 * between differently-coloured neighbours.
 */
static void separate_adjacent_colors(const struct drm_mm_node *node,
				     unsigned long color, u64 *start, u64 *end)
{
	if (drm_mm_node_allocated(node) && node->color != color)
		++*start;

	node = list_next_entry(node, node_list);
	if (drm_mm_node_allocated(node) && node->color != color)
		--*end;
}
/* Return true (and fail the test) if @node directly touches the next
 * allocated node with no hole in between — which separate_adjacent_colors
 * should have prevented.
 */
static bool colors_abutt(struct kunit *test, const struct drm_mm_node *node)
{
	if (!drm_mm_hole_follows(node) &&
	    drm_mm_node_allocated(list_next_entry(node, node_list))) {
		KUNIT_FAIL(test, "colors abutt; %ld [%llx + %llx] is next to %ld [%llx + %llx]!\n",
			   node->color, node->start, node->size,
			   list_next_entry(node, node_list)->color,
			   list_next_entry(node, node_list)->start,
			   list_next_entry(node, node_list)->size);
		return true;
	}

	return false;
}
/* Check colour handling: first that the colour is stored on insertion,
 * then that a color_adjust callback is honoured by both reservation and
 * insertion (gaps appear between differently-coloured neighbours).
 */
static void drm_test_mm_color(struct kunit *test)
{
	const unsigned int count = min(4096u, max_iterations);
	const struct insert_mode *mode;
	struct drm_mm mm;
	struct drm_mm_node *node, *nn;
	unsigned int n;

	/* Color adjustment complicates everything. First we just check
	 * that when we insert a node we apply any color_adjustment callback.
	 * The callback we use should ensure that there is a gap between
	 * any two nodes, and so after each insertion we check that those
	 * holes are inserted and that they are preserved.
	 */
	drm_mm_init(&mm, 0, U64_MAX);

	/* colour == size throughout, so it is easy to verify afterwards */
	for (n = 1; n <= count; n++) {
		node = kzalloc(sizeof(*node), GFP_KERNEL);
		if (!node)
			goto out;

		if (!expect_insert(test, &mm, node, n, 0, n, &insert_modes[0])) {
			KUNIT_FAIL(test, "insert failed, step %d\n", n);
			kfree(node);
			goto out;
		}
	}

	drm_mm_for_each_node_safe(node, nn, &mm) {
		if (node->color != node->size) {
			KUNIT_FAIL(test, "invalid color stored: expected %lld, found %ld\n",
				   node->size, node->color);
			goto out;
		}

		drm_mm_remove_node(node);
		kfree(node);
	}

	/* Now, let's start experimenting with applying a color callback */
	mm.color_adjust = separate_adjacent_colors;
	for (mode = insert_modes; mode->name; mode++) {
		u64 last;

		node = kzalloc(sizeof(*node), GFP_KERNEL);
		if (!node)
			goto out;

		node->size = 1 + 2 * count;
		node->color = node->size;

		if (drm_mm_reserve_node(&mm, node)) {
			KUNIT_FAIL(test, "initial reserve failed!\n");
			goto out;
		}

		last = node->start + node->size;

		for (n = 1; n <= count; n++) {
			int rem;

			node = kzalloc(sizeof(*node), GFP_KERNEL);
			if (!node)
				goto out;

			node->start = last;
			node->size = n + count;
			node->color = node->size;

			/* reserving flush against the previous node must be
			 * rejected by the colour callback
			 */
			if (drm_mm_reserve_node(&mm, node) != -ENOSPC) {
				KUNIT_FAIL(test, "reserve %d did not report color overlap!", n);
				goto out;
			}

			/* shift past the forced gap and align to the size */
			node->start += n + 1;
			rem = misalignment(node, n + count);
			node->start += n + count - rem;

			if (drm_mm_reserve_node(&mm, node)) {
				KUNIT_FAIL(test, "reserve %d failed", n);
				goto out;
			}

			last = node->start + node->size;
		}

		/* insertion with alignment == size == colour */
		for (n = 1; n <= count; n++) {
			node = kzalloc(sizeof(*node), GFP_KERNEL);
			if (!node)
				goto out;

			if (!expect_insert(test, &mm, node, n, n, n, mode)) {
				KUNIT_FAIL(test, "%s insert failed, step %d\n", mode->name, n);
				kfree(node);
				goto out;
			}
		}

		drm_mm_for_each_node_safe(node, nn, &mm) {
			u64 rem;

			if (node->color != node->size) {
				KUNIT_FAIL(test,
					   "%s invalid color stored: expected %lld, found %ld\n",
					   mode->name, node->size, node->color);
				goto out;
			}

			if (colors_abutt(test, node))
				goto out;

			div64_u64_rem(node->start, node->size, &rem);
			if (rem) {
				KUNIT_FAIL(test,
					   "%s colored node misaligned, start=%llx expected alignment=%lld [rem=%lld]\n",
					   mode->name, node->start, node->size, rem);
				goto out;
			}

			drm_mm_remove_node(node);
			kfree(node);
		}

		cond_resched();
	}

out:
	drm_mm_for_each_node_safe(node, nn, &mm) {
		drm_mm_remove_node(node);
		kfree(node);
	}
	drm_mm_takedown(&mm);
}
/* Colour-aware counterpart of evict_something(): scan with @color, evict
 * (including colour-adjacent neighbours), insert a node of that colour
 * with DRM_MM_INSERT_EVICT and verify placement, then restore the evicted
 * nodes. Returns 0 or -errno.
 */
static int evict_color(struct kunit *test, struct drm_mm *mm, u64 range_start,
		       u64 range_end, struct evict_node *nodes, unsigned int *order,
		       unsigned int count, unsigned int size, unsigned int alignment,
		       unsigned long color, const struct insert_mode *mode)
{
	struct drm_mm_scan scan;
	LIST_HEAD(evict_list);
	struct evict_node *e;
	struct drm_mm_node tmp;
	int err;

	drm_mm_scan_init_with_range(&scan, mm, size, alignment, color, range_start,
				    range_end, mode->mode);
	if (!evict_nodes(test, &scan, nodes, order, count, true, &evict_list))
		return -EINVAL;

	memset(&tmp, 0, sizeof(tmp));
	err = drm_mm_insert_node_generic(mm, &tmp, size, alignment, color,
					 DRM_MM_INSERT_EVICT);
	if (err) {
		KUNIT_FAIL(test,
			   "Failed to insert into eviction hole: size=%d, align=%d, color=%lu, err=%d\n",
			   size, alignment, color, err);
		show_scan(test, &scan);
		show_holes(test, mm, 3);
		return err;
	}

	if (tmp.start < range_start || tmp.start + tmp.size > range_end) {
		KUNIT_FAIL(test,
			   "Inserted [address=%llu + %llu] did not fit into the request range [%llu, %llu]\n",
			   tmp.start, tmp.size, range_start, range_end);
		err = -EINVAL;
	}

	/* the colour callback must have kept a gap to unlike neighbours */
	if (colors_abutt(test, &tmp))
		err = -EINVAL;

	if (!assert_node(test, &tmp, mm, size, alignment, color)) {
		KUNIT_FAIL(test,
			   "Inserted did not fit the eviction hole: size=%lld [%d], align=%d [rem=%lld], start=%llx\n",
			   tmp.size, size, alignment, misalignment(&tmp, alignment), tmp.start);
		err = -EINVAL;
	}

	drm_mm_remove_node(&tmp);
	if (err)
		return err;

	/* restore the evicted nodes to their original positions */
	list_for_each_entry(e, &evict_list, link) {
		err = drm_mm_reserve_node(mm, &e->node);
		if (err) {
			KUNIT_FAIL(test, "Failed to reinsert node after eviction: start=%llx\n",
				   e->node.start);
			return err;
		}
	}

	cond_resched();
	return 0;
}
/* Eviction with a colour callback over the whole mm, across sizes and
 * alignments with randomized victim order.
 */
static void drm_test_mm_color_evict(struct kunit *test)
{
	DRM_RND_STATE(prng, random_seed);
	const unsigned int total_size = min(8192u, max_iterations);
	const struct insert_mode *mode;
	unsigned long color = 0;
	struct drm_mm mm;
	struct evict_node *nodes;
	struct drm_mm_node *node, *next;
	unsigned int *order, n;

	/* Check that the drm_mm_scan also honours color adjustment when
	 * choosing its victims to create a hole. Our color_adjust does not
	 * allow two nodes to be placed together without an intervening hole
	 * enlarging the set of victims that must be evicted.
	 */
	nodes = vzalloc(array_size(total_size, sizeof(*nodes)));
	KUNIT_ASSERT_TRUE(test, nodes);

	order = drm_random_order(total_size, &prng);
	if (!order)
		goto err_nodes;

	/* 2 * total_size - 1: room for the forced inter-colour gaps */
	drm_mm_init(&mm, 0, 2 * total_size - 1);
	mm.color_adjust = separate_adjacent_colors;
	for (n = 0; n < total_size; n++) {
		if (!expect_insert(test, &mm, &nodes[n].node,
				   1, 0, color++,
				   &insert_modes[0])) {
			KUNIT_FAIL(test, "insert failed, step %d\n", n);
			goto out;
		}
	}

	for (mode = evict_modes; mode->name; mode++) {
		for (n = 1; n <= total_size; n <<= 1) {
			drm_random_reorder(order, total_size, &prng);
			if (evict_color(test, &mm, 0, U64_MAX, nodes, order, total_size,
					n, 1, color++, mode)) {
				KUNIT_FAIL(test, "%s evict_color(size=%u) failed\n", mode->name, n);
				goto out;
			}
		}

		for (n = 1; n < total_size; n <<= 1) {
			drm_random_reorder(order, total_size, &prng);
			if (evict_color(test, &mm, 0, U64_MAX, nodes, order, total_size,
					total_size / 2, n, color++, mode)) {
				KUNIT_FAIL(test, "%s evict_color(size=%u, alignment=%u) failed\n",
					   mode->name, total_size / 2, n);
				goto out;
			}
		}

		for_each_prime_number_from(n, 1, min(total_size, max_prime)) {
			unsigned int nsize = (total_size - n + 1) / 2;

			DRM_MM_BUG_ON(!nsize);

			drm_random_reorder(order, total_size, &prng);
			if (evict_color(test, &mm, 0, U64_MAX, nodes, order, total_size,
					nsize, n, color++, mode)) {
				KUNIT_FAIL(test, "%s evict_color(size=%u, alignment=%u) failed\n",
					   mode->name, nsize, n);
				goto out;
			}
		}

		cond_resched();
	}

out:
	drm_mm_for_each_node_safe(node, next, &mm)
		drm_mm_remove_node(node);
	drm_mm_takedown(&mm);
	kfree(order);
err_nodes:
	vfree(nodes);
}
/* Range-restricted variant of drm_test_mm_color_evict(). Also fixes the
 * second-loop failure message, which printed total_size / 2 even though
 * the call under test inserts range_size / 2.
 */
static void drm_test_mm_color_evict_range(struct kunit *test)
{
	DRM_RND_STATE(prng, random_seed);
	const unsigned int total_size = 8192;
	const unsigned int range_size = total_size / 2;
	const unsigned int range_start = total_size / 4;
	const unsigned int range_end = range_start + range_size;
	const struct insert_mode *mode;
	unsigned long color = 0;
	struct drm_mm mm;
	struct evict_node *nodes;
	struct drm_mm_node *node, *next;
	unsigned int *order, n;

	/* Like drm_test_mm_color_evict(), but limited to small portion of the full
	 * drm_mm range.
	 */
	nodes = vzalloc(array_size(total_size, sizeof(*nodes)));
	KUNIT_ASSERT_TRUE(test, nodes);

	order = drm_random_order(total_size, &prng);
	if (!order)
		goto err_nodes;

	drm_mm_init(&mm, 0, 2 * total_size - 1);
	mm.color_adjust = separate_adjacent_colors;
	for (n = 0; n < total_size; n++) {
		if (!expect_insert(test, &mm, &nodes[n].node,
				   1, 0, color++,
				   &insert_modes[0])) {
			KUNIT_FAIL(test, "insert failed, step %d\n", n);
			goto out;
		}
	}

	for (mode = evict_modes; mode->name; mode++) {
		for (n = 1; n <= range_size; n <<= 1) {
			/* NOTE(review): only the first range_size entries are
			 * shuffled here, while the later loops shuffle all
			 * total_size entries — presumably intentional, but
			 * worth confirming.
			 */
			drm_random_reorder(order, range_size, &prng);
			if (evict_color(test, &mm, range_start, range_end, nodes, order,
					total_size, n, 1, color++, mode)) {
				KUNIT_FAIL(test,
					   "%s evict_color(size=%u) failed for range [%x, %x]\n",
					   mode->name, n, range_start, range_end);
				goto out;
			}
		}

		for (n = 1; n < range_size; n <<= 1) {
			drm_random_reorder(order, total_size, &prng);
			if (evict_color(test, &mm, range_start, range_end, nodes, order,
					total_size, range_size / 2, n, color++, mode)) {
				KUNIT_FAIL(test,
					   "%s evict_color(size=%u, alignment=%u) failed for range [%x, %x]\n",
					   mode->name, range_size / 2, n, range_start, range_end);
				goto out;
			}
		}

		for_each_prime_number_from(n, 1, min(range_size, max_prime)) {
			unsigned int nsize = (range_size - n + 1) / 2;

			DRM_MM_BUG_ON(!nsize);

			drm_random_reorder(order, total_size, &prng);
			if (evict_color(test, &mm, range_start, range_end, nodes, order,
					total_size, nsize, n, color++, mode)) {
				KUNIT_FAIL(test,
					   "%s evict_color(size=%u, alignment=%u) failed for range [%x, %x]\n",
					   mode->name, nsize, n, range_start, range_end);
				goto out;
			}
		}

		cond_resched();
	}

out:
	drm_mm_for_each_node_safe(node, next, &mm)
		drm_mm_remove_node(node);
	drm_mm_takedown(&mm);
	kfree(order);
err_nodes:
	vfree(nodes);
}
/* Suite setup: pick a non-zero random seed unless one was supplied via the
 * module parameter, and log it so failing runs can be reproduced.
 */
static int drm_mm_suite_init(struct kunit_suite *suite)
{
	if (random_seed == 0) {
		do {
			random_seed = get_random_u32();
		} while (random_seed == 0);
	}

	kunit_info(suite,
		   "Testing DRM range manager, with random_seed=0x%x max_iterations=%u max_prime=%u\n",
		   random_seed, max_iterations, max_prime);

	return 0;
}
/* Read-only module parameters: seed and bounds for the randomized tests. */
module_param(random_seed, uint, 0400);
module_param(max_iterations, uint, 0400);
module_param(max_prime, uint, 0400);
/* KUnit case table for the drm_mm suite; terminated by the empty entry. */
static struct kunit_case drm_mm_tests[] = {
	KUNIT_CASE(drm_test_mm_init),
	KUNIT_CASE(drm_test_mm_debug),
	KUNIT_CASE(drm_test_mm_reserve),
	KUNIT_CASE(drm_test_mm_insert),
	KUNIT_CASE(drm_test_mm_replace),
	KUNIT_CASE(drm_test_mm_insert_range),
	KUNIT_CASE(drm_test_mm_frag),
	KUNIT_CASE(drm_test_mm_align),
	KUNIT_CASE(drm_test_mm_align32),
	KUNIT_CASE(drm_test_mm_align64),
	KUNIT_CASE(drm_test_mm_evict),
	KUNIT_CASE(drm_test_mm_evict_range),
	KUNIT_CASE(drm_test_mm_topdown),
	KUNIT_CASE(drm_test_mm_bottomup),
	KUNIT_CASE(drm_test_mm_lowest),
	KUNIT_CASE(drm_test_mm_highest),
	KUNIT_CASE(drm_test_mm_color),
	KUNIT_CASE(drm_test_mm_color_evict),
	KUNIT_CASE(drm_test_mm_color_evict_range),
	{}
};
/* Suite registration: drm_mm_suite_init() seeds the PRNG before any case. */
static struct kunit_suite drm_mm_test_suite = {
	.name = "drm_mm",
	.suite_init = drm_mm_suite_init,
	.test_cases = drm_mm_tests,
};

kunit_test_suite(drm_mm_test_suite);
MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL");
| linux-master | drivers/gpu/drm/tests/drm_mm_test.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Kunit test for drm_modes functions
*/
#include <drm/drm_connector.h>
#include <kunit/test.h>
/* One parameterized case: a TV mode name and the enum value it must map to. */
struct drm_get_tv_mode_from_name_test {
	const char *name;			/* input string */
	enum drm_connector_tv_mode expected_mode;	/* expected lookup result */
};
/* Shorthand initializer for a drm_get_tv_mode_from_name_test entry. */
#define TV_MODE_NAME(_name, _mode)		\
	{					\
		.name = _name,			\
		.expected_mode = _mode,		\
	}
/* Parameterized case body: every known TV mode name must resolve to its
 * matching enum drm_connector_tv_mode value.
 */
static void drm_test_get_tv_mode_from_name_valid(struct kunit *test)
{
	const struct drm_get_tv_mode_from_name_test *params = test->param_value;

	KUNIT_EXPECT_EQ(test,
			drm_get_tv_mode_from_name(params->name, strlen(params->name)),
			params->expected_mode);
}
/* All valid TV mode names covered by the DRM_MODE_TV_MODE_* enum. */
static const
struct drm_get_tv_mode_from_name_test drm_get_tv_mode_from_name_valid_tests[] = {
	TV_MODE_NAME("NTSC", DRM_MODE_TV_MODE_NTSC),
	TV_MODE_NAME("NTSC-443", DRM_MODE_TV_MODE_NTSC_443),
	TV_MODE_NAME("NTSC-J", DRM_MODE_TV_MODE_NTSC_J),
	TV_MODE_NAME("PAL", DRM_MODE_TV_MODE_PAL),
	TV_MODE_NAME("PAL-M", DRM_MODE_TV_MODE_PAL_M),
	TV_MODE_NAME("PAL-N", DRM_MODE_TV_MODE_PAL_N),
	TV_MODE_NAME("SECAM", DRM_MODE_TV_MODE_SECAM),
};
static void
drm_get_tv_mode_from_name_valid_desc(const struct drm_get_tv_mode_from_name_test *t,
char *desc)
{
sprintf(desc, "%s", t->name);
}
KUNIT_ARRAY_PARAM(drm_get_tv_mode_from_name_valid,
drm_get_tv_mode_from_name_valid_tests,
drm_get_tv_mode_from_name_valid_desc);
/*
 * A truncated mode name ("NTS") must not match any TV mode; the helper
 * is expected to return a negative error code.
 *
 * Also drops the spurious ';' that followed the function's closing
 * brace in the original (flagged by checkpatch / -Wextra).
 */
static void drm_test_get_tv_mode_from_name_truncated(struct kunit *test)
{
	const char *name = "NTS";
	int ret;

	ret = drm_get_tv_mode_from_name(name, strlen(name));
	KUNIT_EXPECT_LT(test, ret, 0);
}
/* Parameterized valid-name cases plus the single truncated-name case. */
static struct kunit_case drm_get_tv_mode_from_name_tests[] = {
	KUNIT_CASE_PARAM(drm_test_get_tv_mode_from_name_valid,
			 drm_get_tv_mode_from_name_valid_gen_params),
	KUNIT_CASE(drm_test_get_tv_mode_from_name_truncated),
	{ }	/* sentinel */
};

/* Suite registration for the drm_get_tv_mode_from_name() tests. */
static struct kunit_suite drm_get_tv_mode_from_name_test_suite = {
	.name = "drm_get_tv_mode_from_name",
	.test_cases = drm_get_tv_mode_from_name_tests,
};
kunit_test_suite(drm_get_tv_mode_from_name_test_suite);
MODULE_AUTHOR("Maxime Ripard <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/gpu/drm/tests/drm_connector_test.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Test cases for the drm_framebuffer functions
*
* Copyright (c) 2022 Maíra Canal <[email protected]>
*/
#include <kunit/test.h>
#include <drm/drm_device.h>
#include <drm/drm_mode.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_print.h>
#include "../drm_crtc_internal.h"
#define MIN_WIDTH 4
#define MAX_WIDTH 4096
#define MIN_HEIGHT 4
#define MAX_HEIGHT 4096
/*
 * One framebuffer-creation case: the addfb2-style request to submit and
 * whether the driver fb_create hook is expected to be reached (1) or
 * the core is expected to reject the request before that (0).
 */
struct drm_framebuffer_test {
	int buffer_created;
	struct drm_mode_fb_cmd2 cmd;
	const char *name;
};
static const struct drm_framebuffer_test drm_framebuffer_create_cases[] = {
{ .buffer_created = 1, .name = "ABGR8888 normal sizes",
.cmd = { .width = 600, .height = 600, .pixel_format = DRM_FORMAT_ABGR8888,
.handles = { 1, 0, 0 }, .pitches = { 4 * 600, 0, 0 },
}
},
{ .buffer_created = 1, .name = "ABGR8888 max sizes",
.cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_ABGR8888,
.handles = { 1, 0, 0 }, .pitches = { 4 * MAX_WIDTH, 0, 0 },
}
},
{ .buffer_created = 1, .name = "ABGR8888 pitch greater than min required",
.cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_ABGR8888,
.handles = { 1, 0, 0 }, .pitches = { 4 * MAX_WIDTH + 1, 0, 0 },
}
},
{ .buffer_created = 0, .name = "ABGR8888 pitch less than min required",
.cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_ABGR8888,
.handles = { 1, 0, 0 }, .pitches = { 4 * MAX_WIDTH - 1, 0, 0 },
}
},
{ .buffer_created = 0, .name = "ABGR8888 Invalid width",
.cmd = { .width = MAX_WIDTH + 1, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_ABGR8888,
.handles = { 1, 0, 0 }, .pitches = { 4 * (MAX_WIDTH + 1), 0, 0 },
}
},
{ .buffer_created = 0, .name = "ABGR8888 Invalid buffer handle",
.cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_ABGR8888,
.handles = { 0, 0, 0 }, .pitches = { 4 * MAX_WIDTH, 0, 0 },
}
},
{ .buffer_created = 0, .name = "No pixel format",
.cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = 0,
.handles = { 1, 0, 0 }, .pitches = { 4 * MAX_WIDTH, 0, 0 },
}
},
{ .buffer_created = 0, .name = "ABGR8888 Width 0",
.cmd = { .width = 0, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_ABGR8888,
.handles = { 1, 0, 0 }, .pitches = { 4 * MAX_WIDTH, 0, 0 },
}
},
{ .buffer_created = 0, .name = "ABGR8888 Height 0",
.cmd = { .width = MAX_WIDTH, .height = 0, .pixel_format = DRM_FORMAT_ABGR8888,
.handles = { 1, 0, 0 }, .pitches = { 4 * MAX_WIDTH, 0, 0 },
}
},
{ .buffer_created = 0, .name = "ABGR8888 Out of bound height * pitch combination",
.cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_ABGR8888,
.handles = { 1, 0, 0 }, .offsets = { UINT_MAX - 1, 0, 0 },
.pitches = { 4 * MAX_WIDTH, 0, 0 },
}
},
{ .buffer_created = 1, .name = "ABGR8888 Large buffer offset",
.cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_ABGR8888,
.handles = { 1, 0, 0 }, .offsets = { UINT_MAX / 2, 0, 0 },
.pitches = { 4 * MAX_WIDTH, 0, 0 },
}
},
{ .buffer_created = 1, .name = "ABGR8888 Set DRM_MODE_FB_MODIFIERS without modifiers",
.cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_ABGR8888,
.handles = { 1, 0, 0 }, .offsets = { UINT_MAX / 2, 0, 0 },
.pitches = { 4 * MAX_WIDTH, 0, 0 }, .flags = DRM_MODE_FB_MODIFIERS,
}
},
{ .buffer_created = 1, .name = "ABGR8888 Valid buffer modifier",
.cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_ABGR8888,
.handles = { 1, 0, 0 }, .offsets = { UINT_MAX / 2, 0, 0 },
.pitches = { 4 * MAX_WIDTH, 0, 0 }, .flags = DRM_MODE_FB_MODIFIERS,
.modifier = { AFBC_FORMAT_MOD_YTR, 0, 0 },
}
},
{ .buffer_created = 0,
.name = "ABGR8888 Invalid buffer modifier(DRM_FORMAT_MOD_SAMSUNG_64_32_TILE)",
.cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_ABGR8888,
.handles = { 1, 0, 0 }, .offsets = { UINT_MAX / 2, 0, 0 },
.pitches = { 4 * MAX_WIDTH, 0, 0 }, .flags = DRM_MODE_FB_MODIFIERS,
.modifier = { DRM_FORMAT_MOD_SAMSUNG_64_32_TILE, 0, 0 },
}
},
{ .buffer_created = 1, .name = "ABGR8888 Extra pitches without DRM_MODE_FB_MODIFIERS",
.cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_ABGR8888,
.handles = { 1, 0, 0 }, .offsets = { UINT_MAX / 2, 0, 0 },
.pitches = { 4 * MAX_WIDTH, 4 * MAX_WIDTH, 0 },
}
},
{ .buffer_created = 0, .name = "ABGR8888 Extra pitches with DRM_MODE_FB_MODIFIERS",
.cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_ABGR8888,
.handles = { 1, 0, 0 }, .flags = DRM_MODE_FB_MODIFIERS,
.pitches = { 4 * MAX_WIDTH, 4 * MAX_WIDTH, 0 },
}
},
{ .buffer_created = 1, .name = "NV12 Normal sizes",
.cmd = { .width = 600, .height = 600, .pixel_format = DRM_FORMAT_NV12,
.handles = { 1, 1, 0 }, .pitches = { 600, 600, 0 },
}
},
{ .buffer_created = 1, .name = "NV12 Max sizes",
.cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_NV12,
.handles = { 1, 1, 0 }, .pitches = { MAX_WIDTH, MAX_WIDTH, 0 },
}
},
{ .buffer_created = 0, .name = "NV12 Invalid pitch",
.cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_NV12,
.handles = { 1, 1, 0 }, .pitches = { MAX_WIDTH, MAX_WIDTH - 1, 0 },
}
},
{ .buffer_created = 0, .name = "NV12 Invalid modifier/missing DRM_MODE_FB_MODIFIERS flag",
.cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_NV12,
.handles = { 1, 1, 0 }, .modifier = { DRM_FORMAT_MOD_SAMSUNG_64_32_TILE, 0, 0 },
.pitches = { MAX_WIDTH, MAX_WIDTH, 0 },
}
},
{ .buffer_created = 0, .name = "NV12 different modifier per-plane",
.cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_NV12,
.handles = { 1, 1, 0 }, .flags = DRM_MODE_FB_MODIFIERS,
.modifier = { DRM_FORMAT_MOD_SAMSUNG_64_32_TILE, 0, 0 },
.pitches = { MAX_WIDTH, MAX_WIDTH, 0 },
}
},
{ .buffer_created = 1, .name = "NV12 with DRM_FORMAT_MOD_SAMSUNG_64_32_TILE",
.cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_NV12,
.handles = { 1, 1, 0 }, .flags = DRM_MODE_FB_MODIFIERS,
.modifier = { DRM_FORMAT_MOD_SAMSUNG_64_32_TILE,
DRM_FORMAT_MOD_SAMSUNG_64_32_TILE, 0 },
.pitches = { MAX_WIDTH, MAX_WIDTH, 0 },
}
},
{ .buffer_created = 0, .name = "NV12 Valid modifiers without DRM_MODE_FB_MODIFIERS",
.cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_NV12,
.handles = { 1, 1, 0 }, .modifier = { DRM_FORMAT_MOD_SAMSUNG_64_32_TILE,
DRM_FORMAT_MOD_SAMSUNG_64_32_TILE, 0 },
.pitches = { MAX_WIDTH, MAX_WIDTH, 0 },
}
},
{ .buffer_created = 0, .name = "NV12 Modifier for inexistent plane",
.cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_NV12,
.handles = { 1, 1, 0 }, .flags = DRM_MODE_FB_MODIFIERS,
.modifier = { DRM_FORMAT_MOD_SAMSUNG_64_32_TILE, DRM_FORMAT_MOD_SAMSUNG_64_32_TILE,
DRM_FORMAT_MOD_SAMSUNG_64_32_TILE },
.pitches = { MAX_WIDTH, MAX_WIDTH, 0 },
}
},
{ .buffer_created = 0, .name = "NV12 Handle for inexistent plane",
.cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_NV12,
.handles = { 1, 1, 1 }, .flags = DRM_MODE_FB_MODIFIERS,
.pitches = { MAX_WIDTH, MAX_WIDTH, 0 },
}
},
{ .buffer_created = 1, .name = "NV12 Handle for inexistent plane without DRM_MODE_FB_MODIFIERS",
.cmd = { .width = 600, .height = 600, .pixel_format = DRM_FORMAT_NV12,
.handles = { 1, 1, 1 }, .pitches = { 600, 600, 600 },
}
},
{ .buffer_created = 1, .name = "YVU420 DRM_MODE_FB_MODIFIERS set without modifier",
.cmd = { .width = 600, .height = 600, .pixel_format = DRM_FORMAT_YVU420,
.handles = { 1, 1, 1 }, .flags = DRM_MODE_FB_MODIFIERS,
.pitches = { 600, 300, 300 },
}
},
{ .buffer_created = 1, .name = "YVU420 Normal sizes",
.cmd = { .width = 600, .height = 600, .pixel_format = DRM_FORMAT_YVU420,
.handles = { 1, 1, 1 }, .pitches = { 600, 300, 300 },
}
},
{ .buffer_created = 1, .name = "YVU420 Max sizes",
.cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_YVU420,
.handles = { 1, 1, 1 }, .pitches = { MAX_WIDTH, DIV_ROUND_UP(MAX_WIDTH, 2),
DIV_ROUND_UP(MAX_WIDTH, 2) },
}
},
{ .buffer_created = 0, .name = "YVU420 Invalid pitch",
.cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_YVU420,
.handles = { 1, 1, 1 }, .pitches = { MAX_WIDTH, DIV_ROUND_UP(MAX_WIDTH, 2) - 1,
DIV_ROUND_UP(MAX_WIDTH, 2) },
}
},
{ .buffer_created = 1, .name = "YVU420 Different pitches",
.cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_YVU420,
.handles = { 1, 1, 1 }, .pitches = { MAX_WIDTH, DIV_ROUND_UP(MAX_WIDTH, 2) + 1,
DIV_ROUND_UP(MAX_WIDTH, 2) + 7 },
}
},
{ .buffer_created = 1, .name = "YVU420 Different buffer offsets/pitches",
.cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_YVU420,
.handles = { 1, 1, 1 }, .offsets = { MAX_WIDTH, MAX_WIDTH +
MAX_WIDTH * MAX_HEIGHT, MAX_WIDTH + 2 * MAX_WIDTH * MAX_HEIGHT },
.pitches = { MAX_WIDTH, DIV_ROUND_UP(MAX_WIDTH, 2) + 1,
DIV_ROUND_UP(MAX_WIDTH, 2) + 7 },
}
},
{ .buffer_created = 0,
.name = "YVU420 Modifier set just for plane 0, without DRM_MODE_FB_MODIFIERS",
.cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_YVU420,
.handles = { 1, 1, 1 }, .modifier = { AFBC_FORMAT_MOD_SPARSE, 0, 0 },
.pitches = { MAX_WIDTH, DIV_ROUND_UP(MAX_WIDTH, 2), DIV_ROUND_UP(MAX_WIDTH, 2) },
}
},
{ .buffer_created = 0,
.name = "YVU420 Modifier set just for planes 0, 1, without DRM_MODE_FB_MODIFIERS",
.cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_YVU420,
.handles = { 1, 1, 1 },
.modifier = { AFBC_FORMAT_MOD_SPARSE, AFBC_FORMAT_MOD_SPARSE, 0 },
.pitches = { MAX_WIDTH, DIV_ROUND_UP(MAX_WIDTH, 2), DIV_ROUND_UP(MAX_WIDTH, 2) },
}
},
{ .buffer_created = 0,
.name = "YVU420 Modifier set just for plane 0, 1, with DRM_MODE_FB_MODIFIERS",
.cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_YVU420,
.handles = { 1, 1, 1 }, .flags = DRM_MODE_FB_MODIFIERS,
.modifier = { AFBC_FORMAT_MOD_SPARSE, AFBC_FORMAT_MOD_SPARSE, 0 },
.pitches = { MAX_WIDTH, DIV_ROUND_UP(MAX_WIDTH, 2), DIV_ROUND_UP(MAX_WIDTH, 2) },
}
},
{ .buffer_created = 1, .name = "YVU420 Valid modifier",
.cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_YVU420,
.handles = { 1, 1, 1 }, .flags = DRM_MODE_FB_MODIFIERS,
.modifier = { AFBC_FORMAT_MOD_SPARSE, AFBC_FORMAT_MOD_SPARSE,
AFBC_FORMAT_MOD_SPARSE },
.pitches = { MAX_WIDTH, DIV_ROUND_UP(MAX_WIDTH, 2), DIV_ROUND_UP(MAX_WIDTH, 2) },
}
},
{ .buffer_created = 0, .name = "YVU420 Different modifiers per plane",
.cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_YVU420,
.handles = { 1, 1, 1 }, .flags = DRM_MODE_FB_MODIFIERS,
.modifier = { AFBC_FORMAT_MOD_SPARSE, AFBC_FORMAT_MOD_SPARSE | AFBC_FORMAT_MOD_YTR,
AFBC_FORMAT_MOD_SPARSE },
.pitches = { MAX_WIDTH, DIV_ROUND_UP(MAX_WIDTH, 2), DIV_ROUND_UP(MAX_WIDTH, 2) },
}
},
{ .buffer_created = 0, .name = "YVU420 Modifier for inexistent plane",
.cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_YVU420,
.handles = { 1, 1, 1 }, .flags = DRM_MODE_FB_MODIFIERS,
.modifier = { AFBC_FORMAT_MOD_SPARSE, AFBC_FORMAT_MOD_SPARSE,
AFBC_FORMAT_MOD_SPARSE, AFBC_FORMAT_MOD_SPARSE },
.pitches = { MAX_WIDTH, DIV_ROUND_UP(MAX_WIDTH, 2), DIV_ROUND_UP(MAX_WIDTH, 2) },
}
},
{ .buffer_created = 1, .name = "X0L2 Normal sizes",
.cmd = { .width = 600, .height = 600, .pixel_format = DRM_FORMAT_X0L2,
.handles = { 1, 0, 0 }, .pitches = { 1200, 0, 0 }
}
},
{ .buffer_created = 1, .name = "X0L2 Max sizes",
.cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_X0L2,
.handles = { 1, 0, 0 }, .pitches = { 2 * MAX_WIDTH, 0, 0 }
}
},
{ .buffer_created = 0, .name = "X0L2 Invalid pitch",
.cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_X0L2,
.handles = { 1, 0, 0 }, .pitches = { 2 * MAX_WIDTH - 1, 0, 0 }
}
},
{ .buffer_created = 1, .name = "X0L2 Pitch greater than minimum required",
.cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_X0L2,
.handles = { 1, 0, 0 }, .pitches = { 2 * MAX_WIDTH + 1, 0, 0 }
}
},
{ .buffer_created = 0, .name = "X0L2 Handle for inexistent plane",
.cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_X0L2,
.handles = { 1, 1, 0 }, .flags = DRM_MODE_FB_MODIFIERS,
.pitches = { 2 * MAX_WIDTH + 1, 0, 0 }
}
},
{ .buffer_created = 1,
.name = "X0L2 Offset for inexistent plane, without DRM_MODE_FB_MODIFIERS set",
.cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_X0L2,
.handles = { 1, 0, 0 }, .offsets = { 0, 0, 3 },
.pitches = { 2 * MAX_WIDTH + 1, 0, 0 }
}
},
{ .buffer_created = 0, .name = "X0L2 Modifier without DRM_MODE_FB_MODIFIERS set",
.cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_X0L2,
.handles = { 1, 0, 0 }, .pitches = { 2 * MAX_WIDTH + 1, 0, 0 },
.modifier = { AFBC_FORMAT_MOD_SPARSE, 0, 0 },
}
},
{ .buffer_created = 1, .name = "X0L2 Valid modifier",
.cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_X0L2,
.handles = { 1, 0, 0 }, .pitches = { 2 * MAX_WIDTH + 1, 0, 0 },
.modifier = { AFBC_FORMAT_MOD_SPARSE, 0, 0 }, .flags = DRM_MODE_FB_MODIFIERS,
}
},
{ .buffer_created = 0, .name = "X0L2 Modifier for inexistent plane",
.cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT,
.pixel_format = DRM_FORMAT_X0L2, .handles = { 1, 0, 0 },
.pitches = { 2 * MAX_WIDTH + 1, 0, 0 },
.modifier = { AFBC_FORMAT_MOD_SPARSE, AFBC_FORMAT_MOD_SPARSE, 0 },
.flags = DRM_MODE_FB_MODIFIERS,
}
},
};
/*
 * Mock &drm_mode_config_funcs.fb_create callback.
 *
 * Records that framebuffer creation reached the driver by setting the
 * flag stashed in dev->dev_private, then deliberately returns an error
 * so no real framebuffer object has to be allocated or cleaned up.
 */
static struct drm_framebuffer *fb_create_mock(struct drm_device *dev,
					      struct drm_file *file_priv,
					      const struct drm_mode_fb_cmd2 *mode_cmd)
{
	int *buffer_created = dev->dev_private;
	*buffer_created = 1;
	return ERR_PTR(-EINVAL);
}
static struct drm_mode_config_funcs mock_config_funcs = {
.fb_create = fb_create_mock,
};
/*
 * Allocate a bare mock &drm_device with just enough mode_config state
 * (size limits and the mock vtable) for drm_internal_framebuffer_create()
 * to run, and stash it in test->priv.
 */
static int drm_framebuffer_test_init(struct kunit *test)
{
	struct drm_device *dev;

	dev = kunit_kzalloc(test, sizeof(*dev), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);

	dev->mode_config.funcs = &mock_config_funcs;
	dev->mode_config.min_width = MIN_WIDTH;
	dev->mode_config.min_height = MIN_HEIGHT;
	dev->mode_config.max_width = MAX_WIDTH;
	dev->mode_config.max_height = MAX_HEIGHT;

	test->priv = dev;

	return 0;
}
/*
 * Feed one parameterized drm_mode_fb_cmd2 into the core framebuffer
 * creation path and check whether the driver fb_create hook was
 * reached, matching the expectation recorded in the case table.
 *
 * Fixes the mojibake "&para;ms->cmd" (an HTML-entity corruption of
 * "&params->cmd") which did not compile.
 */
static void drm_test_framebuffer_create(struct kunit *test)
{
	const struct drm_framebuffer_test *params = test->param_value;
	struct drm_device *mock = test->priv;
	int buffer_created = 0;

	/* The mock fb_create callback reports back through dev_private. */
	mock->dev_private = &buffer_created;
	drm_internal_framebuffer_create(mock, &params->cmd, NULL);
	KUNIT_EXPECT_EQ(test, params->buffer_created, buffer_created);
}
static void drm_framebuffer_test_to_desc(const struct drm_framebuffer_test *t, char *desc)
{
strcpy(desc, t->name);
}
KUNIT_ARRAY_PARAM(drm_framebuffer_create, drm_framebuffer_create_cases,
drm_framebuffer_test_to_desc);
/* One parameterized case covering the whole drm_framebuffer_create_cases table. */
static struct kunit_case drm_framebuffer_tests[] = {
	KUNIT_CASE_PARAM(drm_test_framebuffer_create, drm_framebuffer_create_gen_params),
	{ }	/* sentinel */
};

/* Suite registration; .init builds the mock device before each case. */
static struct kunit_suite drm_framebuffer_test_suite = {
	.name = "drm_framebuffer",
	.init = drm_framebuffer_test_init,
	.test_cases = drm_framebuffer_tests,
};
kunit_test_suite(drm_framebuffer_test_suite);
MODULE_LICENSE("GPL");
| linux-master | drivers/gpu/drm/tests/drm_framebuffer_test.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Test cases for the DRM DP MST helpers
*
* Copyright (c) 2022 Maíra Canal <[email protected]>
*/
#include <kunit/test.h>
#include <drm/display/drm_dp_mst_helper.h>
#include <drm/drm_print.h>
#include "../display/drm_dp_mst_topology_internal.h"
/*
 * One PBN calculation vector: pixel clock, bits per pixel, whether DSC
 * is enabled, and the PBN value drm_dp_calc_pbn_mode() must return.
 */
struct drm_dp_mst_calc_pbn_mode_test {
	const int clock;
	const int bpp;
	const bool dsc;
	const int expected;
};
static const struct drm_dp_mst_calc_pbn_mode_test drm_dp_mst_calc_pbn_mode_cases[] = {
{
.clock = 154000,
.bpp = 30,
.dsc = false,
.expected = 689
},
{
.clock = 234000,
.bpp = 30,
.dsc = false,
.expected = 1047
},
{
.clock = 297000,
.bpp = 24,
.dsc = false,
.expected = 1063
},
{
.clock = 332880,
.bpp = 24,
.dsc = true,
.expected = 50
},
{
.clock = 324540,
.bpp = 24,
.dsc = true,
.expected = 49
},
};
/*
 * Check drm_dp_calc_pbn_mode() against pre-computed PBN values for a
 * set of representative clock/bpp/DSC combinations.
 */
static void drm_test_dp_mst_calc_pbn_mode(struct kunit *test)
{
	const struct drm_dp_mst_calc_pbn_mode_test *params = test->param_value;

	KUNIT_EXPECT_EQ(test, drm_dp_calc_pbn_mode(params->clock, params->bpp, params->dsc),
			params->expected);
}
static void dp_mst_calc_pbn_mode_desc(const struct drm_dp_mst_calc_pbn_mode_test *t, char *desc)
{
sprintf(desc, "Clock %d BPP %d DSC %s", t->clock, t->bpp, t->dsc ? "enabled" : "disabled");
}
KUNIT_ARRAY_PARAM(drm_dp_mst_calc_pbn_mode, drm_dp_mst_calc_pbn_mode_cases,
dp_mst_calc_pbn_mode_desc);
static u8 data[] = { 0xff, 0x00, 0xdd };
struct drm_dp_mst_sideband_msg_req_test {
const char *desc;
const struct drm_dp_sideband_msg_req_body in;
};
static const struct drm_dp_mst_sideband_msg_req_test drm_dp_mst_sideband_msg_req_cases[] = {
{
.desc = "DP_ENUM_PATH_RESOURCES with port number",
.in = {
.req_type = DP_ENUM_PATH_RESOURCES,
.u.port_num.port_number = 5,
},
},
{
.desc = "DP_POWER_UP_PHY with port number",
.in = {
.req_type = DP_POWER_UP_PHY,
.u.port_num.port_number = 5,
},
},
{
.desc = "DP_POWER_DOWN_PHY with port number",
.in = {
.req_type = DP_POWER_DOWN_PHY,
.u.port_num.port_number = 5,
},
},
{
.desc = "DP_ALLOCATE_PAYLOAD with SDP stream sinks",
.in = {
.req_type = DP_ALLOCATE_PAYLOAD,
.u.allocate_payload.number_sdp_streams = 3,
.u.allocate_payload.sdp_stream_sink = { 1, 2, 3 },
},
},
{
.desc = "DP_ALLOCATE_PAYLOAD with port number",
.in = {
.req_type = DP_ALLOCATE_PAYLOAD,
.u.allocate_payload.port_number = 0xf,
},
},
{
.desc = "DP_ALLOCATE_PAYLOAD with VCPI",
.in = {
.req_type = DP_ALLOCATE_PAYLOAD,
.u.allocate_payload.vcpi = 0x7f,
},
},
{
.desc = "DP_ALLOCATE_PAYLOAD with PBN",
.in = {
.req_type = DP_ALLOCATE_PAYLOAD,
.u.allocate_payload.pbn = U16_MAX,
},
},
{
.desc = "DP_QUERY_PAYLOAD with port number",
.in = {
.req_type = DP_QUERY_PAYLOAD,
.u.query_payload.port_number = 0xf,
},
},
{
.desc = "DP_QUERY_PAYLOAD with VCPI",
.in = {
.req_type = DP_QUERY_PAYLOAD,
.u.query_payload.vcpi = 0x7f,
},
},
{
.desc = "DP_REMOTE_DPCD_READ with port number",
.in = {
.req_type = DP_REMOTE_DPCD_READ,
.u.dpcd_read.port_number = 0xf,
},
},
{
.desc = "DP_REMOTE_DPCD_READ with DPCD address",
.in = {
.req_type = DP_REMOTE_DPCD_READ,
.u.dpcd_read.dpcd_address = 0xfedcb,
},
},
{
.desc = "DP_REMOTE_DPCD_READ with max number of bytes",
.in = {
.req_type = DP_REMOTE_DPCD_READ,
.u.dpcd_read.num_bytes = U8_MAX,
},
},
{
.desc = "DP_REMOTE_DPCD_WRITE with port number",
.in = {
.req_type = DP_REMOTE_DPCD_WRITE,
.u.dpcd_write.port_number = 0xf,
},
},
{
.desc = "DP_REMOTE_DPCD_WRITE with DPCD address",
.in = {
.req_type = DP_REMOTE_DPCD_WRITE,
.u.dpcd_write.dpcd_address = 0xfedcb,
},
},
{
.desc = "DP_REMOTE_DPCD_WRITE with data array",
.in = {
.req_type = DP_REMOTE_DPCD_WRITE,
.u.dpcd_write.num_bytes = ARRAY_SIZE(data),
.u.dpcd_write.bytes = data,
},
},
{
.desc = "DP_REMOTE_I2C_READ with port number",
.in = {
.req_type = DP_REMOTE_I2C_READ,
.u.i2c_read.port_number = 0xf,
},
},
{
.desc = "DP_REMOTE_I2C_READ with I2C device ID",
.in = {
.req_type = DP_REMOTE_I2C_READ,
.u.i2c_read.read_i2c_device_id = 0x7f,
},
},
{
.desc = "DP_REMOTE_I2C_READ with transactions array",
.in = {
.req_type = DP_REMOTE_I2C_READ,
.u.i2c_read.num_transactions = 3,
.u.i2c_read.num_bytes_read = ARRAY_SIZE(data) * 3,
.u.i2c_read.transactions = {
{ .bytes = data, .num_bytes = ARRAY_SIZE(data), .i2c_dev_id = 0x7f,
.i2c_transaction_delay = 0xf, },
{ .bytes = data, .num_bytes = ARRAY_SIZE(data), .i2c_dev_id = 0x7e,
.i2c_transaction_delay = 0xe, },
{ .bytes = data, .num_bytes = ARRAY_SIZE(data), .i2c_dev_id = 0x7d,
.i2c_transaction_delay = 0xd, },
},
},
},
{
.desc = "DP_REMOTE_I2C_WRITE with port number",
.in = {
.req_type = DP_REMOTE_I2C_WRITE,
.u.i2c_write.port_number = 0xf,
},
},
{
.desc = "DP_REMOTE_I2C_WRITE with I2C device ID",
.in = {
.req_type = DP_REMOTE_I2C_WRITE,
.u.i2c_write.write_i2c_device_id = 0x7f,
},
},
{
.desc = "DP_REMOTE_I2C_WRITE with data array",
.in = {
.req_type = DP_REMOTE_I2C_WRITE,
.u.i2c_write.num_bytes = ARRAY_SIZE(data),
.u.i2c_write.bytes = data,
},
},
{
.desc = "DP_QUERY_STREAM_ENC_STATUS with stream ID",
.in = {
.req_type = DP_QUERY_STREAM_ENC_STATUS,
.u.enc_status.stream_id = 1,
},
},
{
.desc = "DP_QUERY_STREAM_ENC_STATUS with client ID",
.in = {
.req_type = DP_QUERY_STREAM_ENC_STATUS,
.u.enc_status.client_id = { 0x4f, 0x7f, 0xb4, 0x00, 0x8c, 0x0d, 0x67 },
},
},
{
.desc = "DP_QUERY_STREAM_ENC_STATUS with stream event",
.in = {
.req_type = DP_QUERY_STREAM_ENC_STATUS,
.u.enc_status.stream_event = 3,
},
},
{
.desc = "DP_QUERY_STREAM_ENC_STATUS with valid stream event",
.in = {
.req_type = DP_QUERY_STREAM_ENC_STATUS,
.u.enc_status.valid_stream_event = 0,
},
},
{
.desc = "DP_QUERY_STREAM_ENC_STATUS with stream behavior",
.in = {
.req_type = DP_QUERY_STREAM_ENC_STATUS,
.u.enc_status.stream_behavior = 3,
},
},
{
.desc = "DP_QUERY_STREAM_ENC_STATUS with a valid stream behavior",
.in = {
.req_type = DP_QUERY_STREAM_ENC_STATUS,
.u.enc_status.valid_stream_behavior = 1,
}
},
};
/*
 * Deep comparison of two sideband message request bodies.
 *
 * Returns true iff @in and @out describe the same request.  Request
 * types whose payload embeds pointers (I2C read/write transactions,
 * DPCD write byte arrays) are compared member by member and by
 * following those pointers; everything else is compared wholesale with
 * memcmp().
 */
static bool
sideband_msg_req_equal(const struct drm_dp_sideband_msg_req_body *in,
		       const struct drm_dp_sideband_msg_req_body *out)
{
	const struct drm_dp_remote_i2c_read_tx *txin, *txout;
	int i;

	if (in->req_type != out->req_type)
		return false;

	switch (in->req_type) {
	/*
	 * Compare struct members manually for request types which can't be
	 * compared simply using memcmp(). This is because said request types
	 * contain pointers to other allocated structs
	 */
	case DP_REMOTE_I2C_READ:
#define IN in->u.i2c_read
#define OUT out->u.i2c_read
		if (IN.num_bytes_read != OUT.num_bytes_read ||
		    IN.num_transactions != OUT.num_transactions ||
		    IN.port_number != OUT.port_number ||
		    IN.read_i2c_device_id != OUT.read_i2c_device_id)
			return false;

		/* Each transaction carries its own byte array; compare both. */
		for (i = 0; i < IN.num_transactions; i++) {
			txin = &IN.transactions[i];
			txout = &OUT.transactions[i];

			if (txin->i2c_dev_id != txout->i2c_dev_id ||
			    txin->no_stop_bit != txout->no_stop_bit ||
			    txin->num_bytes != txout->num_bytes ||
			    txin->i2c_transaction_delay !=
			    txout->i2c_transaction_delay)
				return false;

			if (memcmp(txin->bytes, txout->bytes,
				   txin->num_bytes) != 0)
				return false;
		}
		break;
#undef IN
#undef OUT
	case DP_REMOTE_DPCD_WRITE:
#define IN in->u.dpcd_write
#define OUT out->u.dpcd_write
		if (IN.dpcd_address != OUT.dpcd_address ||
		    IN.num_bytes != OUT.num_bytes ||
		    IN.port_number != OUT.port_number)
			return false;

		return memcmp(IN.bytes, OUT.bytes, IN.num_bytes) == 0;
#undef IN
#undef OUT
	case DP_REMOTE_I2C_WRITE:
#define IN in->u.i2c_write
#define OUT out->u.i2c_write
		if (IN.port_number != OUT.port_number ||
		    IN.write_i2c_device_id != OUT.write_i2c_device_id ||
		    IN.num_bytes != OUT.num_bytes)
			return false;

		return memcmp(IN.bytes, OUT.bytes, IN.num_bytes) == 0;
#undef IN
#undef OUT
	default:
		/* Pointer-free payloads: raw comparison is sufficient. */
		return memcmp(in, out, sizeof(*in)) == 0;
	}

	return true;
}
/* drm_printer callback that routes message dumps into the KUnit error log. */
static void drm_test_dp_mst_msg_printf(struct drm_printer *p, struct va_format *vaf)
{
	struct kunit *test = p->arg;

	kunit_err(test, "%pV", vaf);
}
static void drm_test_dp_mst_sideband_msg_req_decode(struct kunit *test)
{
const struct drm_dp_mst_sideband_msg_req_test *params = test->param_value;
const struct drm_dp_sideband_msg_req_body *in = ¶ms->in;
struct drm_dp_sideband_msg_req_body *out;
struct drm_dp_sideband_msg_tx *txmsg;
struct drm_printer p = {
.printfn = drm_test_dp_mst_msg_printf,
.arg = test
};
int i;
out = kunit_kzalloc(test, sizeof(*out), GFP_KERNEL);
KUNIT_ASSERT_NOT_NULL(test, out);
txmsg = kunit_kzalloc(test, sizeof(*txmsg), GFP_KERNEL);
KUNIT_ASSERT_NOT_NULL(test, txmsg);
drm_dp_encode_sideband_req(in, txmsg);
KUNIT_EXPECT_GE_MSG(test, drm_dp_decode_sideband_req(txmsg, out), 0,
"Failed to decode sideband request");
if (!sideband_msg_req_equal(in, out)) {
KUNIT_FAIL(test, "Encode/decode failed");
kunit_err(test, "Expected:");
drm_dp_dump_sideband_msg_req_body(in, 1, &p);
kunit_err(test, "Got:");
drm_dp_dump_sideband_msg_req_body(out, 1, &p);
}
switch (in->req_type) {
case DP_REMOTE_DPCD_WRITE:
kfree(out->u.dpcd_write.bytes);
break;
case DP_REMOTE_I2C_READ:
for (i = 0; i < out->u.i2c_read.num_transactions; i++)
kfree(out->u.i2c_read.transactions[i].bytes);
break;
case DP_REMOTE_I2C_WRITE:
kfree(out->u.i2c_write.bytes);
break;
}
}
static void
drm_dp_mst_sideband_msg_req_desc(const struct drm_dp_mst_sideband_msg_req_test *t, char *desc)
{
strcpy(desc, t->desc);
}
KUNIT_ARRAY_PARAM(drm_dp_mst_sideband_msg_req, drm_dp_mst_sideband_msg_req_cases,
drm_dp_mst_sideband_msg_req_desc);
/* PBN-calculation cases plus sideband encode/decode round-trip cases. */
static struct kunit_case drm_dp_mst_helper_tests[] = {
	KUNIT_CASE_PARAM(drm_test_dp_mst_calc_pbn_mode, drm_dp_mst_calc_pbn_mode_gen_params),
	KUNIT_CASE_PARAM(drm_test_dp_mst_sideband_msg_req_decode,
			 drm_dp_mst_sideband_msg_req_gen_params),
	{ }	/* sentinel */
};

/* Suite registration for the DP MST helper tests. */
static struct kunit_suite drm_dp_mst_helper_test_suite = {
	.name = "drm_dp_mst_helper",
	.test_cases = drm_dp_mst_helper_tests,
};
kunit_test_suite(drm_dp_mst_helper_test_suite);
MODULE_LICENSE("GPL");
| linux-master | drivers/gpu/drm/tests/drm_dp_mst_helper_test.c |
// SPDX-License-Identifier: MIT
/*
* Copyright © 2019 Intel Corporation
* Copyright © 2022 Maíra Canal <[email protected]>
*/
#include <kunit/test.h>
#include <linux/prime_numbers.h>
#include <linux/sched/signal.h>
#include <drm/drm_buddy.h>
#include "../lib/drm_random.h"
#define TIMEOUT(name__) \
unsigned long name__ = jiffies + MAX_SCHEDULE_TIMEOUT
static unsigned int random_seed;
/*
 * Size in bytes of an allocation of @order buddy chunks.
 *
 * Shift the 64-bit @chunk_size directly instead of computing
 * "(1 << order) * chunk_size": the old form shifted the 32-bit int
 * literal 1, which overflows (UB) for order >= 31 before the widening
 * multiply.  Identical result for all in-range orders.
 */
static inline u64 get_size(int order, u64 chunk_size)
{
	return chunk_size << order;
}
/*
 * Check whether @timeout (a jiffies deadline) has expired or a signal
 * is pending; prints the printf-style message when it has.  Calls
 * cond_resched() on the fast path so long-running test loops stay
 * preemptible.  Returns true when the caller should bail out.
 */
__printf(2, 3)
static bool __timeout(unsigned long timeout, const char *fmt, ...)
{
	va_list va;

	if (!signal_pending(current)) {
		cond_resched();
		if (time_before(jiffies, timeout))
			return false;
	}

	if (fmt) {
		va_start(va, fmt);
		vprintk(fmt, va);
		va_end(va);
	}

	return true;
}
/*
 * Log one block's raw header plus its derived state, order, offset and
 * size; @buddy marks whether this entry is the buddy of the block the
 * caller is really complaining about.
 */
static void __dump_block(struct kunit *test, struct drm_buddy *mm,
			 struct drm_buddy_block *block, bool buddy)
{
	kunit_err(test, "block info: header=%llx, state=%u, order=%d, offset=%llx size=%llx root=%d buddy=%d\n",
		  block->header, drm_buddy_block_state(block),
		  drm_buddy_block_order(block), drm_buddy_block_offset(block),
		  drm_buddy_block_size(mm, block), !block->parent, buddy);
}
/* Log @block and, when it has one, its buddy right after it. */
static void dump_block(struct kunit *test, struct drm_buddy *mm,
		       struct drm_buddy_block *block)
{
	struct drm_buddy_block *sibling;

	__dump_block(test, mm, block, false);

	sibling = drm_get_buddy(block);
	if (sibling)
		__dump_block(test, mm, sibling, true);
}
/*
 * Validate a single buddy block's invariants: legal state
 * (allocated/free/split), power-of-two size no smaller than the chunk
 * size, chunk- and size-aligned offset, and a consistent buddy (mirror
 * offset, equal size, and not free at the same time as the block).
 * Logs each violation and returns 0 on success or -EINVAL.
 */
static int check_block(struct kunit *test, struct drm_buddy *mm,
		       struct drm_buddy_block *block)
{
	struct drm_buddy_block *buddy;
	unsigned int block_state;
	u64 block_size;
	u64 offset;
	int err = 0;

	block_state = drm_buddy_block_state(block);

	if (block_state != DRM_BUDDY_ALLOCATED &&
	    block_state != DRM_BUDDY_FREE && block_state != DRM_BUDDY_SPLIT) {
		kunit_err(test, "block state mismatch\n");
		err = -EINVAL;
	}

	block_size = drm_buddy_block_size(mm, block);
	offset = drm_buddy_block_offset(block);

	if (block_size < mm->chunk_size) {
		kunit_err(test, "block size smaller than min size\n");
		err = -EINVAL;
	}

	/* We can't use is_power_of_2() for a u64 on 32-bit systems. */
	if (block_size & (block_size - 1)) {
		kunit_err(test, "block size not power of two\n");
		err = -EINVAL;
	}

	if (!IS_ALIGNED(block_size, mm->chunk_size)) {
		kunit_err(test, "block size not aligned to min size\n");
		err = -EINVAL;
	}

	if (!IS_ALIGNED(offset, mm->chunk_size)) {
		kunit_err(test, "block offset not aligned to min size\n");
		err = -EINVAL;
	}

	if (!IS_ALIGNED(offset, block_size)) {
		kunit_err(test, "block offset not aligned to block size\n");
		err = -EINVAL;
	}

	/* Only root blocks legitimately have no buddy. */
	buddy = drm_get_buddy(block);

	if (!buddy && block->parent) {
		kunit_err(test, "buddy has gone fishing\n");
		err = -EINVAL;
	}

	if (buddy) {
		/* A buddy's offset is the block's offset with the size bit flipped. */
		if (drm_buddy_block_offset(buddy) != (offset ^ block_size)) {
			kunit_err(test, "buddy has wrong offset\n");
			err = -EINVAL;
		}

		if (drm_buddy_block_size(mm, buddy) != block_size) {
			kunit_err(test, "buddy size mismatch\n");
			err = -EINVAL;
		}

		/* Two free buddies should have been merged into their parent. */
		if (drm_buddy_block_state(buddy) == block_state &&
		    block_state == DRM_BUDDY_FREE) {
			kunit_err(test, "block and its buddy are free\n");
			err = -EINVAL;
		}
	}

	return err;
}
/*
 * Validate every block on @blocks: each must pass check_block() and be
 * in the allocated state; when @is_contiguous, consecutive list entries
 * must abut in offset order.  The summed block sizes must equal
 * @expected_size.  On failure the offending block (and its predecessor,
 * if any) is dumped.  Returns 0 on success or -EINVAL.
 */
static int check_blocks(struct kunit *test, struct drm_buddy *mm,
			struct list_head *blocks, u64 expected_size, bool is_contiguous)
{
	struct drm_buddy_block *block;
	struct drm_buddy_block *prev;
	u64 total;
	int err = 0;

	block = NULL;
	prev = NULL;
	total = 0;

	list_for_each_entry(block, blocks, link) {
		err = check_block(test, mm, block);

		if (!drm_buddy_block_is_allocated(block)) {
			kunit_err(test, "block not allocated\n");
			err = -EINVAL;
		}

		if (is_contiguous && prev) {
			u64 prev_block_size;
			u64 prev_offset;
			u64 offset;

			prev_offset = drm_buddy_block_offset(prev);
			prev_block_size = drm_buddy_block_size(mm, prev);
			offset = drm_buddy_block_offset(block);

			/* Contiguity: this block must start where the previous ended. */
			if (offset != (prev_offset + prev_block_size)) {
				kunit_err(test, "block offset mismatch\n");
				err = -EINVAL;
			}
		}

		if (err)
			break;

		total += drm_buddy_block_size(mm, block);
		prev = block;
	}

	if (!err) {
		if (total != expected_size) {
			kunit_err(test, "size mismatch, expected=%llx, found=%llx\n",
				  expected_size, total);
			err = -EINVAL;
		}
		return err;
	}

	if (prev) {
		kunit_err(test, "prev block, dump:\n");
		dump_block(test, mm, prev);
	}

	kunit_err(test, "bad block, dump:\n");
	dump_block(test, mm, block);

	return err;
}
/*
 * Validate a buddy allocator in its pristine (fully free) state: one
 * root per set bit of the total size, roots laid out contiguously and
 * all free, the first root carrying max_order, and each root sitting at
 * the head of its order's free list.  Root sizes must sum to mm->size.
 * Returns 0 on success or -EINVAL after dumping the offending root(s).
 */
static int check_mm(struct kunit *test, struct drm_buddy *mm)
{
	struct drm_buddy_block *root;
	struct drm_buddy_block *prev;
	unsigned int i;
	u64 total;
	int err = 0;

	if (!mm->n_roots) {
		kunit_err(test, "n_roots is zero\n");
		return -EINVAL;
	}

	/* Non-power-of-two sizes decompose into one root per set bit. */
	if (mm->n_roots != hweight64(mm->size)) {
		kunit_err(test, "n_roots mismatch, n_roots=%u, expected=%lu\n",
			  mm->n_roots, hweight64(mm->size));
		return -EINVAL;
	}

	root = NULL;
	prev = NULL;
	total = 0;

	for (i = 0; i < mm->n_roots; ++i) {
		struct drm_buddy_block *block;
		unsigned int order;

		root = mm->roots[i];
		if (!root) {
			kunit_err(test, "root(%u) is NULL\n", i);
			err = -EINVAL;
			break;
		}

		err = check_block(test, mm, root);

		if (!drm_buddy_block_is_free(root)) {
			kunit_err(test, "root not free\n");
			err = -EINVAL;
		}

		order = drm_buddy_block_order(root);

		/* The first (largest) root must be of the maximum order. */
		if (!i) {
			if (order != mm->max_order) {
				kunit_err(test, "max order root missing\n");
				err = -EINVAL;
			}
		}

		if (prev) {
			u64 prev_block_size;
			u64 prev_offset;
			u64 offset;

			prev_offset = drm_buddy_block_offset(prev);
			prev_block_size = drm_buddy_block_size(mm, prev);
			offset = drm_buddy_block_offset(root);

			/* Roots must tile the address space without gaps. */
			if (offset != (prev_offset + prev_block_size)) {
				kunit_err(test, "root offset mismatch\n");
				err = -EINVAL;
			}
		}

		block = list_first_entry_or_null(&mm->free_list[order],
						 struct drm_buddy_block, link);
		if (block != root) {
			kunit_err(test, "root mismatch at order=%u\n", order);
			err = -EINVAL;
		}

		if (err)
			break;

		prev = root;
		total += drm_buddy_block_size(mm, root);
	}

	if (!err) {
		if (total != mm->size) {
			kunit_err(test, "expected mm size=%llx, found=%llx\n",
				  mm->size, total);
			err = -EINVAL;
		}
		return err;
	}

	if (prev) {
		kunit_err(test, "prev root(%u), dump:\n", i - 1);
		dump_block(test, mm, prev);
	}

	if (root) {
		kunit_err(test, "bad root(%u), dump:\n", i);
		dump_block(test, mm, root);
	}

	return err;
}
/*
 * mm_config - pick a randomized (size, chunk_size) pair for a buddy mm.
 *
 * Chooses a random page count of up to 8 GiB worth of 4K pages, a random
 * power-of-two chunk size no larger than that count, and rounds the total
 * down to a whole number of chunks.  Both results are reported in bytes.
 */
static void mm_config(u64 *size, u64 *chunk_size)
{
	DRM_RND_STATE(prng, random_seed);
	u32 pages, chunk_pages;
	/* Nothing fancy, just try to get an interesting bit pattern */
	prandom_seed_state(&prng, random_seed);
	/* Random page count in [1, 2^21 - 1], i.e. up to 8 GB of 2M pages */
	pages = 1 + drm_prandom_u32_max_state((BIT(33 - 12)) - 1, &prng);
	/* Random power-of-two chunk, strictly no larger than the page count */
	chunk_pages = BIT(drm_prandom_u32_max_state(ilog2(pages), &prng));
	/* Align the total down to a whole number of chunks */
	pages &= -chunk_pages;
	/* Convert both quantities from 4K pages to bytes */
	*size = (u64)pages << 12;
	*chunk_size = (u64)chunk_pages << 12;
}
/*
 * Worst-case fragmentation test: repeatedly carve the mm into one block of
 * each order, keeping one page per pass in @holes, until the allocator is
 * 50% fragmented, then verify nothing larger than a chunk can be allocated.
 */
static void drm_test_buddy_alloc_pathological(struct kunit *test)
{
	u64 mm_size, size, start = 0;
	struct drm_buddy_block *block;
	const int max_order = 3;
	unsigned long flags = 0;
	int order, top;
	struct drm_buddy mm;
	LIST_HEAD(blocks);
	LIST_HEAD(holes);
	LIST_HEAD(tmp);
	/*
	 * Create a pot-sized mm, then allocate one of each possible
	 * order within. This should leave the mm with exactly one
	 * page left. Free the largest block, then whittle down again.
	 * Eventually we will have a fully 50% fragmented mm.
	 */
	mm_size = PAGE_SIZE << max_order;
	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, PAGE_SIZE),
			       "buddy_init failed\n");
	KUNIT_EXPECT_EQ(test, mm.max_order, max_order);
	for (top = max_order; top; top--) {
		/* Make room by freeing the largest allocated block */
		block = list_first_entry_or_null(&blocks, typeof(*block), link);
		if (block) {
			list_del(&block->link);
			drm_buddy_free_block(&mm, block);
		}
		/* One allocation of each order below @top */
		for (order = top; order--;) {
			size = get_size(order, PAGE_SIZE);
			KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start,
									    mm_size, size, size,
									    &tmp, flags),
					       "buddy_alloc hit -ENOMEM with order=%d, top=%d\n",
					       order, top);
			block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
			KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");
			list_move_tail(&block->link, &blocks);
		}
		/* There should be one final page for this sub-allocation */
		size = get_size(0, PAGE_SIZE);
		KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
								    size, size, &tmp, flags),
				       "buddy_alloc hit -ENOMEM for hole\n");
		block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
		KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");
		list_move_tail(&block->link, &holes);
		/* The mm is now exactly full; a @top-order alloc must fail */
		size = get_size(top, PAGE_SIZE);
		KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
								   size, size, &tmp, flags),
				      "buddy_alloc unexpectedly succeeded at top-order %d/%d, it should be full!",
				      top, max_order);
	}
	drm_buddy_free_list(&mm, &holes);
	/* Nothing larger than blocks of chunk_size now available */
	for (order = 1; order <= max_order; order++) {
		size = get_size(order, PAGE_SIZE);
		KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
								   size, size, &tmp, flags),
				      "buddy_alloc unexpectedly succeeded at order %d, it should be full!",
				      order);
	}
	/*
	 * NOTE(review): @holes was already handed to drm_buddy_free_list()
	 * above, so this splice presumably moves an empty list — confirm
	 * free_list empties its argument before relying on this.
	 */
	list_splice_tail(&holes, &blocks);
	drm_buddy_free_list(&mm, &blocks);
	drm_buddy_fini(&mm);
}
/*
 * Randomized smoke test: for each max order (visited in random order),
 * repeatedly allocate blocks of decreasing order until the mm is full,
 * validating mm consistency before each pass and the allocated set after.
 * A test-wide TIMEOUT bounds the total runtime.
 */
static void drm_test_buddy_alloc_smoke(struct kunit *test)
{
	u64 mm_size, chunk_size, start = 0;
	unsigned long flags = 0;
	struct drm_buddy mm;
	int *order;
	int i;
	DRM_RND_STATE(prng, random_seed);
	TIMEOUT(end_time);
	mm_config(&mm_size, &chunk_size);
	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, chunk_size),
			       "buddy_init failed\n");
	/* Random permutation of [0, max_order] to vary the starting order */
	order = drm_random_order(mm.max_order + 1, &prng);
	KUNIT_ASSERT_TRUE(test, order);
	for (i = 0; i <= mm.max_order; ++i) {
		struct drm_buddy_block *block;
		int max_order = order[i];
		bool timeout = false;
		LIST_HEAD(blocks);
		u64 total, size;
		LIST_HEAD(tmp);
		int order, err;
		KUNIT_ASSERT_FALSE_MSG(test, check_mm(test, &mm),
				       "pre-mm check failed, abort\n");
		order = max_order;
		total = 0;
		do {
retry:
			size = get_size(order, chunk_size);
			err = drm_buddy_alloc_blocks(&mm, start, mm_size, size, size, &tmp, flags);
			if (err) {
				if (err == -ENOMEM) {
					KUNIT_FAIL(test, "buddy_alloc hit -ENOMEM with order=%d\n",
						   order);
				} else {
					/* Drop to the next smaller order and retry */
					if (order--) {
						err = 0;
						goto retry;
					}
					KUNIT_FAIL(test, "buddy_alloc with order=%d failed\n",
						   order);
				}
				break;
			}
			block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
			KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");
			list_move_tail(&block->link, &blocks);
			KUNIT_EXPECT_EQ_MSG(test, drm_buddy_block_order(block), order,
					    "buddy_alloc order mismatch\n");
			total += drm_buddy_block_size(&mm, block);
			/* Bail out early if the overall test budget expired */
			if (__timeout(end_time, NULL)) {
				timeout = true;
				break;
			}
		} while (total < mm.size);
		if (!err)
			err = check_blocks(test, &mm, &blocks, total, false);
		drm_buddy_free_list(&mm, &blocks);
		if (!err) {
			/* After freeing everything the mm must be pristine again */
			KUNIT_EXPECT_FALSE_MSG(test, check_mm(test, &mm),
					       "post-mm check failed\n");
		}
		if (err || timeout)
			break;
		cond_resched();
	}
	kfree(order);
	drm_buddy_fini(&mm);
}
/*
 * Pessimistic test: fill a pot-sized mm with one block of each order (plus
 * the final page), confirm it is full, then free blocks in increasing size
 * and verify that progressively larger allocations become possible, ending
 * with a single max_order allocation covering the whole mm.
 */
static void drm_test_buddy_alloc_pessimistic(struct kunit *test)
{
	u64 mm_size, size, start = 0;
	struct drm_buddy_block *block, *bn;
	const unsigned int max_order = 16;
	unsigned long flags = 0;
	struct drm_buddy mm;
	unsigned int order;
	LIST_HEAD(blocks);
	LIST_HEAD(tmp);
	/*
	 * Create a pot-sized mm, then allocate one of each possible
	 * order within. This should leave the mm with exactly one
	 * page left.
	 */
	mm_size = PAGE_SIZE << max_order;
	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, PAGE_SIZE),
			       "buddy_init failed\n");
	KUNIT_EXPECT_EQ(test, mm.max_order, max_order);
	for (order = 0; order < max_order; order++) {
		size = get_size(order, PAGE_SIZE);
		KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
								    size, size, &tmp, flags),
				       "buddy_alloc hit -ENOMEM with order=%d\n",
				       order);
		block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
		KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");
		list_move_tail(&block->link, &blocks);
	}
	/* And now the last remaining block available */
	size = get_size(0, PAGE_SIZE);
	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
							    size, size, &tmp, flags),
			       "buddy_alloc hit -ENOMEM on final alloc\n");
	block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
	KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");
	list_move_tail(&block->link, &blocks);
	/* Should be completely full! */
	for (order = max_order; order--;) {
		size = get_size(order, PAGE_SIZE);
		KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
								   size, size, &tmp, flags),
				      "buddy_alloc unexpectedly succeeded, it should be full!");
	}
	/* Free the last-allocated (largest) block to start the cascade */
	block = list_last_entry(&blocks, typeof(*block), link);
	list_del(&block->link);
	drm_buddy_free_block(&mm, block);
	/* As we free in increasing size, we make available larger blocks */
	order = 1;
	list_for_each_entry_safe(block, bn, &blocks, link) {
		list_del(&block->link);
		drm_buddy_free_block(&mm, block);
		size = get_size(order, PAGE_SIZE);
		KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
								    size, size, &tmp, flags),
				       "buddy_alloc hit -ENOMEM with order=%d\n",
				       order);
		block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
		KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");
		list_del(&block->link);
		drm_buddy_free_block(&mm, block);
		order++;
	}
	/* To confirm, now the whole mm should be available */
	size = get_size(max_order, PAGE_SIZE);
	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
							    size, size, &tmp, flags),
			       "buddy_alloc (realloc) hit -ENOMEM with order=%d\n",
			       max_order);
	block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
	KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");
	list_del(&block->link);
	drm_buddy_free_block(&mm, block);
	drm_buddy_free_list(&mm, &blocks);
	drm_buddy_fini(&mm);
}
/*
 * Optimistic test: size the mm so that exactly one block of every order
 * fits (sum of powers of two), allocate them all, and confirm the mm is
 * then completely full.
 */
static void drm_test_buddy_alloc_optimistic(struct kunit *test)
{
	u64 mm_size, size, start = 0;
	struct drm_buddy_block *block;
	unsigned long flags = 0;
	const int max_order = 16;
	struct drm_buddy mm;
	LIST_HEAD(blocks);
	LIST_HEAD(tmp);
	int order;
	/*
	 * Create a mm with one block of each order available, and
	 * try to allocate them all.
	 */
	mm_size = PAGE_SIZE * ((1 << (max_order + 1)) - 1);
	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, PAGE_SIZE),
			       "buddy_init failed\n");
	KUNIT_EXPECT_EQ(test, mm.max_order, max_order);
	for (order = 0; order <= max_order; order++) {
		size = get_size(order, PAGE_SIZE);
		KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
								    size, size, &tmp, flags),
				       "buddy_alloc hit -ENOMEM with order=%d\n",
				       order);
		block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
		KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");
		list_move_tail(&block->link, &blocks);
	}
	/* Should be completely full! */
	size = get_size(0, PAGE_SIZE);
	KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
							   size, size, &tmp, flags),
			      "buddy_alloc unexpectedly succeeded, it should be full!");
	drm_buddy_free_list(&mm, &blocks);
	drm_buddy_fini(&mm);
}
/*
 * Range-allocation test: walk the whole mm front-to-back with
 * DRM_BUDDY_RANGE_ALLOCATION requests of prime-number chunk counts,
 * checking each returned range starts exactly where requested and that
 * the mm is pristine again after freeing everything.
 */
static void drm_test_buddy_alloc_range(struct kunit *test)
{
	unsigned long flags = DRM_BUDDY_RANGE_ALLOCATION;
	u64 offset, size, rem, chunk_size, end;
	unsigned long page_num;
	struct drm_buddy mm;
	LIST_HEAD(blocks);
	mm_config(&size, &chunk_size);
	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, size, chunk_size),
			       "buddy_init failed");
	KUNIT_ASSERT_FALSE_MSG(test, check_mm(test, &mm),
			       "pre-mm check failed, abort!");
	rem = mm.size;
	offset = 0;
	/* Prime chunk counts give irregular, non-power-of-two request sizes */
	for_each_prime_number_from(page_num, 1, ULONG_MAX - 1) {
		struct drm_buddy_block *block;
		LIST_HEAD(tmp);
		size = min(page_num * mm.chunk_size, rem);
		end = offset + size;
		KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, offset, end,
								    size, mm.chunk_size,
								    &tmp, flags),
				       "alloc_range with offset=%llx, size=%llx failed\n", offset, size);
		block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
		KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_range has no blocks\n");
		/* The first block of the result must begin at the requested offset */
		KUNIT_ASSERT_EQ_MSG(test, drm_buddy_block_offset(block), offset,
				    "alloc_range start offset mismatch, found=%llx, expected=%llx\n",
				    drm_buddy_block_offset(block), offset);
		KUNIT_ASSERT_FALSE(test, check_blocks(test, &mm, &tmp, size, true));
		list_splice_tail(&tmp, &blocks);
		offset += size;
		rem -= size;
		if (!rem)
			break;
		cond_resched();
	}
	drm_buddy_free_list(&mm, &blocks);
	KUNIT_EXPECT_FALSE_MSG(test, check_mm(test, &mm), "post-mm check failed\n");
	drm_buddy_fini(&mm);
}
/*
 * Limit test: initialize the mm with U64_MAX and confirm the max order is
 * clamped to DRM_BUDDY_MAX_ORDER, then allocate one block of that maximum
 * order and verify its order and byte size.
 */
static void drm_test_buddy_alloc_limit(struct kunit *test)
{
	u64 size = U64_MAX, start = 0;
	struct drm_buddy_block *block;
	unsigned long flags = 0;
	LIST_HEAD(allocated);
	struct drm_buddy mm;
	KUNIT_EXPECT_FALSE(test, drm_buddy_init(&mm, size, PAGE_SIZE));
	KUNIT_EXPECT_EQ_MSG(test, mm.max_order, DRM_BUDDY_MAX_ORDER,
			    "mm.max_order(%d) != %d\n", mm.max_order,
			    DRM_BUDDY_MAX_ORDER);
	/* Largest single allocation the clamped mm can satisfy */
	size = mm.chunk_size << mm.max_order;
	KUNIT_EXPECT_FALSE(test, drm_buddy_alloc_blocks(&mm, start, size, size,
							PAGE_SIZE, &allocated, flags));
	block = list_first_entry_or_null(&allocated, struct drm_buddy_block, link);
	KUNIT_EXPECT_TRUE(test, block);
	KUNIT_EXPECT_EQ_MSG(test, drm_buddy_block_order(block), mm.max_order,
			    "block order(%d) != %d\n",
			    drm_buddy_block_order(block), mm.max_order);
	KUNIT_EXPECT_EQ_MSG(test, drm_buddy_block_size(&mm, block),
			    BIT_ULL(mm.max_order) * PAGE_SIZE,
			    "block size(%llu) != %llu\n",
			    drm_buddy_block_size(&mm, block),
			    BIT_ULL(mm.max_order) * PAGE_SIZE);
	drm_buddy_free_list(&mm, &allocated);
	drm_buddy_fini(&mm);
}
/*
 * Suite init: ensure a non-zero random seed (retrying because
 * get_random_u32() may legitimately return 0) and log it so failing
 * randomized runs can be reproduced.
 */
static int drm_buddy_suite_init(struct kunit_suite *suite)
{
	while (!random_seed)
		random_seed = get_random_u32();
	kunit_info(suite, "Testing DRM buddy manager, with random_seed=0x%x\n", random_seed);
	return 0;
}
/* KUnit registration: test list, suite descriptor and module boilerplate. */
static struct kunit_case drm_buddy_tests[] = {
	KUNIT_CASE(drm_test_buddy_alloc_limit),
	KUNIT_CASE(drm_test_buddy_alloc_range),
	KUNIT_CASE(drm_test_buddy_alloc_optimistic),
	KUNIT_CASE(drm_test_buddy_alloc_pessimistic),
	KUNIT_CASE(drm_test_buddy_alloc_smoke),
	KUNIT_CASE(drm_test_buddy_alloc_pathological),
	{}
};
static struct kunit_suite drm_buddy_test_suite = {
	.name = "drm_buddy",
	.suite_init = drm_buddy_suite_init,
	.test_cases = drm_buddy_tests,
};
kunit_test_suite(drm_buddy_test_suite);
MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL");
| linux-master | drivers/gpu/drm/tests/drm_buddy_test.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2022 Maxime Ripard <[email protected]>
*/
#include <kunit/test.h>
#include <drm/drm_connector.h>
#include <drm/drm_edid.h>
#include <drm/drm_drv.h>
#include <drm/drm_kunit_helpers.h>
#include <drm/drm_modes.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_probe_helper.h>
/* Per-test fixture: a fake device/DRM pair and a single test connector. */
struct drm_client_modeset_test_priv {
	struct drm_device *drm;		/* KUnit-managed DRM device */
	struct device *dev;		/* backing platform device */
	struct drm_connector connector;	/* the connector under test */
};
/*
 * .get_modes for the test connector: advertise the standard no-EDID modes
 * up to 1920x1200 plus the two analog TV modes (NTSC 480i and PAL 576i),
 * so the named-mode cmdline tests have something to resolve against.
 * Returns the number of probed modes.
 */
static int drm_client_modeset_connector_get_modes(struct drm_connector *connector)
{
	struct drm_display_mode *mode;
	int count;
	count = drm_add_modes_noedid(connector, 1920, 1200);
	mode = drm_mode_analog_ntsc_480i(connector->dev);
	if (!mode)
		return count;
	drm_mode_probed_add(connector, mode);
	count += 1;
	mode = drm_mode_analog_pal_576i(connector->dev);
	if (!mode)
		return count;
	drm_mode_probed_add(connector, mode);
	count += 1;
	return count;
}
/* Helper vtable: only mode probing is needed for these tests. */
static const struct drm_connector_helper_funcs drm_client_modeset_connector_helper_funcs = {
	.get_modes = drm_client_modeset_connector_get_modes,
};
/* Core vtable: deliberately empty, no real hardware behind the connector. */
static const struct drm_connector_funcs drm_client_modeset_connector_funcs = {
};
/*
 * Test init: build the fixture — allocate a KUnit-managed device and DRM
 * device, register the test connector with its helper funcs, and allow
 * interlaced/doublescan modes so the analog TV modes are not filtered out.
 */
static int drm_client_modeset_test_init(struct kunit *test)
{
	struct drm_client_modeset_test_priv *priv;
	int ret;
	priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, priv);
	test->priv = priv;
	priv->dev = drm_kunit_helper_alloc_device(test);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv->dev);
	priv->drm = __drm_kunit_helper_alloc_drm_device(test, priv->dev,
							sizeof(*priv->drm), 0,
							DRIVER_MODESET);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv->drm);
	ret = drmm_connector_init(priv->drm, &priv->connector,
				  &drm_client_modeset_connector_funcs,
				  DRM_MODE_CONNECTOR_Unknown,
				  NULL);
	KUNIT_ASSERT_EQ(test, ret, 0);
	drm_connector_helper_add(&priv->connector, &drm_client_modeset_connector_helper_funcs);
	/* Needed so the 480i/576i analog modes survive mode pruning */
	priv->connector.interlace_allowed = true;
	priv->connector.doublescan_allowed = true;
	return 0;
}
/*
 * Check that the cmdline "1920x1080@60" resolves, after probing, to the
 * same mode the DMT table gives for 1920x1080 at 60 Hz.
 */
static void drm_test_pick_cmdline_res_1920_1080_60(struct kunit *test)
{
	struct drm_client_modeset_test_priv *priv = test->priv;
	struct drm_device *drm = priv->drm;
	struct drm_connector *connector = &priv->connector;
	struct drm_cmdline_mode *cmdline_mode = &connector->cmdline_mode;
	struct drm_display_mode *expected_mode, *mode;
	const char *cmdline = "1920x1080@60";
	int ret;
	expected_mode = drm_mode_find_dmt(priv->drm, 1920, 1080, 60, false);
	KUNIT_ASSERT_NOT_NULL(test, expected_mode);
	KUNIT_ASSERT_TRUE(test,
			  drm_mode_parse_command_line_for_connector(cmdline,
								    connector,
								    cmdline_mode));
	/* Probing requires the mode_config lock to be held */
	mutex_lock(&drm->mode_config.mutex);
	ret = drm_helper_probe_single_connector_modes(connector, 1920, 1080);
	mutex_unlock(&drm->mode_config.mutex);
	KUNIT_ASSERT_GT(test, ret, 0);
	mode = drm_connector_pick_cmdline_mode(connector);
	KUNIT_ASSERT_NOT_NULL(test, mode);
	KUNIT_EXPECT_TRUE(test, drm_mode_equal(expected_mode, mode));
}
struct drm_connector_pick_cmdline_mode_test {
const char *cmdline;
struct drm_display_mode *(*func)(struct drm_device *drm);
};
#define TEST_CMDLINE(_cmdline, _fn) \
{ \
.cmdline = _cmdline, \
.func = _fn, \
}
/*
 * Parameterized: parse a named cmdline mode (NTSC, PAL, ...), probe the
 * connector, and check the picked mode equals the one generated by the
 * parameter's expected-mode function.
 */
static void drm_test_pick_cmdline_named(struct kunit *test)
{
	const struct drm_connector_pick_cmdline_mode_test *params = test->param_value;
	struct drm_client_modeset_test_priv *priv = test->priv;
	struct drm_device *drm = priv->drm;
	struct drm_connector *connector = &priv->connector;
	struct drm_cmdline_mode *cmdline_mode = &connector->cmdline_mode;
	const struct drm_display_mode *expected_mode, *mode;
	const char *cmdline = params->cmdline;
	int ret;
	KUNIT_ASSERT_TRUE(test,
			  drm_mode_parse_command_line_for_connector(cmdline,
								    connector,
								    cmdline_mode));
	/* Probing requires the mode_config lock to be held */
	mutex_lock(&drm->mode_config.mutex);
	ret = drm_helper_probe_single_connector_modes(connector, 1920, 1080);
	mutex_unlock(&drm->mode_config.mutex);
	KUNIT_ASSERT_GT(test, ret, 0);
	mode = drm_connector_pick_cmdline_mode(connector);
	KUNIT_ASSERT_NOT_NULL(test, mode);
	expected_mode = params->func(drm);
	KUNIT_ASSERT_NOT_NULL(test, expected_mode);
	KUNIT_EXPECT_TRUE(test, drm_mode_equal(expected_mode, mode));
}
/*
 * Named-mode parameters: NTSC variants (including NTSC-J and PAL-M, which
 * are 480i-based) map to the analog NTSC mode, PAL to the analog PAL mode.
 */
static const
struct drm_connector_pick_cmdline_mode_test drm_connector_pick_cmdline_mode_tests[] = {
	TEST_CMDLINE("NTSC", drm_mode_analog_ntsc_480i),
	TEST_CMDLINE("NTSC-J", drm_mode_analog_ntsc_480i),
	TEST_CMDLINE("PAL", drm_mode_analog_pal_576i),
	TEST_CMDLINE("PAL-M", drm_mode_analog_ntsc_480i),
};
/* Parameter description: just the cmdline string itself. */
static void
drm_connector_pick_cmdline_mode_desc(const struct drm_connector_pick_cmdline_mode_test *t,
				     char *desc)
{
	sprintf(desc, "%s", t->cmdline);
}
KUNIT_ARRAY_PARAM(drm_connector_pick_cmdline_mode,
		  drm_connector_pick_cmdline_mode_tests,
		  drm_connector_pick_cmdline_mode_desc);
/* KUnit registration for the cmdline-mode-picking tests. */
static struct kunit_case drm_test_pick_cmdline_tests[] = {
	KUNIT_CASE(drm_test_pick_cmdline_res_1920_1080_60),
	KUNIT_CASE_PARAM(drm_test_pick_cmdline_named,
			 drm_connector_pick_cmdline_mode_gen_params),
	{}
};
static struct kunit_suite drm_test_pick_cmdline_test_suite = {
	.name = "drm_test_pick_cmdline",
	.init = drm_client_modeset_test_init,
	.test_cases = drm_test_pick_cmdline_tests
};
kunit_test_suite(drm_test_pick_cmdline_test_suite);
/*
 * This file is included directly by drm_client_modeset.c so we can't
 * use any MODULE_* macro here.
 */
| linux-master | drivers/gpu/drm/tests/drm_client_modeset_test.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2019 Bootlin
* Copyright (c) 2022 Maíra Canal <[email protected]>
*/
#include <kunit/test.h>
#include <drm/drm_connector.h>
#include <drm/drm_modes.h>
/* A connector with no type set, used by tests that don't care about it. */
static const struct drm_connector no_connector = {};
/* Bare "e": no mode specified, output forced ON. */
static void drm_test_cmdline_force_e_only(struct kunit *test)
{
	struct drm_cmdline_mode mode = { };
	const char *cmdline = "e";
	KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
									  &no_connector, &mode));
	KUNIT_EXPECT_FALSE(test, mode.specified);
	KUNIT_EXPECT_FALSE(test, mode.refresh_specified);
	KUNIT_EXPECT_FALSE(test, mode.bpp_specified);
	KUNIT_EXPECT_FALSE(test, mode.rb);
	KUNIT_EXPECT_FALSE(test, mode.cvt);
	KUNIT_EXPECT_FALSE(test, mode.interlace);
	KUNIT_EXPECT_FALSE(test, mode.margins);
	KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_ON);
}
/* Bare "D" on a connector with no digital type: degrades to plain force-ON. */
static void drm_test_cmdline_force_D_only_not_digital(struct kunit *test)
{
	struct drm_cmdline_mode mode = { };
	const char *cmdline = "D";
	KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
									  &no_connector, &mode));
	KUNIT_EXPECT_FALSE(test, mode.specified);
	KUNIT_EXPECT_FALSE(test, mode.refresh_specified);
	KUNIT_EXPECT_FALSE(test, mode.bpp_specified);
	KUNIT_EXPECT_FALSE(test, mode.rb);
	KUNIT_EXPECT_FALSE(test, mode.cvt);
	KUNIT_EXPECT_FALSE(test, mode.interlace);
	KUNIT_EXPECT_FALSE(test, mode.margins);
	KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_ON);
}
/* HDMI-B connector fixture for the digital force-on case below. */
static const struct drm_connector connector_hdmi = {
	.connector_type = DRM_MODE_CONNECTOR_HDMIB,
};
/* Bare "D" on an HDMI connector: force ON in digital mode. */
static void drm_test_cmdline_force_D_only_hdmi(struct kunit *test)
{
	struct drm_cmdline_mode mode = { };
	const char *cmdline = "D";
	KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
									  &connector_hdmi, &mode));
	KUNIT_EXPECT_FALSE(test, mode.specified);
	KUNIT_EXPECT_FALSE(test, mode.refresh_specified);
	KUNIT_EXPECT_FALSE(test, mode.bpp_specified);
	KUNIT_EXPECT_FALSE(test, mode.rb);
	KUNIT_EXPECT_FALSE(test, mode.cvt);
	KUNIT_EXPECT_FALSE(test, mode.interlace);
	KUNIT_EXPECT_FALSE(test, mode.margins);
	KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_ON_DIGITAL);
}
/* DVI-I connector fixture for the digital force-on case below. */
static const struct drm_connector connector_dvi = {
	.connector_type = DRM_MODE_CONNECTOR_DVII,
};
/* Bare "D" on a DVI-I connector: force ON in digital mode. */
static void drm_test_cmdline_force_D_only_dvi(struct kunit *test)
{
	struct drm_cmdline_mode mode = { };
	const char *cmdline = "D";
	KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
									  &connector_dvi, &mode));
	KUNIT_EXPECT_FALSE(test, mode.specified);
	KUNIT_EXPECT_FALSE(test, mode.refresh_specified);
	KUNIT_EXPECT_FALSE(test, mode.bpp_specified);
	KUNIT_EXPECT_FALSE(test, mode.rb);
	KUNIT_EXPECT_FALSE(test, mode.cvt);
	KUNIT_EXPECT_FALSE(test, mode.interlace);
	KUNIT_EXPECT_FALSE(test, mode.margins);
	KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_ON_DIGITAL);
}
/* Bare "d": no mode specified, output forced OFF. */
static void drm_test_cmdline_force_d_only(struct kunit *test)
{
	struct drm_cmdline_mode mode = { };
	const char *cmdline = "d";
	KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
									  &no_connector, &mode));
	KUNIT_EXPECT_FALSE(test, mode.specified);
	KUNIT_EXPECT_FALSE(test, mode.refresh_specified);
	KUNIT_EXPECT_FALSE(test, mode.bpp_specified);
	KUNIT_EXPECT_FALSE(test, mode.rb);
	KUNIT_EXPECT_FALSE(test, mode.cvt);
	KUNIT_EXPECT_FALSE(test, mode.interlace);
	KUNIT_EXPECT_FALSE(test, mode.margins);
	KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_OFF);
}
/* "720x480": plain resolution, nothing else set. */
static void drm_test_cmdline_res(struct kunit *test)
{
	struct drm_cmdline_mode mode = { };
	const char *cmdline = "720x480";
	KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
									  &no_connector, &mode));
	KUNIT_EXPECT_TRUE(test, mode.specified);
	KUNIT_EXPECT_EQ(test, mode.xres, 720);
	KUNIT_EXPECT_EQ(test, mode.yres, 480);
	KUNIT_EXPECT_FALSE(test, mode.refresh_specified);
	KUNIT_EXPECT_FALSE(test, mode.bpp_specified);
	KUNIT_EXPECT_FALSE(test, mode.rb);
	KUNIT_EXPECT_FALSE(test, mode.cvt);
	KUNIT_EXPECT_FALSE(test, mode.interlace);
	KUNIT_EXPECT_FALSE(test, mode.margins);
	KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_UNSPECIFIED);
}
/* "720x480M": 'M' selects a CVT-computed (VESA) mode. */
static void drm_test_cmdline_res_vesa(struct kunit *test)
{
	struct drm_cmdline_mode mode = { };
	const char *cmdline = "720x480M";
	KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
									  &no_connector, &mode));
	KUNIT_EXPECT_TRUE(test, mode.specified);
	KUNIT_EXPECT_EQ(test, mode.xres, 720);
	KUNIT_EXPECT_EQ(test, mode.yres, 480);
	KUNIT_EXPECT_FALSE(test, mode.refresh_specified);
	KUNIT_EXPECT_FALSE(test, mode.bpp_specified);
	KUNIT_EXPECT_FALSE(test, mode.rb);
	KUNIT_EXPECT_TRUE(test, mode.cvt);
	KUNIT_EXPECT_FALSE(test, mode.interlace);
	KUNIT_EXPECT_FALSE(test, mode.margins);
	KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_UNSPECIFIED);
}
/* "720x480MR": CVT mode with reduced blanking ('R'). */
static void drm_test_cmdline_res_vesa_rblank(struct kunit *test)
{
	struct drm_cmdline_mode mode = { };
	const char *cmdline = "720x480MR";
	KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
									  &no_connector, &mode));
	KUNIT_EXPECT_TRUE(test, mode.specified);
	KUNIT_EXPECT_EQ(test, mode.xres, 720);
	KUNIT_EXPECT_EQ(test, mode.yres, 480);
	KUNIT_EXPECT_FALSE(test, mode.refresh_specified);
	KUNIT_EXPECT_FALSE(test, mode.bpp_specified);
	KUNIT_EXPECT_TRUE(test, mode.rb);
	KUNIT_EXPECT_TRUE(test, mode.cvt);
	KUNIT_EXPECT_FALSE(test, mode.interlace);
	KUNIT_EXPECT_FALSE(test, mode.margins);
	KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_UNSPECIFIED);
}
/* "720x480R": reduced blanking without CVT. */
static void drm_test_cmdline_res_rblank(struct kunit *test)
{
	struct drm_cmdline_mode mode = { };
	const char *cmdline = "720x480R";
	KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
									  &no_connector, &mode));
	KUNIT_EXPECT_TRUE(test, mode.specified);
	KUNIT_EXPECT_EQ(test, mode.xres, 720);
	KUNIT_EXPECT_EQ(test, mode.yres, 480);
	KUNIT_EXPECT_FALSE(test, mode.refresh_specified);
	KUNIT_EXPECT_FALSE(test, mode.bpp_specified);
	KUNIT_EXPECT_TRUE(test, mode.rb);
	KUNIT_EXPECT_FALSE(test, mode.cvt);
	KUNIT_EXPECT_FALSE(test, mode.interlace);
	KUNIT_EXPECT_FALSE(test, mode.margins);
	KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_UNSPECIFIED);
}
/* "720x480-24": resolution plus bits-per-pixel. */
static void drm_test_cmdline_res_bpp(struct kunit *test)
{
	struct drm_cmdline_mode mode = { };
	const char *cmdline = "720x480-24";
	KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
									  &no_connector, &mode));
	KUNIT_EXPECT_TRUE(test, mode.specified);
	KUNIT_EXPECT_EQ(test, mode.xres, 720);
	KUNIT_EXPECT_EQ(test, mode.yres, 480);
	KUNIT_EXPECT_FALSE(test, mode.refresh_specified);
	KUNIT_EXPECT_TRUE(test, mode.bpp_specified);
	KUNIT_EXPECT_EQ(test, mode.bpp, 24);
	KUNIT_EXPECT_FALSE(test, mode.rb);
	KUNIT_EXPECT_FALSE(test, mode.cvt);
	KUNIT_EXPECT_FALSE(test, mode.interlace);
	KUNIT_EXPECT_FALSE(test, mode.margins);
	KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_UNSPECIFIED);
}
/* "720x480@60": resolution plus refresh rate. */
static void drm_test_cmdline_res_refresh(struct kunit *test)
{
	struct drm_cmdline_mode mode = { };
	const char *cmdline = "720x480@60";
	KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
									  &no_connector, &mode));
	KUNIT_EXPECT_TRUE(test, mode.specified);
	KUNIT_EXPECT_EQ(test, mode.xres, 720);
	KUNIT_EXPECT_EQ(test, mode.yres, 480);
	KUNIT_EXPECT_TRUE(test, mode.refresh_specified);
	KUNIT_EXPECT_EQ(test, mode.refresh, 60);
	KUNIT_EXPECT_FALSE(test, mode.bpp_specified);
	KUNIT_EXPECT_FALSE(test, mode.rb);
	KUNIT_EXPECT_FALSE(test, mode.cvt);
	KUNIT_EXPECT_FALSE(test, mode.interlace);
	KUNIT_EXPECT_FALSE(test, mode.margins);
	KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_UNSPECIFIED);
}
/* "720x480-24@60": resolution, bpp and refresh combined. */
static void drm_test_cmdline_res_bpp_refresh(struct kunit *test)
{
	struct drm_cmdline_mode mode = { };
	const char *cmdline = "720x480-24@60";
	KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
									  &no_connector, &mode));
	KUNIT_EXPECT_TRUE(test, mode.specified);
	KUNIT_EXPECT_EQ(test, mode.xres, 720);
	KUNIT_EXPECT_EQ(test, mode.yres, 480);
	KUNIT_EXPECT_TRUE(test, mode.refresh_specified);
	KUNIT_EXPECT_EQ(test, mode.refresh, 60);
	KUNIT_EXPECT_TRUE(test, mode.bpp_specified);
	KUNIT_EXPECT_EQ(test, mode.bpp, 24);
	KUNIT_EXPECT_FALSE(test, mode.rb);
	KUNIT_EXPECT_FALSE(test, mode.cvt);
	KUNIT_EXPECT_FALSE(test, mode.interlace);
	KUNIT_EXPECT_FALSE(test, mode.margins);
	KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_UNSPECIFIED);
}
/* "...@60i": trailing 'i' requests an interlaced mode. */
static void drm_test_cmdline_res_bpp_refresh_interlaced(struct kunit *test)
{
	struct drm_cmdline_mode mode = { };
	const char *cmdline = "720x480-24@60i";
	KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
									  &no_connector, &mode));
	KUNIT_EXPECT_TRUE(test, mode.specified);
	KUNIT_EXPECT_EQ(test, mode.xres, 720);
	KUNIT_EXPECT_EQ(test, mode.yres, 480);
	KUNIT_EXPECT_TRUE(test, mode.refresh_specified);
	KUNIT_EXPECT_EQ(test, mode.refresh, 60);
	KUNIT_EXPECT_TRUE(test, mode.bpp_specified);
	KUNIT_EXPECT_EQ(test, mode.bpp, 24);
	KUNIT_EXPECT_FALSE(test, mode.rb);
	KUNIT_EXPECT_FALSE(test, mode.cvt);
	KUNIT_EXPECT_TRUE(test, mode.interlace);
	KUNIT_EXPECT_FALSE(test, mode.margins);
	KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_UNSPECIFIED);
}
/* "...@60m": trailing 'm' requests margins. */
static void drm_test_cmdline_res_bpp_refresh_margins(struct kunit *test)
{
	struct drm_cmdline_mode mode = { };
	const char *cmdline = "720x480-24@60m";
	KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
									  &no_connector, &mode));
	KUNIT_EXPECT_TRUE(test, mode.specified);
	KUNIT_EXPECT_EQ(test, mode.xres, 720);
	KUNIT_EXPECT_EQ(test, mode.yres, 480);
	KUNIT_EXPECT_TRUE(test, mode.refresh_specified);
	KUNIT_EXPECT_EQ(test, mode.refresh, 60);
	KUNIT_EXPECT_TRUE(test, mode.bpp_specified);
	KUNIT_EXPECT_EQ(test, mode.bpp, 24);
	KUNIT_EXPECT_FALSE(test, mode.rb);
	KUNIT_EXPECT_FALSE(test, mode.cvt);
	KUNIT_EXPECT_FALSE(test, mode.interlace);
	KUNIT_EXPECT_TRUE(test, mode.margins);
	KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_UNSPECIFIED);
}
/* "...@60d": trailing 'd' forces the output OFF. */
static void drm_test_cmdline_res_bpp_refresh_force_off(struct kunit *test)
{
	struct drm_cmdline_mode mode = { };
	const char *cmdline = "720x480-24@60d";
	KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
									  &no_connector, &mode));
	KUNIT_EXPECT_TRUE(test, mode.specified);
	KUNIT_EXPECT_EQ(test, mode.xres, 720);
	KUNIT_EXPECT_EQ(test, mode.yres, 480);
	KUNIT_EXPECT_TRUE(test, mode.refresh_specified);
	KUNIT_EXPECT_EQ(test, mode.refresh, 60);
	KUNIT_EXPECT_TRUE(test, mode.bpp_specified);
	KUNIT_EXPECT_EQ(test, mode.bpp, 24);
	KUNIT_EXPECT_FALSE(test, mode.rb);
	KUNIT_EXPECT_FALSE(test, mode.cvt);
	KUNIT_EXPECT_FALSE(test, mode.interlace);
	KUNIT_EXPECT_FALSE(test, mode.margins);
	KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_OFF);
}
/* "...@60e": trailing 'e' forces the output ON. */
static void drm_test_cmdline_res_bpp_refresh_force_on(struct kunit *test)
{
	struct drm_cmdline_mode mode = { };
	const char *cmdline = "720x480-24@60e";
	KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
									  &no_connector, &mode));
	KUNIT_EXPECT_TRUE(test, mode.specified);
	KUNIT_EXPECT_EQ(test, mode.xres, 720);
	KUNIT_EXPECT_EQ(test, mode.yres, 480);
	KUNIT_EXPECT_TRUE(test, mode.refresh_specified);
	KUNIT_EXPECT_EQ(test, mode.refresh, 60);
	KUNIT_EXPECT_TRUE(test, mode.bpp_specified);
	KUNIT_EXPECT_EQ(test, mode.bpp, 24);
	KUNIT_EXPECT_FALSE(test, mode.rb);
	KUNIT_EXPECT_FALSE(test, mode.cvt);
	KUNIT_EXPECT_FALSE(test, mode.interlace);
	KUNIT_EXPECT_FALSE(test, mode.margins);
	KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_ON);
}
/* "...@60D" on a non-digital connector: falls back to plain force-ON. */
static void drm_test_cmdline_res_bpp_refresh_force_on_analog(struct kunit *test)
{
	struct drm_cmdline_mode mode = { };
	const char *cmdline = "720x480-24@60D";
	KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
									  &no_connector, &mode));
	KUNIT_EXPECT_TRUE(test, mode.specified);
	KUNIT_EXPECT_EQ(test, mode.xres, 720);
	KUNIT_EXPECT_EQ(test, mode.yres, 480);
	KUNIT_EXPECT_TRUE(test, mode.refresh_specified);
	KUNIT_EXPECT_EQ(test, mode.refresh, 60);
	KUNIT_EXPECT_TRUE(test, mode.bpp_specified);
	KUNIT_EXPECT_EQ(test, mode.bpp, 24);
	KUNIT_EXPECT_FALSE(test, mode.rb);
	KUNIT_EXPECT_FALSE(test, mode.cvt);
	KUNIT_EXPECT_FALSE(test, mode.interlace);
	KUNIT_EXPECT_FALSE(test, mode.margins);
	KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_ON);
}
/* "...@60D" on a DVI-I connector: forced ON in digital mode. */
static void drm_test_cmdline_res_bpp_refresh_force_on_digital(struct kunit *test)
{
	struct drm_cmdline_mode mode = { };
	static const struct drm_connector connector = {
		.connector_type = DRM_MODE_CONNECTOR_DVII,
	};
	const char *cmdline = "720x480-24@60D";
	KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
									  &connector, &mode));
	KUNIT_EXPECT_TRUE(test, mode.specified);
	KUNIT_EXPECT_EQ(test, mode.xres, 720);
	KUNIT_EXPECT_EQ(test, mode.yres, 480);
	KUNIT_EXPECT_TRUE(test, mode.refresh_specified);
	KUNIT_EXPECT_EQ(test, mode.refresh, 60);
	KUNIT_EXPECT_TRUE(test, mode.bpp_specified);
	KUNIT_EXPECT_EQ(test, mode.bpp, 24);
	KUNIT_EXPECT_FALSE(test, mode.rb);
	KUNIT_EXPECT_FALSE(test, mode.cvt);
	KUNIT_EXPECT_FALSE(test, mode.interlace);
	KUNIT_EXPECT_FALSE(test, mode.margins);
	KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_ON_DIGITAL);
}
/*
 * "...@60ime": interlace ('i'), margins ('m') and force-enable ('e') flags
 * combined on one mode line must all stick.
 *
 * Fix: the parse call used KUNIT_EXPECT_TRUE while every sibling test uses
 * KUNIT_ASSERT_TRUE. With EXPECT, a failed parse would let the test fall
 * through and evaluate all the expectations below against an
 * uninitialized/partial mode; ASSERT aborts the test immediately instead.
 */
static void drm_test_cmdline_res_bpp_refresh_interlaced_margins_force_on(struct kunit *test)
{
	struct drm_cmdline_mode mode = { };
	const char *cmdline = "720x480-24@60ime";
	KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
									  &no_connector, &mode));
	KUNIT_EXPECT_TRUE(test, mode.specified);
	KUNIT_EXPECT_EQ(test, mode.xres, 720);
	KUNIT_EXPECT_EQ(test, mode.yres, 480);
	KUNIT_EXPECT_TRUE(test, mode.refresh_specified);
	KUNIT_EXPECT_EQ(test, mode.refresh, 60);
	KUNIT_EXPECT_TRUE(test, mode.bpp_specified);
	KUNIT_EXPECT_EQ(test, mode.bpp, 24);
	KUNIT_EXPECT_FALSE(test, mode.rb);
	KUNIT_EXPECT_FALSE(test, mode.cvt);
	KUNIT_EXPECT_TRUE(test, mode.interlace);
	KUNIT_EXPECT_TRUE(test, mode.margins);
	KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_ON);
}
/* "720x480me": margins plus force-ON without bpp/refresh. */
static void drm_test_cmdline_res_margins_force_on(struct kunit *test)
{
	struct drm_cmdline_mode mode = { };
	const char *cmdline = "720x480me";
	KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
									  &no_connector, &mode));
	KUNIT_EXPECT_TRUE(test, mode.specified);
	KUNIT_EXPECT_EQ(test, mode.xres, 720);
	KUNIT_EXPECT_EQ(test, mode.yres, 480);
	KUNIT_EXPECT_FALSE(test, mode.refresh_specified);
	KUNIT_EXPECT_FALSE(test, mode.bpp_specified);
	KUNIT_EXPECT_FALSE(test, mode.rb);
	KUNIT_EXPECT_FALSE(test, mode.cvt);
	KUNIT_EXPECT_FALSE(test, mode.interlace);
	KUNIT_EXPECT_TRUE(test, mode.margins);
	KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_ON);
}
/* "720x480Mm": CVT mode with margins. */
static void drm_test_cmdline_res_vesa_margins(struct kunit *test)
{
	struct drm_cmdline_mode mode = { };
	const char *cmdline = "720x480Mm";
	KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
									  &no_connector, &mode));
	KUNIT_EXPECT_TRUE(test, mode.specified);
	KUNIT_EXPECT_EQ(test, mode.xres, 720);
	KUNIT_EXPECT_EQ(test, mode.yres, 480);
	KUNIT_EXPECT_FALSE(test, mode.refresh_specified);
	KUNIT_EXPECT_FALSE(test, mode.bpp_specified);
	KUNIT_EXPECT_FALSE(test, mode.rb);
	KUNIT_EXPECT_TRUE(test, mode.cvt);
	KUNIT_EXPECT_FALSE(test, mode.interlace);
	KUNIT_EXPECT_TRUE(test, mode.margins);
	KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_UNSPECIFIED);
}
/* A bare named mode ("NTSC") parses into mode.name only. */
static void drm_test_cmdline_name(struct kunit *test)
{
	struct drm_cmdline_mode mode = { };
	const char *cmdline = "NTSC";
	KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
									  &no_connector, &mode));
	KUNIT_EXPECT_STREQ(test, mode.name, "NTSC");
	KUNIT_EXPECT_FALSE(test, mode.refresh_specified);
	KUNIT_EXPECT_FALSE(test, mode.bpp_specified);
}
/* Named mode with a bpp suffix ("NTSC-24"). */
static void drm_test_cmdline_name_bpp(struct kunit *test)
{
	struct drm_cmdline_mode mode = { };
	const char *cmdline = "NTSC-24";
	KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
									  &no_connector, &mode));
	KUNIT_EXPECT_STREQ(test, mode.name, "NTSC");
	KUNIT_EXPECT_FALSE(test, mode.refresh_specified);
	KUNIT_EXPECT_TRUE(test, mode.bpp_specified);
	KUNIT_EXPECT_EQ(test, mode.bpp, 24);
}
/* Named mode followed by an option list ("NTSC,rotate=180"). */
static void drm_test_cmdline_name_option(struct kunit *test)
{
	struct drm_cmdline_mode mode = { };
	const char *cmdline = "NTSC,rotate=180";
	KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
									  &no_connector, &mode));
	KUNIT_EXPECT_TRUE(test, mode.specified);
	KUNIT_EXPECT_STREQ(test, mode.name, "NTSC");
	KUNIT_EXPECT_EQ(test, mode.rotation_reflection, DRM_MODE_ROTATE_180);
}
/* Named mode with both a bpp suffix and an option list. */
static void drm_test_cmdline_name_bpp_option(struct kunit *test)
{
	struct drm_cmdline_mode mode = { };
	const char *cmdline = "NTSC-24,rotate=180";
	KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
									  &no_connector, &mode));
	KUNIT_EXPECT_TRUE(test, mode.specified);
	KUNIT_EXPECT_STREQ(test, mode.name, "NTSC");
	KUNIT_EXPECT_EQ(test, mode.rotation_reflection, DRM_MODE_ROTATE_180);
	KUNIT_EXPECT_TRUE(test, mode.bpp_specified);
	KUNIT_EXPECT_EQ(test, mode.bpp, 24);
}
/* rotate=0 option maps to DRM_MODE_ROTATE_0. */
static void drm_test_cmdline_rotate_0(struct kunit *test)
{
	struct drm_cmdline_mode mode = { };
	const char *cmdline = "720x480,rotate=0";
	KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
									  &no_connector, &mode));
	KUNIT_EXPECT_TRUE(test, mode.specified);
	KUNIT_EXPECT_EQ(test, mode.xres, 720);
	KUNIT_EXPECT_EQ(test, mode.yres, 480);
	KUNIT_EXPECT_EQ(test, mode.rotation_reflection, DRM_MODE_ROTATE_0);
	KUNIT_EXPECT_FALSE(test, mode.refresh_specified);
	KUNIT_EXPECT_FALSE(test, mode.bpp_specified);
	KUNIT_EXPECT_FALSE(test, mode.rb);
	KUNIT_EXPECT_FALSE(test, mode.cvt);
	KUNIT_EXPECT_FALSE(test, mode.interlace);
	KUNIT_EXPECT_FALSE(test, mode.margins);
	KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_UNSPECIFIED);
}
/* rotate=90 option maps to DRM_MODE_ROTATE_90. */
static void drm_test_cmdline_rotate_90(struct kunit *test)
{
	struct drm_cmdline_mode mode = { };
	const char *cmdline = "720x480,rotate=90";
	KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
									  &no_connector, &mode));
	KUNIT_EXPECT_TRUE(test, mode.specified);
	KUNIT_EXPECT_EQ(test, mode.xres, 720);
	KUNIT_EXPECT_EQ(test, mode.yres, 480);
	KUNIT_EXPECT_EQ(test, mode.rotation_reflection, DRM_MODE_ROTATE_90);
	KUNIT_EXPECT_FALSE(test, mode.refresh_specified);
	KUNIT_EXPECT_FALSE(test, mode.bpp_specified);
	KUNIT_EXPECT_FALSE(test, mode.rb);
	KUNIT_EXPECT_FALSE(test, mode.cvt);
	KUNIT_EXPECT_FALSE(test, mode.interlace);
	KUNIT_EXPECT_FALSE(test, mode.margins);
	KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_UNSPECIFIED);
}
/* rotate=180 option maps to DRM_MODE_ROTATE_180. */
static void drm_test_cmdline_rotate_180(struct kunit *test)
{
	struct drm_cmdline_mode mode = { };
	const char *cmdline = "720x480,rotate=180";
	KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
									  &no_connector, &mode));
	KUNIT_EXPECT_TRUE(test, mode.specified);
	KUNIT_EXPECT_EQ(test, mode.xres, 720);
	KUNIT_EXPECT_EQ(test, mode.yres, 480);
	KUNIT_EXPECT_EQ(test, mode.rotation_reflection, DRM_MODE_ROTATE_180);
	KUNIT_EXPECT_FALSE(test, mode.refresh_specified);
	KUNIT_EXPECT_FALSE(test, mode.bpp_specified);
	KUNIT_EXPECT_FALSE(test, mode.rb);
	KUNIT_EXPECT_FALSE(test, mode.cvt);
	KUNIT_EXPECT_FALSE(test, mode.interlace);
	KUNIT_EXPECT_FALSE(test, mode.margins);
	KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_UNSPECIFIED);
}
/* rotate=270 option maps to DRM_MODE_ROTATE_270. */
static void drm_test_cmdline_rotate_270(struct kunit *test)
{
	struct drm_cmdline_mode mode = { };
	const char *cmdline = "720x480,rotate=270";
	KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
									  &no_connector, &mode));
	KUNIT_EXPECT_TRUE(test, mode.specified);
	KUNIT_EXPECT_EQ(test, mode.xres, 720);
	KUNIT_EXPECT_EQ(test, mode.yres, 480);
	KUNIT_EXPECT_EQ(test, mode.rotation_reflection, DRM_MODE_ROTATE_270);
	KUNIT_EXPECT_FALSE(test, mode.refresh_specified);
	KUNIT_EXPECT_FALSE(test, mode.bpp_specified);
	KUNIT_EXPECT_FALSE(test, mode.rb);
	KUNIT_EXPECT_FALSE(test, mode.cvt);
	KUNIT_EXPECT_FALSE(test, mode.interlace);
	KUNIT_EXPECT_FALSE(test, mode.margins);
	KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_UNSPECIFIED);
}
/* reflect_x alone implies ROTATE_0 plus the X reflection bit. */
static void drm_test_cmdline_hmirror(struct kunit *test)
{
	struct drm_cmdline_mode mode = { };
	const char *cmdline = "720x480,reflect_x";
	KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
									  &no_connector, &mode));
	KUNIT_EXPECT_TRUE(test, mode.specified);
	KUNIT_EXPECT_EQ(test, mode.xres, 720);
	KUNIT_EXPECT_EQ(test, mode.yres, 480);
	KUNIT_EXPECT_EQ(test, mode.rotation_reflection, (DRM_MODE_ROTATE_0 | DRM_MODE_REFLECT_X));
	KUNIT_EXPECT_FALSE(test, mode.refresh_specified);
	KUNIT_EXPECT_FALSE(test, mode.bpp_specified);
	KUNIT_EXPECT_FALSE(test, mode.rb);
	KUNIT_EXPECT_FALSE(test, mode.cvt);
	KUNIT_EXPECT_FALSE(test, mode.interlace);
	KUNIT_EXPECT_FALSE(test, mode.margins);
	KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_UNSPECIFIED);
}
/* reflect_y alone implies ROTATE_0 plus the Y reflection bit. */
static void drm_test_cmdline_vmirror(struct kunit *test)
{
	struct drm_cmdline_mode mode = { };
	const char *cmdline = "720x480,reflect_y";
	KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
									  &no_connector, &mode));
	KUNIT_EXPECT_TRUE(test, mode.specified);
	KUNIT_EXPECT_EQ(test, mode.xres, 720);
	KUNIT_EXPECT_EQ(test, mode.yres, 480);
	KUNIT_EXPECT_EQ(test, mode.rotation_reflection, (DRM_MODE_ROTATE_0 | DRM_MODE_REFLECT_Y));
	KUNIT_EXPECT_FALSE(test, mode.refresh_specified);
	KUNIT_EXPECT_FALSE(test, mode.bpp_specified);
	KUNIT_EXPECT_FALSE(test, mode.rb);
	KUNIT_EXPECT_FALSE(test, mode.cvt);
	KUNIT_EXPECT_FALSE(test, mode.interlace);
	KUNIT_EXPECT_FALSE(test, mode.margins);
	KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_UNSPECIFIED);
}
/*
 * Individual TV margin options land in mode.tv_margins; note that they
 * do NOT set the legacy 'm' (margins) flag.
 */
static void drm_test_cmdline_margin_options(struct kunit *test)
{
	struct drm_cmdline_mode mode = { };
	const char *cmdline =
		"720x480,margin_right=14,margin_left=24,margin_bottom=36,margin_top=42";
	KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
									  &no_connector, &mode));
	KUNIT_EXPECT_TRUE(test, mode.specified);
	KUNIT_EXPECT_EQ(test, mode.xres, 720);
	KUNIT_EXPECT_EQ(test, mode.yres, 480);
	KUNIT_EXPECT_EQ(test, mode.tv_margins.right, 14);
	KUNIT_EXPECT_EQ(test, mode.tv_margins.left, 24);
	KUNIT_EXPECT_EQ(test, mode.tv_margins.bottom, 36);
	KUNIT_EXPECT_EQ(test, mode.tv_margins.top, 42);
	KUNIT_EXPECT_FALSE(test, mode.refresh_specified);
	KUNIT_EXPECT_FALSE(test, mode.bpp_specified);
	KUNIT_EXPECT_FALSE(test, mode.rb);
	KUNIT_EXPECT_FALSE(test, mode.cvt);
	KUNIT_EXPECT_FALSE(test, mode.interlace);
	KUNIT_EXPECT_FALSE(test, mode.margins);
	KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_UNSPECIFIED);
}
/* Multiple comma-separated options combine (rotation | reflection). */
static void drm_test_cmdline_multiple_options(struct kunit *test)
{
	struct drm_cmdline_mode mode = { };
	const char *cmdline = "720x480,rotate=270,reflect_x";
	KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
									  &no_connector, &mode));
	KUNIT_EXPECT_TRUE(test, mode.specified);
	KUNIT_EXPECT_EQ(test, mode.xres, 720);
	KUNIT_EXPECT_EQ(test, mode.yres, 480);
	KUNIT_EXPECT_EQ(test, mode.rotation_reflection, (DRM_MODE_ROTATE_270 | DRM_MODE_REFLECT_X));
	KUNIT_EXPECT_FALSE(test, mode.refresh_specified);
	KUNIT_EXPECT_FALSE(test, mode.bpp_specified);
	KUNIT_EXPECT_FALSE(test, mode.rb);
	KUNIT_EXPECT_FALSE(test, mode.cvt);
	KUNIT_EXPECT_FALSE(test, mode.interlace);
	KUNIT_EXPECT_FALSE(test, mode.margins);
	KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_UNSPECIFIED);
}
/* A force flag ('e') after the bpp can coexist with an option list. */
static void drm_test_cmdline_bpp_extra_and_option(struct kunit *test)
{
	struct drm_cmdline_mode mode = { };
	const char *cmdline = "720x480-24e,rotate=180";
	KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
									  &no_connector, &mode));
	KUNIT_EXPECT_TRUE(test, mode.specified);
	KUNIT_EXPECT_EQ(test, mode.xres, 720);
	KUNIT_EXPECT_EQ(test, mode.yres, 480);
	KUNIT_EXPECT_EQ(test, mode.rotation_reflection, DRM_MODE_ROTATE_180);
	KUNIT_EXPECT_FALSE(test, mode.refresh_specified);
	KUNIT_EXPECT_TRUE(test, mode.bpp_specified);
	KUNIT_EXPECT_EQ(test, mode.bpp, 24);
	KUNIT_EXPECT_FALSE(test, mode.rb);
	KUNIT_EXPECT_FALSE(test, mode.cvt);
	KUNIT_EXPECT_FALSE(test, mode.interlace);
	KUNIT_EXPECT_FALSE(test, mode.margins);
	KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_ON);
}
/* A force flag ('e') directly after the resolution plus an option. */
static void drm_test_cmdline_extra_and_option(struct kunit *test)
{
	struct drm_cmdline_mode mode = { };
	const char *cmdline = "720x480e,rotate=180";
	KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
									  &no_connector, &mode));
	KUNIT_EXPECT_TRUE(test, mode.specified);
	KUNIT_EXPECT_EQ(test, mode.xres, 720);
	KUNIT_EXPECT_EQ(test, mode.yres, 480);
	KUNIT_EXPECT_EQ(test, mode.rotation_reflection, DRM_MODE_ROTATE_180);
	KUNIT_EXPECT_FALSE(test, mode.refresh_specified);
	KUNIT_EXPECT_FALSE(test, mode.bpp_specified);
	KUNIT_EXPECT_FALSE(test, mode.rb);
	KUNIT_EXPECT_FALSE(test, mode.cvt);
	KUNIT_EXPECT_FALSE(test, mode.interlace);
	KUNIT_EXPECT_FALSE(test, mode.margins);
	KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_ON);
}
/* Options with no mode at all are accepted; mode stays unspecified. */
static void drm_test_cmdline_freestanding_options(struct kunit *test)
{
	struct drm_cmdline_mode mode = { };
	const char *cmdline = "margin_right=14,margin_left=24,margin_bottom=36,margin_top=42";
	KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
									  &no_connector, &mode));
	KUNIT_EXPECT_FALSE(test, mode.specified);
	KUNIT_EXPECT_FALSE(test, mode.refresh_specified);
	KUNIT_EXPECT_FALSE(test, mode.bpp_specified);
	KUNIT_EXPECT_EQ(test, mode.tv_margins.right, 14);
	KUNIT_EXPECT_EQ(test, mode.tv_margins.left, 24);
	KUNIT_EXPECT_EQ(test, mode.tv_margins.bottom, 36);
	KUNIT_EXPECT_EQ(test, mode.tv_margins.top, 42);
	KUNIT_EXPECT_FALSE(test, mode.rb);
	KUNIT_EXPECT_FALSE(test, mode.cvt);
	KUNIT_EXPECT_FALSE(test, mode.interlace);
	KUNIT_EXPECT_FALSE(test, mode.margins);
	KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_UNSPECIFIED);
}
/* A bare force flag followed by options: force applies, no mode set. */
static void drm_test_cmdline_freestanding_force_e_and_options(struct kunit *test)
{
	struct drm_cmdline_mode mode = { };
	const char *cmdline = "e,margin_right=14,margin_left=24,margin_bottom=36,margin_top=42";
	KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
									  &no_connector, &mode));
	KUNIT_EXPECT_FALSE(test, mode.specified);
	KUNIT_EXPECT_FALSE(test, mode.refresh_specified);
	KUNIT_EXPECT_FALSE(test, mode.bpp_specified);
	KUNIT_EXPECT_EQ(test, mode.tv_margins.right, 14);
	KUNIT_EXPECT_EQ(test, mode.tv_margins.left, 24);
	KUNIT_EXPECT_EQ(test, mode.tv_margins.bottom, 36);
	KUNIT_EXPECT_EQ(test, mode.tv_margins.top, 42);
	KUNIT_EXPECT_FALSE(test, mode.rb);
	KUNIT_EXPECT_FALSE(test, mode.cvt);
	KUNIT_EXPECT_FALSE(test, mode.interlace);
	KUNIT_EXPECT_FALSE(test, mode.margins);
	KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_ON);
}
/* panel_orientation=upside_down sets the panel orientation only. */
static void drm_test_cmdline_panel_orientation(struct kunit *test)
{
	struct drm_cmdline_mode mode = { };
	const char *cmdline = "panel_orientation=upside_down";
	KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
									  &no_connector, &mode));
	KUNIT_EXPECT_FALSE(test, mode.specified);
	KUNIT_EXPECT_FALSE(test, mode.refresh_specified);
	KUNIT_EXPECT_FALSE(test, mode.bpp_specified);
	KUNIT_EXPECT_EQ(test, mode.panel_orientation, DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP);
	KUNIT_EXPECT_FALSE(test, mode.rb);
	KUNIT_EXPECT_FALSE(test, mode.cvt);
	KUNIT_EXPECT_FALSE(test, mode.interlace);
	KUNIT_EXPECT_FALSE(test, mode.margins);
	KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_UNSPECIFIED);
}
/* One malformed command line that the parser must reject. */
struct drm_cmdline_invalid_test {
	const char *name;	/* KUnit parameter description */
	const char *cmdline;	/* the invalid mode string */
};
/* Parameterized test: every table entry's cmdline must fail to parse. */
static void drm_test_cmdline_invalid(struct kunit *test)
{
	const struct drm_cmdline_invalid_test *params = test->param_value;
	struct drm_cmdline_mode mode = { };
	KUNIT_EXPECT_FALSE(test, drm_mode_parse_command_line_for_connector(params->cmdline,
									   &no_connector,
									   &mode));
}
/*
 * Catalogue of malformed mode strings: bare flags, missing or
 * non-numeric fields, flags in the wrong position, duplicate or
 * truncated options, and bad tv_mode values.
 */
static const struct drm_cmdline_invalid_test drm_cmdline_invalid_tests[] = {
	{
		.name = "margin_only",
		.cmdline = "m",
	},
	{
		.name = "interlace_only",
		.cmdline = "i",
	},
	{
		.name = "res_missing_x",
		.cmdline = "x480",
	},
	{
		.name = "res_missing_y",
		.cmdline = "1024x",
	},
	{
		.name = "res_bad_y",
		.cmdline = "1024xtest",
	},
	{
		.name = "res_missing_y_bpp",
		.cmdline = "1024x-24",
	},
	{
		.name = "res_bad_bpp",
		.cmdline = "720x480-test",
	},
	{
		.name = "res_bad_refresh",
		.cmdline = "720x480@refresh",
	},
	{
		.name = "res_bpp_refresh_force_on_off",
		.cmdline = "720x480-24@60de",
	},
	{
		.name = "res_invalid_mode",
		.cmdline = "720x480f",
	},
	{
		.name = "res_bpp_wrong_place_mode",
		.cmdline = "720x480e-24",
	},
	{
		.name = "name_bpp_refresh",
		.cmdline = "NTSC-24@60",
	},
	{
		.name = "name_refresh",
		.cmdline = "NTSC@60",
	},
	{
		.name = "name_refresh_wrong_mode",
		.cmdline = "NTSC@60m",
	},
	{
		.name = "name_refresh_invalid_mode",
		.cmdline = "NTSC@60f",
	},
	{
		.name = "rotate_multiple",
		.cmdline = "720x480,rotate=0,rotate=90",
	},
	{
		.name = "rotate_invalid_val",
		.cmdline = "720x480,rotate=42",
	},
	{
		.name = "rotate_truncated",
		.cmdline = "720x480,rotate=",
	},
	{
		.name = "invalid_option",
		.cmdline = "720x480,test=42",
	},
	{
		.name = "invalid_tv_option",
		.cmdline = "720x480i,tv_mode=invalid",
	},
	{
		.name = "truncated_tv_option",
		.cmdline = "720x480i,tv_mode=NTS",
	},
};
static void drm_cmdline_invalid_desc(const struct drm_cmdline_invalid_test *t,
char *desc)
{
sprintf(desc, "%s", t->name);
}
KUNIT_ARRAY_PARAM(drm_cmdline_invalid, drm_cmdline_invalid_tests, drm_cmdline_invalid_desc);
/* One tv_mode=... option case and the analog mode it should match. */
struct drm_cmdline_tv_option_test {
	const char *name;	/* KUnit parameter description */
	const char *cmdline;	/* mode string including tv_mode option */
	/* generator for the reference analog display mode */
	struct drm_display_mode *(*mode_fn)(struct drm_device *dev);
	enum drm_connector_tv_mode tv_mode;	/* expected parsed tv_mode */
};
/*
 * Parameterized test: parse each tv_mode command line and compare the
 * result against the reference analog mode generated by mode_fn.
 */
static void drm_test_cmdline_tv_options(struct kunit *test)
{
	const struct drm_cmdline_tv_option_test *params = test->param_value;
	const struct drm_display_mode *expected_mode = params->mode_fn(NULL);
	struct drm_cmdline_mode mode = { };
	KUNIT_EXPECT_TRUE(test, drm_mode_parse_command_line_for_connector(params->cmdline,
									  &no_connector, &mode));
	KUNIT_EXPECT_TRUE(test, mode.specified);
	KUNIT_EXPECT_EQ(test, mode.xres, expected_mode->hdisplay);
	KUNIT_EXPECT_EQ(test, mode.yres, expected_mode->vdisplay);
	KUNIT_EXPECT_EQ(test, mode.tv_mode, params->tv_mode);
	KUNIT_EXPECT_FALSE(test, mode.refresh_specified);
	KUNIT_EXPECT_FALSE(test, mode.bpp_specified);
	KUNIT_EXPECT_FALSE(test, mode.rb);
	KUNIT_EXPECT_FALSE(test, mode.cvt);
	KUNIT_EXPECT_EQ(test, mode.interlace, !!(expected_mode->flags & DRM_MODE_FLAG_INTERLACE));
	KUNIT_EXPECT_FALSE(test, mode.margins);
	KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_UNSPECIFIED);
}
/* Build one tv_mode test entry from the DRM_MODE_TV_MODE_* suffix. */
#define TV_OPT_TEST(_opt, _cmdline, _mode_fn)		\
	{						\
		.name = #_opt,				\
		.cmdline = _cmdline,			\
		.mode_fn = _mode_fn,			\
		.tv_mode = DRM_MODE_TV_MODE_ ## _opt,	\
	}
/* All supported tv_mode values with their reference analog timings. */
static const struct drm_cmdline_tv_option_test drm_cmdline_tv_option_tests[] = {
	TV_OPT_TEST(NTSC, "720x480i,tv_mode=NTSC", drm_mode_analog_ntsc_480i),
	TV_OPT_TEST(NTSC_443, "720x480i,tv_mode=NTSC-443", drm_mode_analog_ntsc_480i),
	TV_OPT_TEST(NTSC_J, "720x480i,tv_mode=NTSC-J", drm_mode_analog_ntsc_480i),
	TV_OPT_TEST(PAL, "720x576i,tv_mode=PAL", drm_mode_analog_pal_576i),
	TV_OPT_TEST(PAL_M, "720x480i,tv_mode=PAL-M", drm_mode_analog_ntsc_480i),
	TV_OPT_TEST(PAL_N, "720x576i,tv_mode=PAL-N", drm_mode_analog_pal_576i),
	TV_OPT_TEST(SECAM, "720x576i,tv_mode=SECAM", drm_mode_analog_pal_576i),
};
static void drm_cmdline_tv_option_desc(const struct drm_cmdline_tv_option_test *t,
char *desc)
{
sprintf(desc, "%s", t->name);
}
KUNIT_ARRAY_PARAM(drm_cmdline_tv_option,
drm_cmdline_tv_option_tests,
drm_cmdline_tv_option_desc);
/* Registration table for every cmdline-parser test case above. */
static struct kunit_case drm_cmdline_parser_tests[] = {
	KUNIT_CASE(drm_test_cmdline_force_d_only),
	KUNIT_CASE(drm_test_cmdline_force_D_only_dvi),
	KUNIT_CASE(drm_test_cmdline_force_D_only_hdmi),
	KUNIT_CASE(drm_test_cmdline_force_D_only_not_digital),
	KUNIT_CASE(drm_test_cmdline_force_e_only),
	KUNIT_CASE(drm_test_cmdline_res),
	KUNIT_CASE(drm_test_cmdline_res_vesa),
	KUNIT_CASE(drm_test_cmdline_res_vesa_rblank),
	KUNIT_CASE(drm_test_cmdline_res_rblank),
	KUNIT_CASE(drm_test_cmdline_res_bpp),
	KUNIT_CASE(drm_test_cmdline_res_refresh),
	KUNIT_CASE(drm_test_cmdline_res_bpp_refresh),
	KUNIT_CASE(drm_test_cmdline_res_bpp_refresh_interlaced),
	KUNIT_CASE(drm_test_cmdline_res_bpp_refresh_margins),
	KUNIT_CASE(drm_test_cmdline_res_bpp_refresh_force_off),
	KUNIT_CASE(drm_test_cmdline_res_bpp_refresh_force_on),
	KUNIT_CASE(drm_test_cmdline_res_bpp_refresh_force_on_analog),
	KUNIT_CASE(drm_test_cmdline_res_bpp_refresh_force_on_digital),
	KUNIT_CASE(drm_test_cmdline_res_bpp_refresh_interlaced_margins_force_on),
	KUNIT_CASE(drm_test_cmdline_res_margins_force_on),
	KUNIT_CASE(drm_test_cmdline_res_vesa_margins),
	KUNIT_CASE(drm_test_cmdline_name),
	KUNIT_CASE(drm_test_cmdline_name_bpp),
	KUNIT_CASE(drm_test_cmdline_name_option),
	KUNIT_CASE(drm_test_cmdline_name_bpp_option),
	KUNIT_CASE(drm_test_cmdline_rotate_0),
	KUNIT_CASE(drm_test_cmdline_rotate_90),
	KUNIT_CASE(drm_test_cmdline_rotate_180),
	KUNIT_CASE(drm_test_cmdline_rotate_270),
	KUNIT_CASE(drm_test_cmdline_hmirror),
	KUNIT_CASE(drm_test_cmdline_vmirror),
	KUNIT_CASE(drm_test_cmdline_margin_options),
	KUNIT_CASE(drm_test_cmdline_multiple_options),
	KUNIT_CASE(drm_test_cmdline_bpp_extra_and_option),
	KUNIT_CASE(drm_test_cmdline_extra_and_option),
	KUNIT_CASE(drm_test_cmdline_freestanding_options),
	KUNIT_CASE(drm_test_cmdline_freestanding_force_e_and_options),
	KUNIT_CASE(drm_test_cmdline_panel_orientation),
	KUNIT_CASE_PARAM(drm_test_cmdline_invalid, drm_cmdline_invalid_gen_params),
	KUNIT_CASE_PARAM(drm_test_cmdline_tv_options, drm_cmdline_tv_option_gen_params),
	{}
};
/* KUnit suite definition and module registration. */
static struct kunit_suite drm_cmdline_parser_test_suite = {
	.name = "drm_cmdline_parser",
	.test_cases = drm_cmdline_parser_tests
};
kunit_test_suite(drm_cmdline_parser_test_suite);
MODULE_AUTHOR("Maxime Ripard <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/gpu/drm/tests/drm_cmdline_parser_test.c |
// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
/*
* Copyright (c) 2020 Rockchip Electronics Co., Ltd.
* Author: Andy Yan <[email protected]>
*/
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/delay.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/media-bus-format.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_graph.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/swab.h>
#include <drm/drm.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_blend.h>
#include <drm/drm_crtc.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_flip_work.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
#include <uapi/linux/videodev2.h>
#include <dt-bindings/soc/rockchip,vop2.h>
#include "rockchip_drm_drv.h"
#include "rockchip_drm_gem.h"
#include "rockchip_drm_fb.h"
#include "rockchip_drm_vop2.h"
#include "rockchip_rgb.h"
/*
* VOP2 architecture
*
+----------+ +-------------+ +-----------+
| Cluster | | Sel 1 from 6| | 1 from 3 |
| window0 | | Layer0 | | RGB |
+----------+ +-------------+ +---------------+ +-------------+ +-----------+
+----------+ +-------------+ |N from 6 layers| | |
| Cluster | | Sel 1 from 6| | Overlay0 +--->| Video Port0 | +-----------+
| window1 | | Layer1 | | | | | | 1 from 3 |
+----------+ +-------------+ +---------------+ +-------------+ | LVDS |
+----------+ +-------------+ +-----------+
| Esmart | | Sel 1 from 6|
| window0 | | Layer2 | +---------------+ +-------------+ +-----------+
+----------+ +-------------+ |N from 6 Layers| | | +--> | 1 from 3 |
+----------+ +-------------+ --------> | Overlay1 +--->| Video Port1 | | MIPI |
| Esmart | | Sel 1 from 6| --------> | | | | +-----------+
| Window1 | | Layer3 | +---------------+ +-------------+
+----------+ +-------------+ +-----------+
+----------+ +-------------+ | 1 from 3 |
| Smart | | Sel 1 from 6| +---------------+ +-------------+ | HDMI |
| Window0 | | Layer4 | |N from 6 Layers| | | +-----------+
+----------+ +-------------+ | Overlay2 +--->| Video Port2 |
+----------+ +-------------+ | | | | +-----------+
| Smart | | Sel 1 from 6| +---------------+ +-------------+ | 1 from 3 |
| Window1 | | Layer5 | | eDP |
+----------+ +-------------+ +-----------+
*
*/
/*
 * Hardware pixel format codes programmed into the VOP2 window format
 * field. Values are dictated by the register layout, hence the gaps.
 */
enum vop2_data_format {
	VOP2_FMT_ARGB8888 = 0,
	VOP2_FMT_RGB888,
	VOP2_FMT_RGB565,
	VOP2_FMT_XRGB101010,
	VOP2_FMT_YUV420SP,
	VOP2_FMT_YUV422SP,
	VOP2_FMT_YUV444SP,
	VOP2_FMT_YUYV422 = 8,
	VOP2_FMT_YUYV420,
	VOP2_FMT_VYUY422,
	VOP2_FMT_VYUY420,
	VOP2_FMT_YUV420SP_TILE_8x4 = 0x10,
	VOP2_FMT_YUV420SP_TILE_16x2,
	VOP2_FMT_YUV422SP_TILE_8x4,
	VOP2_FMT_YUV422SP_TILE_16x2,
	VOP2_FMT_YUV420SP_10,
	VOP2_FMT_YUV422SP_10,
	VOP2_FMT_YUV444SP_10,
};
/*
 * Hardware format codes for the AFBC (ARM Frame Buffer Compression)
 * decoder. VOP2_AFBC_FMT_INVALID marks formats the decoder can't take.
 */
enum vop2_afbc_format {
	VOP2_AFBC_FMT_RGB565,
	VOP2_AFBC_FMT_ARGB2101010 = 2,
	VOP2_AFBC_FMT_YUV420_10BIT,
	VOP2_AFBC_FMT_RGB888,
	VOP2_AFBC_FMT_ARGB8888,
	VOP2_AFBC_FMT_YUV420 = 9,
	VOP2_AFBC_FMT_YUV422 = 0xb,
	VOP2_AFBC_FMT_YUV422_10BIT = 0xe,
	VOP2_AFBC_FMT_INVALID = -1,
};
/*
 * Bit layout of one alpha-blend control register; accessed either as a
 * raw 32-bit value or through the bitfields.
 */
union vop2_alpha_ctrl {
	u32 val;
	struct {
		/* [0:1] */
		u32 color_mode:1;
		u32 alpha_mode:1;
		/* [2:3] */
		u32 blend_mode:2;
		u32 alpha_cal_mode:1;
		/* [5:7] */
		u32 factor_mode:3;
		/* [8:9] */
		u32 alpha_en:1;
		u32 src_dst_swap:1;
		u32 reserved:6;
		/* [16:23] */
		u32 glb_alpha:8;
	} bits;
};
/* The four alpha-blend control registers for one blend stage. */
struct vop2_alpha {
	union vop2_alpha_ctrl src_color_ctrl;
	union vop2_alpha_ctrl dst_color_ctrl;
	union vop2_alpha_ctrl src_alpha_ctrl;
	union vop2_alpha_ctrl dst_alpha_ctrl;
};
/* Inputs used to derive a struct vop2_alpha register configuration. */
struct vop2_alpha_config {
	bool src_premulti_en;
	bool dst_premulti_en;
	bool src_pixel_alpha_en;
	bool dst_pixel_alpha_en;
	u16 src_glb_alpha_value;
	u16 dst_glb_alpha_value;
};
/* One hardware window, exposed to DRM as a plane. */
struct vop2_win {
	struct vop2 *vop2;		/* owning controller */
	struct drm_plane base;		/* embedded DRM plane */
	const struct vop2_win_data *data;	/* per-SoC window description */
	struct regmap_field *reg[VOP2_WIN_MAX_REG];	/* per-register accessors */
	/**
	 * @win_id: graphic window id, a cluster may be split into two
	 * graphics windows.
	 */
	u8 win_id;
	u8 delay;			/* pipeline delay of this window */
	u32 offset;			/* register block offset */
	enum drm_plane_type type;	/* primary/overlay/cursor */
};
/* One video port (output pipeline), exposed to DRM as a CRTC. */
struct vop2_video_port {
	struct drm_crtc crtc;		/* embedded DRM CRTC */
	struct vop2 *vop2;		/* owning controller */
	struct clk *dclk;		/* pixel (dot) clock */
	unsigned int id;		/* hardware port index */
	const struct vop2_video_port_regs *regs;	/* per-port registers */
	const struct vop2_video_port_data *data;	/* per-SoC port description */
	/* signalled when the port's display path has been held/stopped */
	struct completion dsp_hold_completion;
	/**
	 * @win_mask: Bitmask of windows attached to the video port;
	 */
	u32 win_mask;
	struct vop2_win *primary_plane;	/* window used as primary plane */
	struct drm_pending_vblank_event *event;	/* pending pageflip event */
	unsigned int nlayers;		/* number of layers assigned to port */
};
/* Top-level driver state for one VOP2 display controller. */
struct vop2 {
	struct device *dev;
	struct drm_device *drm;
	struct vop2_video_port vps[ROCKCHIP_MAX_CRTC];
	const struct vop2_data *data;	/* per-SoC controller description */
	/*
	 * Number of windows that are registered as plane, may be less than the
	 * total number of hardware windows.
	 */
	u32 registered_num_wins;
	void __iomem *regs;		/* mapped MMIO register base */
	struct regmap *map;		/* regmap over @regs */
	struct regmap *grf;		/* general register file syscon */
	/* physical map length of vop2 register */
	u32 len;
	void __iomem *lut_regs;		/* gamma LUT registers, if present */
	/* protects crtc enable/disable */
	struct mutex vop2_lock;
	int irq;
	/*
	 * Some global resources are shared between all video ports(crtcs), so
	 * we need a ref counter here.
	 */
	unsigned int enable_count;
	struct clk *hclk;		/* AHB bus clock */
	struct clk *aclk;		/* AXI bus clock */
	/* optional internal rgb encoder */
	struct rockchip_rgb *rgb;
	/* must be put at the end of the struct */
	struct vop2_win win[];
};
/* Convert an embedded drm_crtc back to its containing video port. */
static struct vop2_video_port *to_vop2_video_port(struct drm_crtc *crtc)
{
	return container_of(crtc, struct vop2_video_port, crtc);
}
/* Convert an embedded drm_plane back to its containing window. */
static struct vop2_win *to_vop2_win(struct drm_plane *p)
{
	return container_of(p, struct vop2_win, base);
}
/* Take the lock protecting CRTC enable/disable. */
static void vop2_lock(struct vop2 *vop2)
{
	mutex_lock(&vop2->vop2_lock);
}
/* Release the CRTC enable/disable lock. */
static void vop2_unlock(struct vop2 *vop2)
{
	mutex_unlock(&vop2->vop2_lock);
}
/* Write a 32-bit value to a controller register via the regmap. */
static void vop2_writel(struct vop2 *vop2, u32 offset, u32 v)
{
	regmap_write(vop2->map, offset, v);
}
/* Write a register relative to a video port's register block. */
static void vop2_vp_write(struct vop2_video_port *vp, u32 offset, u32 v)
{
	regmap_write(vp->vop2->map, vp->data->offset + offset, v);
}
/* Read a 32-bit controller register via the regmap. */
static u32 vop2_readl(struct vop2 *vop2, u32 offset)
{
	u32 val;
	regmap_read(vop2->map, offset, &val);
	return val;
}
/* Write one of a window's register fields. */
static void vop2_win_write(const struct vop2_win *win, unsigned int reg, u32 v)
{
	regmap_field_write(win->reg[reg], v);
}
/* True if this window belongs to a cluster (vs. esmart/smart). */
static bool vop2_cluster_window(const struct vop2_win *win)
{
	return win->data->feature & WIN_FEATURE_CLUSTER;
}
/* Latch pending register writes for this video port at next vblank. */
static void vop2_cfg_done(struct vop2_video_port *vp)
{
	struct vop2 *vop2 = vp->vop2;
	regmap_set_bits(vop2->map, RK3568_REG_CFG_DONE,
			BIT(vp->id) | RK3568_REG_CFG_DONE__GLB_CFG_DONE_EN);
}
/* Disable a window; cluster windows need the extra cluster enable bit cleared. */
static void vop2_win_disable(struct vop2_win *win)
{
	vop2_win_write(win, VOP2_WIN_ENABLE, 0);
	if (vop2_cluster_window(win))
		vop2_win_write(win, VOP2_WIN_CLUSTER_ENABLE, 0);
}
/*
 * Map a DRM fourcc to the VOP2 hardware pixel format code.
 *
 * NOTE(review): returns -EINVAL through an enum return type for
 * unsupported formats; callers apparently check for a negative value —
 * confirm against call sites before changing.
 */
static enum vop2_data_format vop2_convert_format(u32 format)
{
	switch (format) {
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
		return VOP2_FMT_ARGB8888;
	case DRM_FORMAT_RGB888:
	case DRM_FORMAT_BGR888:
		return VOP2_FMT_RGB888;
	case DRM_FORMAT_RGB565:
	case DRM_FORMAT_BGR565:
		return VOP2_FMT_RGB565;
	case DRM_FORMAT_NV12:
		return VOP2_FMT_YUV420SP;
	case DRM_FORMAT_NV16:
		return VOP2_FMT_YUV422SP;
	case DRM_FORMAT_NV24:
		return VOP2_FMT_YUV444SP;
	case DRM_FORMAT_YUYV:
	case DRM_FORMAT_YVYU:
		return VOP2_FMT_VYUY422;
	case DRM_FORMAT_VYUY:
	case DRM_FORMAT_UYVY:
		return VOP2_FMT_YUYV422;
	default:
		DRM_ERROR("unsupported format[%08x]\n", format);
		return -EINVAL;
	}
}
/*
 * Map a DRM fourcc to the VOP2 AFBC hardware format code.
 *
 * Returns the matching enum vop2_afbc_format, or VOP2_AFBC_FMT_INVALID
 * when the AFBC decoder cannot handle the format. (The unreachable
 * duplicate return after the switch has been removed: every path
 * through the switch, including default, already returns.)
 */
static enum vop2_afbc_format vop2_convert_afbc_format(u32 format)
{
	switch (format) {
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
		return VOP2_AFBC_FMT_ARGB8888;
	case DRM_FORMAT_RGB888:
	case DRM_FORMAT_BGR888:
		return VOP2_AFBC_FMT_RGB888;
	case DRM_FORMAT_RGB565:
	case DRM_FORMAT_BGR565:
		return VOP2_AFBC_FMT_RGB565;
	case DRM_FORMAT_NV12:
		return VOP2_AFBC_FMT_YUV420;
	case DRM_FORMAT_NV16:
		return VOP2_AFBC_FMT_YUV422;
	default:
		return VOP2_AFBC_FMT_INVALID;
	}
}
/*
 * Whether the window hardware must swap the red and blue channels for
 * @format: the BGR-ordered DRM formats need the swap enabled.
 */
static bool vop2_win_rb_swap(u32 format)
{
	return format == DRM_FORMAT_XBGR8888 ||
	       format == DRM_FORMAT_ABGR8888 ||
	       format == DRM_FORMAT_BGR888 ||
	       format == DRM_FORMAT_BGR565;
}
/* Whether the AFBC decoder needs a red/blue swap for @format (NV24 only). */
static bool vop2_afbc_rb_swap(u32 format)
{
	return format == DRM_FORMAT_NV24;
}
/* Whether the AFBC decoder needs a U/V swap for @format. */
static bool vop2_afbc_uv_swap(u32 format)
{
	return format == DRM_FORMAT_NV12 ||
	       format == DRM_FORMAT_NV16;
}
/* Whether the window hardware needs a U/V swap for the NV* formats. */
static bool vop2_win_uv_swap(u32 format)
{
	return format == DRM_FORMAT_NV12 ||
	       format == DRM_FORMAT_NV16 ||
	       format == DRM_FORMAT_NV24;
}
/* Whether dither-up should be enabled (the 16-bit 565 formats). */
static bool vop2_win_dither_up(u32 format)
{
	return format == DRM_FORMAT_BGR565 ||
	       format == DRM_FORMAT_RGB565;
}
/*
 * Whether the output path needs a U/V swap for the given media bus
 * format and VOP output mode.
 */
static bool vop2_output_uv_swap(u32 bus_format, u32 output_mode)
{
	/*
	 * FIXME:
	 *
	 * There is no media type for YUV444 output,
	 * so when out_mode is AAAA or P888, assume output is YUV444 on
	 * yuv format.
	 *
	 * From H/W testing, YUV444 mode need a rb swap.
	 */
	if (bus_format == MEDIA_BUS_FMT_YVYU8_1X16 ||
	    bus_format == MEDIA_BUS_FMT_VYUY8_1X16 ||
	    bus_format == MEDIA_BUS_FMT_YVYU8_2X8 ||
	    bus_format == MEDIA_BUS_FMT_VYUY8_2X8 ||
	    ((bus_format == MEDIA_BUS_FMT_YUV8_1X24 ||
	      bus_format == MEDIA_BUS_FMT_YUV10_1X30) &&
	     (output_mode == ROCKCHIP_OUT_MODE_AAAA ||
	      output_mode == ROCKCHIP_OUT_MODE_P888)))
		return true;
	else
		return false;
}
/* True if the media bus format carries YUV (rather than RGB) data. */
static bool is_yuv_output(u32 bus_format)
{
	switch (bus_format) {
	case MEDIA_BUS_FMT_YUV8_1X24:
	case MEDIA_BUS_FMT_YUV10_1X30:
	case MEDIA_BUS_FMT_UYYVYY8_0_5X24:
	case MEDIA_BUS_FMT_UYYVYY10_0_5X30:
	case MEDIA_BUS_FMT_YUYV8_2X8:
	case MEDIA_BUS_FMT_YVYU8_2X8:
	case MEDIA_BUS_FMT_UYVY8_2X8:
	case MEDIA_BUS_FMT_VYUY8_2X8:
	case MEDIA_BUS_FMT_YUYV8_1X16:
	case MEDIA_BUS_FMT_YVYU8_1X16:
	case MEDIA_BUS_FMT_UYVY8_1X16:
	case MEDIA_BUS_FMT_VYUY8_1X16:
		return true;
	default:
		return false;
	}
}
/*
 * True if @modifier is one of the AFBC modifiers this plane advertised.
 * Linear buffers are by definition not AFBC.
 */
static bool rockchip_afbc(struct drm_plane *plane, u64 modifier)
{
	int i;
	if (modifier == DRM_FORMAT_MOD_LINEAR)
		return false;
	for (i = 0 ; i < plane->modifier_count; i++)
		if (plane->modifiers[i] == modifier)
			return true;
	return false;
}
/*
 * drm_plane_funcs.format_mod_supported: accept linear always, and AFBC
 * modifiers only when both the plane advertises the modifier and the
 * format has an AFBC hardware encoding.
 */
static bool rockchip_vop2_mod_supported(struct drm_plane *plane, u32 format,
					u64 modifier)
{
	struct vop2_win *win = to_vop2_win(plane);
	struct vop2 *vop2 = win->vop2;
	if (modifier == DRM_FORMAT_MOD_INVALID)
		return false;
	if (modifier == DRM_FORMAT_MOD_LINEAR)
		return true;
	if (!rockchip_afbc(plane, modifier)) {
		drm_err(vop2->drm, "Unsupported format modifier 0x%llx\n",
			modifier);
		return false;
	}
	/* VOP2_AFBC_FMT_INVALID is -1, so this rejects unsupported formats */
	return vop2_convert_afbc_format(format) >= 0;
}
/*
 * Compute the AFBC transform-offset register value for a plane state.
 *
 * The crop offsets (tx, ty) must be expressed in the rotated/reflected
 * coordinate space. The source height is padded up to the 16- and
 * 64-line alignment the AFBC decoder requires, and the padding is
 * cropped from the bottom (bottom_crop_line_num).
 */
static u32 vop2_afbc_transform_offset(struct drm_plane_state *pstate,
				      bool afbc_half_block_en)
{
	struct drm_rect *src = &pstate->src;
	struct drm_framebuffer *fb = pstate->fb;
	u32 bpp = fb->format->cpp[0] * 8;
	u32 vir_width = (fb->pitches[0] << 3) / bpp;
	u32 width = drm_rect_width(src) >> 16;
	u32 height = drm_rect_height(src) >> 16;
	u32 act_xoffset = src->x1 >> 16;
	u32 act_yoffset = src->y1 >> 16;
	u32 align16_crop = 0;
	u32 align64_crop = 0;
	u32 height_tmp;
	u8 tx, ty;
	u8 bottom_crop_line_num = 0;
	/* 16 pixel align */
	if (height & 0xf)
		align16_crop = 16 - (height & 0xf);
	height_tmp = height + align16_crop;
	/* 64 pixel align */
	if (height_tmp & 0x3f)
		align64_crop = 64 - (height_tmp & 0x3f);
	bottom_crop_line_num = align16_crop + align64_crop;
	switch (pstate->rotation &
		(DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y |
		 DRM_MODE_ROTATE_90 | DRM_MODE_ROTATE_270)) {
	case DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y:
		tx = 16 - ((act_xoffset + width) & 0xf);
		ty = bottom_crop_line_num - act_yoffset;
		break;
	case DRM_MODE_REFLECT_X | DRM_MODE_ROTATE_90:
		tx = bottom_crop_line_num - act_yoffset;
		ty = vir_width - width - act_xoffset;
		break;
	case DRM_MODE_REFLECT_X | DRM_MODE_ROTATE_270:
		tx = act_yoffset;
		ty = act_xoffset;
		break;
	case DRM_MODE_REFLECT_X:
		tx = 16 - ((act_xoffset + width) & 0xf);
		ty = act_yoffset;
		break;
	case DRM_MODE_REFLECT_Y:
		tx = act_xoffset;
		ty = bottom_crop_line_num - act_yoffset;
		break;
	case DRM_MODE_ROTATE_90:
		tx = bottom_crop_line_num - act_yoffset;
		ty = act_xoffset;
		break;
	case DRM_MODE_ROTATE_270:
		tx = act_yoffset;
		ty = vir_width - width - act_xoffset;
		break;
	case 0:
		tx = act_xoffset;
		ty = act_yoffset;
		break;
	default:
		/*
		 * Combinations not listed above (e.g. REFLECT_Y with a
		 * 90/270 rotation) previously left tx/ty uninitialized
		 * (undefined behavior). Fall back to no transform offset.
		 */
		tx = 0;
		ty = 0;
		break;
	}
	if (afbc_half_block_en)
		ty &= 0x7f;
#define TRANSFORM_XOFFSET GENMASK(7, 0)
#define TRANSFORM_YOFFSET GENMASK(23, 16)
	return FIELD_PREP(TRANSFORM_XOFFSET, tx) |
		FIELD_PREP(TRANSFORM_YOFFSET, ty);
}
/*
 * A Cluster window has a 2048 x 16 line buffer, which can work at
 * 2048 x 16 (Full) or 4096 x 8 (Half) mode.
 * For the Cluster_lb_mode register:
 * 0: full mode, for plane input width range up to 2048
 * 1: half mode, for cluster work at 2 * 2048 plane mode
 * 2: half mode, for rotate_90/270 mode
 *
 */
/* Pick the cluster line-buffer mode: 2 when rotated 90/270, else 0. */
static int vop2_get_cluster_lb_mode(struct vop2_win *win,
				    struct drm_plane_state *pstate)
{
	if ((pstate->rotation & DRM_MODE_ROTATE_270) ||
	    (pstate->rotation & DRM_MODE_ROTATE_90))
		return 2;
	else
		return 0;
}
/*
 * Compute the hardware scale factor for scaling @src pixels to @dst.
 * Returns 0 for identity (or degenerate source), U16_MAX saturated for
 * a degenerate destination or overflow.
 */
static u16 vop2_scale_factor(u32 src, u32 dst)
{
	u32 factor;
	int frac_shift;

	/* No scaling needed. */
	if (src == dst)
		return 0;
	/* Degenerate destination: saturate the factor. */
	if (dst < 2)
		return U16_MAX;
	/* Degenerate source: no factor. */
	if (src < 2)
		return 0;

	/* Shift differs between scale-down (12) and scale-up (16). */
	frac_shift = (src > dst) ? 12 : 16;

	factor = DIV_ROUND_UP((src - 1) << frac_shift, dst - 1) - 1;

	return (factor > U16_MAX) ? U16_MAX : factor;
}
/*
 * Program the horizontal/vertical scalers of @win for a
 * src_w x src_h -> dst_w x dst_h conversion of @pixel_format data.
 * Vertical downscales of >= 2x (or >= 4x) engage the GT2/GT4
 * pre-scalers, halving/quartering the effective source height before
 * the factor is computed. YUV formats get a second pass for the
 * chroma planes, using the format's subsampling factors.
 */
static void vop2_setup_scale(struct vop2 *vop2, const struct vop2_win *win,
			     u32 src_w, u32 src_h, u32 dst_w,
			     u32 dst_h, u32 pixel_format)
{
	const struct drm_format_info *info;
	u16 hor_scl_mode, ver_scl_mode;
	u16 hscl_filter_mode, vscl_filter_mode;
	u8 gt2 = 0;
	u8 gt4 = 0;
	u32 val;

	info = drm_format_info(pixel_format);

	/* Steep vertical downscales go through the GT4/GT2 pre-scaler. */
	if (src_h >= (4 * dst_h)) {
		gt4 = 1;
		src_h >>= 2;
	} else if (src_h >= (2 * dst_h)) {
		gt2 = 1;
		src_h >>= 1;
	}

	hor_scl_mode = scl_get_scl_mode(src_w, dst_w);
	ver_scl_mode = scl_get_scl_mode(src_h, dst_h);

	/* Bicubic for horizontal upscale, bilinear for everything else. */
	if (hor_scl_mode == SCALE_UP)
		hscl_filter_mode = VOP2_SCALE_UP_BIC;
	else
		hscl_filter_mode = VOP2_SCALE_DOWN_BIL;

	if (ver_scl_mode == SCALE_UP)
		vscl_filter_mode = VOP2_SCALE_UP_BIL;
	else
		vscl_filter_mode = VOP2_SCALE_DOWN_BIL;

	/*
	 * RK3568 VOP Esmart/Smart dsp_w should be even pixel
	 * at scale down mode
	 */
	if (!(win->data->feature & WIN_FEATURE_AFBDC)) {
		if ((hor_scl_mode == SCALE_DOWN) && (dst_w & 0x1)) {
			drm_dbg(vop2->drm, "%s dst_w[%d] should align as 2 pixel\n",
				win->data->name, dst_w);
			dst_w++;
		}
	}

	val = vop2_scale_factor(src_w, dst_w);
	vop2_win_write(win, VOP2_WIN_SCALE_YRGB_X, val);
	val = vop2_scale_factor(src_h, dst_h);
	vop2_win_write(win, VOP2_WIN_SCALE_YRGB_Y, val);

	vop2_win_write(win, VOP2_WIN_VSD_YRGB_GT4, gt4);
	vop2_win_write(win, VOP2_WIN_VSD_YRGB_GT2, gt2);

	vop2_win_write(win, VOP2_WIN_YRGB_HOR_SCL_MODE, hor_scl_mode);
	vop2_win_write(win, VOP2_WIN_YRGB_VER_SCL_MODE, ver_scl_mode);

	/* Cluster windows have no filter-mode or separate CBCR registers. */
	if (vop2_cluster_window(win))
		return;

	vop2_win_write(win, VOP2_WIN_YRGB_HSCL_FILTER_MODE, hscl_filter_mode);
	vop2_win_write(win, VOP2_WIN_YRGB_VSCL_FILTER_MODE, vscl_filter_mode);

	if (info->is_yuv) {
		/* Chroma plane dimensions follow the format subsampling. */
		src_w /= info->hsub;
		src_h /= info->vsub;

		gt4 = 0;
		gt2 = 0;

		if (src_h >= (4 * dst_h)) {
			gt4 = 1;
			src_h >>= 2;
		} else if (src_h >= (2 * dst_h)) {
			gt2 = 1;
			src_h >>= 1;
		}

		hor_scl_mode = scl_get_scl_mode(src_w, dst_w);
		ver_scl_mode = scl_get_scl_mode(src_h, dst_h);

		val = vop2_scale_factor(src_w, dst_w);
		vop2_win_write(win, VOP2_WIN_SCALE_CBCR_X, val);

		val = vop2_scale_factor(src_h, dst_h);
		vop2_win_write(win, VOP2_WIN_SCALE_CBCR_Y, val);

		vop2_win_write(win, VOP2_WIN_VSD_CBCR_GT4, gt4);
		vop2_win_write(win, VOP2_WIN_VSD_CBCR_GT2, gt2);
		vop2_win_write(win, VOP2_WIN_CBCR_HOR_SCL_MODE, hor_scl_mode);
		vop2_win_write(win, VOP2_WIN_CBCR_VER_SCL_MODE, ver_scl_mode);
		vop2_win_write(win, VOP2_WIN_CBCR_HSCL_FILTER_MODE, hscl_filter_mode);
		vop2_win_write(win, VOP2_WIN_CBCR_VSCL_FILTER_MODE, vscl_filter_mode);
	}
}
/*
 * Map a V4L2 colorspace onto the VOP2 CSC matrix selector.
 * Anything not explicitly listed falls back to BT.709 limited range.
 */
static int vop2_convert_csc_mode(int csc_mode)
{
	if (csc_mode == V4L2_COLORSPACE_SMPTE170M ||
	    csc_mode == V4L2_COLORSPACE_470_SYSTEM_M ||
	    csc_mode == V4L2_COLORSPACE_470_SYSTEM_BG)
		return CSC_BT601L;

	if (csc_mode == V4L2_COLORSPACE_JPEG)
		return CSC_BT601F;

	if (csc_mode == V4L2_COLORSPACE_BT2020)
		return CSC_BT2020;

	/* REC709, SMPTE240M, DEFAULT and everything else: BT.709 limited. */
	return CSC_BT709L;
}
/*
* colorspace path:
* Input Win csc Output
* 1. YUV(2020) --> Y2R->2020To709->R2Y --> YUV_OUTPUT(601/709)
* RGB --> R2Y __/
*
 * 2. YUV(2020) --> bypass --> YUV_OUTPUT(2020)
* RGB --> 709To2020->R2Y __/
*
* 3. YUV(2020) --> Y2R->2020To709 --> RGB_OUTPUT(709)
* RGB --> R2Y __/
*
* 4. YUV(601/709)-> Y2R->709To2020->R2Y --> YUV_OUTPUT(2020)
* RGB --> 709To2020->R2Y __/
*
* 5. YUV(601/709)-> bypass --> YUV_OUTPUT(709)
* RGB --> R2Y __/
*
* 6. YUV(601/709)-> bypass --> YUV_OUTPUT(601)
* RGB --> R2Y(601) __/
*
* 7. YUV --> Y2R(709) --> RGB_OUTPUT(709)
* RGB --> bypass __/
*
* 8. RGB --> 709To2020->R2Y --> YUV_OUTPUT(2020)
*
* 9. RGB --> R2Y(709) --> YUV_OUTPUT(709)
*
* 10. RGB --> R2Y(601) --> YUV_OUTPUT(601)
*
* 11. RGB --> bypass --> RGB_OUTPUT(709)
*/
/*
 * Program the per-window color-space conversion: enable Y2R when a YUV
 * plane feeds an RGB output, R2Y when an RGB plane feeds a YUV output,
 * and bypass both when input and output classes match.
 */
static void vop2_setup_csc_mode(struct vop2_video_port *vp,
				struct vop2_win *win,
				struct drm_plane_state *pstate)
{
	struct rockchip_crtc_state *vcstate = to_rockchip_crtc_state(vp->crtc.state);
	int is_input_yuv = pstate->fb->format->is_yuv;
	int is_output_yuv = is_yuv_output(vcstate->bus_format);
	int input_csc = V4L2_COLORSPACE_DEFAULT;
	int output_csc = vcstate->color_space;
	bool y2r_en = false;
	bool r2y_en = false;
	int csc_mode = 0;

	if (is_input_yuv && !is_output_yuv) {
		/* YUV plane on an RGB output: convert on the way in. */
		y2r_en = true;
		csc_mode = vop2_convert_csc_mode(input_csc);
	} else if (!is_input_yuv && is_output_yuv) {
		/* RGB plane on a YUV output: convert on the way out. */
		r2y_en = true;
		csc_mode = vop2_convert_csc_mode(output_csc);
	}

	vop2_win_write(win, VOP2_WIN_Y2R_EN, y2r_en);
	vop2_win_write(win, VOP2_WIN_R2Y_EN, r2y_en);
	vop2_win_write(win, VOP2_WIN_CSC_MODE, csc_mode);
}
/* Ack any stale status, then unmask @irq on the port (upper 16 bits
 * of the INT registers are the write-enable mask). */
static void vop2_crtc_enable_irq(struct vop2_video_port *vp, u32 irq)
{
	struct vop2 *vop2 = vp->vop2;
	u32 val = irq << 16 | irq;

	vop2_writel(vop2, RK3568_VP_INT_CLR(vp->id), val);
	vop2_writel(vop2, RK3568_VP_INT_EN(vp->id), val);
}
/* Mask @irq: write-enable bits set, value bits clear. */
static void vop2_crtc_disable_irq(struct vop2_video_port *vp, u32 irq)
{
	vop2_writel(vp->vop2, RK3568_VP_INT_EN(vp->id), irq << 16);
}
/*
 * Enable the VOP2 core clocks (hclk then aclk). On failure everything
 * already enabled is rolled back and the clk error code is returned.
 */
static int vop2_core_clks_prepare_enable(struct vop2 *vop2)
{
	int ret;

	ret = clk_prepare_enable(vop2->hclk);
	if (ret < 0) {
		drm_err(vop2->drm, "failed to enable hclk - %d\n", ret);
		return ret;
	}

	ret = clk_prepare_enable(vop2->aclk);
	if (ret < 0) {
		drm_err(vop2->drm, "failed to enable aclk - %d\n", ret);
		clk_disable_unprepare(vop2->hclk);
		return ret;
	}

	return 0;
}
/*
 * Power up the VOP2: take a runtime-PM reference, enable the core
 * clocks, attach to the IOMMU and restore/initialize global registers.
 * Called when the first video port is enabled.
 *
 * Fix: the original returned without releasing the clocks and the
 * runtime-PM reference when rockchip_drm_dma_attach_device() failed,
 * leaking both.
 */
static void vop2_enable(struct vop2 *vop2)
{
	int ret;

	ret = pm_runtime_resume_and_get(vop2->dev);
	if (ret < 0) {
		drm_err(vop2->drm, "failed to get pm runtime: %d\n", ret);
		return;
	}

	ret = vop2_core_clks_prepare_enable(vop2);
	if (ret) {
		pm_runtime_put_sync(vop2->dev);
		return;
	}

	ret = rockchip_drm_dma_attach_device(vop2->drm, vop2->dev);
	if (ret) {
		drm_err(vop2->drm, "failed to attach dma mapping, %d\n", ret);
		/* Roll back the clock and runtime-PM enables from above. */
		clk_disable_unprepare(vop2->aclk);
		clk_disable_unprepare(vop2->hclk);
		pm_runtime_put_sync(vop2->dev);
		return;
	}

	/* Replay the cached register state into the (re)powered block. */
	regcache_sync(vop2->map);

	if (vop2->data->soc_id == 3566)
		vop2_writel(vop2, RK3568_OTP_WIN_EN, 1);

	vop2_writel(vop2, RK3568_REG_CFG_DONE, RK3568_REG_CFG_DONE__GLB_CFG_DONE_EN);

	/*
	 * Disable auto gating, this is a workaround to
	 * avoid display image shift when a window enabled.
	 */
	regmap_clear_bits(vop2->map, RK3568_SYS_AUTO_GATING_CTRL,
			  RK3568_SYS_AUTO_GATING_CTRL__AUTO_GATING_EN);

	/* Ack and enable the bus-error interrupts on both SYS banks. */
	vop2_writel(vop2, RK3568_SYS0_INT_CLR,
		    VOP2_INT_BUS_ERRPR << 16 | VOP2_INT_BUS_ERRPR);
	vop2_writel(vop2, RK3568_SYS0_INT_EN,
		    VOP2_INT_BUS_ERRPR << 16 | VOP2_INT_BUS_ERRPR);
	vop2_writel(vop2, RK3568_SYS1_INT_CLR,
		    VOP2_INT_BUS_ERRPR << 16 | VOP2_INT_BUS_ERRPR);
	vop2_writel(vop2, RK3568_SYS1_INT_EN,
		    VOP2_INT_BUS_ERRPR << 16 | VOP2_INT_BUS_ERRPR);
}
/*
 * Counterpart of vop2_enable(): detach from the IOMMU, drop the
 * runtime-PM reference and gate the core clocks. The register cache is
 * marked dirty so regcache_sync() rewrites hardware state on the next
 * enable.
 */
static void vop2_disable(struct vop2 *vop2)
{
	rockchip_drm_dma_detach_device(vop2->drm, vop2->dev);

	pm_runtime_put_sync(vop2->dev);

	regcache_mark_dirty(vop2->map);

	clk_disable_unprepare(vop2->aclk);
	clk_disable_unprepare(vop2->hclk);
}
/*
 * Disable a video port (CRTC): tear down its planes, put the port in
 * standby and wait for the standby to complete before gating clocks.
 * The last active port powers down the whole VOP.
 */
static void vop2_crtc_atomic_disable(struct drm_crtc *crtc,
				     struct drm_atomic_state *state)
{
	struct vop2_video_port *vp = to_vop2_video_port(crtc);
	struct vop2 *vop2 = vp->vop2;
	struct drm_crtc_state *old_crtc_state;
	int ret;

	vop2_lock(vop2);

	old_crtc_state = drm_atomic_get_old_crtc_state(state, crtc);
	drm_atomic_helper_disable_planes_on_crtc(old_crtc_state, false);

	drm_crtc_vblank_off(crtc);

	/*
	 * Vop standby will take effect at end of current frame,
	 * if dsp hold valid irq happen, it means standby complete.
	 *
	 * we must wait standby complete when we want to disable aclk,
	 * if not, memory bus maybe dead.
	 */
	reinit_completion(&vp->dsp_hold_completion);
	vop2_crtc_enable_irq(vp, VP_INT_DSP_HOLD_VALID);
	vop2_vp_write(vp, RK3568_VP_DSP_CTRL, RK3568_VP_DSP_CTRL__STANDBY);

	ret = wait_for_completion_timeout(&vp->dsp_hold_completion,
					  msecs_to_jiffies(50));
	if (!ret)
		drm_info(vop2->drm, "wait for vp%d dsp_hold timeout\n", vp->id);

	vop2_crtc_disable_irq(vp, VP_INT_DSP_HOLD_VALID);

	clk_disable_unprepare(vp->dclk);

	/* Power down the whole VOP once the last port is gone. */
	vop2->enable_count--;

	if (!vop2->enable_count)
		vop2_disable(vop2);

	vop2_unlock(vop2);

	/* Flush any pending event now that vblanks are off. */
	if (crtc->state->event && !crtc->state->active) {
		spin_lock_irq(&crtc->dev->event_lock);
		drm_crtc_send_vblank_event(crtc, crtc->state->event);
		spin_unlock_irq(&crtc->dev->event_lock);

		crtc->state->event = NULL;
	}
}
/*
 * Validate a plane update: run the generic clip/scale helper, then
 * enforce VOP2 hardware limits (minimum 4x4 size, maximum input size,
 * even x start for YUV).
 *
 * Fix: the minimum-size check tested drm_rect_width(dest) twice and
 * never validated the destination height; the second test now uses
 * drm_rect_height(dest).
 */
static int vop2_plane_atomic_check(struct drm_plane *plane,
				   struct drm_atomic_state *astate)
{
	struct drm_plane_state *pstate = drm_atomic_get_new_plane_state(astate, plane);
	struct drm_framebuffer *fb = pstate->fb;
	struct drm_crtc *crtc = pstate->crtc;
	struct drm_crtc_state *cstate;
	struct vop2_video_port *vp;
	struct vop2 *vop2;
	const struct vop2_data *vop2_data;
	struct drm_rect *dest = &pstate->dst;
	struct drm_rect *src = &pstate->src;
	int min_scale = FRAC_16_16(1, 8);
	int max_scale = FRAC_16_16(8, 1);
	int format;
	int ret;

	/* Plane is being disabled (no CRTC): nothing to validate. */
	if (!crtc)
		return 0;

	vp = to_vop2_video_port(crtc);
	vop2 = vp->vop2;
	vop2_data = vop2->data;

	cstate = drm_atomic_get_existing_crtc_state(pstate->state, crtc);
	if (WARN_ON(!cstate))
		return -EINVAL;

	ret = drm_atomic_helper_check_plane_state(pstate, cstate,
						  min_scale, max_scale,
						  true, true);
	if (ret)
		return ret;

	if (!pstate->visible)
		return 0;

	format = vop2_convert_format(fb->format->format);
	if (format < 0)
		return format;

	/*
	 * The scaler cannot handle anything smaller than 4x4; src is in
	 * 16.16 fixed point, dest is in whole pixels. Too-small planes
	 * are hidden rather than rejected.
	 */
	if (drm_rect_width(src) >> 16 < 4 || drm_rect_height(src) >> 16 < 4 ||
	    drm_rect_width(dest) < 4 || drm_rect_height(dest) < 4) {
		drm_err(vop2->drm, "Invalid size: %dx%d->%dx%d, min size is 4x4\n",
			drm_rect_width(src) >> 16, drm_rect_height(src) >> 16,
			drm_rect_width(dest), drm_rect_height(dest));
		pstate->visible = false;
		return 0;
	}

	if (drm_rect_width(src) >> 16 > vop2_data->max_input.width ||
	    drm_rect_height(src) >> 16 > vop2_data->max_input.height) {
		drm_err(vop2->drm, "Invalid source: %dx%d. max input: %dx%d\n",
			drm_rect_width(src) >> 16,
			drm_rect_height(src) >> 16,
			vop2_data->max_input.width,
			vop2_data->max_input.height);
		return -EINVAL;
	}

	/*
	 * Src.x1 can be odd when do clip, but yuv plane start point
	 * need align with 2 pixel.
	 */
	if (fb->format->is_yuv && ((pstate->src.x1 >> 16) % 2)) {
		drm_err(vop2->drm, "Invalid Source: Yuv format not support odd xpos\n");
		return -EINVAL;
	}

	return 0;
}
/*
 * Turn a window off. If the old state shows the plane was never bound
 * to a CRTC there is nothing to tear down in hardware.
 */
static void vop2_plane_atomic_disable(struct drm_plane *plane,
				      struct drm_atomic_state *state)
{
	struct vop2_win *win = to_vop2_win(plane);
	struct vop2 *vop2 = win->vop2;
	struct drm_plane_state *old_pstate =
		state ? drm_atomic_get_old_plane_state(state, plane) : NULL;

	drm_dbg(vop2->drm, "%s disable\n", win->data->name);

	if (old_pstate && !old_pstate->crtc)
		return;

	vop2_win_disable(win);
	vop2_win_write(win, VOP2_WIN_YUV_CLIP, 0);
}
/*
 * Program the per-window color key. The hardware compares against a
 * 10-bit-per-channel key, so the RGB565/RGB888 key supplied by the
 * caller is expanded to 10 bits per channel before being written.
 * YUV formats and keys without VOP2_COLOR_KEY_MASK disable keying.
 */
static void vop2_plane_setup_color_key(struct drm_plane *plane, u32 color_key)
{
	struct vop2_win *win = to_vop2_win(plane);
	struct drm_plane_state *pstate = plane->state;
	struct drm_framebuffer *fb = pstate->fb;
	u32 color_key_en = 0;
	u32 r = 0;
	u32 g = 0;
	u32 b = 0;

	if (!(color_key & VOP2_COLOR_KEY_MASK) || fb->format->is_yuv) {
		vop2_win_write(win, VOP2_WIN_COLOR_KEY_EN, 0);
		return;
	}

	switch (fb->format->format) {
	case DRM_FORMAT_RGB565:
	case DRM_FORMAT_BGR565:
		/* Expand 5/6/5-bit channels to 10 bits each. */
		r = ((color_key & 0xf800) >> 11) << 5;
		g = ((color_key & 0x7e0) >> 5) << 4;
		b = (color_key & 0x1f) << 5;
		color_key_en = 1;
		break;
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
	case DRM_FORMAT_RGB888:
	case DRM_FORMAT_BGR888:
		/* Expand 8-bit channels to 10 bits each. */
		r = ((color_key & 0xff0000) >> 16) << 2;
		g = ((color_key & 0xff00) >> 8) << 2;
		b = (color_key & 0xff) << 2;
		color_key_en = 1;
		break;
	}

	vop2_win_write(win, VOP2_WIN_COLOR_KEY_EN, color_key_en);
	vop2_win_write(win, VOP2_WIN_COLOR_KEY, (r << 20) | (g << 10) | b);
}
/*
 * Fully (re)program window @win for the new plane state: compute the
 * scanout addresses (honouring Y mirroring and AFBC), clamp the source
 * and destination rectangles to hardware limits, then write format,
 * geometry, scaling, CSC and enable bits.
 */
static void vop2_plane_atomic_update(struct drm_plane *plane,
				     struct drm_atomic_state *state)
{
	struct drm_plane_state *pstate = plane->state;
	struct drm_crtc *crtc = pstate->crtc;
	struct vop2_win *win = to_vop2_win(plane);
	struct vop2_video_port *vp = to_vop2_video_port(crtc);
	struct drm_display_mode *adjusted_mode = &crtc->state->adjusted_mode;
	struct vop2 *vop2 = win->vop2;
	struct drm_framebuffer *fb = pstate->fb;
	u32 bpp = fb->format->cpp[0] * 8;
	u32 actual_w, actual_h, dsp_w, dsp_h;
	u32 act_info, dsp_info;
	u32 format;
	u32 afbc_format;
	u32 rb_swap;
	u32 uv_swap;
	struct drm_rect *src = &pstate->src;
	struct drm_rect *dest = &pstate->dst;
	u32 afbc_tile_num;
	u32 transform_offset;
	bool dither_up;
	bool xmirror = pstate->rotation & DRM_MODE_REFLECT_X ? true : false;
	bool ymirror = pstate->rotation & DRM_MODE_REFLECT_Y ? true : false;
	bool rotate_270 = pstate->rotation & DRM_MODE_ROTATE_270;
	bool rotate_90 = pstate->rotation & DRM_MODE_ROTATE_90;
	struct rockchip_gem_object *rk_obj;
	unsigned long offset;
	bool afbc_en;
	dma_addr_t yrgb_mst;
	dma_addr_t uv_mst;

	/*
	 * can't update plane when vop2 is disabled.
	 */
	if (WARN_ON(!crtc))
		return;

	if (!pstate->visible) {
		vop2_plane_atomic_disable(plane, state);
		return;
	}

	afbc_en = rockchip_afbc(plane, fb->modifier);

	/* Byte offset of the first scanout pixel within the buffer. */
	offset = (src->x1 >> 16) * fb->format->cpp[0];

	/*
	 * AFBC HDR_PTR must set to the zero offset of the framebuffer.
	 */
	if (afbc_en)
		offset = 0;
	else if (pstate->rotation & DRM_MODE_REFLECT_Y)
		/* Y mirror scans bottom-up: start from the last line. */
		offset += ((src->y2 >> 16) - 1) * fb->pitches[0];
	else
		offset += (src->y1 >> 16) * fb->pitches[0];

	rk_obj = to_rockchip_obj(fb->obj[0]);
	yrgb_mst = rk_obj->dma_addr + offset + fb->offsets[0];
	if (fb->format->is_yuv) {
		int hsub = fb->format->hsub;
		int vsub = fb->format->vsub;

		/* Chroma plane offset scales with the subsampling factors. */
		offset = (src->x1 >> 16) * fb->format->cpp[1] / hsub;
		offset += (src->y1 >> 16) * fb->pitches[1] / vsub;

		if ((pstate->rotation & DRM_MODE_REFLECT_Y) && !afbc_en)
			offset += fb->pitches[1] * ((pstate->src_h >> 16) - 2) / vsub;

		rk_obj = to_rockchip_obj(fb->obj[0]);
		uv_mst = rk_obj->dma_addr + offset + fb->offsets[1];
	}

	actual_w = drm_rect_width(src) >> 16;
	actual_h = drm_rect_height(src) >> 16;

	/* Clamp the destination rectangle to the active display area,
	 * shrinking the source proportionally. */
	dsp_w = drm_rect_width(dest);
	if (dest->x1 + dsp_w > adjusted_mode->hdisplay) {
		drm_err(vop2->drm, "vp%d %s dest->x1[%d] + dsp_w[%d] exceed mode hdisplay[%d]\n",
			vp->id, win->data->name, dest->x1, dsp_w, adjusted_mode->hdisplay);
		dsp_w = adjusted_mode->hdisplay - dest->x1;
		if (dsp_w < 4)
			dsp_w = 4;
		actual_w = dsp_w * actual_w / drm_rect_width(dest);
	}

	dsp_h = drm_rect_height(dest);
	if (dest->y1 + dsp_h > adjusted_mode->vdisplay) {
		drm_err(vop2->drm, "vp%d %s dest->y1[%d] + dsp_h[%d] exceed mode vdisplay[%d]\n",
			vp->id, win->data->name, dest->y1, dsp_h, adjusted_mode->vdisplay);
		dsp_h = adjusted_mode->vdisplay - dest->y1;
		if (dsp_h < 4)
			dsp_h = 4;
		actual_h = dsp_h * actual_h / drm_rect_height(dest);
	}

	/*
	 * This is workaround solution for IC design:
	 * esmart can't support scale down when actual_w % 16 == 1.
	 */
	if (!(win->data->feature & WIN_FEATURE_AFBDC)) {
		if (actual_w > dsp_w && (actual_w & 0xf) == 1) {
			drm_err(vop2->drm, "vp%d %s act_w[%d] MODE 16 == 1\n",
				vp->id, win->data->name, actual_w);
			actual_w -= 1;
		}
	}

	/* AFBC source width must be a multiple of 4 pixels. */
	if (afbc_en && actual_w % 4) {
		drm_err(vop2->drm, "vp%d %s actual_w[%d] not 4 pixel aligned\n",
			vp->id, win->data->name, actual_w);
		actual_w = ALIGN_DOWN(actual_w, 4);
	}

	/* Geometry registers pack (height-1) << 16 | (width-1). */
	act_info = (actual_h - 1) << 16 | ((actual_w - 1) & 0xffff);
	dsp_info = (dsp_h - 1) << 16 | ((dsp_w - 1) & 0xffff);

	format = vop2_convert_format(fb->format->format);

	drm_dbg(vop2->drm, "vp%d update %s[%dx%d->%dx%d@%dx%d] fmt[%p4cc_%s] addr[%pad]\n",
		vp->id, win->data->name, actual_w, actual_h, dsp_w, dsp_h,
		dest->x1, dest->y1,
		&fb->format->format,
		afbc_en ? "AFBC" : "", &yrgb_mst);

	if (afbc_en) {
		u32 stride;

		/* the afbc superblock is 16 x 16 */
		afbc_format = vop2_convert_afbc_format(fb->format->format);

		/* Enable color transform for YTR */
		if (fb->modifier & AFBC_FORMAT_MOD_YTR)
			afbc_format |= (1 << 4);

		afbc_tile_num = ALIGN(actual_w, 16) >> 4;

		/*
		 * AFBC pic_vir_width is count by pixel, this is different
		 * with WIN_VIR_STRIDE.
		 */
		stride = (fb->pitches[0] << 3) / bpp;
		if ((stride & 0x3f) && (xmirror || rotate_90 || rotate_270))
			drm_err(vop2->drm, "vp%d %s stride[%d] not 64 pixel aligned\n",
				vp->id, win->data->name, stride);

		rb_swap = vop2_afbc_rb_swap(fb->format->format);
		uv_swap = vop2_afbc_uv_swap(fb->format->format);
		/*
		 * This is a workaround for crazy IC design, Cluster
		 * and Esmart/Smart use different format configuration map:
		 * YUV420_10BIT: 0x10 for Cluster, 0x14 for Esmart/Smart.
		 *
		 * This is one thing we can make the convert simple:
		 * AFBCD decode all the YUV data to YUV444. So we just
		 * set all the yuv 10 bit to YUV444_10.
		 */
		if (fb->format->is_yuv && bpp == 10)
			format = VOP2_CLUSTER_YUV444_10;

		if (vop2_cluster_window(win))
			vop2_win_write(win, VOP2_WIN_AFBC_ENABLE, 1);
		vop2_win_write(win, VOP2_WIN_AFBC_FORMAT, afbc_format);
		vop2_win_write(win, VOP2_WIN_AFBC_RB_SWAP, rb_swap);
		vop2_win_write(win, VOP2_WIN_AFBC_UV_SWAP, uv_swap);
		vop2_win_write(win, VOP2_WIN_AFBC_AUTO_GATING_EN, 0);
		vop2_win_write(win, VOP2_WIN_AFBC_BLOCK_SPLIT_EN, 0);
		/* Rotated scanout cannot use the half (4096x8) block layout. */
		if (pstate->rotation & (DRM_MODE_ROTATE_270 | DRM_MODE_ROTATE_90)) {
			vop2_win_write(win, VOP2_WIN_AFBC_HALF_BLOCK_EN, 0);
			transform_offset = vop2_afbc_transform_offset(pstate, false);
		} else {
			vop2_win_write(win, VOP2_WIN_AFBC_HALF_BLOCK_EN, 1);
			transform_offset = vop2_afbc_transform_offset(pstate, true);
		}
		vop2_win_write(win, VOP2_WIN_AFBC_HDR_PTR, yrgb_mst);
		vop2_win_write(win, VOP2_WIN_AFBC_PIC_SIZE, act_info);
		vop2_win_write(win, VOP2_WIN_AFBC_TRANSFORM_OFFSET, transform_offset);
		vop2_win_write(win, VOP2_WIN_AFBC_PIC_OFFSET, ((src->x1 >> 16) | src->y1));
		vop2_win_write(win, VOP2_WIN_AFBC_DSP_OFFSET, (dest->x1 | (dest->y1 << 16)));
		vop2_win_write(win, VOP2_WIN_AFBC_PIC_VIR_WIDTH, stride);
		vop2_win_write(win, VOP2_WIN_AFBC_TILE_NUM, afbc_tile_num);
		vop2_win_write(win, VOP2_WIN_XMIRROR, xmirror);
		vop2_win_write(win, VOP2_WIN_AFBC_ROTATE_270, rotate_270);
		vop2_win_write(win, VOP2_WIN_AFBC_ROTATE_90, rotate_90);
	} else {
		/* Linear scanout: the stride register counts 32-bit words. */
		vop2_win_write(win, VOP2_WIN_YRGB_VIR, DIV_ROUND_UP(fb->pitches[0], 4));
	}

	vop2_win_write(win, VOP2_WIN_YMIRROR, ymirror);

	/* 90/270 degree rotation swaps width and height. */
	if (rotate_90 || rotate_270) {
		act_info = swahw32(act_info);
		actual_w = drm_rect_height(src) >> 16;
		actual_h = drm_rect_width(src) >> 16;
	}

	vop2_win_write(win, VOP2_WIN_FORMAT, format);
	vop2_win_write(win, VOP2_WIN_YRGB_MST, yrgb_mst);

	rb_swap = vop2_win_rb_swap(fb->format->format);
	vop2_win_write(win, VOP2_WIN_RB_SWAP, rb_swap);
	if (!vop2_cluster_window(win)) {
		uv_swap = vop2_win_uv_swap(fb->format->format);
		vop2_win_write(win, VOP2_WIN_UV_SWAP, uv_swap);
	}

	if (fb->format->is_yuv) {
		vop2_win_write(win, VOP2_WIN_UV_VIR, DIV_ROUND_UP(fb->pitches[1], 4));
		vop2_win_write(win, VOP2_WIN_UV_MST, uv_mst);
	}

	vop2_setup_scale(vop2, win, actual_w, actual_h, dsp_w, dsp_h, fb->format->format);
	if (!vop2_cluster_window(win))
		vop2_plane_setup_color_key(plane, 0);
	vop2_win_write(win, VOP2_WIN_ACT_INFO, act_info);
	vop2_win_write(win, VOP2_WIN_DSP_INFO, dsp_info);
	vop2_win_write(win, VOP2_WIN_DSP_ST, dest->y1 << 16 | (dest->x1 & 0xffff));

	vop2_setup_csc_mode(vp, win, pstate);

	dither_up = vop2_win_dither_up(fb->format->format);
	vop2_win_write(win, VOP2_WIN_DITHER_UP, dither_up);

	vop2_win_write(win, VOP2_WIN_ENABLE, 1);

	if (vop2_cluster_window(win)) {
		int lb_mode = vop2_get_cluster_lb_mode(win, pstate);

		vop2_win_write(win, VOP2_WIN_CLUSTER_LB_MODE, lb_mode);
		vop2_win_write(win, VOP2_WIN_CLUSTER_ENABLE, 1);
	}
}
/* Per-plane atomic helper hooks (check/update/disable). */
static const struct drm_plane_helper_funcs vop2_plane_helper_funcs = {
	.atomic_check = vop2_plane_atomic_check,
	.atomic_update = vop2_plane_atomic_update,
	.atomic_disable = vop2_plane_atomic_disable,
};
/* Standard atomic plane ops; only format_mod_supported is driver-specific. */
static const struct drm_plane_funcs vop2_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = drm_plane_cleanup,
	.reset = drm_atomic_helper_plane_reset,
	.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
	.format_mod_supported = rockchip_vop2_mod_supported,
};
/* The frame-start field interrupt doubles as the vblank source. */
static int vop2_crtc_enable_vblank(struct drm_crtc *crtc)
{
	vop2_crtc_enable_irq(to_vop2_video_port(crtc), VP_INT_FS_FIELD);

	return 0;
}
/* Mask the frame-start field interrupt used for vblank delivery. */
static void vop2_crtc_disable_vblank(struct drm_crtc *crtc)
{
	vop2_crtc_disable_irq(to_vop2_video_port(crtc), VP_INT_FS_FIELD);
}
/* Fill in the crtc_* timing fields of the adjusted mode; never rejects. */
static bool vop2_crtc_mode_fixup(struct drm_crtc *crtc,
				 const struct drm_display_mode *mode,
				 struct drm_display_mode *adj_mode)
{
	drm_mode_set_crtcinfo(adj_mode, CRTC_STEREO_DOUBLE |
					CRTC_INTERLACE_HALVE_V);

	return true;
}
/*
 * Fold the dithering configuration for the current bus format into
 * @dsp_ctrl: narrow RGB buses dither down, and any output mode other
 * than 10-bit AAAA additionally pre-dithers from 10 to 8 bits.
 */
static void vop2_dither_setup(struct drm_crtc *crtc, u32 *dsp_ctrl)
{
	struct rockchip_crtc_state *vcstate = to_rockchip_crtc_state(crtc->state);

	switch (vcstate->bus_format) {
	case MEDIA_BUS_FMT_RGB565_1X16:
		*dsp_ctrl |= RK3568_VP_DSP_CTRL__DITHER_DOWN_EN;
		break;
	case MEDIA_BUS_FMT_RGB666_1X18:
	case MEDIA_BUS_FMT_RGB666_1X24_CPADHI:
	case MEDIA_BUS_FMT_RGB666_1X7X3_SPWG:
		/* 18-bit buses also need the 888->666 truncation mode. */
		*dsp_ctrl |= RK3568_VP_DSP_CTRL__DITHER_DOWN_EN;
		*dsp_ctrl |= RGB888_TO_RGB666;
		break;
	case MEDIA_BUS_FMT_YUV8_1X24:
	case MEDIA_BUS_FMT_UYYVYY8_0_5X24:
		*dsp_ctrl |= RK3568_VP_DSP_CTRL__PRE_DITHER_DOWN_EN;
		break;
	default:
		break;
	}

	if (vcstate->output_mode != ROCKCHIP_OUT_MODE_AAAA)
		*dsp_ctrl |= RK3568_VP_DSP_CTRL__PRE_DITHER_DOWN_EN;

	*dsp_ctrl |= FIELD_PREP(RK3568_VP_DSP_CTRL__DITHER_DOWN_SEL,
				DITHER_DOWN_ALLEGRO);
}
/*
 * Program the post-scaler of a video port: center the (margin-scaled)
 * active area within the mode and set the post scale-down factors.
 * With margins fixed at 100 the post path is effectively 1:1.
 */
static void vop2_post_config(struct drm_crtc *crtc)
{
	struct vop2_video_port *vp = to_vop2_video_port(crtc);
	struct drm_display_mode *mode = &crtc->state->adjusted_mode;
	u16 vtotal = mode->crtc_vtotal;
	u16 hdisplay = mode->crtc_hdisplay;
	u16 hact_st = mode->crtc_htotal - mode->crtc_hsync_start;
	u16 vdisplay = mode->crtc_vdisplay;
	u16 vact_st = mode->crtc_vtotal - mode->crtc_vsync_start;
	u32 left_margin = 100, right_margin = 100;
	u32 top_margin = 100, bottom_margin = 100;
	u16 hsize = hdisplay * (left_margin + right_margin) / 200;
	u16 vsize = vdisplay * (top_margin + bottom_margin) / 200;
	u16 hact_end, vact_end;
	u32 val;

	/* The scaled size must be even. */
	vsize = rounddown(vsize, 2);
	hsize = rounddown(hsize, 2);

	hact_st += hdisplay * (100 - left_margin) / 200;
	hact_end = hact_st + hsize;
	val = hact_st << 16;
	val |= hact_end;
	vop2_vp_write(vp, RK3568_VP_POST_DSP_HACT_INFO, val);

	vact_st += vdisplay * (100 - top_margin) / 200;
	vact_end = vact_st + vsize;
	val = vact_st << 16;
	val |= vact_end;
	vop2_vp_write(vp, RK3568_VP_POST_DSP_VACT_INFO, val);

	val = scl_cal_scale2(vdisplay, vsize) << 16;
	val |= scl_cal_scale2(hdisplay, hsize);
	vop2_vp_write(vp, RK3568_VP_POST_SCL_FACTOR_YRGB, val);

	val = 0;
	if (hdisplay != hsize)
		val |= RK3568_VP_POST_SCL_CTRL__HSCALEDOWN;
	if (vdisplay != vsize)
		val |= RK3568_VP_POST_SCL_CTRL__VSCALEDOWN;
	vop2_vp_write(vp, RK3568_VP_POST_SCL_CTRL, val);

	/* Interlaced modes need the field-1 vertical active window too. */
	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		u16 vact_st_f1 = vtotal + vact_st + 1;
		u16 vact_end_f1 = vact_st_f1 + vsize;

		val = vact_st_f1 << 16 | vact_end_f1;
		vop2_vp_write(vp, RK3568_VP_POST_DSP_VACT_INFO_F1, val);
	}

	vop2_vp_write(vp, RK3568_VP_DSP_BG, 0);
}
/*
 * Route video port @vp to output interface @id (a ROCKCHIP_VOP2_EP_*
 * endpoint) and program the interface pin polarities from @polflags.
 * This read-modify-writes RK3568_DSP_IF_EN (interface enable + VP mux)
 * and RK3568_DSP_IF_POL (per-interface polarity).
 */
static void rk3568_set_intf_mux(struct vop2_video_port *vp, int id,
				u32 polflags)
{
	struct vop2 *vop2 = vp->vop2;
	u32 die, dip;

	die = vop2_readl(vop2, RK3568_DSP_IF_EN);
	dip = vop2_readl(vop2, RK3568_DSP_IF_POL);

	switch (id) {
	case ROCKCHIP_VOP2_EP_RGB0:
		die &= ~RK3568_SYS_DSP_INFACE_EN_RGB_MUX;
		die |= RK3568_SYS_DSP_INFACE_EN_RGB |
			   FIELD_PREP(RK3568_SYS_DSP_INFACE_EN_RGB_MUX, vp->id);
		dip &= ~RK3568_DSP_IF_POL__RGB_LVDS_PIN_POL;
		dip |= FIELD_PREP(RK3568_DSP_IF_POL__RGB_LVDS_PIN_POL, polflags);
		/* The RGB dclk inversion lives in a GRF register (bit 3,
		 * with the write-enable bit in the upper half-word). */
		if (polflags & POLFLAG_DCLK_INV)
			regmap_write(vop2->grf, RK3568_GRF_VO_CON1, BIT(3 + 16) | BIT(3));
		else
			regmap_write(vop2->grf, RK3568_GRF_VO_CON1, BIT(3 + 16));
		break;
	case ROCKCHIP_VOP2_EP_HDMI0:
		die &= ~RK3568_SYS_DSP_INFACE_EN_HDMI_MUX;
		die |= RK3568_SYS_DSP_INFACE_EN_HDMI |
			   FIELD_PREP(RK3568_SYS_DSP_INFACE_EN_HDMI_MUX, vp->id);
		dip &= ~RK3568_DSP_IF_POL__HDMI_PIN_POL;
		dip |= FIELD_PREP(RK3568_DSP_IF_POL__HDMI_PIN_POL, polflags);
		break;
	case ROCKCHIP_VOP2_EP_EDP0:
		die &= ~RK3568_SYS_DSP_INFACE_EN_EDP_MUX;
		die |= RK3568_SYS_DSP_INFACE_EN_EDP |
			   FIELD_PREP(RK3568_SYS_DSP_INFACE_EN_EDP_MUX, vp->id);
		dip &= ~RK3568_DSP_IF_POL__EDP_PIN_POL;
		dip |= FIELD_PREP(RK3568_DSP_IF_POL__EDP_PIN_POL, polflags);
		break;
	case ROCKCHIP_VOP2_EP_MIPI0:
		die &= ~RK3568_SYS_DSP_INFACE_EN_MIPI0_MUX;
		die |= RK3568_SYS_DSP_INFACE_EN_MIPI0 |
			   FIELD_PREP(RK3568_SYS_DSP_INFACE_EN_MIPI0_MUX, vp->id);
		dip &= ~RK3568_DSP_IF_POL__MIPI_PIN_POL;
		dip |= FIELD_PREP(RK3568_DSP_IF_POL__MIPI_PIN_POL, polflags);
		break;
	case ROCKCHIP_VOP2_EP_MIPI1:
		die &= ~RK3568_SYS_DSP_INFACE_EN_MIPI1_MUX;
		die |= RK3568_SYS_DSP_INFACE_EN_MIPI1 |
			   FIELD_PREP(RK3568_SYS_DSP_INFACE_EN_MIPI1_MUX, vp->id);
		dip &= ~RK3568_DSP_IF_POL__MIPI_PIN_POL;
		dip |= FIELD_PREP(RK3568_DSP_IF_POL__MIPI_PIN_POL, polflags);
		break;
	case ROCKCHIP_VOP2_EP_LVDS0:
		die &= ~RK3568_SYS_DSP_INFACE_EN_LVDS0_MUX;
		die |= RK3568_SYS_DSP_INFACE_EN_LVDS0 |
			   FIELD_PREP(RK3568_SYS_DSP_INFACE_EN_LVDS0_MUX, vp->id);
		dip &= ~RK3568_DSP_IF_POL__RGB_LVDS_PIN_POL;
		dip |= FIELD_PREP(RK3568_DSP_IF_POL__RGB_LVDS_PIN_POL, polflags);
		break;
	case ROCKCHIP_VOP2_EP_LVDS1:
		die &= ~RK3568_SYS_DSP_INFACE_EN_LVDS1_MUX;
		die |= RK3568_SYS_DSP_INFACE_EN_LVDS1 |
			   FIELD_PREP(RK3568_SYS_DSP_INFACE_EN_LVDS1_MUX, vp->id);
		dip &= ~RK3568_DSP_IF_POL__RGB_LVDS_PIN_POL;
		dip |= FIELD_PREP(RK3568_DSP_IF_POL__RGB_LVDS_PIN_POL, polflags);
		break;
	default:
		drm_err(vop2->drm, "Invalid interface id %d on vp%d\n", id, vp->id);
		return;
	}

	/* Polarity changes take effect immediately, no cfg_done needed. */
	dip |= RK3568_DSP_IF_POL__CFG_DONE_IMD;

	vop2_writel(vop2, RK3568_DSP_IF_EN, die);
	vop2_writel(vop2, RK3568_DSP_IF_POL, dip);
}
/*
 * Convert a delay in microseconds into the equivalent number of
 * scanlines of @mode (mode->clock is in kHz, hence the final /1000).
 */
static int us_to_vertical_line(struct drm_display_mode *mode, int us)
{
	int pixels = us * mode->clock;

	return pixels / mode->htotal / 1000;
}
/*
 * Enable a video port (CRTC): power up the VOP if this is the first
 * active port, route the port to its output interface(s), program the
 * display timings from the adjusted mode and start scanout.
 */
static void vop2_crtc_atomic_enable(struct drm_crtc *crtc,
				    struct drm_atomic_state *state)
{
	struct vop2_video_port *vp = to_vop2_video_port(crtc);
	struct vop2 *vop2 = vp->vop2;
	const struct vop2_data *vop2_data = vop2->data;
	const struct vop2_video_port_data *vp_data = &vop2_data->vp[vp->id];
	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
	struct rockchip_crtc_state *vcstate = to_rockchip_crtc_state(crtc->state);
	struct drm_display_mode *mode = &crtc->state->adjusted_mode;
	unsigned long clock = mode->crtc_clock * 1000;
	u16 hsync_len = mode->crtc_hsync_end - mode->crtc_hsync_start;
	u16 hdisplay = mode->crtc_hdisplay;
	u16 htotal = mode->crtc_htotal;
	u16 hact_st = mode->crtc_htotal - mode->crtc_hsync_start;
	u16 hact_end = hact_st + hdisplay;
	u16 vdisplay = mode->crtc_vdisplay;
	u16 vtotal = mode->crtc_vtotal;
	u16 vsync_len = mode->crtc_vsync_end - mode->crtc_vsync_start;
	u16 vact_st = mode->crtc_vtotal - mode->crtc_vsync_start;
	u16 vact_end = vact_st + vdisplay;
	u8 out_mode;
	u32 dsp_ctrl = 0;
	int act_end;
	u32 val, polflags;
	int ret;
	struct drm_encoder *encoder;

	drm_dbg(vop2->drm, "Update mode to %dx%d%s%d, type: %d for vp%d\n",
		hdisplay, vdisplay, mode->flags & DRM_MODE_FLAG_INTERLACE ? "i" : "p",
		drm_mode_vrefresh(mode), vcstate->output_type, vp->id);

	vop2_lock(vop2);

	ret = clk_prepare_enable(vp->dclk);
	if (ret < 0) {
		drm_err(vop2->drm, "failed to enable dclk for video port%d - %d\n",
			vp->id, ret);
		vop2_unlock(vop2);
		return;
	}

	/* The first active port powers up the whole VOP. */
	if (!vop2->enable_count)
		vop2_enable(vop2);

	vop2->enable_count++;

	vop2_crtc_enable_irq(vp, VP_INT_POST_BUF_EMPTY);

	/* Collect sync/clock polarity flags for the interface mux. */
	polflags = 0;
	if (vcstate->bus_flags & DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE)
		polflags |= POLFLAG_DCLK_INV;
	if (mode->flags & DRM_MODE_FLAG_PHSYNC)
		polflags |= BIT(HSYNC_POSITIVE);
	if (mode->flags & DRM_MODE_FLAG_PVSYNC)
		polflags |= BIT(VSYNC_POSITIVE);

	drm_for_each_encoder_mask(encoder, crtc->dev, crtc_state->encoder_mask) {
		struct rockchip_encoder *rkencoder = to_rockchip_encoder(encoder);

		rk3568_set_intf_mux(vp, rkencoder->crtc_endpoint_id, polflags);
	}

	/* Fall back to 8-bit output when the port cannot do 10-bit. */
	if (vcstate->output_mode == ROCKCHIP_OUT_MODE_AAAA &&
	    !(vp_data->feature & VOP_FEATURE_OUTPUT_10BIT))
		out_mode = ROCKCHIP_OUT_MODE_P888;
	else
		out_mode = vcstate->output_mode;

	dsp_ctrl |= FIELD_PREP(RK3568_VP_DSP_CTRL__OUT_MODE, out_mode);

	if (vop2_output_uv_swap(vcstate->bus_format, vcstate->output_mode))
		dsp_ctrl |= RK3568_VP_DSP_CTRL__DSP_RB_SWAP;

	if (is_yuv_output(vcstate->bus_format))
		dsp_ctrl |= RK3568_VP_DSP_CTRL__POST_DSP_OUT_R2Y;

	vop2_dither_setup(crtc, &dsp_ctrl);

	/* Program the horizontal and vertical timing windows. */
	vop2_vp_write(vp, RK3568_VP_DSP_HTOTAL_HS_END, (htotal << 16) | hsync_len);
	val = hact_st << 16;
	val |= hact_end;
	vop2_vp_write(vp, RK3568_VP_DSP_HACT_ST_END, val);

	val = vact_st << 16;
	val |= vact_end;
	vop2_vp_write(vp, RK3568_VP_DSP_VACT_ST_END, val);

	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		/* Interlaced: also program the field-1 timings. */
		u16 vact_st_f1 = vtotal + vact_st + 1;
		u16 vact_end_f1 = vact_st_f1 + vdisplay;

		val = vact_st_f1 << 16 | vact_end_f1;
		vop2_vp_write(vp, RK3568_VP_DSP_VACT_ST_END_F1, val);

		val = vtotal << 16 | (vtotal + vsync_len);
		vop2_vp_write(vp, RK3568_VP_DSP_VS_ST_END_F1, val);

		dsp_ctrl |= RK3568_VP_DSP_CTRL__DSP_INTERLACE;
		dsp_ctrl |= RK3568_VP_DSP_CTRL__DSP_FILED_POL;
		dsp_ctrl |= RK3568_VP_DSP_CTRL__P2I_EN;
		vtotal += vtotal + 1;
		act_end = vact_end_f1;
	} else {
		act_end = vact_end;
	}

	vop2_writel(vop2, RK3568_VP_LINE_FLAG(vp->id),
		    (act_end - us_to_vertical_line(mode, 0)) << 16 | act_end);

	vop2_vp_write(vp, RK3568_VP_DSP_VTOTAL_VS_END, vtotal << 16 | vsync_len);

	if (mode->flags & DRM_MODE_FLAG_DBLCLK) {
		dsp_ctrl |= RK3568_VP_DSP_CTRL__CORE_DCLK_DIV;
		clock *= 2;
	}

	vop2_vp_write(vp, RK3568_VP_MIPI_CTRL, 0);

	clk_set_rate(vp->dclk, clock);

	vop2_post_config(crtc);

	vop2_cfg_done(vp);

	/* Writing DSP_CTRL last releases the port from standby. */
	vop2_vp_write(vp, RK3568_VP_DSP_CTRL, dsp_ctrl);

	drm_crtc_vblank_on(crtc);

	vop2_unlock(vop2);
}
/*
 * Reject configurations that attach more planes to this CRTC than the
 * video port has mixer layers for.
 */
static int vop2_crtc_atomic_check(struct drm_crtc *crtc,
				  struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
	struct vop2_video_port *vp = to_vop2_video_port(crtc);
	struct drm_plane *plane;
	int nplanes = 0;

	drm_atomic_crtc_state_for_each_plane(plane, crtc_state)
		nplanes++;

	return (nplanes > vp->nlayers) ? -EINVAL : 0;
}
/*
 * A 16-bit alpha whose top byte is all ones (>= 0xff00) counts as
 * fully opaque; equivalent to (alpha >> 8) == 0xff for a u16.
 */
static bool is_opaque(u16 alpha)
{
	return alpha >= 0xff00;
}
/*
 * Translate a mixer's blending configuration (@alpha_config: per-pixel
 * alpha availability, global alpha values and premultiplication flags
 * for src and dst) into the four hardware control words in @alpha.
 */
static void vop2_parse_alpha(struct vop2_alpha_config *alpha_config,
			     struct vop2_alpha *alpha)
{
	/* Global alpha only matters when it is not fully opaque. */
	int src_glb_alpha_en = is_opaque(alpha_config->src_glb_alpha_value) ? 0 : 1;
	int dst_glb_alpha_en = is_opaque(alpha_config->dst_glb_alpha_value) ? 0 : 1;
	int src_color_mode = alpha_config->src_premulti_en ?
				ALPHA_SRC_PRE_MUL : ALPHA_SRC_NO_PRE_MUL;
	int dst_color_mode = alpha_config->dst_premulti_en ?
				ALPHA_SRC_PRE_MUL : ALPHA_SRC_NO_PRE_MUL;

	alpha->src_color_ctrl.val = 0;
	alpha->dst_color_ctrl.val = 0;
	alpha->src_alpha_ctrl.val = 0;
	alpha->dst_alpha_ctrl.val = 0;

	/* Pick the source blend mode: global, per-pixel, or combined. */
	if (!alpha_config->src_pixel_alpha_en)
		alpha->src_color_ctrl.bits.blend_mode = ALPHA_GLOBAL;
	else if (alpha_config->src_pixel_alpha_en && !src_glb_alpha_en)
		alpha->src_color_ctrl.bits.blend_mode = ALPHA_PER_PIX;
	else
		alpha->src_color_ctrl.bits.blend_mode = ALPHA_PER_PIX_GLOBAL;

	alpha->src_color_ctrl.bits.alpha_en = 1;

	if (alpha->src_color_ctrl.bits.blend_mode == ALPHA_GLOBAL) {
		alpha->src_color_ctrl.bits.color_mode = src_color_mode;
		alpha->src_color_ctrl.bits.factor_mode = SRC_FAC_ALPHA_SRC_GLOBAL;
	} else if (alpha->src_color_ctrl.bits.blend_mode == ALPHA_PER_PIX) {
		alpha->src_color_ctrl.bits.color_mode = src_color_mode;
		alpha->src_color_ctrl.bits.factor_mode = SRC_FAC_ALPHA_ONE;
	} else {
		alpha->src_color_ctrl.bits.color_mode = ALPHA_SRC_PRE_MUL;
		alpha->src_color_ctrl.bits.factor_mode = SRC_FAC_ALPHA_SRC_GLOBAL;
	}
	/* Hardware global alpha registers take the high 8 of 16 bits. */
	alpha->src_color_ctrl.bits.glb_alpha = alpha_config->src_glb_alpha_value >> 8;
	alpha->src_color_ctrl.bits.alpha_mode = ALPHA_STRAIGHT;
	alpha->src_color_ctrl.bits.alpha_cal_mode = ALPHA_SATURATION;

	alpha->dst_color_ctrl.bits.alpha_mode = ALPHA_STRAIGHT;
	alpha->dst_color_ctrl.bits.alpha_cal_mode = ALPHA_SATURATION;
	alpha->dst_color_ctrl.bits.blend_mode = ALPHA_GLOBAL;
	alpha->dst_color_ctrl.bits.glb_alpha = alpha_config->dst_glb_alpha_value >> 8;
	alpha->dst_color_ctrl.bits.color_mode = dst_color_mode;
	alpha->dst_color_ctrl.bits.factor_mode = ALPHA_SRC_INVERSE;

	alpha->src_alpha_ctrl.bits.alpha_mode = ALPHA_STRAIGHT;
	alpha->src_alpha_ctrl.bits.blend_mode = alpha->src_color_ctrl.bits.blend_mode;
	alpha->src_alpha_ctrl.bits.alpha_cal_mode = ALPHA_SATURATION;
	alpha->src_alpha_ctrl.bits.factor_mode = ALPHA_ONE;

	alpha->dst_alpha_ctrl.bits.alpha_mode = ALPHA_STRAIGHT;
	if (alpha_config->dst_pixel_alpha_en && !dst_glb_alpha_en)
		alpha->dst_alpha_ctrl.bits.blend_mode = ALPHA_PER_PIX;
	else
		alpha->dst_alpha_ctrl.bits.blend_mode = ALPHA_PER_PIX_GLOBAL;
	alpha->dst_alpha_ctrl.bits.alpha_cal_mode = ALPHA_NO_SATURATION;
	alpha->dst_alpha_ctrl.bits.factor_mode = ALPHA_SRC_INVERSE;
}
/*
 * Mixers are assigned to video ports in order, so the first mixer of
 * @port_id comes right after all windows of the lower-numbered ports.
 */
static int vop2_find_start_mixer_id_for_vp(struct vop2 *vop2, u8 port_id)
{
	int layers = 0;
	u8 i;

	for (i = 0; i < port_id; i++)
		layers += hweight32(vop2->vps[i].win_mask);

	return layers;
}
/*
 * Program the per-cluster alpha mixer for @main_win.
 *
 * In the single-window cluster mode used here, win0 acts as the dst/bottom
 * layer and win1 is an all-zero src/top layer, so only the window's own
 * global alpha (taken from the plane state) is blended in.
 */
static void vop2_setup_cluster_alpha(struct vop2 *vop2, struct vop2_win *main_win)
{
	/* Each cluster has a 0x10-sized bank of mix registers. */
	u32 offset = (main_win->data->phys_id * 0x10);
	struct vop2_alpha_config alpha_config;
	struct vop2_alpha alpha;
	struct drm_plane_state *bottom_win_pstate;
	bool src_pixel_alpha_en = false;
	u16 src_glb_alpha_val, dst_glb_alpha_val;
	bool premulti_en = false;
	bool swap = false;

	/* At one win mode, win0 is dst/bottom win, and win1 is a all zero src/top win */
	bottom_win_pstate = main_win->base.state;
	src_glb_alpha_val = 0;
	dst_glb_alpha_val = main_win->base.state->alpha;

	/* Nothing to blend if the window has no framebuffer attached. */
	if (!bottom_win_pstate->fb)
		return;

	alpha_config.src_premulti_en = premulti_en;
	alpha_config.dst_premulti_en = false;
	alpha_config.src_pixel_alpha_en = src_pixel_alpha_en;
	alpha_config.dst_pixel_alpha_en = true; /* alpha value need transfer to next mix */
	alpha_config.src_glb_alpha_value = src_glb_alpha_val;
	alpha_config.dst_glb_alpha_value = dst_glb_alpha_val;
	vop2_parse_alpha(&alpha_config, &alpha);

	alpha.src_color_ctrl.bits.src_dst_swap = swap;
	vop2_writel(vop2, RK3568_CLUSTER0_MIX_SRC_COLOR_CTRL + offset,
		    alpha.src_color_ctrl.val);
	vop2_writel(vop2, RK3568_CLUSTER0_MIX_DST_COLOR_CTRL + offset,
		    alpha.dst_color_ctrl.val);
	vop2_writel(vop2, RK3568_CLUSTER0_MIX_SRC_ALPHA_CTRL + offset,
		    alpha.src_alpha_ctrl.val);
	vop2_writel(vop2, RK3568_CLUSTER0_MIX_DST_ALPHA_CTRL + offset,
		    alpha.dst_alpha_ctrl.val);
}
/*
 * Program the overlay alpha mixers for all planes on video port @vp.
 *
 * Each zpos level above the bottom layer owns one mixer; the mixer index
 * is offset by the mixers consumed by lower-numbered ports. On VP0 the
 * HDR mix additionally has to forward (or clear) the bottom layer's
 * global alpha.
 */
static void vop2_setup_alpha(struct vop2_video_port *vp)
{
	struct vop2 *vop2 = vp->vop2;
	struct drm_framebuffer *fb;
	struct vop2_alpha_config alpha_config;
	struct vop2_alpha alpha;
	struct drm_plane *plane;
	int pixel_alpha_en;
	int premulti_en, gpremulti_en = 0;
	int mixer_id;
	u32 offset;
	bool bottom_layer_alpha_en = false;
	u32 dst_global_alpha = DRM_BLEND_ALPHA_OPAQUE;

	mixer_id = vop2_find_start_mixer_id_for_vp(vop2, vp->id);
	alpha_config.dst_pixel_alpha_en = true; /* alpha value need transfer to next mix */

	/* First pass: find out whether the bottom (zpos 0) layer needs global alpha. */
	drm_atomic_crtc_for_each_plane(plane, &vp->crtc) {
		struct vop2_win *win = to_vop2_win(plane);

		if (plane->state->normalized_zpos == 0 &&
		    !is_opaque(plane->state->alpha) &&
		    !vop2_cluster_window(win)) {
			/*
			 * If bottom layer have global alpha effect [except cluster layer,
			 * because cluster have deal with bottom layer global alpha value
			 * at cluster mix], bottom layer mix need deal with global alpha.
			 */
			bottom_layer_alpha_en = true;
			dst_global_alpha = plane->state->alpha;
		}
	}

	/* Second pass: program one mixer per plane above the bottom layer. */
	drm_atomic_crtc_for_each_plane(plane, &vp->crtc) {
		struct vop2_win *win = to_vop2_win(plane);
		int zpos = plane->state->normalized_zpos;

		if (plane->state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI)
			premulti_en = 1;
		else
			premulti_en = 0;

		plane = &win->base;
		fb = plane->state->fb;

		pixel_alpha_en = fb->format->has_alpha;

		alpha_config.src_premulti_en = premulti_en;

		if (bottom_layer_alpha_en && zpos == 1) {
			/* Remember the blend mode for the HDR mix below. */
			gpremulti_en = premulti_en;
			/* Cd = Cs + (1 - As) * Cd * Agd */
			alpha_config.dst_premulti_en = false;
			alpha_config.src_pixel_alpha_en = pixel_alpha_en;
			alpha_config.src_glb_alpha_value = plane->state->alpha;
			alpha_config.dst_glb_alpha_value = dst_global_alpha;
		} else if (vop2_cluster_window(win)) {
			/* Mix output data only have pixel alpha */
			alpha_config.dst_premulti_en = true;
			alpha_config.src_pixel_alpha_en = true;
			alpha_config.src_glb_alpha_value = DRM_BLEND_ALPHA_OPAQUE;
			alpha_config.dst_glb_alpha_value = DRM_BLEND_ALPHA_OPAQUE;
		} else {
			/* Cd = Cs + (1 - As) * Cd */
			alpha_config.dst_premulti_en = true;
			alpha_config.src_pixel_alpha_en = pixel_alpha_en;
			alpha_config.src_glb_alpha_value = plane->state->alpha;
			alpha_config.dst_glb_alpha_value = DRM_BLEND_ALPHA_OPAQUE;
		}

		vop2_parse_alpha(&alpha_config, &alpha);

		/* Mixer N blends layer N on top of the output of mixer N-1. */
		offset = (mixer_id + zpos - 1) * 0x10;
		vop2_writel(vop2, RK3568_MIX0_SRC_COLOR_CTRL + offset,
			    alpha.src_color_ctrl.val);
		vop2_writel(vop2, RK3568_MIX0_DST_COLOR_CTRL + offset,
			    alpha.dst_color_ctrl.val);
		vop2_writel(vop2, RK3568_MIX0_SRC_ALPHA_CTRL + offset,
			    alpha.src_alpha_ctrl.val);
		vop2_writel(vop2, RK3568_MIX0_DST_ALPHA_CTRL + offset,
			    alpha.dst_alpha_ctrl.val);
	}

	if (vp->id == 0) {
		if (bottom_layer_alpha_en) {
			/* Transfer pixel alpha to hdr mix */
			alpha_config.src_premulti_en = gpremulti_en;
			alpha_config.dst_premulti_en = true;
			alpha_config.src_pixel_alpha_en = true;
			alpha_config.src_glb_alpha_value = DRM_BLEND_ALPHA_OPAQUE;
			alpha_config.dst_glb_alpha_value = DRM_BLEND_ALPHA_OPAQUE;
			vop2_parse_alpha(&alpha_config, &alpha);

			vop2_writel(vop2, RK3568_HDR0_SRC_COLOR_CTRL,
				    alpha.src_color_ctrl.val);
			vop2_writel(vop2, RK3568_HDR0_DST_COLOR_CTRL,
				    alpha.dst_color_ctrl.val);
			vop2_writel(vop2, RK3568_HDR0_SRC_ALPHA_CTRL,
				    alpha.src_alpha_ctrl.val);
			vop2_writel(vop2, RK3568_HDR0_DST_ALPHA_CTRL,
				    alpha.dst_alpha_ctrl.val);
		} else {
			vop2_writel(vop2, RK3568_HDR0_SRC_COLOR_CTRL, 0);
		}
	}
}
/*
 * Configure the global overlay: route each window to the video port it
 * belongs to, select which layer slot each window occupies, and set the
 * port mux boundaries that partition the shared mixer chain between the
 * video ports.
 *
 * Note: OVL_CTRL is cleared first so layer/port select take effect
 * immediately, and re-armed with LAYERSEL_REGDONE_IMD at the end.
 */
static void vop2_setup_layer_mixer(struct vop2_video_port *vp)
{
	struct vop2 *vop2 = vp->vop2;
	struct drm_plane *plane;
	u32 layer_sel = 0;
	u32 port_sel;
	unsigned int nlayer, ofs;
	struct drm_display_mode *adjusted_mode;
	u16 hsync_len;
	u16 hdisplay;
	u32 bg_dly;
	u32 pre_scan_dly;
	int i;
	struct vop2_video_port *vp0 = &vop2->vps[0];
	struct vop2_video_port *vp1 = &vop2->vps[1];
	struct vop2_video_port *vp2 = &vop2->vps[2];

	adjusted_mode = &vp->crtc.state->adjusted_mode;
	hsync_len = adjusted_mode->crtc_hsync_end - adjusted_mode->crtc_hsync_start;
	hdisplay = adjusted_mode->crtc_hdisplay;

	/* Background/pre-scan delays compensate the mixer pipeline latency. */
	bg_dly = vp->data->pre_scan_max_dly[3];
	vop2_writel(vop2, RK3568_VP_BG_MIX_CTRL(vp->id),
		    FIELD_PREP(RK3568_VP_BG_MIX_CTRL__BG_DLY, bg_dly));

	pre_scan_dly = ((bg_dly + (hdisplay >> 1) - 1) << 16) | hsync_len;
	vop2_vp_write(vp, RK3568_VP_PRE_SCAN_HTIMING, pre_scan_dly);

	vop2_writel(vop2, RK3568_OVL_CTRL, 0);
	port_sel = vop2_readl(vop2, RK3568_OVL_PORT_SEL);
	port_sel &= RK3568_OVL_PORT_SEL__SEL_PORT;

	/*
	 * The port mux marks the index of the last mixer used by each port;
	 * 8 means "no mixer assigned" for a port without layers.
	 */
	if (vp0->nlayers)
		port_sel |= FIELD_PREP(RK3568_OVL_PORT_SET__PORT0_MUX,
				     vp0->nlayers - 1);
	else
		port_sel |= FIELD_PREP(RK3568_OVL_PORT_SET__PORT0_MUX, 8);

	if (vp1->nlayers)
		port_sel |= FIELD_PREP(RK3568_OVL_PORT_SET__PORT1_MUX,
				     (vp0->nlayers + vp1->nlayers - 1));
	else
		port_sel |= FIELD_PREP(RK3568_OVL_PORT_SET__PORT1_MUX, 8);

	if (vp2->nlayers)
		port_sel |= FIELD_PREP(RK3568_OVL_PORT_SET__PORT2_MUX,
					(vp2->nlayers + vp1->nlayers + vp0->nlayers - 1));
	else
		/*
		 * Fixed: this branch previously wrote PORT1_MUX, corrupting
		 * VP1's mux and leaving VP2's mux unconfigured.
		 */
		port_sel |= FIELD_PREP(RK3568_OVL_PORT_SET__PORT2_MUX, 8);

	layer_sel = vop2_readl(vop2, RK3568_OVL_LAYER_SEL);

	/* Layer slots for this port start after those of all lower ports. */
	ofs = 0;
	for (i = 0; i < vp->id; i++)
		ofs += vop2->vps[i].nlayers;

	nlayer = 0;
	drm_atomic_crtc_for_each_plane(plane, &vp->crtc) {
		struct vop2_win *win = to_vop2_win(plane);

		/* Route the window's physical block to this video port. */
		switch (win->data->phys_id) {
		case ROCKCHIP_VOP2_CLUSTER0:
			port_sel &= ~RK3568_OVL_PORT_SEL__CLUSTER0;
			port_sel |= FIELD_PREP(RK3568_OVL_PORT_SEL__CLUSTER0, vp->id);
			break;
		case ROCKCHIP_VOP2_CLUSTER1:
			port_sel &= ~RK3568_OVL_PORT_SEL__CLUSTER1;
			port_sel |= FIELD_PREP(RK3568_OVL_PORT_SEL__CLUSTER1, vp->id);
			break;
		case ROCKCHIP_VOP2_ESMART0:
			port_sel &= ~RK3568_OVL_PORT_SEL__ESMART0;
			port_sel |= FIELD_PREP(RK3568_OVL_PORT_SEL__ESMART0, vp->id);
			break;
		case ROCKCHIP_VOP2_ESMART1:
			port_sel &= ~RK3568_OVL_PORT_SEL__ESMART1;
			port_sel |= FIELD_PREP(RK3568_OVL_PORT_SEL__ESMART1, vp->id);
			break;
		case ROCKCHIP_VOP2_SMART0:
			port_sel &= ~RK3568_OVL_PORT_SEL__SMART0;
			port_sel |= FIELD_PREP(RK3568_OVL_PORT_SEL__SMART0, vp->id);
			break;
		case ROCKCHIP_VOP2_SMART1:
			port_sel &= ~RK3568_OVL_PORT_SEL__SMART1;
			port_sel |= FIELD_PREP(RK3568_OVL_PORT_SEL__SMART1, vp->id);
			break;
		}

		/* Place the window in the layer slot matching its zpos. */
		layer_sel &= ~RK3568_OVL_LAYER_SEL__LAYER(plane->state->normalized_zpos + ofs,
							  0x7);
		layer_sel |= RK3568_OVL_LAYER_SEL__LAYER(plane->state->normalized_zpos + ofs,
							 win->data->layer_sel_id);
		nlayer++;
	}

	/* configure unused layers to 0x5 (reserved) */
	for (; nlayer < vp->nlayers; nlayer++) {
		layer_sel &= ~RK3568_OVL_LAYER_SEL__LAYER(nlayer + ofs, 0x7);
		layer_sel |= RK3568_OVL_LAYER_SEL__LAYER(nlayer + ofs, 5);
	}

	vop2_writel(vop2, RK3568_OVL_LAYER_SEL, layer_sel);
	vop2_writel(vop2, RK3568_OVL_PORT_SEL, port_sel);
	vop2_writel(vop2, RK3568_OVL_CTRL, RK3568_OVL_CTRL__LAYERSEL_REGDONE_IMD);
}
static void vop2_setup_dly_for_windows(struct vop2 *vop2)
{
struct vop2_win *win;
int i = 0;
u32 cdly = 0, sdly = 0;
for (i = 0; i < vop2->data->win_size; i++) {
u32 dly;
win = &vop2->win[i];
dly = win->delay;
switch (win->data->phys_id) {
case ROCKCHIP_VOP2_CLUSTER0:
cdly |= FIELD_PREP(RK3568_CLUSTER_DLY_NUM__CLUSTER0_0, dly);
cdly |= FIELD_PREP(RK3568_CLUSTER_DLY_NUM__CLUSTER0_1, dly);
break;
case ROCKCHIP_VOP2_CLUSTER1:
cdly |= FIELD_PREP(RK3568_CLUSTER_DLY_NUM__CLUSTER1_0, dly);
cdly |= FIELD_PREP(RK3568_CLUSTER_DLY_NUM__CLUSTER1_1, dly);
break;
case ROCKCHIP_VOP2_ESMART0:
sdly |= FIELD_PREP(RK3568_SMART_DLY_NUM__ESMART0, dly);
break;
case ROCKCHIP_VOP2_ESMART1:
sdly |= FIELD_PREP(RK3568_SMART_DLY_NUM__ESMART1, dly);
break;
case ROCKCHIP_VOP2_SMART0:
sdly |= FIELD_PREP(RK3568_SMART_DLY_NUM__SMART0, dly);
break;
case ROCKCHIP_VOP2_SMART1:
sdly |= FIELD_PREP(RK3568_SMART_DLY_NUM__SMART1, dly);
break;
}
}
vop2_writel(vop2, RK3568_CLUSTER_DLY_NUM, cdly);
vop2_writel(vop2, RK3568_SMART_DLY_NUM, sdly);
}
/*
 * Prepare the overlay configuration before the planes of @crtc are
 * committed: rebuild the port's window mask, reset each window's delay
 * to the default, program cluster alpha, and then set up the layer
 * mixer, alpha mixers and window delays.
 */
static void vop2_crtc_atomic_begin(struct drm_crtc *crtc,
				   struct drm_atomic_state *state)
{
	struct vop2_video_port *vp = to_vop2_video_port(crtc);
	struct vop2 *vop2 = vp->vop2;
	struct drm_plane *plane;

	vp->win_mask = 0;

	drm_atomic_crtc_for_each_plane(plane, crtc) {
		struct vop2_win *win = to_vop2_win(plane);

		win->delay = win->data->dly[VOP2_DLY_MODE_DEFAULT];

		vp->win_mask |= BIT(win->data->phys_id);

		if (vop2_cluster_window(win))
			vop2_setup_cluster_alpha(vop2, win);
	}

	/* No windows on this port -> nothing to mix. */
	if (!vp->win_mask)
		return;

	vop2_setup_layer_mixer(vp);
	vop2_setup_alpha(vp);
	vop2_setup_dly_for_windows(vop2);
}
/*
 * Finalize the atomic commit for @crtc: apply post-processing config,
 * latch the shadowed registers (cfg_done), and stash any pending
 * pageflip event for delivery from the vblank ISR.
 */
static void vop2_crtc_atomic_flush(struct drm_crtc *crtc,
				   struct drm_atomic_state *state)
{
	struct vop2_video_port *vp = to_vop2_video_port(crtc);

	vop2_post_config(crtc);

	vop2_cfg_done(vp);

	/* event_lock serializes against the vblank irq handler. */
	spin_lock_irq(&crtc->dev->event_lock);

	if (crtc->state->event) {
		WARN_ON(drm_crtc_vblank_get(crtc));
		vp->event = crtc->state->event;
		crtc->state->event = NULL;
	}

	spin_unlock_irq(&crtc->dev->event_lock);
}
/* Atomic modeset helper callbacks for a VOP2 video port (CRTC). */
static const struct drm_crtc_helper_funcs vop2_crtc_helper_funcs = {
	.mode_fixup = vop2_crtc_mode_fixup,
	.atomic_check = vop2_crtc_atomic_check,
	.atomic_begin = vop2_crtc_atomic_begin,
	.atomic_flush = vop2_crtc_atomic_flush,
	.atomic_enable = vop2_crtc_atomic_enable,
	.atomic_disable = vop2_crtc_atomic_disable,
};
static void vop2_crtc_reset(struct drm_crtc *crtc)
{
struct rockchip_crtc_state *vcstate = to_rockchip_crtc_state(crtc->state);
if (crtc->state) {
__drm_atomic_helper_crtc_destroy_state(crtc->state);
kfree(vcstate);
}
vcstate = kzalloc(sizeof(*vcstate), GFP_KERNEL);
if (!vcstate)
return;
crtc->state = &vcstate->base;
crtc->state->crtc = crtc;
}
/*
 * Duplicate the current CRTC state for an atomic commit.
 *
 * Adds the conventional NULL guard: if reset failed (or was never run)
 * crtc->state is NULL and the original kmemdup of the container would
 * have dereferenced a near-NULL pointer.
 *
 * Returns the new state's base, or NULL on missing state / OOM.
 */
static struct drm_crtc_state *vop2_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct rockchip_crtc_state *vcstate;

	if (WARN_ON(!crtc->state))
		return NULL;

	vcstate = kmemdup(to_rockchip_crtc_state(crtc->state),
			  sizeof(*vcstate), GFP_KERNEL);
	if (!vcstate)
		return NULL;

	__drm_atomic_helper_crtc_duplicate_state(crtc, &vcstate->base);

	return &vcstate->base;
}
/* Tear down a duplicated CRTC state and free its container. */
static void vop2_crtc_destroy_state(struct drm_crtc *crtc,
				    struct drm_crtc_state *state)
{
	__drm_atomic_helper_crtc_destroy_state(state);
	kfree(to_rockchip_crtc_state(state));
}
/* Core CRTC callbacks; state handling uses the custom rockchip_crtc_state. */
static const struct drm_crtc_funcs vop2_crtc_funcs = {
	.set_config = drm_atomic_helper_set_config,
	.page_flip = drm_atomic_helper_page_flip,
	.destroy = drm_crtc_cleanup,
	.reset = vop2_crtc_reset,
	.atomic_duplicate_state = vop2_crtc_duplicate_state,
	.atomic_destroy_state = vop2_crtc_destroy_state,
	.enable_vblank = vop2_crtc_enable_vblank,
	.disable_vblank = vop2_crtc_disable_vblank,
};
/*
 * Shared interrupt handler for all VOP2 video ports.
 *
 * Handles per-port dsp-hold / frame-start / post-buf-empty interrupts
 * and the two AXI bus-error status registers. Interrupts are acked by
 * writing (irqs << 16 | irqs) to the clear registers.
 *
 * Returns IRQ_HANDLED if any VOP2 interrupt was serviced, IRQ_NONE
 * otherwise (e.g. when the line fired for the iommu sharing this irq).
 */
static irqreturn_t vop2_isr(int irq, void *data)
{
	struct vop2 *vop2 = data;
	const struct vop2_data *vop2_data = vop2->data;
	u32 axi_irqs[VOP2_SYS_AXI_BUS_NUM];
	int ret = IRQ_NONE;
	int i;

	/*
	 * The irq is shared with the iommu. If the runtime-pm state of the
	 * vop2-device is disabled the irq has to be targeted at the iommu.
	 */
	if (!pm_runtime_get_if_in_use(vop2->dev))
		return IRQ_NONE;

	for (i = 0; i < vop2_data->nr_vps; i++) {
		struct vop2_video_port *vp = &vop2->vps[i];
		struct drm_crtc *crtc = &vp->crtc;
		u32 irqs;

		irqs = vop2_readl(vop2, RK3568_VP_INT_STATUS(vp->id));
		vop2_writel(vop2, RK3568_VP_INT_CLR(vp->id), irqs << 16 | irqs);

		if (irqs & VP_INT_DSP_HOLD_VALID) {
			/* Wakes up vop2_crtc_atomic_disable() waiting for hold. */
			complete(&vp->dsp_hold_completion);
			ret = IRQ_HANDLED;
		}

		if (irqs & VP_INT_FS_FIELD) {
			drm_crtc_handle_vblank(crtc);
			spin_lock(&crtc->dev->event_lock);
			if (vp->event) {
				u32 val = vop2_readl(vop2, RK3568_REG_CFG_DONE);

				/*
				 * Only deliver the event once the cfg_done bit
				 * for this port has been consumed by hardware.
				 */
				if (!(val & BIT(vp->id))) {
					drm_crtc_send_vblank_event(crtc, vp->event);
					vp->event = NULL;
					drm_crtc_vblank_put(crtc);
				}
			}
			spin_unlock(&crtc->dev->event_lock);

			ret = IRQ_HANDLED;
		}

		if (irqs & VP_INT_POST_BUF_EMPTY) {
			drm_err_ratelimited(vop2->drm,
					    "POST_BUF_EMPTY irq err at vp%d\n",
					    vp->id);
			ret = IRQ_HANDLED;
		}
	}

	axi_irqs[0] = vop2_readl(vop2, RK3568_SYS0_INT_STATUS);
	vop2_writel(vop2, RK3568_SYS0_INT_CLR, axi_irqs[0] << 16 | axi_irqs[0]);
	axi_irqs[1] = vop2_readl(vop2, RK3568_SYS1_INT_STATUS);
	vop2_writel(vop2, RK3568_SYS1_INT_CLR, axi_irqs[1] << 16 | axi_irqs[1]);

	for (i = 0; i < ARRAY_SIZE(axi_irqs); i++) {
		if (axi_irqs[i] & VOP2_INT_BUS_ERRPR) {
			drm_err_ratelimited(vop2->drm, "BUS_ERROR irq err\n");
			ret = IRQ_HANDLED;
		}
	}

	pm_runtime_put(vop2->dev);

	return ret;
}
/*
 * Register a VOP2 window as a DRM plane and attach its standard
 * properties (rotation, alpha, blend mode, zpos).
 *
 * Returns 0 on success or a negative errno from plane registration.
 */
static int vop2_plane_init(struct vop2 *vop2, struct vop2_win *win,
			   unsigned long possible_crtcs)
{
	const struct vop2_win_data *win_data = win->data;
	unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
				  BIT(DRM_MODE_BLEND_PREMULTI) |
				  BIT(DRM_MODE_BLEND_COVERAGE);
	int ret;

	ret = drm_universal_plane_init(vop2->drm, &win->base, possible_crtcs,
				       &vop2_plane_funcs, win_data->formats,
				       win_data->nformats,
				       win_data->format_modifiers,
				       win->type, win_data->name);
	if (ret) {
		drm_err(vop2->drm, "failed to initialize plane %d\n", ret);
		return ret;
	}

	drm_plane_helper_add(&win->base, &vop2_plane_helper_funcs);

	if (win->data->supported_rotations)
		drm_plane_create_rotation_property(&win->base, DRM_MODE_ROTATE_0,
						   DRM_MODE_ROTATE_0 |
						   win->data->supported_rotations);
	drm_plane_create_alpha_property(&win->base);
	drm_plane_create_blend_mode_property(&win->base, blend_caps);
	drm_plane_create_zpos_property(&win->base, win->win_id, 0,
				       vop2->registered_num_wins - 1);

	return 0;
}
/*
 * Return the first connected video port (one with a DT port node) that
 * has not yet been assigned a primary plane, or NULL if none is left.
 */
static struct vop2_video_port *find_vp_without_primary(struct vop2 *vop2)
{
	unsigned int idx;

	for (idx = 0; idx < vop2->data->nr_vps; idx++) {
		struct vop2_video_port *vp = &vop2->vps[idx];

		if (vp->crtc.port && !vp->primary_plane)
			return vp;
	}

	return NULL;
}
/* Total number of mixer layers shared between all video ports. */
#define NR_LAYERS 6

/*
 * Create one DRM CRTC per connected video port and register all window
 * planes.
 *
 * Ports without a remote endpoint in the DT graph are skipped. Primary
 * planes are assigned to ports in order; leftover primaries are
 * demoted to overlays. Finally the NR_LAYERS mixer layers are split
 * evenly over the connected ports.
 *
 * Returns 0 on success or a negative errno.
 */
static int vop2_create_crtcs(struct vop2 *vop2)
{
	const struct vop2_data *vop2_data = vop2->data;
	struct drm_device *drm = vop2->drm;
	struct device *dev = vop2->dev;
	struct drm_plane *plane;
	struct device_node *port;
	struct vop2_video_port *vp;
	int i, nvp, nvps = 0;
	int ret;

	/* Pass 1: bind clocks and DT port nodes to each video port. */
	for (i = 0; i < vop2_data->nr_vps; i++) {
		const struct vop2_video_port_data *vp_data;
		struct device_node *np;
		char dclk_name[9];

		vp_data = &vop2_data->vp[i];
		vp = &vop2->vps[i];
		vp->vop2 = vop2;
		vp->id = vp_data->id;
		vp->regs = vp_data->regs;
		vp->data = vp_data;

		snprintf(dclk_name, sizeof(dclk_name), "dclk_vp%d", vp->id);
		vp->dclk = devm_clk_get(vop2->dev, dclk_name);
		if (IS_ERR(vp->dclk)) {
			drm_err(vop2->drm, "failed to get %s\n", dclk_name);
			return PTR_ERR(vp->dclk);
		}

		/* A port with no remote endpoint is not wired up on this board. */
		np = of_graph_get_remote_node(dev->of_node, i, -1);
		if (!np) {
			drm_dbg(vop2->drm, "%s: No remote for vp%d\n", __func__, i);
			continue;
		}
		of_node_put(np);

		port = of_graph_get_port_by_id(dev->of_node, i);
		if (!port) {
			drm_err(vop2->drm, "no port node found for video_port%d\n", i);
			return -ENOENT;
		}

		vp->crtc.port = port;
		nvps++;
	}

	nvp = 0;
	/* Pass 2: register all windows as planes. */
	for (i = 0; i < vop2->registered_num_wins; i++) {
		struct vop2_win *win = &vop2->win[i];
		u32 possible_crtcs = 0;

		if (vop2->data->soc_id == 3566) {
			/*
			 * On RK3566 these windows don't have an independent
			 * framebuffer. They share the framebuffer with smart0,
			 * esmart0 and cluster0 respectively.
			 */
			switch (win->data->phys_id) {
			case ROCKCHIP_VOP2_SMART1:
			case ROCKCHIP_VOP2_ESMART1:
			case ROCKCHIP_VOP2_CLUSTER1:
				continue;
			}
		}

		if (win->type == DRM_PLANE_TYPE_PRIMARY) {
			vp = find_vp_without_primary(vop2);
			if (vp) {
				possible_crtcs = BIT(nvp);
				vp->primary_plane = win;
				nvp++;
			} else {
				/* change the unused primary window to overlay window */
				win->type = DRM_PLANE_TYPE_OVERLAY;
			}
		}

		/* Overlays may be assigned to any of the connected CRTCs. */
		if (win->type == DRM_PLANE_TYPE_OVERLAY)
			possible_crtcs = (1 << nvps) - 1;

		ret = vop2_plane_init(vop2, win, possible_crtcs);
		if (ret) {
			drm_err(vop2->drm, "failed to init plane %s: %d\n",
				win->data->name, ret);
			return ret;
		}
	}

	/* Pass 3: create the CRTCs for the connected ports. */
	for (i = 0; i < vop2_data->nr_vps; i++) {
		vp = &vop2->vps[i];

		if (!vp->crtc.port)
			continue;

		plane = &vp->primary_plane->base;

		ret = drm_crtc_init_with_planes(drm, &vp->crtc, plane, NULL,
						&vop2_crtc_funcs,
						"video_port%d", vp->id);
		if (ret) {
			drm_err(vop2->drm, "crtc init for video_port%d failed\n", i);
			return ret;
		}

		drm_crtc_helper_add(&vp->crtc, &vop2_crtc_helper_funcs);

		init_completion(&vp->dsp_hold_completion);
	}

	/*
	 * On the VOP2 it's very hard to change the number of layers on a VP
	 * during runtime, so we distribute the layers equally over the used
	 * VPs
	 */
	for (i = 0; i < vop2->data->nr_vps; i++) {
		struct vop2_video_port *vp = &vop2->vps[i];

		if (vp->crtc.port)
			vp->nlayers = NR_LAYERS / nvps;
	}

	return 0;
}
/*
 * Undo vop2_create_crtcs(): clean up all planes first, then all CRTCs
 * (dropping the DT port references taken during creation).
 */
static void vop2_destroy_crtcs(struct vop2 *vop2)
{
	struct drm_device *drm = vop2->drm;
	struct list_head *crtc_list = &drm->mode_config.crtc_list;
	struct list_head *plane_list = &drm->mode_config.plane_list;
	struct drm_crtc *crtc, *tmpc;
	struct drm_plane *plane, *tmpp;

	list_for_each_entry_safe(plane, tmpp, plane_list, head)
		drm_plane_cleanup(plane);

	/*
	 * Destroy CRTC after vop2_plane_destroy() since vop2_disable_plane()
	 * references the CRTC.
	 */
	list_for_each_entry_safe(crtc, tmpc, crtc_list, head) {
		of_node_put(crtc->port);
		drm_crtc_cleanup(crtc);
	}
}
/*
 * Look for a video port that has an RGB endpoint in the DT graph.
 * Returns the port index of the first match, or -ENOENT if no port
 * drives an RGB encoder.
 */
static int vop2_find_rgb_encoder(struct vop2 *vop2)
{
	struct device_node *np = vop2->dev->of_node;
	int vp_id;

	for (vp_id = 0; vp_id < vop2->data->nr_vps; vp_id++) {
		struct device_node *ep;

		ep = of_graph_get_endpoint_by_regs(np, vp_id,
						   ROCKCHIP_VOP2_EP_RGB0);
		if (ep) {
			of_node_put(ep);
			return vp_id;
		}
	}

	return -ENOENT;
}
/*
 * Register field layout for a cluster window, relative to the window's
 * base offset (added in vop2_cluster_init()). Entries with .reg set to
 * 0xffffffff mark fields the cluster hardware does not implement.
 */
static struct reg_field vop2_cluster_regs[VOP2_WIN_MAX_REG] = {
	[VOP2_WIN_ENABLE] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 0, 0),
	[VOP2_WIN_FORMAT] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 1, 5),
	[VOP2_WIN_RB_SWAP] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 14, 14),
	[VOP2_WIN_DITHER_UP] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 18, 18),
	[VOP2_WIN_ACT_INFO] = REG_FIELD(RK3568_CLUSTER_WIN_ACT_INFO, 0, 31),
	[VOP2_WIN_DSP_INFO] = REG_FIELD(RK3568_CLUSTER_WIN_DSP_INFO, 0, 31),
	[VOP2_WIN_DSP_ST] = REG_FIELD(RK3568_CLUSTER_WIN_DSP_ST, 0, 31),
	[VOP2_WIN_YRGB_MST] = REG_FIELD(RK3568_CLUSTER_WIN_YRGB_MST, 0, 31),
	[VOP2_WIN_UV_MST] = REG_FIELD(RK3568_CLUSTER_WIN_CBR_MST, 0, 31),
	[VOP2_WIN_YUV_CLIP] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 19, 19),
	[VOP2_WIN_YRGB_VIR] = REG_FIELD(RK3568_CLUSTER_WIN_VIR, 0, 15),
	[VOP2_WIN_UV_VIR] = REG_FIELD(RK3568_CLUSTER_WIN_VIR, 16, 31),
	[VOP2_WIN_Y2R_EN] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 8, 8),
	[VOP2_WIN_R2Y_EN] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 9, 9),
	[VOP2_WIN_CSC_MODE] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 10, 11),

	/* Scale */
	[VOP2_WIN_SCALE_YRGB_X] = REG_FIELD(RK3568_CLUSTER_WIN_SCL_FACTOR_YRGB, 0, 15),
	[VOP2_WIN_SCALE_YRGB_Y] = REG_FIELD(RK3568_CLUSTER_WIN_SCL_FACTOR_YRGB, 16, 31),
	[VOP2_WIN_YRGB_VER_SCL_MODE] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL1, 14, 15),
	[VOP2_WIN_YRGB_HOR_SCL_MODE] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL1, 12, 13),
	[VOP2_WIN_BIC_COE_SEL] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL1, 2, 3),
	[VOP2_WIN_VSD_YRGB_GT2] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL1, 28, 28),
	[VOP2_WIN_VSD_YRGB_GT4] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL1, 29, 29),

	/* cluster regs */
	[VOP2_WIN_AFBC_ENABLE] = REG_FIELD(RK3568_CLUSTER_CTRL, 1, 1),
	[VOP2_WIN_CLUSTER_ENABLE] = REG_FIELD(RK3568_CLUSTER_CTRL, 0, 0),
	[VOP2_WIN_CLUSTER_LB_MODE] = REG_FIELD(RK3568_CLUSTER_CTRL, 4, 7),

	/* afbc regs */
	[VOP2_WIN_AFBC_FORMAT] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_CTRL, 2, 6),
	[VOP2_WIN_AFBC_RB_SWAP] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_CTRL, 9, 9),
	[VOP2_WIN_AFBC_UV_SWAP] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_CTRL, 10, 10),
	[VOP2_WIN_AFBC_AUTO_GATING_EN] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_OUTPUT_CTRL, 4, 4),
	[VOP2_WIN_AFBC_HALF_BLOCK_EN] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_CTRL, 7, 7),
	[VOP2_WIN_AFBC_BLOCK_SPLIT_EN] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_CTRL, 8, 8),
	[VOP2_WIN_AFBC_HDR_PTR] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_HDR_PTR, 0, 31),
	[VOP2_WIN_AFBC_PIC_SIZE] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_PIC_SIZE, 0, 31),
	[VOP2_WIN_AFBC_PIC_VIR_WIDTH] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_VIR_WIDTH, 0, 15),
	[VOP2_WIN_AFBC_TILE_NUM] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_VIR_WIDTH, 16, 31),
	[VOP2_WIN_AFBC_PIC_OFFSET] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_PIC_OFFSET, 0, 31),
	[VOP2_WIN_AFBC_DSP_OFFSET] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_DSP_OFFSET, 0, 31),
	[VOP2_WIN_AFBC_TRANSFORM_OFFSET] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_TRANSFORM_OFFSET, 0, 31),
	[VOP2_WIN_AFBC_ROTATE_90] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_ROTATE_MODE, 0, 0),
	[VOP2_WIN_AFBC_ROTATE_270] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_ROTATE_MODE, 1, 1),
	[VOP2_WIN_XMIRROR] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_ROTATE_MODE, 2, 2),
	[VOP2_WIN_YMIRROR] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_ROTATE_MODE, 3, 3),

	/* Fields not supported by cluster windows. */
	[VOP2_WIN_UV_SWAP] = { .reg = 0xffffffff },
	[VOP2_WIN_COLOR_KEY] = { .reg = 0xffffffff },
	[VOP2_WIN_COLOR_KEY_EN] = { .reg = 0xffffffff },
	[VOP2_WIN_SCALE_CBCR_X] = { .reg = 0xffffffff },
	[VOP2_WIN_SCALE_CBCR_Y] = { .reg = 0xffffffff },
	[VOP2_WIN_YRGB_HSCL_FILTER_MODE] = { .reg = 0xffffffff },
	[VOP2_WIN_YRGB_VSCL_FILTER_MODE] = { .reg = 0xffffffff },
	[VOP2_WIN_CBCR_VER_SCL_MODE] = { .reg = 0xffffffff },
	[VOP2_WIN_CBCR_HSCL_FILTER_MODE] = { .reg = 0xffffffff },
	[VOP2_WIN_CBCR_HOR_SCL_MODE] = { .reg = 0xffffffff },
	[VOP2_WIN_CBCR_VSCL_FILTER_MODE] = { .reg = 0xffffffff },
	[VOP2_WIN_VSD_CBCR_GT2] = { .reg = 0xffffffff },
	[VOP2_WIN_VSD_CBCR_GT4] = { .reg = 0xffffffff },
};
/*
 * Allocate regmap fields for a cluster window.
 *
 * Takes a private copy of the cluster register template, rebases every
 * implemented field by the window's register offset, and allocates the
 * regmap fields in bulk. The copy is freed in all cases; the fields
 * themselves are device-managed.
 *
 * Returns 0 on success or a negative errno.
 *
 * (Also drops the stray ';' that followed the original function body —
 * an empty top-level declaration that trips -Wpedantic.)
 */
static int vop2_cluster_init(struct vop2_win *win)
{
	struct vop2 *vop2 = win->vop2;
	struct reg_field *cluster_regs;
	int ret, i;

	cluster_regs = kmemdup(vop2_cluster_regs, sizeof(vop2_cluster_regs),
			       GFP_KERNEL);
	if (!cluster_regs)
		return -ENOMEM;

	for (i = 0; i < ARRAY_SIZE(vop2_cluster_regs); i++)
		if (cluster_regs[i].reg != 0xffffffff)
			cluster_regs[i].reg += win->offset;

	ret = devm_regmap_field_bulk_alloc(vop2->dev, vop2->map, win->reg,
					   cluster_regs,
					   ARRAY_SIZE(vop2_cluster_regs));

	kfree(cluster_regs);

	return ret;
}
/*
 * Register field layout for a smart/esmart window, relative to the
 * window's base offset (added in vop2_esmart_init()). Entries with
 * .reg set to 0xffffffff mark fields (mostly AFBC) that smart windows
 * do not implement.
 */
static struct reg_field vop2_esmart_regs[VOP2_WIN_MAX_REG] = {
	[VOP2_WIN_ENABLE] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 0, 0),
	[VOP2_WIN_FORMAT] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 1, 5),
	[VOP2_WIN_DITHER_UP] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 12, 12),
	[VOP2_WIN_RB_SWAP] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 14, 14),
	[VOP2_WIN_UV_SWAP] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 16, 16),
	[VOP2_WIN_ACT_INFO] = REG_FIELD(RK3568_SMART_REGION0_ACT_INFO, 0, 31),
	[VOP2_WIN_DSP_INFO] = REG_FIELD(RK3568_SMART_REGION0_DSP_INFO, 0, 31),
	[VOP2_WIN_DSP_ST] = REG_FIELD(RK3568_SMART_REGION0_DSP_ST, 0, 28),
	[VOP2_WIN_YRGB_MST] = REG_FIELD(RK3568_SMART_REGION0_YRGB_MST, 0, 31),
	[VOP2_WIN_UV_MST] = REG_FIELD(RK3568_SMART_REGION0_CBR_MST, 0, 31),
	[VOP2_WIN_YUV_CLIP] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 17, 17),
	[VOP2_WIN_YRGB_VIR] = REG_FIELD(RK3568_SMART_REGION0_VIR, 0, 15),
	[VOP2_WIN_UV_VIR] = REG_FIELD(RK3568_SMART_REGION0_VIR, 16, 31),
	[VOP2_WIN_Y2R_EN] = REG_FIELD(RK3568_SMART_CTRL0, 0, 0),
	[VOP2_WIN_R2Y_EN] = REG_FIELD(RK3568_SMART_CTRL0, 1, 1),
	[VOP2_WIN_CSC_MODE] = REG_FIELD(RK3568_SMART_CTRL0, 2, 3),
	[VOP2_WIN_YMIRROR] = REG_FIELD(RK3568_SMART_CTRL1, 31, 31),
	[VOP2_WIN_COLOR_KEY] = REG_FIELD(RK3568_SMART_COLOR_KEY_CTRL, 0, 29),
	[VOP2_WIN_COLOR_KEY_EN] = REG_FIELD(RK3568_SMART_COLOR_KEY_CTRL, 31, 31),

	/* Scale */
	[VOP2_WIN_SCALE_YRGB_X] = REG_FIELD(RK3568_SMART_REGION0_SCL_FACTOR_YRGB, 0, 15),
	[VOP2_WIN_SCALE_YRGB_Y] = REG_FIELD(RK3568_SMART_REGION0_SCL_FACTOR_YRGB, 16, 31),
	[VOP2_WIN_SCALE_CBCR_X] = REG_FIELD(RK3568_SMART_REGION0_SCL_FACTOR_CBR, 0, 15),
	[VOP2_WIN_SCALE_CBCR_Y] = REG_FIELD(RK3568_SMART_REGION0_SCL_FACTOR_CBR, 16, 31),
	[VOP2_WIN_YRGB_HOR_SCL_MODE] = REG_FIELD(RK3568_SMART_REGION0_SCL_CTRL, 0, 1),
	[VOP2_WIN_YRGB_HSCL_FILTER_MODE] = REG_FIELD(RK3568_SMART_REGION0_SCL_CTRL, 2, 3),
	[VOP2_WIN_YRGB_VER_SCL_MODE] = REG_FIELD(RK3568_SMART_REGION0_SCL_CTRL, 4, 5),
	[VOP2_WIN_YRGB_VSCL_FILTER_MODE] = REG_FIELD(RK3568_SMART_REGION0_SCL_CTRL, 6, 7),
	[VOP2_WIN_CBCR_HOR_SCL_MODE] = REG_FIELD(RK3568_SMART_REGION0_SCL_CTRL, 8, 9),
	[VOP2_WIN_CBCR_HSCL_FILTER_MODE] = REG_FIELD(RK3568_SMART_REGION0_SCL_CTRL, 10, 11),
	[VOP2_WIN_CBCR_VER_SCL_MODE] = REG_FIELD(RK3568_SMART_REGION0_SCL_CTRL, 12, 13),
	[VOP2_WIN_CBCR_VSCL_FILTER_MODE] = REG_FIELD(RK3568_SMART_REGION0_SCL_CTRL, 14, 15),
	[VOP2_WIN_BIC_COE_SEL] = REG_FIELD(RK3568_SMART_REGION0_SCL_CTRL, 16, 17),
	[VOP2_WIN_VSD_YRGB_GT2] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 8, 8),
	[VOP2_WIN_VSD_YRGB_GT4] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 9, 9),
	[VOP2_WIN_VSD_CBCR_GT2] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 10, 10),
	[VOP2_WIN_VSD_CBCR_GT4] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 11, 11),

	/* Fields not supported by smart/esmart windows. */
	[VOP2_WIN_XMIRROR] = { .reg = 0xffffffff },
	[VOP2_WIN_CLUSTER_ENABLE] = { .reg = 0xffffffff },
	[VOP2_WIN_AFBC_ENABLE] = { .reg = 0xffffffff },
	[VOP2_WIN_CLUSTER_LB_MODE] = { .reg = 0xffffffff },
	[VOP2_WIN_AFBC_FORMAT] = { .reg = 0xffffffff },
	[VOP2_WIN_AFBC_RB_SWAP] = { .reg = 0xffffffff },
	[VOP2_WIN_AFBC_UV_SWAP] = { .reg = 0xffffffff },
	[VOP2_WIN_AFBC_AUTO_GATING_EN] = { .reg = 0xffffffff },
	[VOP2_WIN_AFBC_BLOCK_SPLIT_EN] = { .reg = 0xffffffff },
	[VOP2_WIN_AFBC_PIC_VIR_WIDTH] = { .reg = 0xffffffff },
	[VOP2_WIN_AFBC_TILE_NUM] = { .reg = 0xffffffff },
	[VOP2_WIN_AFBC_PIC_OFFSET] = { .reg = 0xffffffff },
	[VOP2_WIN_AFBC_PIC_SIZE] = { .reg = 0xffffffff },
	[VOP2_WIN_AFBC_DSP_OFFSET] = { .reg = 0xffffffff },
	[VOP2_WIN_AFBC_TRANSFORM_OFFSET] = { .reg = 0xffffffff },
	[VOP2_WIN_AFBC_HDR_PTR] = { .reg = 0xffffffff },
	[VOP2_WIN_AFBC_HALF_BLOCK_EN] = { .reg = 0xffffffff },
	[VOP2_WIN_AFBC_ROTATE_270] = { .reg = 0xffffffff },
	[VOP2_WIN_AFBC_ROTATE_90] = { .reg = 0xffffffff },
};
/*
 * Allocate regmap fields for a smart/esmart window.
 *
 * Mirrors vop2_cluster_init(): copy the esmart register template,
 * rebase implemented fields by the window offset, bulk-allocate the
 * device-managed regmap fields, and free the temporary copy.
 *
 * Returns 0 on success or a negative errno.
 *
 * (Also drops the stray ';' that followed the original function body —
 * an empty top-level declaration that trips -Wpedantic.)
 */
static int vop2_esmart_init(struct vop2_win *win)
{
	struct vop2 *vop2 = win->vop2;
	struct reg_field *esmart_regs;
	int ret, i;

	esmart_regs = kmemdup(vop2_esmart_regs, sizeof(vop2_esmart_regs),
			      GFP_KERNEL);
	if (!esmart_regs)
		return -ENOMEM;

	for (i = 0; i < ARRAY_SIZE(vop2_esmart_regs); i++)
		if (esmart_regs[i].reg != 0xffffffff)
			esmart_regs[i].reg += win->offset;

	ret = devm_regmap_field_bulk_alloc(vop2->dev, vop2->map, win->reg,
					   esmart_regs,
					   ARRAY_SIZE(vop2_esmart_regs));

	kfree(esmart_regs);

	return ret;
}
/*
 * Initialize every window described by the SoC data: bind the static
 * window description, then allocate the per-window regmap fields via
 * the cluster or esmart template as appropriate.
 *
 * Returns 0 on success or the first failing window's errno.
 */
static int vop2_win_init(struct vop2 *vop2)
{
	const struct vop2_data *vop2_data = vop2->data;
	unsigned int idx;
	int ret;

	for (idx = 0; idx < vop2_data->win_size; idx++) {
		struct vop2_win *win = &vop2->win[idx];
		const struct vop2_win_data *win_data = &vop2_data->win[idx];

		win->data = win_data;
		win->type = win_data->type;
		win->offset = win_data->base;
		win->win_id = idx;
		win->vop2 = vop2;

		ret = vop2_cluster_window(win) ? vop2_cluster_init(win)
					       : vop2_esmart_init(win);
		if (ret)
			return ret;
	}

	vop2->registered_num_wins = vop2_data->win_size;

	return 0;
}
/*
 * The window registers are only updated when config done is written.
 * Until that they read back the old value. As we read-modify-write
 * these registers mark them as non-volatile. This makes sure we read
 * the new values from the regmap register cache.
 */
static const struct regmap_range vop2_nonvolatile_range[] = {
	/* Window register space (shadowed behind cfg_done). */
	regmap_reg_range(0x1000, 0x23ff),
};

/* Everything outside the window range is treated as volatile. */
static const struct regmap_access_table vop2_volatile_table = {
	.no_ranges = vop2_nonvolatile_range,
	.n_no_ranges = ARRAY_SIZE(vop2_nonvolatile_range),
};

/* MMIO regmap over the VOP2 register block, with a cache for the
 * non-volatile (shadowed) window registers. */
static const struct regmap_config vop2_regmap_config = {
	.reg_bits = 32,
	.val_bits = 32,
	.reg_stride = 4,
	.max_register = 0x3000,
	.name = "vop2",
	.volatile_table = &vop2_volatile_table,
	.cache_type = REGCACHE_RBTREE,
};
/*
 * Component bind callback: allocate the vop2 instance, map registers,
 * set up the regmap and windows, fetch clocks and the IRQ, create the
 * CRTCs and (optionally) the RGB output, then enable runtime PM.
 *
 * All resources except the CRTCs are device-managed; only the RGB
 * -EPROBE_DEFER path needs explicit CRTC teardown.
 *
 * Returns 0 on success or a negative errno.
 */
static int vop2_bind(struct device *dev, struct device *master, void *data)
{
	struct platform_device *pdev = to_platform_device(dev);
	const struct vop2_data *vop2_data;
	struct drm_device *drm = data;
	struct vop2 *vop2;
	struct resource *res;
	size_t alloc_size;
	int ret;

	vop2_data = of_device_get_match_data(dev);
	if (!vop2_data)
		return -ENODEV;

	/* Allocate vop2 struct and its vop2_win array */
	alloc_size = struct_size(vop2, win, vop2_data->win_size);
	vop2 = devm_kzalloc(dev, alloc_size, GFP_KERNEL);
	if (!vop2)
		return -ENOMEM;

	vop2->dev = dev;
	vop2->data = vop2_data;
	vop2->drm = drm;

	dev_set_drvdata(dev, vop2);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vop");
	if (!res) {
		drm_err(vop2->drm, "failed to get vop2 register byname\n");
		return -EINVAL;
	}

	vop2->regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(vop2->regs))
		return PTR_ERR(vop2->regs);
	vop2->len = resource_size(res);

	vop2->map = devm_regmap_init_mmio(dev, vop2->regs, &vop2_regmap_config);
	if (IS_ERR(vop2->map))
		return PTR_ERR(vop2->map);

	ret = vop2_win_init(vop2);
	if (ret)
		return ret;

	/* The gamma LUT region is optional. */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "gamma-lut");
	if (res) {
		vop2->lut_regs = devm_ioremap_resource(dev, res);
		if (IS_ERR(vop2->lut_regs))
			return PTR_ERR(vop2->lut_regs);
	}

	vop2->grf = syscon_regmap_lookup_by_phandle(dev->of_node, "rockchip,grf");

	vop2->hclk = devm_clk_get(vop2->dev, "hclk");
	if (IS_ERR(vop2->hclk)) {
		drm_err(vop2->drm, "failed to get hclk source\n");
		return PTR_ERR(vop2->hclk);
	}

	vop2->aclk = devm_clk_get(vop2->dev, "aclk");
	if (IS_ERR(vop2->aclk)) {
		drm_err(vop2->drm, "failed to get aclk source\n");
		return PTR_ERR(vop2->aclk);
	}

	vop2->irq = platform_get_irq(pdev, 0);
	if (vop2->irq < 0) {
		drm_err(vop2->drm, "cannot find irq for vop2\n");
		return vop2->irq;
	}

	mutex_init(&vop2->vop2_lock);

	/* IRQF_SHARED: the line is shared with the iommu. */
	ret = devm_request_irq(dev, vop2->irq, vop2_isr, IRQF_SHARED, dev_name(dev), vop2);
	if (ret)
		return ret;

	ret = vop2_create_crtcs(vop2);
	if (ret)
		return ret;

	ret = vop2_find_rgb_encoder(vop2);
	if (ret >= 0) {
		vop2->rgb = rockchip_rgb_init(dev, &vop2->vps[ret].crtc,
					      vop2->drm, ret);
		if (IS_ERR(vop2->rgb)) {
			if (PTR_ERR(vop2->rgb) == -EPROBE_DEFER) {
				ret = PTR_ERR(vop2->rgb);
				goto err_crtcs;
			}
			/* RGB output is optional; continue without it. */
			vop2->rgb = NULL;
		}
	}

	rockchip_drm_dma_init_device(vop2->drm, vop2->dev);

	pm_runtime_enable(&pdev->dev);

	return 0;

err_crtcs:
	vop2_destroy_crtcs(vop2);

	return ret;
}
/*
 * Component unbind callback: reverse of vop2_bind() for the non-devm
 * resources (runtime PM, RGB output, CRTCs).
 */
static void vop2_unbind(struct device *dev, struct device *master, void *data)
{
	struct vop2 *vop2 = dev_get_drvdata(dev);

	pm_runtime_disable(dev);

	if (vop2->rgb)
		rockchip_rgb_fini(vop2->rgb);

	vop2_destroy_crtcs(vop2);
}
/* Component ops used by the rockchip_drm master driver to bind VOP2. */
const struct component_ops vop2_component_ops = {
	.bind = vop2_bind,
	.unbind = vop2_unbind,
};
EXPORT_SYMBOL_GPL(vop2_component_ops);
| linux-master | drivers/gpu/drm/rockchip/rockchip_drm_vop2.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
* Author:Mark Yao <[email protected]>
*
* based on exynos_drm_drv.c
*/
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/module.h>
#include <linux/of_graph.h>
#include <linux/of_platform.h>
#include <linux/component.h>
#include <linux/console.h>
#include <linux/iommu.h>
#include <drm/drm_aperture.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_generic.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_of.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
#if defined(CONFIG_ARM_DMA_USE_IOMMU)
#include <asm/dma-iommu.h>
#else
#define arm_iommu_detach_device(...) ({ })
#define arm_iommu_release_mapping(...) ({ })
#define to_dma_iommu_mapping(dev) NULL
#endif
#include "rockchip_drm_drv.h"
#include "rockchip_drm_fb.h"
#include "rockchip_drm_gem.h"
#define DRIVER_NAME "rockchip"
#define DRIVER_DESC "RockChip Soc DRM"
#define DRIVER_DATE "20140818"
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
static const struct drm_driver rockchip_drm_driver;
/*
* Attach a (component) device to the shared drm dma mapping from master drm
* device. This is used by the VOPs to map GEM buffers to a common DMA
* mapping.
*/
int rockchip_drm_dma_attach_device(struct drm_device *drm_dev,
				   struct device *dev)
{
	struct rockchip_drm_private *private = drm_dev->dev_private;
	int ret;

	/* No shared IOMMU domain -> nothing to attach, not an error. */
	if (!private->domain)
		return 0;

	if (IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)) {
		struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);

		/*
		 * Drop any ARM dma-mapping IOMMU setup the core may have
		 * created, so the device can join the shared drm domain.
		 */
		if (mapping) {
			arm_iommu_detach_device(dev);
			arm_iommu_release_mapping(mapping);
		}
	}

	ret = iommu_attach_device(private->domain, dev);
	if (ret) {
		DRM_DEV_ERROR(dev, "Failed to attach iommu device\n");
		return ret;
	}

	return 0;
}
/*
 * Detach a component device from the shared IOMMU domain; the inverse
 * of rockchip_drm_dma_attach_device().  A no-op without a domain.
 */
void rockchip_drm_dma_detach_device(struct drm_device *drm_dev,
				    struct device *dev)
{
	struct rockchip_drm_private *priv = drm_dev->dev_private;

	if (priv->domain)
		iommu_detach_device(priv->domain, dev);
}
/*
 * Record which component device backs the shared IOMMU domain.  The
 * first IOMMU-mapped component wins; any component without an IOMMU
 * mapping forces the "no domain" (-ENODEV) state.
 */
void rockchip_drm_dma_init_device(struct drm_device *drm_dev,
				  struct device *dev)
{
	struct rockchip_drm_private *priv = drm_dev->dev_private;

	if (device_iommu_mapped(dev)) {
		/* Keep only the first IOMMU-backed component. */
		if (!priv->iommu_dev)
			priv->iommu_dev = dev;
	} else {
		priv->iommu_dev = ERR_PTR(-ENODEV);
	}
}
/*
 * Allocate the shared IOMMU domain and set up a drm_mm allocator that
 * hands out IOVA ranges from the domain's aperture.  Silently succeeds
 * when no IOMMU-backed component was registered.
 */
static int rockchip_drm_init_iommu(struct drm_device *drm_dev)
{
	struct rockchip_drm_private *priv = drm_dev->dev_private;
	struct iommu_domain_geometry *geo;
	u64 aper_start, aper_end;

	if (IS_ERR_OR_NULL(priv->iommu_dev))
		return 0;

	priv->domain = iommu_domain_alloc(priv->iommu_dev->bus);
	if (!priv->domain)
		return -ENOMEM;

	geo = &priv->domain->geometry;
	aper_start = geo->aperture_start;
	aper_end = geo->aperture_end;
	DRM_DEBUG("IOMMU context initialized (aperture: %#llx-%#llx)\n",
		  aper_start, aper_end);

	/* drm_mm range is inclusive of both aperture ends. */
	drm_mm_init(&priv->mm, aper_start, aper_end - aper_start + 1);
	mutex_init(&priv->mm_lock);

	return 0;
}
/*
 * Tear down the IOVA allocator and free the shared IOMMU domain set up
 * by rockchip_drm_init_iommu().  A no-op when no domain exists.
 */
static void rockchip_iommu_cleanup(struct drm_device *drm_dev)
{
	struct rockchip_drm_private *priv = drm_dev->dev_private;

	if (!priv->domain)
		return;

	drm_mm_takedown(&priv->mm);
	iommu_domain_free(priv->domain);
}
/*
 * Component-master bind: assemble the full DRM device once all
 * sub-components have probed.  The error path unwinds in exact
 * reverse order of setup.
 */
static int rockchip_drm_bind(struct device *dev)
{
	struct drm_device *drm_dev;
	struct rockchip_drm_private *private;
	int ret;
	/* Remove existing drivers that may own the framebuffer memory. */
	ret = drm_aperture_remove_framebuffers(&rockchip_drm_driver);
	if (ret) {
		DRM_DEV_ERROR(dev,
			      "Failed to remove existing framebuffers - %d.\n",
			      ret);
		return ret;
	}
	drm_dev = drm_dev_alloc(&rockchip_drm_driver, dev);
	if (IS_ERR(drm_dev))
		return PTR_ERR(drm_dev);
	dev_set_drvdata(dev, drm_dev);
	/* devm allocation: freed automatically with the drm device's parent. */
	private = devm_kzalloc(drm_dev->dev, sizeof(*private), GFP_KERNEL);
	if (!private) {
		ret = -ENOMEM;
		goto err_free;
	}
	drm_dev->dev_private = private;
	ret = drmm_mode_config_init(drm_dev);
	if (ret)
		goto err_free;
	rockchip_drm_mode_config_init(drm_dev);
	/* Try to bind all sub drivers. */
	ret = component_bind_all(dev, drm_dev);
	if (ret)
		goto err_free;
	/* IOMMU init must come after the components registered their devices. */
	ret = rockchip_drm_init_iommu(drm_dev);
	if (ret)
		goto err_unbind_all;
	ret = drm_vblank_init(drm_dev, drm_dev->mode_config.num_crtc);
	if (ret)
		goto err_iommu_cleanup;
	drm_mode_config_reset(drm_dev);
	/* init kms poll for handling hpd */
	drm_kms_helper_poll_init(drm_dev);
	ret = drm_dev_register(drm_dev, 0);
	if (ret)
		goto err_kms_helper_poll_fini;
	drm_fbdev_generic_setup(drm_dev, 0);
	return 0;
err_kms_helper_poll_fini:
	drm_kms_helper_poll_fini(drm_dev);
err_iommu_cleanup:
	rockchip_iommu_cleanup(drm_dev);
err_unbind_all:
	component_unbind_all(dev, drm_dev);
err_free:
	drm_dev_put(drm_dev);
	return ret;
}
/*
 * Component-master unbind: mirror of rockchip_drm_bind(), tearing
 * everything down in reverse order of setup.
 */
static void rockchip_drm_unbind(struct device *dev)
{
	struct drm_device *drm_dev = dev_get_drvdata(dev);
	drm_dev_unregister(drm_dev);
	drm_kms_helper_poll_fini(drm_dev);
	/* Disable all outputs before the components go away. */
	drm_atomic_helper_shutdown(drm_dev);
	component_unbind_all(dev, drm_dev);
	rockchip_iommu_cleanup(drm_dev);
	drm_dev_put(drm_dev);
}
/* Standard GEM file operations generated by the DRM helper macro. */
DEFINE_DRM_GEM_FOPS(rockchip_drm_driver_fops);
/* Top-level DRM driver description: atomic modesetting with GEM buffers. */
static const struct drm_driver rockchip_drm_driver = {
	.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
	.dumb_create = rockchip_gem_dumb_create,
	.gem_prime_import_sg_table = rockchip_gem_prime_import_sg_table,
	.fops = &rockchip_drm_driver_fops,
	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
};
#ifdef CONFIG_PM_SLEEP
/* System sleep: delegate to the generic mode-config suspend helper. */
static int rockchip_drm_sys_suspend(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);
	return drm_mode_config_helper_suspend(drm);
}
/* System wakeup: restore the mode-config state saved at suspend. */
static int rockchip_drm_sys_resume(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);
	return drm_mode_config_helper_resume(drm);
}
#endif
static const struct dev_pm_ops rockchip_drm_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(rockchip_drm_sys_suspend,
				rockchip_drm_sys_resume)
};
/* Registry of component sub-drivers, filled once at module init. */
#define MAX_ROCKCHIP_SUB_DRIVERS 16
static struct platform_driver *rockchip_sub_drivers[MAX_ROCKCHIP_SUB_DRIVERS];
static int num_rockchip_sub_drivers;
/*
 * Get the endpoint id of the remote endpoint of the given encoder. This
 * information is used by the VOP2 driver to identify the encoder.
 *
 * @rkencoder: The encoder to get the remote endpoint id from
 * @np: The encoder device node
 * @port: The number of the port leading to the VOP2
 * @reg: The endpoint number leading to the VOP2
 *
 * Returns 0 on success, -ENOENT when either endpoint node is missing,
 * or the error from of_graph_parse_endpoint().
 *
 * Fix: the original leaked the references returned by
 * of_graph_get_endpoint_by_regs() and of_graph_get_remote_endpoint();
 * both must be dropped with of_node_put().
 */
int rockchip_drm_encoder_set_crtc_endpoint_id(struct rockchip_encoder *rkencoder,
					      struct device_node *np, int port, int reg)
{
	struct of_endpoint ep;
	struct device_node *en, *ren;
	int ret;

	en = of_graph_get_endpoint_by_regs(np, port, reg);
	if (!en)
		return -ENOENT;

	ren = of_graph_get_remote_endpoint(en);
	of_node_put(en);
	if (!ren)
		return -ENOENT;

	ret = of_graph_parse_endpoint(ren, &ep);
	of_node_put(ren);
	if (ret)
		return ret;

	rkencoder->crtc_endpoint_id = ep.id;

	return 0;
}
/*
 * Check if a vop endpoint is leading to a rockchip subdriver or bridge.
 * Should be called from the component bind stage of the drivers
 * to ensure that all subdrivers are probed.
 *
 * @ep: endpoint of a rockchip vop
 *
 * returns true if subdriver, false if external bridge and -ENODEV
 * if remote port does not contain a device.
 */
int rockchip_drm_endpoint_is_subdriver(struct device_node *ep)
{
	struct device_node *node = of_graph_get_remote_port_parent(ep);
	struct platform_device *pdev;
	struct device_driver *drv;
	int i;
	if (!node)
		return -ENODEV;
	/* status disabled will prevent creation of platform-devices */
	if (!of_device_is_available(node)) {
		of_node_put(node);
		return -ENODEV;
	}
	pdev = of_find_device_by_node(node);
	of_node_put(node);
	/* enabled non-platform-devices can immediately return here */
	if (!pdev)
		return false;
	/*
	 * All rockchip subdrivers have probed at this point, so
	 * any device not having a driver now is an external bridge.
	 */
	drv = pdev->dev.driver;
	if (!drv) {
		platform_device_put(pdev);
		return false;
	}
	/* Match the bound driver against the registered sub-driver table. */
	for (i = 0; i < num_rockchip_sub_drivers; i++) {
		if (rockchip_sub_drivers[i] == to_platform_driver(drv)) {
			platform_device_put(pdev);
			return true;
		}
	}
	platform_device_put(pdev);
	return false;
}
/*
 * Drop the stateless device links created by rockchip_drm_match_add().
 * NOTE(review): this iterates the consumer list while device_link_del()
 * removes entries from it; it appears to rely on deferred link release --
 * confirm, or consider list_for_each_entry_safe().
 */
static void rockchip_drm_match_remove(struct device *dev)
{
	struct device_link *link;
	list_for_each_entry(link, &dev->links.consumers, s_node)
		device_link_del(link);
}
/*
 * Build the component match list from every device bound to one of the
 * registered rockchip sub-drivers, linking each device to the master so
 * it suspends/removes in the right order.
 *
 * Returns the match list, or ERR_PTR(-ENODEV) when no device was found.
 */
static struct component_match *rockchip_drm_match_add(struct device *dev)
{
	struct component_match *match = NULL;
	int i;
	for (i = 0; i < num_rockchip_sub_drivers; i++) {
		struct platform_driver *drv = rockchip_sub_drivers[i];
		struct device *p = NULL, *d;
		/* Walk every device bound to this driver; the previous hit
		 * ("p") seeds the next lookup and its ref is dropped here. */
		do {
			d = platform_find_device_by_driver(p, &drv->driver);
			put_device(p);
			p = d;
			if (!d)
				break;
			device_link_add(dev, d, DL_FLAG_STATELESS);
			component_match_add(dev, &match, component_compare_dev, d);
		} while (true);
	}
	/* component_match_add() can turn match into an ERR_PTR on OOM. */
	if (IS_ERR(match))
		rockchip_drm_match_remove(dev);
	return match ?: ERR_PTR(-ENODEV);
}
/* Component-master callbacks wiring bind/unbind of the aggregate device. */
static const struct component_master_ops rockchip_drm_ops = {
	.bind = rockchip_drm_bind,
	.unbind = rockchip_drm_unbind,
};
/*
 * Validate the "ports" phandle list of the display-subsystem node:
 * it must exist and reference at least one VOP whose node is enabled.
 *
 * Returns 0 when a usable VOP port was found, -ENODEV otherwise.
 */
static int rockchip_drm_platform_of_probe(struct device *dev)
{
	struct device_node *np = dev->of_node;
	bool have_active_vop = false;
	int idx = 0;

	if (!np)
		return -ENODEV;

	for (;;) {
		struct device_node *port = of_parse_phandle(np, "ports", idx);

		if (!port)
			break;

		/* A port only counts when its parent VOP node is enabled. */
		if (of_device_is_available(port->parent))
			have_active_vop = true;

		of_node_put(port);
		idx++;
	}

	if (idx == 0) {
		DRM_DEV_ERROR(dev, "missing 'ports' property\n");
		return -ENODEV;
	}

	if (!have_active_vop) {
		DRM_DEV_ERROR(dev,
			      "No available vop found for display-subsystem.\n");
		return -ENODEV;
	}

	return 0;
}
/*
 * Platform probe: validate the DT, build the component match list and
 * register the aggregate (component master) device.
 */
static int rockchip_drm_platform_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct component_match *match;
	int ret;

	ret = rockchip_drm_platform_of_probe(dev);
	if (ret)
		return ret;

	match = rockchip_drm_match_add(dev);
	if (IS_ERR(match))
		return PTR_ERR(match);

	ret = component_master_add_with_match(dev, &rockchip_drm_ops, match);
	if (ret < 0)
		rockchip_drm_match_remove(dev);

	return ret;
}
/* Platform removal: drop the component master, then the device links. */
static void rockchip_drm_platform_remove(struct platform_device *pdev)
{
	component_master_del(&pdev->dev, &rockchip_drm_ops);
	rockchip_drm_match_remove(&pdev->dev);
}
/*
 * System shutdown/kexec hook: disable all displays if the aggregate
 * DRM device was ever bound (drvdata may still be NULL otherwise).
 */
static void rockchip_drm_platform_shutdown(struct platform_device *pdev)
{
	struct drm_device *drm = platform_get_drvdata(pdev);

	if (!drm)
		return;

	drm_atomic_helper_shutdown(drm);
}
/* DT match for the virtual display-subsystem aggregation node. */
static const struct of_device_id rockchip_drm_dt_ids[] = {
	{ .compatible = "rockchip,display-subsystem", },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, rockchip_drm_dt_ids);
/* Platform driver for the aggregation node; real work is in bind/unbind. */
static struct platform_driver rockchip_drm_platform_driver = {
	.probe = rockchip_drm_platform_probe,
	.remove_new = rockchip_drm_platform_remove,
	.shutdown = rockchip_drm_platform_shutdown,
	.driver = {
		.name = "rockchip-drm",
		.of_match_table = rockchip_drm_dt_ids,
		.pm = &rockchip_drm_pm_ops,
	},
};
/*
 * Append a sub-driver to the registry iff its Kconfig option is enabled.
 * WARNs (and skips) on registry overflow instead of writing out of bounds.
 */
#define ADD_ROCKCHIP_SUB_DRIVER(drv, cond) { \
	if (IS_ENABLED(cond) && \
	    !WARN_ON(num_rockchip_sub_drivers >= MAX_ROCKCHIP_SUB_DRIVERS)) \
		rockchip_sub_drivers[num_rockchip_sub_drivers++] = &drv; \
}
/*
 * Module init: register every enabled sub-driver first (so they can
 * probe before the master binds), then the aggregation platform driver.
 */
static int __init rockchip_drm_init(void)
{
	int ret;
	if (drm_firmware_drivers_only())
		return -ENODEV;
	num_rockchip_sub_drivers = 0;
	ADD_ROCKCHIP_SUB_DRIVER(vop_platform_driver, CONFIG_ROCKCHIP_VOP);
	ADD_ROCKCHIP_SUB_DRIVER(vop2_platform_driver, CONFIG_ROCKCHIP_VOP2);
	ADD_ROCKCHIP_SUB_DRIVER(rockchip_lvds_driver,
				CONFIG_ROCKCHIP_LVDS);
	ADD_ROCKCHIP_SUB_DRIVER(rockchip_dp_driver,
				CONFIG_ROCKCHIP_ANALOGIX_DP);
	ADD_ROCKCHIP_SUB_DRIVER(cdn_dp_driver, CONFIG_ROCKCHIP_CDN_DP);
	ADD_ROCKCHIP_SUB_DRIVER(dw_hdmi_rockchip_pltfm_driver,
				CONFIG_ROCKCHIP_DW_HDMI);
	ADD_ROCKCHIP_SUB_DRIVER(dw_mipi_dsi_rockchip_driver,
				CONFIG_ROCKCHIP_DW_MIPI_DSI);
	ADD_ROCKCHIP_SUB_DRIVER(inno_hdmi_driver, CONFIG_ROCKCHIP_INNO_HDMI);
	ADD_ROCKCHIP_SUB_DRIVER(rk3066_hdmi_driver,
				CONFIG_ROCKCHIP_RK3066_HDMI);
	ret = platform_register_drivers(rockchip_sub_drivers,
					num_rockchip_sub_drivers);
	if (ret)
		return ret;
	ret = platform_driver_register(&rockchip_drm_platform_driver);
	if (ret)
		goto err_unreg_drivers;
	return 0;
err_unreg_drivers:
	/* Roll back the sub-driver registrations on master failure. */
	platform_unregister_drivers(rockchip_sub_drivers,
				    num_rockchip_sub_drivers);
	return ret;
}
/* Module exit: unregister in reverse order of rockchip_drm_init(). */
static void __exit rockchip_drm_fini(void)
{
	platform_driver_unregister(&rockchip_drm_platform_driver);
	platform_unregister_drivers(rockchip_sub_drivers,
				    num_rockchip_sub_drivers);
}
module_init(rockchip_drm_init);
module_exit(rockchip_drm_fini);
MODULE_AUTHOR("Mark Yao <[email protected]>");
MODULE_DESCRIPTION("ROCKCHIP DRM Driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/gpu/drm/rockchip/rockchip_drm_drv.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
* Author:Mark Yao <[email protected]>
*/
#include <linux/dma-buf.h>
#include <linux/iommu.h>
#include <linux/vmalloc.h>
#include <drm/drm.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_prime.h>
#include <drm/drm_vma_manager.h>
#include "rockchip_drm_drv.h"
#include "rockchip_drm_gem.h"
static int rockchip_gem_iommu_map(struct rockchip_gem_object *rk_obj)
{
struct drm_device *drm = rk_obj->base.dev;
struct rockchip_drm_private *private = drm->dev_private;
int prot = IOMMU_READ | IOMMU_WRITE;
ssize_t ret;
mutex_lock(&private->mm_lock);
ret = drm_mm_insert_node_generic(&private->mm, &rk_obj->mm,
rk_obj->base.size, PAGE_SIZE,
0, 0);
mutex_unlock(&private->mm_lock);
if (ret < 0) {
DRM_ERROR("out of I/O virtual memory: %zd\n", ret);
return ret;
}
rk_obj->dma_addr = rk_obj->mm.start;
ret = iommu_map_sgtable(private->domain, rk_obj->dma_addr, rk_obj->sgt,
prot);
if (ret < rk_obj->base.size) {
DRM_ERROR("failed to map buffer: size=%zd request_size=%zd\n",
ret, rk_obj->base.size);
ret = -ENOMEM;
goto err_remove_node;
}
rk_obj->size = ret;
return 0;
err_remove_node:
mutex_lock(&private->mm_lock);
drm_mm_remove_node(&rk_obj->mm);
mutex_unlock(&private->mm_lock);
return ret;
}
/*
 * Undo rockchip_gem_iommu_map(): tear down the IOMMU translation, then
 * give the IOVA range back to the allocator.  Always returns 0.
 */
static int rockchip_gem_iommu_unmap(struct rockchip_gem_object *rk_obj)
{
	struct rockchip_drm_private *priv = rk_obj->base.dev->dev_private;

	iommu_unmap(priv->domain, rk_obj->dma_addr, rk_obj->size);

	mutex_lock(&priv->mm_lock);
	drm_mm_remove_node(&rk_obj->mm);
	mutex_unlock(&priv->mm_lock);

	return 0;
}
/*
 * Back the object with shmem pages and build an sg-table for them,
 * flushing the CPU caches so the device sees consistent data.
 */
static int rockchip_gem_get_pages(struct rockchip_gem_object *rk_obj)
{
	struct drm_device *drm = rk_obj->base.dev;
	int ret, i;
	struct scatterlist *s;
	rk_obj->pages = drm_gem_get_pages(&rk_obj->base);
	if (IS_ERR(rk_obj->pages))
		return PTR_ERR(rk_obj->pages);
	rk_obj->num_pages = rk_obj->base.size >> PAGE_SHIFT;
	rk_obj->sgt = drm_prime_pages_to_sg(rk_obj->base.dev,
					    rk_obj->pages, rk_obj->num_pages);
	if (IS_ERR(rk_obj->sgt)) {
		ret = PTR_ERR(rk_obj->sgt);
		goto err_put_pages;
	}
	/*
	 * Fake up the SG table so that dma_sync_sg_for_device() can be used
	 * to flush the pages associated with it.
	 *
	 * TODO: Replace this by drm_clflush_sg() once it can be implemented
	 * without relying on symbols that are not exported.
	 */
	for_each_sgtable_sg(rk_obj->sgt, s, i)
		sg_dma_address(s) = sg_phys(s);
	dma_sync_sgtable_for_device(drm->dev, rk_obj->sgt, DMA_TO_DEVICE);
	return 0;
err_put_pages:
	drm_gem_put_pages(&rk_obj->base, rk_obj->pages, false, false);
	return ret;
}
/*
 * Release the sg-table and shmem pages acquired by
 * rockchip_gem_get_pages(); pages are marked dirty and accessed.
 */
static void rockchip_gem_put_pages(struct rockchip_gem_object *rk_obj)
{
	sg_free_table(rk_obj->sgt);
	kfree(rk_obj->sgt);
	drm_gem_put_pages(&rk_obj->base, rk_obj->pages, true, true);
}
/*
 * IOMMU-backed allocation: shmem pages mapped through the shared IOMMU
 * domain, with an optional write-combined kernel mapping.  Unwinds in
 * reverse order on failure.
 */
static int rockchip_gem_alloc_iommu(struct rockchip_gem_object *rk_obj,
				    bool alloc_kmap)
{
	int ret;
	ret = rockchip_gem_get_pages(rk_obj);
	if (ret < 0)
		return ret;
	ret = rockchip_gem_iommu_map(rk_obj);
	if (ret < 0)
		goto err_free;
	if (alloc_kmap) {
		/* Optional CPU-side mapping for in-kernel access (e.g. fbdev). */
		rk_obj->kvaddr = vmap(rk_obj->pages, rk_obj->num_pages, VM_MAP,
				      pgprot_writecombine(PAGE_KERNEL));
		if (!rk_obj->kvaddr) {
			DRM_ERROR("failed to vmap() buffer\n");
			ret = -ENOMEM;
			goto err_unmap;
		}
	}
	return 0;
err_unmap:
	rockchip_gem_iommu_unmap(rk_obj);
err_free:
	rockchip_gem_put_pages(rk_obj);
	return ret;
}
/*
 * Contiguous allocation through the DMA API (no IOMMU domain).  The
 * kernel mapping is skipped entirely when alloc_kmap is false.
 */
static int rockchip_gem_alloc_dma(struct rockchip_gem_object *rk_obj,
				  bool alloc_kmap)
{
	struct drm_gem_object *obj = &rk_obj->base;
	struct drm_device *drm = obj->dev;

	rk_obj->dma_attrs = DMA_ATTR_WRITE_COMBINE;
	if (!alloc_kmap)
		rk_obj->dma_attrs |= DMA_ATTR_NO_KERNEL_MAPPING;

	rk_obj->kvaddr = dma_alloc_attrs(drm->dev, obj->size,
					 &rk_obj->dma_addr, GFP_KERNEL,
					 rk_obj->dma_attrs);
	if (rk_obj->kvaddr)
		return 0;

	DRM_ERROR("failed to allocate %zu byte dma buffer", obj->size);
	return -ENOMEM;
}
/*
 * Pick the backing-store strategy: the IOMMU path whenever a shared
 * domain exists, plain DMA-API allocation otherwise.
 */
static int rockchip_gem_alloc_buf(struct rockchip_gem_object *rk_obj,
				  bool alloc_kmap)
{
	struct rockchip_drm_private *priv = rk_obj->base.dev->dev_private;

	return priv->domain ? rockchip_gem_alloc_iommu(rk_obj, alloc_kmap)
			    : rockchip_gem_alloc_dma(rk_obj, alloc_kmap);
}
/*
 * Free an IOMMU-backed buffer: drop the kernel mapping (vunmap(NULL)
 * is a no-op when none was made), unmap from the domain, release pages.
 */
static void rockchip_gem_free_iommu(struct rockchip_gem_object *rk_obj)
{
	vunmap(rk_obj->kvaddr);
	rockchip_gem_iommu_unmap(rk_obj);
	rockchip_gem_put_pages(rk_obj);
}
/* Free a DMA-API backed buffer with the attributes used at allocation. */
static void rockchip_gem_free_dma(struct rockchip_gem_object *rk_obj)
{
	struct drm_gem_object *obj = &rk_obj->base;

	dma_free_attrs(obj->dev->dev, obj->size, rk_obj->kvaddr,
		       rk_obj->dma_addr, rk_obj->dma_attrs);
}
/*
 * Release the backing store; a non-NULL page array marks a buffer that
 * came from the IOMMU path.
 */
static void rockchip_gem_free_buf(struct rockchip_gem_object *rk_obj)
{
	if (!rk_obj->pages)
		rockchip_gem_free_dma(rk_obj);
	else
		rockchip_gem_free_iommu(rk_obj);
}
/* Map an IOMMU-backed object into userspace page by page. */
static int rockchip_drm_gem_object_mmap_iommu(struct drm_gem_object *obj,
					      struct vm_area_struct *vma)
{
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
	unsigned int page_count = obj->size >> PAGE_SHIFT;

	/* Refuse zero-length mappings. */
	if (!vma_pages(vma))
		return -ENXIO;

	return vm_map_pages(vma, rk_obj->pages, page_count);
}
/* Map a DMA-API backed object into userspace via dma_mmap_attrs(). */
static int rockchip_drm_gem_object_mmap_dma(struct drm_gem_object *obj,
					    struct vm_area_struct *vma)
{
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);

	return dma_mmap_attrs(obj->dev->dev, vma, rk_obj->kvaddr,
			      rk_obj->dma_addr, obj->size,
			      rk_obj->dma_attrs);
}
/*
 * Common mmap entry point: normalize the VMA set up by the generic GEM
 * mmap code, then dispatch to the IOMMU or DMA-API specific helper.
 */
static int rockchip_drm_gem_object_mmap(struct drm_gem_object *obj,
					struct vm_area_struct *vma)
{
	int ret;
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
	/*
	 * Set vm_pgoff (used as a fake buffer offset by DRM) to 0 and map the
	 * whole buffer from the start.
	 */
	vma->vm_pgoff = 0;
	/*
	 * We allocated a struct page table for rk_obj, so clear
	 * VM_PFNMAP flag that was set by drm_gem_mmap_obj()/drm_gem_mmap().
	 */
	vm_flags_mod(vma, VM_IO | VM_DONTEXPAND | VM_DONTDUMP, VM_PFNMAP);
	/* Userspace mappings are write-combined and never encrypted. */
	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
	if (rk_obj->pages)
		ret = rockchip_drm_gem_object_mmap_iommu(obj, vma);
	else
		ret = rockchip_drm_gem_object_mmap_dma(obj, vma);
	return ret;
}
/* Release the GEM core state and free the wrapper object itself. */
static void rockchip_gem_release_object(struct rockchip_gem_object *rk_obj)
{
	drm_gem_object_release(&rk_obj->base);
	kfree(rk_obj);
}
/* GEM object vtable shared by all rockchip buffer objects. */
static const struct drm_gem_object_funcs rockchip_gem_object_funcs = {
	.free = rockchip_gem_free_object,
	.get_sg_table = rockchip_gem_prime_get_sg_table,
	.vmap = rockchip_gem_prime_vmap,
	.vunmap	= rockchip_gem_prime_vunmap,
	.mmap = rockchip_drm_gem_object_mmap,
	.vm_ops = &drm_gem_dma_vm_ops,
};
/*
 * Allocate and initialize the GEM wrapper (no backing store yet); the
 * requested size is rounded up to a whole number of pages.
 */
static struct rockchip_gem_object *
rockchip_gem_alloc_object(struct drm_device *drm, unsigned int size)
{
	struct rockchip_gem_object *rk_obj;

	rk_obj = kzalloc(sizeof(*rk_obj), GFP_KERNEL);
	if (!rk_obj)
		return ERR_PTR(-ENOMEM);

	rk_obj->base.funcs = &rockchip_gem_object_funcs;
	drm_gem_object_init(drm, &rk_obj->base, round_up(size, PAGE_SIZE));

	return rk_obj;
}
/*
 * Create a fully backed buffer object, optionally with a kernel
 * mapping.  Returns the object or an ERR_PTR on failure.
 */
struct rockchip_gem_object *
rockchip_gem_create_object(struct drm_device *drm, unsigned int size,
			   bool alloc_kmap)
{
	struct rockchip_gem_object *rk_obj;
	int err;

	rk_obj = rockchip_gem_alloc_object(drm, size);
	if (IS_ERR(rk_obj))
		return rk_obj;

	err = rockchip_gem_alloc_buf(rk_obj, alloc_kmap);
	if (err) {
		rockchip_gem_release_object(rk_obj);
		return ERR_PTR(err);
	}

	return rk_obj;
}
/*
 * rockchip_gem_free_object - (struct drm_gem_object_funcs)->free
 * callback function
 *
 * Imported (prime) objects only undo the sg-table mapping; locally
 * allocated objects free their backing store.
 */
void rockchip_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *drm = obj->dev;
	struct rockchip_drm_private *private = drm->dev_private;
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
	if (obj->import_attach) {
		if (private->domain) {
			rockchip_gem_iommu_unmap(rk_obj);
		} else {
			dma_unmap_sgtable(drm->dev, rk_obj->sgt,
					  DMA_BIDIRECTIONAL, 0);
		}
		/* Releases the imported sg table back to the exporter. */
		drm_prime_gem_destroy(obj, rk_obj->sgt);
	} else {
		rockchip_gem_free_buf(rk_obj);
	}
	rockchip_gem_release_object(rk_obj);
}
/*
 * rockchip_gem_create_with_handle - allocate an object with the given
 * size and create a gem handle on it
 *
 * returns a struct rockchip_gem_object* on success or ERR_PTR values
 * on failure.
 */
static struct rockchip_gem_object *
rockchip_gem_create_with_handle(struct drm_file *file_priv,
				struct drm_device *drm, unsigned int size,
				unsigned int *handle)
{
	struct rockchip_gem_object *rk_obj;
	struct drm_gem_object *obj;
	bool is_framebuffer;
	int ret;
	/* fbdev buffers get a kernel mapping so the console can draw. */
	is_framebuffer = drm->fb_helper && file_priv == drm->fb_helper->client.file;
	rk_obj = rockchip_gem_create_object(drm, size, is_framebuffer);
	if (IS_ERR(rk_obj))
		return ERR_CAST(rk_obj);
	obj = &rk_obj->base;
	/*
	 * allocate a id of idr table where the obj is registered
	 * and handle has the id what user can see.
	 */
	ret = drm_gem_handle_create(file_priv, obj, handle);
	if (ret)
		goto err_handle_create;
	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_put(obj);
	return rk_obj;
err_handle_create:
	rockchip_gem_free_object(obj);
	return ERR_PTR(ret);
}
/*
 * rockchip_gem_dumb_create - (struct drm_driver)->dumb_create callback
 * function
 *
 * This aligns the pitch and size arguments to the minimum required. wrap
 * this into your own function if you need bigger alignment.
 */
int rockchip_gem_dumb_create(struct drm_file *file_priv,
			     struct drm_device *dev,
			     struct drm_mode_create_dumb *args)
{
	struct rockchip_gem_object *rk_obj;
	/* Minimum bytes per line for the requested width and bpp. */
	int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	/*
	 * align to 64 bytes since Mali requires it.
	 */
	args->pitch = ALIGN(min_pitch, 64);
	args->size = args->pitch * args->height;
	rk_obj = rockchip_gem_create_with_handle(file_priv, dev, args->size,
						 &args->handle);
	return PTR_ERR_OR_ZERO(rk_obj);
}
/*
 * Allocate a sg_table for this GEM object.
 * Note: Both the table's contents, and the sg_table itself must be freed by
 * the caller.
 * Returns a pointer to the newly allocated sg_table, or an ERR_PTR() error.
 */
struct sg_table *rockchip_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
	struct drm_device *drm = obj->dev;
	struct sg_table *sgt;
	int ret;
	/* IOMMU-backed objects: build the table straight from the pages. */
	if (rk_obj->pages)
		return drm_prime_pages_to_sg(obj->dev, rk_obj->pages, rk_obj->num_pages);
	/* DMA-API objects: ask the DMA layer to describe the allocation. */
	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return ERR_PTR(-ENOMEM);
	ret = dma_get_sgtable_attrs(drm->dev, sgt, rk_obj->kvaddr,
				    rk_obj->dma_addr, obj->size,
				    rk_obj->dma_attrs);
	if (ret) {
		DRM_ERROR("failed to allocate sgt, %d\n", ret);
		kfree(sgt);
		return ERR_PTR(ret);
	}
	return sgt;
}
/*
 * Import path (IOMMU): adopt the exporter's sg-table and map it into
 * the shared domain.
 */
static int
rockchip_gem_iommu_map_sg(struct drm_device *drm,
			  struct dma_buf_attachment *attach,
			  struct sg_table *sg,
			  struct rockchip_gem_object *rk_obj)
{
	rk_obj->sgt = sg;
	return rockchip_gem_iommu_map(rk_obj);
}
/*
 * Import path (no IOMMU): DMA-map the exporter's sg-table and insist
 * on a single contiguous DMA range, since there is no IOMMU to stitch
 * scattered pages together.
 */
static int
rockchip_gem_dma_map_sg(struct drm_device *drm,
			struct dma_buf_attachment *attach,
			struct sg_table *sg,
			struct rockchip_gem_object *rk_obj)
{
	int err = dma_map_sgtable(drm->dev, sg, DMA_BIDIRECTIONAL, 0);
	if (err)
		return err;
	if (drm_prime_get_contiguous_size(sg) < attach->dmabuf->size) {
		DRM_ERROR("failed to map sg_table to contiguous linear address.\n");
		dma_unmap_sgtable(drm->dev, sg, DMA_BIDIRECTIONAL, 0);
		return -EINVAL;
	}
	rk_obj->dma_addr = sg_dma_address(sg->sgl);
	rk_obj->sgt = sg;
	return 0;
}
/*
 * PRIME import: wrap the exporter's sg-table in a rockchip GEM object,
 * mapping it through the IOMMU domain when one exists, or directly via
 * the DMA API otherwise.
 */
struct drm_gem_object *
rockchip_gem_prime_import_sg_table(struct drm_device *drm,
				   struct dma_buf_attachment *attach,
				   struct sg_table *sg)
{
	struct rockchip_drm_private *priv = drm->dev_private;
	struct rockchip_gem_object *rk_obj;
	int err;

	rk_obj = rockchip_gem_alloc_object(drm, attach->dmabuf->size);
	if (IS_ERR(rk_obj))
		return ERR_CAST(rk_obj);

	err = priv->domain ?
		rockchip_gem_iommu_map_sg(drm, attach, sg, rk_obj) :
		rockchip_gem_dma_map_sg(drm, attach, sg, rk_obj);
	if (err < 0) {
		DRM_ERROR("failed to import sg table: %d\n", err);
		rockchip_gem_release_object(rk_obj);
		return ERR_PTR(err);
	}

	return &rk_obj->base;
}
/*
 * vmap callback: provide a kernel virtual address for the buffer.
 * IOMMU-backed objects reuse an existing mapping or create a fresh
 * write-combined one; DMA-API objects can only expose the mapping made
 * at allocation time.
 */
int rockchip_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map)
{
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
	if (rk_obj->pages) {
		void *vaddr;
		if (rk_obj->kvaddr)
			vaddr = rk_obj->kvaddr;
		else
			vaddr = vmap(rk_obj->pages, rk_obj->num_pages, VM_MAP,
				     pgprot_writecombine(PAGE_KERNEL));
		if (!vaddr)
			return -ENOMEM;
		iosys_map_set_vaddr(map, vaddr);
		return 0;
	}
	/* No kernel mapping was made for this DMA allocation. */
	if (rk_obj->dma_attrs & DMA_ATTR_NO_KERNEL_MAPPING)
		return -ENOMEM;
	iosys_map_set_vaddr(map, rk_obj->kvaddr);
	return 0;
}
/*
 * vunmap callback: release a mapping handed out by
 * rockchip_gem_prime_vmap().  Only transient vmap()s made for
 * IOMMU-backed objects need tearing down; the allocation-time mapping
 * (and DMA-API mappings) are left alone.
 */
void rockchip_gem_prime_vunmap(struct drm_gem_object *obj,
			       struct iosys_map *map)
{
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);

	if (!rk_obj->pages)
		return;

	if (map->vaddr != rk_obj->kvaddr)
		vunmap(map->vaddr);
}
| linux-master | drivers/gpu/drm/rockchip/rockchip_drm_gem.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
* Author:
* Sandy Huang <[email protected]>
*/
#include <linux/component.h>
#include <linux/media-bus-format.h>
#include <linux/of_graph.h>
#include <drm/display/drm_dp_helper.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_bridge_connector.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
#include "rockchip_drm_drv.h"
#include "rockchip_drm_vop.h"
#include "rockchip_rgb.h"
/* Per-output state for a parallel RGB display pipe. */
struct rockchip_rgb {
	struct device *dev;		/* owning component device */
	struct drm_device *drm_dev;	/* master DRM device */
	struct drm_bridge *bridge;	/* downstream bridge (or panel bridge) */
	struct rockchip_encoder encoder;	/* embedded encoder */
	struct drm_connector connector;
	int output_mode;
};
/*
 * Translate the connector's preferred media bus format into the VOP
 * output mode stored in the rockchip CRTC state.  Falls back to 24-bit
 * RGB when the connector reports no bus formats.
 */
static int
rockchip_rgb_encoder_atomic_check(struct drm_encoder *encoder,
				  struct drm_crtc_state *crtc_state,
				  struct drm_connector_state *conn_state)
{
	struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc_state);
	struct drm_display_info *info = &conn_state->connector->display_info;
	u32 bus_format = MEDIA_BUS_FMT_RGB888_1X24;

	if (info->num_bus_formats)
		bus_format = info->bus_formats[0];

	switch (bus_format) {
	case MEDIA_BUS_FMT_RGB666_1X18:
		s->output_mode = ROCKCHIP_OUT_MODE_P666;
		break;
	case MEDIA_BUS_FMT_RGB565_1X16:
		s->output_mode = ROCKCHIP_OUT_MODE_P565;
		break;
	case MEDIA_BUS_FMT_RGB888_1X24:
	case MEDIA_BUS_FMT_RGB666_1X24_CPADHI:
	default:
		s->output_mode = ROCKCHIP_OUT_MODE_P888;
		break;
	}

	s->output_type = DRM_MODE_CONNECTOR_LVDS;

	return 0;
}
/* Encoder helpers: only atomic_check is needed for this passive output. */
static const
struct drm_encoder_helper_funcs rockchip_rgb_encoder_helper_funcs = {
	.atomic_check = rockchip_rgb_encoder_atomic_check,
};
/*
 * Set up the parallel RGB output for @video_port of @crtc: find the
 * downstream panel or bridge in the DT graph, create an encoder and a
 * bridge-backed connector for it.
 *
 * Returns the new output, NULL when nothing is connected to the port,
 * or an ERR_PTR on failure.
 *
 * Fixes vs. the original:
 *  - the encoder was leaked (no drm_encoder_cleanup()) when
 *    drm_panel_bridge_add_typed() failed after drm_simple_encoder_init();
 *  - removed the dead store "connector = &rgb->connector;" which was
 *    immediately overwritten by drm_bridge_connector_init().
 */
struct rockchip_rgb *rockchip_rgb_init(struct device *dev,
				       struct drm_crtc *crtc,
				       struct drm_device *drm_dev,
				       int video_port)
{
	struct rockchip_rgb *rgb;
	struct drm_encoder *encoder;
	struct device_node *port, *endpoint;
	u32 endpoint_id;
	int ret = 0, child_count = 0;
	struct drm_panel *panel;
	struct drm_bridge *bridge;
	struct drm_connector *connector;

	rgb = devm_kzalloc(dev, sizeof(*rgb), GFP_KERNEL);
	if (!rgb)
		return ERR_PTR(-ENOMEM);

	rgb->dev = dev;
	rgb->drm_dev = drm_dev;

	port = of_graph_get_port_by_id(dev->of_node, video_port);
	if (!port)
		return ERR_PTR(-EINVAL);

	for_each_child_of_node(port, endpoint) {
		if (of_property_read_u32(endpoint, "reg", &endpoint_id))
			endpoint_id = 0;

		/* if subdriver (> 0) or error case (< 0), ignore entry */
		if (rockchip_drm_endpoint_is_subdriver(endpoint) != 0)
			continue;

		child_count++;
		ret = drm_of_find_panel_or_bridge(dev->of_node, video_port,
						  endpoint_id, &panel, &bridge);
		if (!ret) {
			of_node_put(endpoint);
			break;
		}
	}

	of_node_put(port);

	/* if the rgb output is not connected to anything, just return */
	if (!child_count)
		return NULL;

	if (ret < 0) {
		if (ret != -EPROBE_DEFER)
			DRM_DEV_ERROR(dev, "failed to find panel or bridge %d\n", ret);
		return ERR_PTR(ret);
	}

	encoder = &rgb->encoder.encoder;
	encoder->possible_crtcs = drm_crtc_mask(crtc);

	ret = drm_simple_encoder_init(drm_dev, encoder, DRM_MODE_ENCODER_NONE);
	if (ret < 0) {
		DRM_DEV_ERROR(drm_dev->dev,
			      "failed to initialize encoder: %d\n", ret);
		return ERR_PTR(ret);
	}

	drm_encoder_helper_add(encoder, &rockchip_rgb_encoder_helper_funcs);

	if (panel) {
		/* Wrap a bare panel into a bridge so both cases look alike. */
		bridge = drm_panel_bridge_add_typed(panel,
						    DRM_MODE_CONNECTOR_LVDS);
		if (IS_ERR(bridge)) {
			ret = PTR_ERR(bridge);
			goto err_free_encoder;
		}
	}

	rgb->bridge = bridge;

	ret = drm_bridge_attach(encoder, rgb->bridge, NULL,
				DRM_BRIDGE_ATTACH_NO_CONNECTOR);
	if (ret)
		goto err_free_encoder;

	connector = drm_bridge_connector_init(rgb->drm_dev, encoder);
	if (IS_ERR(connector)) {
		DRM_DEV_ERROR(drm_dev->dev,
			      "failed to initialize bridge connector: %pe\n",
			      connector);
		ret = PTR_ERR(connector);
		goto err_free_encoder;
	}

	rgb->encoder.crtc_endpoint_id = endpoint_id;

	ret = drm_connector_attach_encoder(connector, encoder);
	if (ret < 0) {
		DRM_DEV_ERROR(drm_dev->dev,
			      "failed to attach encoder: %d\n", ret);
		goto err_free_connector;
	}

	return rgb;

err_free_connector:
	drm_connector_cleanup(connector);
err_free_encoder:
	drm_encoder_cleanup(encoder);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(rockchip_rgb_init);
/*
 * Tear down an RGB output created by rockchip_rgb_init().
 * NOTE(review): rgb->connector is never initialized by the init path
 * (the bridge connector is a separate allocation), so this cleanup acts
 * on a zeroed embedded connector -- verify this is intentional.
 */
void rockchip_rgb_fini(struct rockchip_rgb *rgb)
{
	drm_panel_bridge_remove(rgb->bridge);
	drm_connector_cleanup(&rgb->connector);
	drm_encoder_cleanup(&rgb->encoder.encoder);
}
EXPORT_SYMBOL_GPL(rockchip_rgb_fini);
| linux-master | drivers/gpu/drm/rockchip/rockchip_rgb.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
* Zheng Yang <[email protected]>
* Yakir Yang <[email protected]>
*/
#include <linux/irq.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/hdmi.h>
#include <linux/mfd/syscon.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_of.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
#include "rockchip_drm_drv.h"
#include "rockchip_drm_vop.h"
#include "inno_hdmi.h"
/* Negotiated sink/mode parameters for the current HDMI configuration. */
struct hdmi_data_info {
	int vic;			/* CEA video identification code */
	bool sink_has_audio;
	unsigned int enc_in_format;
	unsigned int enc_out_format;
	unsigned int colorimetry;
};
/* State for the controller's built-in DDC (EDID) I2C master. */
struct inno_hdmi_i2c {
	struct i2c_adapter adap;
	u8 ddc_addr;
	u8 segment_addr;
	struct mutex lock;		/* serializes DDC transfers */
	struct completion cmp;		/* signalled from the IRQ handler */
};
/* Per-device state of the Innosilicon HDMI transmitter. */
struct inno_hdmi {
	struct device *dev;
	struct drm_device *drm_dev;
	int irq;
	struct clk *pclk;
	void __iomem *regs;
	struct drm_connector connector;
	struct rockchip_encoder encoder;
	struct inno_hdmi_i2c *i2c;
	struct i2c_adapter *ddc;
	unsigned int tmds_rate;		/* TMDS clock, used for DDC divider */
	struct hdmi_data_info	hdmi_data;
	struct drm_display_mode previous_mode;
};
/* Recover the inno_hdmi from its embedded rockchip encoder. */
static struct inno_hdmi *encoder_to_inno_hdmi(struct drm_encoder *encoder)
{
	return container_of(to_rockchip_encoder(encoder), struct inno_hdmi,
			    encoder);
}
/* Recover the inno_hdmi from its embedded DRM connector. */
static struct inno_hdmi *connector_to_inno_hdmi(struct drm_connector *connector)
{
	return container_of(connector, struct inno_hdmi, connector);
}
/* Color-space-conversion modes; index into the coeff_csc[] table below. */
enum {
	CSC_ITU601_16_235_TO_RGB_0_255_8BIT,
	CSC_ITU601_0_255_TO_RGB_0_255_8BIT,
	CSC_ITU709_16_235_TO_RGB_0_255_8BIT,
	CSC_RGB_0_255_TO_ITU601_16_235_8BIT,
	CSC_RGB_0_255_TO_ITU709_16_235_8BIT,
	CSC_RGB_0_255_TO_RGB_16_235_8BIT,
};
/* One 24-byte register blob per CSC mode, in the enum's order. */
static const char coeff_csc[][24] = {
	/*
	 * YUV2RGB:601 SD mode(Y[16:235], UV[16:240], RGB[0:255]):
	 *   R = 1.164*Y + 1.596*V - 204
	 *   G = 1.164*Y - 0.391*U - 0.813*V + 154
	 *   B = 1.164*Y + 2.018*U - 258
	 */
	{
		0x04, 0xa7, 0x00, 0x00, 0x06, 0x62, 0x02, 0xcc,
		0x04, 0xa7, 0x11, 0x90, 0x13, 0x40, 0x00, 0x9a,
		0x04, 0xa7, 0x08, 0x12, 0x00, 0x00, 0x03, 0x02
	},
	/*
	 * YUV2RGB:601 SD mode(YUV[0:255],RGB[0:255]):
	 *   R = Y + 1.402*V - 248
	 *   G = Y - 0.344*U - 0.714*V + 135
	 *   B = Y + 1.772*U - 227
	 */
	{
		0x04, 0x00, 0x00, 0x00, 0x05, 0x9b, 0x02, 0xf8,
		0x04, 0x00, 0x11, 0x60, 0x12, 0xdb, 0x00, 0x87,
		0x04, 0x00, 0x07, 0x16, 0x00, 0x00, 0x02, 0xe3
	},
	/*
	 * YUV2RGB:709 HD mode(Y[16:235],UV[16:240],RGB[0:255]):
	 *   R = 1.164*Y + 1.793*V - 248
	 *   G = 1.164*Y - 0.213*U - 0.534*V + 77
	 *   B = 1.164*Y + 2.115*U - 289
	 */
	{
		0x04, 0xa7, 0x00, 0x00, 0x07, 0x2c, 0x02, 0xf8,
		0x04, 0xa7, 0x10, 0xda, 0x12, 0x22, 0x00, 0x4d,
		0x04, 0xa7, 0x08, 0x74, 0x00, 0x00, 0x03, 0x21
	},
	/*
	 * RGB2YUV:601 SD mode:
	 *   Cb = -0.291G - 0.148R + 0.439B + 128
	 *   Y  =  0.504G + 0.257R + 0.098B + 16
	 *   Cr = -0.368G + 0.439R - 0.071B + 128
	 */
	{
		0x11, 0x5f, 0x01, 0x82, 0x10, 0x23, 0x00, 0x80,
		0x02, 0x1c, 0x00, 0xa1, 0x00, 0x36, 0x00, 0x1e,
		0x11, 0x29, 0x10, 0x59, 0x01, 0x82, 0x00, 0x80
	},
	/*
	 * RGB2YUV:709 HD mode:
	 *   Cb = - 0.338G - 0.101R + 0.439B + 128
	 *   Y  =   0.614G + 0.183R + 0.062B + 16
	 *   Cr = - 0.399G + 0.439R - 0.040B + 128
	 */
	{
		0x11, 0x98, 0x01, 0xc1, 0x10, 0x28, 0x00, 0x80,
		0x02, 0x74, 0x00, 0xbb, 0x00, 0x3f, 0x00, 0x10,
		0x11, 0x5a, 0x10, 0x67, 0x01, 0xc1, 0x00, 0x80
	},
	/*
	 * RGB[0:255]2RGB[16:235]:
	 *   R' = R x (235-16)/255 + 16;
	 *   G' = G x (235-16)/255 + 16;
	 *   B' = B x (235-16)/255 + 16;
	 */
	{
		0x00, 0x00, 0x03, 0x6F, 0x00, 0x00, 0x00, 0x10,
		0x03, 0x6F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10,
		0x00, 0x00, 0x00, 0x00, 0x03, 0x6F, 0x00, 0x10
	},
};
/* Register accessors: registers are byte-wide, spaced 4 bytes apart. */
static inline u8 hdmi_readb(struct inno_hdmi *hdmi, u16 offset)
{
	return readl_relaxed(hdmi->regs + (offset) * 0x04);
}
static inline void hdmi_writeb(struct inno_hdmi *hdmi, u16 offset, u32 val)
{
	writel_relaxed(val, hdmi->regs + (offset) * 0x04);
}
/* Read-modify-write: only bits in msk are changed (u8-wide register). */
static inline void hdmi_modb(struct inno_hdmi *hdmi, u16 offset,
			     u32 msk, u32 val)
{
	u8 temp = hdmi_readb(hdmi, offset) & ~msk;
	temp |= val & msk;
	hdmi_writeb(hdmi, offset, temp);
}
/*
 * Program the DDC (EDID bus) clock divider from the current TMDS rate
 * and leave the EDID-ready interrupt cleared and masked; it is unmasked
 * per-transfer in inno_hdmi_i2c_xfer().
 */
static void inno_hdmi_i2c_init(struct inno_hdmi *hdmi)
{
	int ddc_bus_freq;
	/* Divider source is tmds_rate / 4, targeting HDMI_SCL_RATE on SCL. */
	ddc_bus_freq = (hdmi->tmds_rate >> 2) / HDMI_SCL_RATE;
	hdmi_writeb(hdmi, DDC_BUS_FREQ_L, ddc_bus_freq & 0xFF);
	hdmi_writeb(hdmi, DDC_BUS_FREQ_H, (ddc_bus_freq >> 8) & 0xFF);
	/* Clear the EDID interrupt flag and mute the interrupt */
	hdmi_writeb(hdmi, HDMI_INTERRUPT_MASK1, 0);
	hdmi_writeb(hdmi, HDMI_INTERRUPT_STATUS1, m_INT_EDID_READY);
}
/* Toggle the controller power bit in the system control register. */
static void inno_hdmi_sys_power(struct inno_hdmi *hdmi, bool enable)
{
	hdmi_modb(hdmi, HDMI_SYS_CTRL, m_POWER,
		  enable ? v_PWR_ON : v_PWR_OFF);
}
/*
 * Switch the controller/PHY between full operation (NORMAL) and a
 * low-power state (LOWER_PWR).  The PHY register writes below are a
 * fixed power-up/power-down sequence; their order matters, so do not
 * reorder them.  The exact meaning of the magic values is undocumented
 * here — presumably vendor-provided PHY settings.
 */
static void inno_hdmi_set_pwr_mode(struct inno_hdmi *hdmi, int mode)
{
	switch (mode) {
	case NORMAL:
		/* Power the system block down while reprogramming the PHY. */
		inno_hdmi_sys_power(hdmi, false);
		hdmi_writeb(hdmi, HDMI_PHY_PRE_EMPHASIS, 0x6f);
		hdmi_writeb(hdmi, HDMI_PHY_DRIVER, 0xbb);
		hdmi_writeb(hdmi, HDMI_PHY_SYS_CTL, 0x15);
		hdmi_writeb(hdmi, HDMI_PHY_SYS_CTL, 0x14);
		hdmi_writeb(hdmi, HDMI_PHY_SYS_CTL, 0x10);
		hdmi_writeb(hdmi, HDMI_PHY_CHG_PWR, 0x0f);
		hdmi_writeb(hdmi, HDMI_PHY_SYNC, 0x00);
		hdmi_writeb(hdmi, HDMI_PHY_SYNC, 0x01);
		inno_hdmi_sys_power(hdmi, true);
		break;
	case LOWER_PWR:
		/* Drop drivers/pre-emphasis and keep the PHY powered down. */
		inno_hdmi_sys_power(hdmi, false);
		hdmi_writeb(hdmi, HDMI_PHY_DRIVER, 0x00);
		hdmi_writeb(hdmi, HDMI_PHY_PRE_EMPHASIS, 0x00);
		hdmi_writeb(hdmi, HDMI_PHY_CHG_PWR, 0x00);
		hdmi_writeb(hdmi, HDMI_PHY_SYS_CTL, 0x15);
		break;
	default:
		DRM_DEV_ERROR(hdmi->dev, "Unknown power mode %d\n", mode);
	}
}
/*
 * Bring the controller out of reset: release the digital then the
 * analog reset lines (with settle delays), select the system register
 * clock with the required polarity, power on, and enter NORMAL mode.
 */
static void inno_hdmi_reset(struct inno_hdmi *hdmi)
{
	u32 val;
	u32 msk;
	hdmi_modb(hdmi, HDMI_SYS_CTRL, m_RST_DIGITAL, v_NOT_RST_DIGITAL);
	udelay(100);
	hdmi_modb(hdmi, HDMI_SYS_CTRL, m_RST_ANALOG, v_NOT_RST_ANALOG);
	udelay(100);
	msk = m_REG_CLK_INV | m_REG_CLK_SOURCE | m_POWER | m_INT_POL;
	val = v_REG_CLK_INV | v_REG_CLK_SOURCE_SYS | v_PWR_ON | v_INT_POL_HIGH;
	hdmi_modb(hdmi, HDMI_SYS_CTRL, msk, val);
	inno_hdmi_set_pwr_mode(hdmi, NORMAL);
}
/*
 * Pack @frame and write it into the controller's infoframe packet
 * buffer selected by @frame_index.
 *
 * @setup_rc: result of the drm_*_infoframe_from_display_mode() call
 *	that filled @frame.  If negative, nothing is packed or uploaded
 *	and the code is returned to the caller unchanged.
 * @mask/@disable/@enable: bits in HDMI_PACKET_SEND_AUTO used to stop
 *	automatic transmission of this packet while its buffer is being
 *	rewritten, and to re-arm it afterwards.  A zero @mask skips the
 *	gating entirely.
 *
 * Returns @setup_rc on success, or the negative error from
 * hdmi_infoframe_pack().  Note that on a pack failure the packet is
 * left disabled (transmission was gated but never re-enabled).
 */
static int inno_hdmi_upload_frame(struct inno_hdmi *hdmi, int setup_rc,
				  union hdmi_infoframe *frame, u32 frame_index,
				  u32 mask, u32 disable, u32 enable)
{
	if (mask)
		hdmi_modb(hdmi, HDMI_PACKET_SEND_AUTO, mask, disable);
	hdmi_writeb(hdmi, HDMI_CONTROL_PACKET_BUF_INDEX, frame_index);
	if (setup_rc >= 0) {
		u8 packed_frame[HDMI_MAXIMUM_INFO_FRAME_SIZE];
		ssize_t rc, i;
		rc = hdmi_infoframe_pack(frame, packed_frame,
					 sizeof(packed_frame));
		if (rc < 0)
			return rc;
		/* Copy the packed bytes into the packet buffer one by one. */
		for (i = 0; i < rc; i++)
			hdmi_writeb(hdmi, HDMI_CONTROL_PACKET_ADDR + i,
				    packed_frame[i]);
		if (mask)
			hdmi_modb(hdmi, HDMI_PACKET_SEND_AUTO, mask, enable);
	}
	return setup_rc;
}
/*
 * Build and upload the HDMI vendor-specific infoframe for @mode,
 * gating automatic VSI transmission while the buffer is rewritten.
 * A negative build result (e.g. the mode needs no VSI) is forwarded
 * to inno_hdmi_upload_frame(), which then skips the upload.
 */
static int inno_hdmi_config_video_vsi(struct inno_hdmi *hdmi,
				      struct drm_display_mode *mode)
{
	union hdmi_infoframe frame;
	int rc;
	rc = drm_hdmi_vendor_infoframe_from_display_mode(&frame.vendor.hdmi,
							 &hdmi->connector,
							 mode);
	return inno_hdmi_upload_frame(hdmi, rc, &frame, INFOFRAME_VSI,
		m_PACKET_VSI_EN, v_PACKET_VSI_EN(0), v_PACKET_VSI_EN(1));
}
/*
 * Build and upload the AVI infoframe for @mode, patching the reported
 * colorspace to match the encoder's configured output format.
 */
static int inno_hdmi_config_video_avi(struct inno_hdmi *hdmi,
				      struct drm_display_mode *mode)
{
	union hdmi_infoframe frame;
	int rc;

	rc = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi,
						      &hdmi->connector,
						      mode);

	switch (hdmi->hdmi_data.enc_out_format) {
	case HDMI_COLORSPACE_YUV444:
		frame.avi.colorspace = HDMI_COLORSPACE_YUV444;
		break;
	case HDMI_COLORSPACE_YUV422:
		frame.avi.colorspace = HDMI_COLORSPACE_YUV422;
		break;
	default:
		frame.avi.colorspace = HDMI_COLORSPACE_RGB;
		break;
	}

	/* The AVI packet needs no send-auto gating (mask == 0). */
	return inno_hdmi_upload_frame(hdmi, rc, &frame, INFOFRAME_AVI, 0, 0, 0);
}
/*
 * Configure the colour-space conversion block according to the input
 * and output formats recorded in hdmi->hdmi_data.
 *
 * If input and output formats match (RGB->RGB or YUV->YUV) the CSC is
 * bypassed entirely.  Otherwise a coeff_csc[] table is selected from
 * the (colorimetry, in-format, out-format) combination and loaded into
 * the hardware.  Always returns 0.
 */
static int inno_hdmi_config_video_csc(struct inno_hdmi *hdmi)
{
	struct hdmi_data_info *data = &hdmi->hdmi_data;
	int c0_c2_change = 0;
	int csc_enable = 0;
	int csc_mode = 0;
	int auto_csc = 0;
	int value;
	int i;
	/* Input video mode is SDR RGB24bit, data enable signal from external */
	hdmi_writeb(hdmi, HDMI_VIDEO_CONTRL1, v_DE_EXTERNAL |
		    v_VIDEO_INPUT_FORMAT(VIDEO_INPUT_SDR_RGB444));
	/* Input color hardcode to RGB, and output color hardcode to RGB888 */
	value = v_VIDEO_INPUT_BITS(VIDEO_INPUT_8BITS) |
		v_VIDEO_OUTPUT_COLOR(0) |
		v_VIDEO_INPUT_CSP(0);
	hdmi_writeb(hdmi, HDMI_VIDEO_CONTRL2, value);
	if (data->enc_in_format == data->enc_out_format) {
		if ((data->enc_in_format == HDMI_COLORSPACE_RGB) ||
		    (data->enc_in_format >= HDMI_COLORSPACE_YUV444)) {
			/* No conversion needed: disable auto CSC and swap. */
			value = v_SOF_DISABLE | v_COLOR_DEPTH_NOT_INDICATED(1);
			hdmi_writeb(hdmi, HDMI_VIDEO_CONTRL3, value);
			hdmi_modb(hdmi, HDMI_VIDEO_CONTRL,
				  m_VIDEO_AUTO_CSC | m_VIDEO_C0_C2_SWAP,
				  v_VIDEO_AUTO_CSC(AUTO_CSC_DISABLE) |
				  v_VIDEO_C0_C2_SWAP(C0_C2_CHANGE_DISABLE));
			return 0;
		}
	}
	if (data->colorimetry == HDMI_COLORIMETRY_ITU_601) {
		if ((data->enc_in_format == HDMI_COLORSPACE_RGB) &&
		    (data->enc_out_format == HDMI_COLORSPACE_YUV444)) {
			csc_mode = CSC_RGB_0_255_TO_ITU601_16_235_8BIT;
			auto_csc = AUTO_CSC_DISABLE;
			c0_c2_change = C0_C2_CHANGE_DISABLE;
			csc_enable = v_CSC_ENABLE;
		} else if ((data->enc_in_format == HDMI_COLORSPACE_YUV444) &&
			   (data->enc_out_format == HDMI_COLORSPACE_RGB)) {
			csc_mode = CSC_ITU601_16_235_TO_RGB_0_255_8BIT;
			auto_csc = AUTO_CSC_ENABLE;
			c0_c2_change = C0_C2_CHANGE_DISABLE;
			csc_enable = v_CSC_DISABLE;
		}
	} else {
		/* Anything other than ITU601 is treated as ITU709 here. */
		if ((data->enc_in_format == HDMI_COLORSPACE_RGB) &&
		    (data->enc_out_format == HDMI_COLORSPACE_YUV444)) {
			csc_mode = CSC_RGB_0_255_TO_ITU709_16_235_8BIT;
			auto_csc = AUTO_CSC_DISABLE;
			c0_c2_change = C0_C2_CHANGE_DISABLE;
			csc_enable = v_CSC_ENABLE;
		} else if ((data->enc_in_format == HDMI_COLORSPACE_YUV444) &&
			   (data->enc_out_format == HDMI_COLORSPACE_RGB)) {
			csc_mode = CSC_ITU709_16_235_TO_RGB_0_255_8BIT;
			auto_csc = AUTO_CSC_ENABLE;
			c0_c2_change = C0_C2_CHANGE_DISABLE;
			csc_enable = v_CSC_DISABLE;
		}
	}
	/*
	 * NOTE(review): if no branch above matched, csc_mode stays 0 (the
	 * first table entry) — presumably unreachable with the formats set
	 * by inno_hdmi_setup(); verify if new formats are added.
	 */
	for (i = 0; i < 24; i++)
		hdmi_writeb(hdmi, HDMI_VIDEO_CSC_COEF + i,
			    coeff_csc[csc_mode][i]);
	value = v_SOF_DISABLE | csc_enable | v_COLOR_DEPTH_NOT_INDICATED(1);
	hdmi_writeb(hdmi, HDMI_VIDEO_CONTRL3, value);
	hdmi_modb(hdmi, HDMI_VIDEO_CONTRL, m_VIDEO_AUTO_CSC |
		  m_VIDEO_C0_C2_SWAP, v_VIDEO_AUTO_CSC(auto_csc) |
		  v_VIDEO_C0_C2_SWAP(c0_c2_change));
	return 0;
}
/*
 * Program the "external" video timing registers from @mode: sync
 * polarities, interlace flag, then the horizontal totals/blanking/
 * sync (as 16-bit L/H register pairs) and the vertical equivalents
 * (8-bit registers).  Always returns 0.
 */
static int inno_hdmi_config_video_timing(struct inno_hdmi *hdmi,
					 struct drm_display_mode *mode)
{
	int value;
	/* Set detail external video timing polarity and interlace mode */
	value = v_EXTERANL_VIDEO(1);
	value |= mode->flags & DRM_MODE_FLAG_PHSYNC ?
		 v_HSYNC_POLARITY(1) : v_HSYNC_POLARITY(0);
	value |= mode->flags & DRM_MODE_FLAG_PVSYNC ?
		 v_VSYNC_POLARITY(1) : v_VSYNC_POLARITY(0);
	value |= mode->flags & DRM_MODE_FLAG_INTERLACE ?
		 v_INETLACE(1) : v_INETLACE(0);
	hdmi_writeb(hdmi, HDMI_VIDEO_TIMING_CTL, value);
	/* Set detail external video timing */
	value = mode->htotal;
	hdmi_writeb(hdmi, HDMI_VIDEO_EXT_HTOTAL_L, value & 0xFF);
	hdmi_writeb(hdmi, HDMI_VIDEO_EXT_HTOTAL_H, (value >> 8) & 0xFF);
	value = mode->htotal - mode->hdisplay;
	hdmi_writeb(hdmi, HDMI_VIDEO_EXT_HBLANK_L, value & 0xFF);
	hdmi_writeb(hdmi, HDMI_VIDEO_EXT_HBLANK_H, (value >> 8) & 0xFF);
	value = mode->hsync_start - mode->hdisplay;
	hdmi_writeb(hdmi, HDMI_VIDEO_EXT_HDELAY_L, value & 0xFF);
	hdmi_writeb(hdmi, HDMI_VIDEO_EXT_HDELAY_H, (value >> 8) & 0xFF);
	value = mode->hsync_end - mode->hsync_start;
	hdmi_writeb(hdmi, HDMI_VIDEO_EXT_HDURATION_L, value & 0xFF);
	hdmi_writeb(hdmi, HDMI_VIDEO_EXT_HDURATION_H, (value >> 8) & 0xFF);
	value = mode->vtotal;
	hdmi_writeb(hdmi, HDMI_VIDEO_EXT_VTOTAL_L, value & 0xFF);
	hdmi_writeb(hdmi, HDMI_VIDEO_EXT_VTOTAL_H, (value >> 8) & 0xFF);
	value = mode->vtotal - mode->vdisplay;
	hdmi_writeb(hdmi, HDMI_VIDEO_EXT_VBLANK, value & 0xFF);
	value = mode->vsync_start - mode->vdisplay;
	hdmi_writeb(hdmi, HDMI_VIDEO_EXT_VDELAY, value & 0xFF);
	value = mode->vsync_end - mode->vsync_start;
	hdmi_writeb(hdmi, HDMI_VIDEO_EXT_VDURATION, value & 0xFF);
	/* Fixed PHY PLL dividers — presumably vendor settings; verify. */
	hdmi_writeb(hdmi, HDMI_PHY_PRE_DIV_RATIO, 0x1e);
	hdmi_writeb(hdmi, HDMI_PHY_FEEDBACK_DIV_RATIO_LOW, 0x2c);
	hdmi_writeb(hdmi, HDMI_PHY_FEEDBACK_DIV_RATIO_HIGH, 0x01);
	return 0;
}
/*
 * Full mode programming: select colorimetry from the CEA VIC, mute
 * output, set HDMI/DVI mode, program timing, CSC and (for HDMI sinks)
 * the AVI/VSI infoframes, re-init the DDC clock for the new TMDS rate,
 * then unmute.  Always returns 0.
 */
static int inno_hdmi_setup(struct inno_hdmi *hdmi,
			   struct drm_display_mode *mode)
{
	struct drm_display_info *display = &hdmi->connector.display_info;
	hdmi->hdmi_data.vic = drm_match_cea_mode(mode);
	hdmi->hdmi_data.enc_in_format = HDMI_COLORSPACE_RGB;
	hdmi->hdmi_data.enc_out_format = HDMI_COLORSPACE_RGB;
	/* CEA VICs of SD modes use ITU601 colorimetry, all others ITU709. */
	if ((hdmi->hdmi_data.vic == 6) || (hdmi->hdmi_data.vic == 7) ||
	    (hdmi->hdmi_data.vic == 21) || (hdmi->hdmi_data.vic == 22) ||
	    (hdmi->hdmi_data.vic == 2) || (hdmi->hdmi_data.vic == 3) ||
	    (hdmi->hdmi_data.vic == 17) || (hdmi->hdmi_data.vic == 18))
		hdmi->hdmi_data.colorimetry = HDMI_COLORIMETRY_ITU_601;
	else
		hdmi->hdmi_data.colorimetry = HDMI_COLORIMETRY_ITU_709;
	/* Mute video and audio output */
	hdmi_modb(hdmi, HDMI_AV_MUTE, m_AUDIO_MUTE | m_VIDEO_BLACK,
		  v_AUDIO_MUTE(1) | v_VIDEO_MUTE(1));
	/* Set HDMI Mode */
	hdmi_writeb(hdmi, HDMI_HDCP_CTRL,
		    v_HDMI_DVI(display->is_hdmi));
	inno_hdmi_config_video_timing(hdmi, mode);
	inno_hdmi_config_video_csc(hdmi);
	/* DVI sinks do not accept infoframes. */
	if (display->is_hdmi) {
		inno_hdmi_config_video_avi(hdmi, mode);
		inno_hdmi_config_video_vsi(hdmi, mode);
	}
	/*
	 * When IP controller have configured to an accurate video
	 * timing, then the TMDS clock source would be switched to
	 * DCLK_LCDC, so we need to init the TMDS rate to mode pixel
	 * clock rate, and reconfigure the DDC clock.
	 */
	hdmi->tmds_rate = mode->clock * 1000;
	inno_hdmi_i2c_init(hdmi);
	/* Unmute video and audio output */
	hdmi_modb(hdmi, HDMI_AV_MUTE, m_AUDIO_MUTE | m_VIDEO_BLACK,
		  v_AUDIO_MUTE(0) | v_VIDEO_MUTE(0));
	return 0;
}
/* Encoder mode_set: program the controller and remember the mode. */
static void inno_hdmi_encoder_mode_set(struct drm_encoder *encoder,
				       struct drm_display_mode *mode,
				       struct drm_display_mode *adj_mode)
{
	struct inno_hdmi *hdmi = encoder_to_inno_hdmi(encoder);
	inno_hdmi_setup(hdmi, adj_mode);
	/* Store the display mode for plugin/DPMS poweron events */
	drm_mode_copy(&hdmi->previous_mode, adj_mode);
}
/* Encoder enable/disable map directly onto PHY power modes. */
static void inno_hdmi_encoder_enable(struct drm_encoder *encoder)
{
	struct inno_hdmi *hdmi = encoder_to_inno_hdmi(encoder);
	inno_hdmi_set_pwr_mode(hdmi, NORMAL);
}
static void inno_hdmi_encoder_disable(struct drm_encoder *encoder)
{
	struct inno_hdmi *hdmi = encoder_to_inno_hdmi(encoder);
	inno_hdmi_set_pwr_mode(hdmi, LOWER_PWR);
}
/* No fixup needed: every adjusted mode is accepted as-is. */
static bool inno_hdmi_encoder_mode_fixup(struct drm_encoder *encoder,
					 const struct drm_display_mode *mode,
					 struct drm_display_mode *adj_mode)
{
	return true;
}
/*
 * Tell the Rockchip CRTC which output mode/type this encoder needs
 * (parallel RGB888 toward an HDMI connector).
 */
static int
inno_hdmi_encoder_atomic_check(struct drm_encoder *encoder,
			       struct drm_crtc_state *crtc_state,
			       struct drm_connector_state *conn_state)
{
	struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc_state);
	s->output_mode = ROCKCHIP_OUT_MODE_P888;
	s->output_type = DRM_MODE_CONNECTOR_HDMIA;
	return 0;
}
static struct drm_encoder_helper_funcs inno_hdmi_encoder_helper_funcs = {
	.enable     = inno_hdmi_encoder_enable,
	.disable    = inno_hdmi_encoder_disable,
	.mode_fixup = inno_hdmi_encoder_mode_fixup,
	.mode_set   = inno_hdmi_encoder_mode_set,
	.atomic_check = inno_hdmi_encoder_atomic_check,
};
/* Report connector presence from the latched HPD bit in HDMI_STATUS. */
static enum drm_connector_status
inno_hdmi_connector_detect(struct drm_connector *connector, bool force)
{
	struct inno_hdmi *hdmi = connector_to_inno_hdmi(connector);
	bool plugged;

	plugged = hdmi_readb(hdmi, HDMI_STATUS) & m_HOTPLUG;

	return plugged ? connector_status_connected
		       : connector_status_disconnected;
}
/*
 * Fetch the sink's EDID over the internal DDC bus, record whether the
 * sink supports audio, and populate the connector's mode list.
 * Returns the number of modes added (0 without a DDC bus or EDID).
 */
static int inno_hdmi_connector_get_modes(struct drm_connector *connector)
{
	struct inno_hdmi *hdmi = connector_to_inno_hdmi(connector);
	struct edid *edid;
	int num_modes;

	if (!hdmi->ddc)
		return 0;

	edid = drm_get_edid(connector, hdmi->ddc);
	if (!edid)
		return 0;

	hdmi->hdmi_data.sink_has_audio = drm_detect_monitor_audio(edid);
	drm_connector_update_edid_property(connector, edid);
	num_modes = drm_add_edid_modes(connector, edid);
	kfree(edid);

	return num_modes;
}
/* Accept every mode; sizing is limited via fill_modes below instead. */
static enum drm_mode_status
inno_hdmi_connector_mode_valid(struct drm_connector *connector,
			       struct drm_display_mode *mode)
{
	return MODE_OK;
}
/*
 * Override the caller-supplied limits and always probe with a
 * 1920x1080 cap — presumably the controller's maximum resolution.
 */
static int
inno_hdmi_probe_single_connector_modes(struct drm_connector *connector,
				       uint32_t maxX, uint32_t maxY)
{
	return drm_helper_probe_single_connector_modes(connector, 1920, 1080);
}
static void inno_hdmi_connector_destroy(struct drm_connector *connector)
{
	drm_connector_unregister(connector);
	drm_connector_cleanup(connector);
}
static const struct drm_connector_funcs inno_hdmi_connector_funcs = {
	.fill_modes = inno_hdmi_probe_single_connector_modes,
	.detect = inno_hdmi_connector_detect,
	.destroy = inno_hdmi_connector_destroy,
	.reset = drm_atomic_helper_connector_reset,
	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
static struct drm_connector_helper_funcs inno_hdmi_connector_helper_funcs = {
	.get_modes = inno_hdmi_connector_get_modes,
	.mode_valid = inno_hdmi_connector_mode_valid,
};
/*
 * Register the encoder and connector with the DRM device.
 *
 * Returns 0 on success, -EPROBE_DEFER when the CRTC is not available
 * yet, or a negative error from encoder/connector initialization.
 *
 * Fix: the return values of drm_simple_encoder_init() and
 * drm_connector_init_with_ddc() were previously ignored, so a failure
 * there was silently reported as success and left a half-initialized
 * encoder/connector behind.
 */
static int inno_hdmi_register(struct drm_device *drm, struct inno_hdmi *hdmi)
{
	struct drm_encoder *encoder = &hdmi->encoder.encoder;
	struct device *dev = hdmi->dev;
	int ret;

	encoder->possible_crtcs = drm_of_find_possible_crtcs(drm, dev->of_node);
	/*
	 * If we failed to find the CRTC(s) which this encoder is
	 * supposed to be connected to, it's because the CRTC has
	 * not been registered yet. Defer probing, and hope that
	 * the required CRTC is added later.
	 */
	if (encoder->possible_crtcs == 0)
		return -EPROBE_DEFER;

	drm_encoder_helper_add(encoder, &inno_hdmi_encoder_helper_funcs);
	ret = drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS);
	if (ret)
		return ret;

	hdmi->connector.polled = DRM_CONNECTOR_POLL_HPD;
	drm_connector_helper_add(&hdmi->connector,
				 &inno_hdmi_connector_helper_funcs);
	ret = drm_connector_init_with_ddc(drm, &hdmi->connector,
					  &inno_hdmi_connector_funcs,
					  DRM_MODE_CONNECTOR_HDMIA,
					  hdmi->ddc);
	if (ret) {
		/* Tear the encoder back down; same call the unbind path uses. */
		encoder->funcs->destroy(encoder);
		return ret;
	}

	drm_connector_attach_encoder(&hdmi->connector, encoder);
	return 0;
}
/*
 * Handle the EDID-ready interrupt: acknowledge it and wake the DDC
 * transfer waiting in inno_hdmi_i2c_read().
 */
static irqreturn_t inno_hdmi_i2c_irq(struct inno_hdmi *hdmi)
{
	struct inno_hdmi_i2c *i2c = hdmi->i2c;
	u8 stat;
	stat = hdmi_readb(hdmi, HDMI_INTERRUPT_STATUS1);
	if (!(stat & m_INT_EDID_READY))
		return IRQ_NONE;
	/* Clear HDMI EDID interrupt flag */
	hdmi_writeb(hdmi, HDMI_INTERRUPT_STATUS1, m_INT_EDID_READY);
	complete(&i2c->cmp);
	return IRQ_HANDLED;
}
/*
 * Hard IRQ: service DDC completion inline; defer hotplug handling to
 * the threaded handler below (it can sleep in drm_helper_hpd_irq_event).
 */
static irqreturn_t inno_hdmi_hardirq(int irq, void *dev_id)
{
	struct inno_hdmi *hdmi = dev_id;
	irqreturn_t ret = IRQ_NONE;
	u8 interrupt;
	if (hdmi->i2c)
		ret = inno_hdmi_i2c_irq(hdmi);
	interrupt = hdmi_readb(hdmi, HDMI_STATUS);
	if (interrupt & m_INT_HOTPLUG) {
		/* Ack the hotplug interrupt, then wake the thread. */
		hdmi_modb(hdmi, HDMI_STATUS, m_INT_HOTPLUG, m_INT_HOTPLUG);
		ret = IRQ_WAKE_THREAD;
	}
	return ret;
}
/* Threaded IRQ: notify DRM core of a hotplug state change. */
static irqreturn_t inno_hdmi_irq(int irq, void *dev_id)
{
	struct inno_hdmi *hdmi = dev_id;
	drm_helper_hpd_irq_event(hdmi->connector.dev);
	return IRQ_HANDLED;
}
/*
 * Drain one EDID read transfer from the hardware FIFO.  The fetch was
 * started by the preceding write; wait (up to 100 ms) for the
 * EDID-ready interrupt before reading the bytes out.
 * Returns 0 on success or -EAGAIN on timeout.
 */
static int inno_hdmi_i2c_read(struct inno_hdmi *hdmi, struct i2c_msg *msgs)
{
	u8 *dst = msgs->buf;
	int remaining = msgs->len;

	if (!wait_for_completion_timeout(&hdmi->i2c->cmp, HZ / 10))
		return -EAGAIN;

	while (remaining--)
		*dst++ = hdmi_readb(hdmi, HDMI_EDID_FIFO_ADDR);

	return 0;
}
/*
 * Handle the write half of a DDC transaction.  Only single-byte writes
 * to the EDID (0x50) or segment-pointer (0x30) address are supported;
 * the byte is latched as the word/segment offset and the EDID fetch
 * registers are programmed.  The hardware fetch completes via the
 * EDID-ready interrupt consumed in inno_hdmi_i2c_read().
 * Returns 0 on success or -EINVAL for unsupported messages.
 */
static int inno_hdmi_i2c_write(struct inno_hdmi *hdmi, struct i2c_msg *msgs)
{
	/*
	 * The DDC module only support read EDID message, so
	 * we assume that each word write to this i2c adapter
	 * should be the offset of EDID word address.
	 */
	if ((msgs->len != 1) ||
	    ((msgs->addr != DDC_ADDR) && (msgs->addr != DDC_SEGMENT_ADDR)))
		return -EINVAL;
	reinit_completion(&hdmi->i2c->cmp);
	if (msgs->addr == DDC_SEGMENT_ADDR)
		hdmi->i2c->segment_addr = msgs->buf[0];
	if (msgs->addr == DDC_ADDR)
		hdmi->i2c->ddc_addr = msgs->buf[0];
	/* Set edid fifo first addr */
	hdmi_writeb(hdmi, HDMI_EDID_FIFO_OFFSET, 0x00);
	/* Set edid word address 0x00/0x80 */
	hdmi_writeb(hdmi, HDMI_EDID_WORD_ADDR, hdmi->i2c->ddc_addr);
	/* Set edid segment pointer */
	hdmi_writeb(hdmi, HDMI_EDID_SEGMENT_POINTER, hdmi->i2c->segment_addr);
	return 0;
}
/*
 * i2c_algorithm.master_xfer for the internal DDC bus.  Unmasks the
 * EDID-ready interrupt for the duration of the transfer, dispatches
 * each message to the read/write helpers, and re-masks it afterwards.
 * Returns the number of messages transferred, or a negative error.
 */
static int inno_hdmi_i2c_xfer(struct i2c_adapter *adap,
			      struct i2c_msg *msgs, int num)
{
	struct inno_hdmi *hdmi = i2c_get_adapdata(adap);
	struct inno_hdmi_i2c *i2c = hdmi->i2c;
	int i, ret = 0;
	/* Serialize transfers against each other. */
	mutex_lock(&i2c->lock);
	/* Clear the EDID interrupt flag and unmute the interrupt */
	hdmi_writeb(hdmi, HDMI_INTERRUPT_MASK1, m_INT_EDID_READY);
	hdmi_writeb(hdmi, HDMI_INTERRUPT_STATUS1, m_INT_EDID_READY);
	for (i = 0; i < num; i++) {
		DRM_DEV_DEBUG(hdmi->dev,
			      "xfer: num: %d/%d, len: %d, flags: %#x\n",
			      i + 1, num, msgs[i].len, msgs[i].flags);
		if (msgs[i].flags & I2C_M_RD)
			ret = inno_hdmi_i2c_read(hdmi, &msgs[i]);
		else
			ret = inno_hdmi_i2c_write(hdmi, &msgs[i]);
		if (ret < 0)
			break;
	}
	if (!ret)
		ret = num;
	/* Mute HDMI EDID interrupt */
	hdmi_writeb(hdmi, HDMI_INTERRUPT_MASK1, 0);
	mutex_unlock(&i2c->lock);
	return ret;
}
/* Advertise plain I2C plus emulated SMBus for the DDC adapter. */
static u32 inno_hdmi_i2c_func(struct i2c_adapter *adapter)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}
static const struct i2c_algorithm inno_hdmi_algorithm = {
	.master_xfer	= inno_hdmi_i2c_xfer,
	.functionality	= inno_hdmi_i2c_func,
};
/*
 * Create and register the I2C adapter that fronts the controller's
 * built-in DDC engine.  On success hdmi->i2c is set and the adapter
 * is returned; on failure an ERR_PTR is returned and the allocation
 * is released (devm would also free it, but only at device detach).
 */
static struct i2c_adapter *inno_hdmi_i2c_adapter(struct inno_hdmi *hdmi)
{
	struct i2c_adapter *adap;
	struct inno_hdmi_i2c *i2c;
	int ret;
	i2c = devm_kzalloc(hdmi->dev, sizeof(*i2c), GFP_KERNEL);
	if (!i2c)
		return ERR_PTR(-ENOMEM);
	mutex_init(&i2c->lock);
	init_completion(&i2c->cmp);
	adap = &i2c->adap;
	adap->class = I2C_CLASS_DDC;
	adap->owner = THIS_MODULE;
	adap->dev.parent = hdmi->dev;
	adap->dev.of_node = hdmi->dev->of_node;
	adap->algo = &inno_hdmi_algorithm;
	strscpy(adap->name, "Inno HDMI", sizeof(adap->name));
	i2c_set_adapdata(adap, hdmi);
	ret = i2c_add_adapter(adap);
	if (ret) {
		dev_warn(hdmi->dev, "cannot add %s I2C adapter\n", adap->name);
		devm_kfree(hdmi->dev, i2c);
		return ERR_PTR(ret);
	}
	hdmi->i2c = i2c;
	DRM_DEV_INFO(hdmi->dev, "registered %s I2C bus driver\n", adap->name);
	return adap;
}
/*
 * Component bind callback: map registers, enable the APB clock, reset
 * the controller, bring up the DDC adapter, register encoder/connector
 * and install the IRQ handlers.  Errors unwind through the goto chain
 * in reverse acquisition order.
 */
static int inno_hdmi_bind(struct device *dev, struct device *master,
			  void *data)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct drm_device *drm = data;
	struct inno_hdmi *hdmi;
	int irq;
	int ret;
	hdmi = devm_kzalloc(dev, sizeof(*hdmi), GFP_KERNEL);
	if (!hdmi)
		return -ENOMEM;
	hdmi->dev = dev;
	hdmi->drm_dev = drm;
	hdmi->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(hdmi->regs))
		return PTR_ERR(hdmi->regs);
	hdmi->pclk = devm_clk_get(hdmi->dev, "pclk");
	if (IS_ERR(hdmi->pclk)) {
		DRM_DEV_ERROR(hdmi->dev, "Unable to get HDMI pclk clk\n");
		return PTR_ERR(hdmi->pclk);
	}
	ret = clk_prepare_enable(hdmi->pclk);
	if (ret) {
		DRM_DEV_ERROR(hdmi->dev,
			      "Cannot enable HDMI pclk clock: %d\n", ret);
		return ret;
	}
	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		ret = irq;
		goto err_disable_clk;
	}
	inno_hdmi_reset(hdmi);
	hdmi->ddc = inno_hdmi_i2c_adapter(hdmi);
	if (IS_ERR(hdmi->ddc)) {
		ret = PTR_ERR(hdmi->ddc);
		/* NULL out so the error path's i2c_put_adapter() is a no-op. */
		hdmi->ddc = NULL;
		goto err_disable_clk;
	}
	/*
	 * When IP controller haven't configured to an accurate video
	 * timing, then the TMDS clock source would be switched to
	 * PCLK_HDMI, so we need to init the TMDS rate to PCLK rate,
	 * and reconfigure the DDC clock.
	 */
	hdmi->tmds_rate = clk_get_rate(hdmi->pclk);
	inno_hdmi_i2c_init(hdmi);
	ret = inno_hdmi_register(drm, hdmi);
	if (ret)
		goto err_put_adapter;
	dev_set_drvdata(dev, hdmi);
	/* Unmute hotplug interrupt */
	hdmi_modb(hdmi, HDMI_STATUS, m_MASK_INT_HOTPLUG, v_MASK_INT_HOTPLUG(1));
	ret = devm_request_threaded_irq(dev, irq, inno_hdmi_hardirq,
					inno_hdmi_irq, IRQF_SHARED,
					dev_name(dev), hdmi);
	if (ret < 0)
		goto err_cleanup_hdmi;
	return 0;
err_cleanup_hdmi:
	hdmi->connector.funcs->destroy(&hdmi->connector);
	hdmi->encoder.encoder.funcs->destroy(&hdmi->encoder.encoder);
err_put_adapter:
	i2c_put_adapter(hdmi->ddc);
err_disable_clk:
	clk_disable_unprepare(hdmi->pclk);
	return ret;
}
/* Component unbind: tear down in reverse order of inno_hdmi_bind(). */
static void inno_hdmi_unbind(struct device *dev, struct device *master,
			     void *data)
{
	struct inno_hdmi *hdmi = dev_get_drvdata(dev);
	hdmi->connector.funcs->destroy(&hdmi->connector);
	hdmi->encoder.encoder.funcs->destroy(&hdmi->encoder.encoder);
	i2c_put_adapter(hdmi->ddc);
	clk_disable_unprepare(hdmi->pclk);
}
static const struct component_ops inno_hdmi_ops = {
	.bind	= inno_hdmi_bind,
	.unbind	= inno_hdmi_unbind,
};
/* Platform probe/remove just register with the component framework. */
static int inno_hdmi_probe(struct platform_device *pdev)
{
	return component_add(&pdev->dev, &inno_hdmi_ops);
}
static void inno_hdmi_remove(struct platform_device *pdev)
{
	component_del(&pdev->dev, &inno_hdmi_ops);
}
/* Devicetree match table — currently only the RK3036 variant. */
static const struct of_device_id inno_hdmi_dt_ids[] = {
	{ .compatible = "rockchip,rk3036-inno-hdmi",
	},
	{},
};
MODULE_DEVICE_TABLE(of, inno_hdmi_dt_ids);
struct platform_driver inno_hdmi_driver = {
	.probe  = inno_hdmi_probe,
	.remove_new = inno_hdmi_remove,
	.driver = {
		.name = "innohdmi-rockchip",
		.of_match_table = inno_hdmi_dt_ids,
	},
};
| linux-master | drivers/gpu/drm/rockchip/inno_hdmi.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
* Author:Mark Yao <[email protected]>
*/
#include <linux/component.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_plane.h>
#include <drm/drm_print.h>
#include "rockchip_drm_vop.h"
#include "rockchip_vop_reg.h"
#include "rockchip_drm_drv.h"
/*
 * _VOP_REG() describes one bit-field within a VOP register:
 * @off is the register offset, @_mask/@_shift locate the field,
 * @_write_mask marks fields whose register uses a write-enable mask
 * (presumably the self-masking upper half common on Rockchip IP —
 * verify against the vop core), and @_relaxed selects relaxed MMIO
 * accessors.  The three wrappers below cover the common combinations.
 */
#define _VOP_REG(off, _mask, _shift, _write_mask, _relaxed) \
		{ \
		 .offset = off, \
		 .mask = _mask, \
		 .shift = _shift, \
		 .write_mask = _write_mask, \
		 .relaxed = _relaxed, \
		}
#define VOP_REG(off, _mask, _shift) \
		_VOP_REG(off, _mask, _shift, false, true)
#define VOP_REG_SYNC(off, _mask, _shift) \
		_VOP_REG(off, _mask, _shift, false, false)
#define VOP_REG_MASK_SYNC(off, _mask, _shift) \
		_VOP_REG(off, _mask, _shift, true, false)
/* Pixel formats for full-featured windows: RGB plus NV-style YUV. */
static const uint32_t formats_win_full[] = {
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ABGR8888,
	DRM_FORMAT_RGB888,
	DRM_FORMAT_BGR888,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_BGR565,
	DRM_FORMAT_NV12,
	DRM_FORMAT_NV21,
	DRM_FORMAT_NV16,
	DRM_FORMAT_NV61,
	DRM_FORMAT_NV24,
	DRM_FORMAT_NV42,
};
/* Modifier lists are INVALID-terminated, as DRM requires. */
static const uint64_t format_modifiers_win_full[] = {
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID,
};
static const uint64_t format_modifiers_win_full_afbc[] = {
	ROCKCHIP_AFBC_MOD,
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID,
};
/* "Lite" windows support RGB only (no YUV planes). */
static const uint32_t formats_win_lite[] = {
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ABGR8888,
	DRM_FORMAT_RGB888,
	DRM_FORMAT_BGR888,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_BGR565,
};
static const uint64_t format_modifiers_win_lite[] = {
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID,
};
/*
 * RK3036 VOP register description.  win0 is the full-featured primary
 * plane (scaling in both planes, YUV support); win1 is a lite cursor
 * plane with Y-only scaling.
 */
static const struct vop_scl_regs rk3036_win0_scl = {
	.scale_yrgb_x = VOP_REG(RK3036_WIN0_SCL_FACTOR_YRGB, 0xffff, 0x0),
	.scale_yrgb_y = VOP_REG(RK3036_WIN0_SCL_FACTOR_YRGB, 0xffff, 16),
	.scale_cbcr_x = VOP_REG(RK3036_WIN0_SCL_FACTOR_CBR, 0xffff, 0x0),
	.scale_cbcr_y = VOP_REG(RK3036_WIN0_SCL_FACTOR_CBR, 0xffff, 16),
};
static const struct vop_scl_regs rk3036_win1_scl = {
	.scale_yrgb_x = VOP_REG(RK3036_WIN1_SCL_FACTOR_YRGB, 0xffff, 0x0),
	.scale_yrgb_y = VOP_REG(RK3036_WIN1_SCL_FACTOR_YRGB, 0xffff, 16),
};
static const struct vop_win_phy rk3036_win0_data = {
	.scl = &rk3036_win0_scl,
	.data_formats = formats_win_full,
	.nformats = ARRAY_SIZE(formats_win_full),
	.format_modifiers = format_modifiers_win_full,
	.enable = VOP_REG(RK3036_SYS_CTRL, 0x1, 0),
	.format = VOP_REG(RK3036_SYS_CTRL, 0x7, 3),
	.rb_swap = VOP_REG(RK3036_SYS_CTRL, 0x1, 15),
	.act_info = VOP_REG(RK3036_WIN0_ACT_INFO, 0x1fff1fff, 0),
	.dsp_info = VOP_REG(RK3036_WIN0_DSP_INFO, 0x0fff0fff, 0),
	.dsp_st = VOP_REG(RK3036_WIN0_DSP_ST, 0x1fff1fff, 0),
	.yrgb_mst = VOP_REG(RK3036_WIN0_YRGB_MST, 0xffffffff, 0),
	.uv_mst = VOP_REG(RK3036_WIN0_CBR_MST, 0xffffffff, 0),
	.yrgb_vir = VOP_REG(RK3036_WIN0_VIR, 0xffff, 0),
	.uv_vir = VOP_REG(RK3036_WIN0_VIR, 0x1fff, 16),
	.alpha_mode = VOP_REG(RK3036_DSP_CTRL0, 0x1, 18),
	.alpha_en = VOP_REG(RK3036_ALPHA_CTRL, 0x1, 0),
	.alpha_pre_mul = VOP_REG(RK3036_DSP_CTRL0, 0x1, 29),
};
static const struct vop_win_phy rk3036_win1_data = {
	.scl = &rk3036_win1_scl,
	.data_formats = formats_win_lite,
	.nformats = ARRAY_SIZE(formats_win_lite),
	.format_modifiers = format_modifiers_win_lite,
	.enable = VOP_REG(RK3036_SYS_CTRL, 0x1, 1),
	.format = VOP_REG(RK3036_SYS_CTRL, 0x7, 6),
	.rb_swap = VOP_REG(RK3036_SYS_CTRL, 0x1, 19),
	.act_info = VOP_REG(RK3036_WIN1_ACT_INFO, 0x1fff1fff, 0),
	.dsp_info = VOP_REG(RK3036_WIN1_DSP_INFO, 0x0fff0fff, 0),
	.dsp_st = VOP_REG(RK3036_WIN1_DSP_ST, 0x1fff1fff, 0),
	.yrgb_mst = VOP_REG(RK3036_WIN1_MST, 0xffffffff, 0),
	.yrgb_vir = VOP_REG(RK3036_WIN1_VIR, 0xffff, 0),
	.alpha_mode = VOP_REG(RK3036_DSP_CTRL0, 0x1, 19),
	.alpha_en = VOP_REG(RK3036_ALPHA_CTRL, 0x1, 1),
	.alpha_pre_mul = VOP_REG(RK3036_DSP_CTRL0, 0x1, 29),
};
static const struct vop_win_data rk3036_vop_win_data[] = {
	{ .base = 0x00, .phy = &rk3036_win0_data,
	  .type = DRM_PLANE_TYPE_PRIMARY },
	{ .base = 0x00, .phy = &rk3036_win1_data,
	  .type = DRM_PLANE_TYPE_CURSOR },
};
/* Interrupt bit positions map to these interrupt types, in order. */
static const int rk3036_vop_intrs[] = {
	DSP_HOLD_VALID_INTR,
	FS_INTR,
	LINE_FLAG_INTR,
	BUS_ERROR_INTR,
};
static const struct vop_intr rk3036_intr = {
	.intrs = rk3036_vop_intrs,
	.nintrs = ARRAY_SIZE(rk3036_vop_intrs),
	.line_flag_num[0] = VOP_REG(RK3036_INT_STATUS, 0xfff, 12),
	.status = VOP_REG_SYNC(RK3036_INT_STATUS, 0xf, 0),
	.enable = VOP_REG_SYNC(RK3036_INT_STATUS, 0xf, 4),
	.clear = VOP_REG_SYNC(RK3036_INT_STATUS, 0xf, 8),
};
static const struct vop_modeset rk3036_modeset = {
	.htotal_pw = VOP_REG(RK3036_DSP_HTOTAL_HS_END, 0x1fff1fff, 0),
	.hact_st_end = VOP_REG(RK3036_DSP_HACT_ST_END, 0x1fff1fff, 0),
	.vtotal_pw = VOP_REG(RK3036_DSP_VTOTAL_VS_END, 0x1fff1fff, 0),
	.vact_st_end = VOP_REG(RK3036_DSP_VACT_ST_END, 0x1fff1fff, 0),
};
static const struct vop_output rk3036_output = {
	.pin_pol = VOP_REG(RK3036_DSP_CTRL0, 0xf, 4),
};
static const struct vop_common rk3036_common = {
	.standby = VOP_REG_SYNC(RK3036_SYS_CTRL, 0x1, 30),
	.out_mode = VOP_REG(RK3036_DSP_CTRL0, 0xf, 0),
	.dsp_blank = VOP_REG(RK3036_DSP_CTRL1, 0x1, 24),
	.dither_down_sel = VOP_REG(RK3036_DSP_CTRL0, 0x1, 27),
	.dither_down_en = VOP_REG(RK3036_DSP_CTRL0, 0x1, 11),
	.dither_down_mode = VOP_REG(RK3036_DSP_CTRL0, 0x1, 10),
	.cfg_done = VOP_REG_SYNC(RK3036_REG_CFG_DONE, 0x1, 0),
};
static const struct vop_data rk3036_vop = {
	.intr = &rk3036_intr,
	.common = &rk3036_common,
	.modeset = &rk3036_modeset,
	.output = &rk3036_output,
	.win = rk3036_vop_win_data,
	.win_size = ARRAY_SIZE(rk3036_vop_win_data),
	.max_output = { 1920, 1080 },
};
/*
 * RK3126 VOP: reuses the RK3036 win0/intr/modeset/output/common
 * descriptions; only win1 has different register offsets and no
 * scaler or act_info register.
 */
static const struct vop_win_phy rk3126_win1_data = {
	.data_formats = formats_win_lite,
	.nformats = ARRAY_SIZE(formats_win_lite),
	.format_modifiers = format_modifiers_win_lite,
	.enable = VOP_REG(RK3036_SYS_CTRL, 0x1, 1),
	.format = VOP_REG(RK3036_SYS_CTRL, 0x7, 6),
	.rb_swap = VOP_REG(RK3036_SYS_CTRL, 0x1, 19),
	.dsp_info = VOP_REG(RK3126_WIN1_DSP_INFO, 0x0fff0fff, 0),
	.dsp_st = VOP_REG(RK3126_WIN1_DSP_ST, 0x1fff1fff, 0),
	.yrgb_mst = VOP_REG(RK3126_WIN1_MST, 0xffffffff, 0),
	.yrgb_vir = VOP_REG(RK3036_WIN1_VIR, 0xffff, 0),
	.alpha_mode = VOP_REG(RK3036_DSP_CTRL0, 0x1, 19),
	.alpha_en = VOP_REG(RK3036_ALPHA_CTRL, 0x1, 1),
	.alpha_pre_mul = VOP_REG(RK3036_DSP_CTRL0, 0x1, 29),
};
static const struct vop_win_data rk3126_vop_win_data[] = {
	{ .base = 0x00, .phy = &rk3036_win0_data,
	  .type = DRM_PLANE_TYPE_PRIMARY },
	{ .base = 0x00, .phy = &rk3126_win1_data,
	  .type = DRM_PLANE_TYPE_CURSOR },
};
static const struct vop_data rk3126_vop = {
	.intr = &rk3036_intr,
	.common = &rk3036_common,
	.modeset = &rk3036_modeset,
	.output = &rk3036_output,
	.win = rk3126_vop_win_data,
	.win_size = ARRAY_SIZE(rk3126_vop_win_data),
	.max_output = { 1920, 1080 },
};
/*
 * PX30 VOP control blocks.  The interrupt array maps bit positions to
 * interrupt types; zero entries pad bit positions that carry no
 * interrupt the driver cares about.
 */
static const int px30_vop_intrs[] = {
	FS_INTR,
	0, 0,
	LINE_FLAG_INTR,
	0,
	BUS_ERROR_INTR,
	0, 0,
	DSP_HOLD_VALID_INTR,
};
static const struct vop_intr px30_intr = {
	.intrs = px30_vop_intrs,
	.nintrs = ARRAY_SIZE(px30_vop_intrs),
	.line_flag_num[0] = VOP_REG(PX30_LINE_FLAG, 0xfff, 0),
	.status = VOP_REG_MASK_SYNC(PX30_INTR_STATUS, 0xffff, 0),
	.enable = VOP_REG_MASK_SYNC(PX30_INTR_EN, 0xffff, 0),
	.clear = VOP_REG_MASK_SYNC(PX30_INTR_CLEAR, 0xffff, 0),
};
static const struct vop_common px30_common = {
	.standby = VOP_REG_SYNC(PX30_SYS_CTRL2, 0x1, 1),
	.out_mode = VOP_REG(PX30_DSP_CTRL2, 0xf, 16),
	.dsp_blank = VOP_REG(PX30_DSP_CTRL2, 0x1, 14),
	.dither_down_en = VOP_REG(PX30_DSP_CTRL2, 0x1, 8),
	.dither_down_sel = VOP_REG(PX30_DSP_CTRL2, 0x1, 7),
	.dither_down_mode = VOP_REG(PX30_DSP_CTRL2, 0x1, 6),
	.cfg_done = VOP_REG_SYNC(PX30_REG_CFG_DONE, 0x1, 0),
};
static const struct vop_modeset px30_modeset = {
	.htotal_pw = VOP_REG(PX30_DSP_HTOTAL_HS_END, 0x0fff0fff, 0),
	.hact_st_end = VOP_REG(PX30_DSP_HACT_ST_END, 0x0fff0fff, 0),
	.vtotal_pw = VOP_REG(PX30_DSP_VTOTAL_VS_END, 0x0fff0fff, 0),
	.vact_st_end = VOP_REG(PX30_DSP_VACT_ST_END, 0x0fff0fff, 0),
};
/* PX30 has internal RGB and MIPI-DSI output paths. */
static const struct vop_output px30_output = {
	.rgb_dclk_pol = VOP_REG(PX30_DSP_CTRL0, 0x1, 1),
	.rgb_pin_pol = VOP_REG(PX30_DSP_CTRL0, 0x7, 2),
	.rgb_en = VOP_REG(PX30_DSP_CTRL0, 0x1, 0),
	.mipi_dclk_pol = VOP_REG(PX30_DSP_CTRL0, 0x1, 25),
	.mipi_pin_pol = VOP_REG(PX30_DSP_CTRL0, 0x7, 26),
	.mipi_en = VOP_REG(PX30_DSP_CTRL0, 0x1, 24),
};
/*
 * PX30 window descriptions.  The "big" VOP has three windows
 * (full-featured win0, lite win1, lite win2 with a gate bit); the
 * "lit" VOP exposes only win1 as its primary plane.
 */
static const struct vop_scl_regs px30_win_scl = {
	.scale_yrgb_x = VOP_REG(PX30_WIN0_SCL_FACTOR_YRGB, 0xffff, 0x0),
	.scale_yrgb_y = VOP_REG(PX30_WIN0_SCL_FACTOR_YRGB, 0xffff, 16),
	.scale_cbcr_x = VOP_REG(PX30_WIN0_SCL_FACTOR_CBR, 0xffff, 0x0),
	.scale_cbcr_y = VOP_REG(PX30_WIN0_SCL_FACTOR_CBR, 0xffff, 16),
};
static const struct vop_win_phy px30_win0_data = {
	.scl = &px30_win_scl,
	.data_formats = formats_win_full,
	.nformats = ARRAY_SIZE(formats_win_full),
	.format_modifiers = format_modifiers_win_full,
	.enable = VOP_REG(PX30_WIN0_CTRL0, 0x1, 0),
	.format = VOP_REG(PX30_WIN0_CTRL0, 0x7, 1),
	.rb_swap = VOP_REG(PX30_WIN0_CTRL0, 0x1, 12),
	.uv_swap = VOP_REG(PX30_WIN0_CTRL0, 0x1, 15),
	.act_info = VOP_REG(PX30_WIN0_ACT_INFO, 0xffffffff, 0),
	.dsp_info = VOP_REG(PX30_WIN0_DSP_INFO, 0xffffffff, 0),
	.dsp_st = VOP_REG(PX30_WIN0_DSP_ST, 0xffffffff, 0),
	.yrgb_mst = VOP_REG(PX30_WIN0_YRGB_MST0, 0xffffffff, 0),
	.uv_mst = VOP_REG(PX30_WIN0_CBR_MST0, 0xffffffff, 0),
	.yrgb_vir = VOP_REG(PX30_WIN0_VIR, 0x1fff, 0),
	.uv_vir = VOP_REG(PX30_WIN0_VIR, 0x1fff, 16),
	.alpha_pre_mul = VOP_REG(PX30_WIN0_ALPHA_CTRL, 0x1, 2),
	.alpha_mode = VOP_REG(PX30_WIN0_ALPHA_CTRL, 0x1, 1),
	.alpha_en = VOP_REG(PX30_WIN0_ALPHA_CTRL, 0x1, 0),
};
static const struct vop_win_phy px30_win1_data = {
	.data_formats = formats_win_lite,
	.nformats = ARRAY_SIZE(formats_win_lite),
	.format_modifiers = format_modifiers_win_lite,
	.enable = VOP_REG(PX30_WIN1_CTRL0, 0x1, 0),
	.format = VOP_REG(PX30_WIN1_CTRL0, 0x7, 4),
	.rb_swap = VOP_REG(PX30_WIN1_CTRL0, 0x1, 12),
	.uv_swap = VOP_REG(PX30_WIN1_CTRL0, 0x1, 15),
	.dsp_info = VOP_REG(PX30_WIN1_DSP_INFO, 0xffffffff, 0),
	.dsp_st = VOP_REG(PX30_WIN1_DSP_ST, 0xffffffff, 0),
	.yrgb_mst = VOP_REG(PX30_WIN1_MST, 0xffffffff, 0),
	.yrgb_vir = VOP_REG(PX30_WIN1_VIR, 0x1fff, 0),
	.alpha_pre_mul = VOP_REG(PX30_WIN1_ALPHA_CTRL, 0x1, 2),
	.alpha_mode = VOP_REG(PX30_WIN1_ALPHA_CTRL, 0x1, 1),
	.alpha_en = VOP_REG(PX30_WIN1_ALPHA_CTRL, 0x1, 0),
};
static const struct vop_win_phy px30_win2_data = {
	.data_formats = formats_win_lite,
	.nformats = ARRAY_SIZE(formats_win_lite),
	.format_modifiers = format_modifiers_win_lite,
	.gate = VOP_REG(PX30_WIN2_CTRL0, 0x1, 4),
	.enable = VOP_REG(PX30_WIN2_CTRL0, 0x1, 0),
	.format = VOP_REG(PX30_WIN2_CTRL0, 0x3, 5),
	.rb_swap = VOP_REG(PX30_WIN2_CTRL0, 0x1, 20),
	.dsp_info = VOP_REG(PX30_WIN2_DSP_INFO0, 0x0fff0fff, 0),
	.dsp_st = VOP_REG(PX30_WIN2_DSP_ST0, 0x1fff1fff, 0),
	.yrgb_mst = VOP_REG(PX30_WIN2_MST0, 0xffffffff, 0),
	.yrgb_vir = VOP_REG(PX30_WIN2_VIR0_1, 0x1fff, 0),
	.alpha_pre_mul = VOP_REG(PX30_WIN2_ALPHA_CTRL, 0x1, 2),
	.alpha_mode = VOP_REG(PX30_WIN2_ALPHA_CTRL, 0x1, 1),
	.alpha_en = VOP_REG(PX30_WIN2_ALPHA_CTRL, 0x1, 0),
};
static const struct vop_win_data px30_vop_big_win_data[] = {
	{ .base = 0x00, .phy = &px30_win0_data,
	  .type = DRM_PLANE_TYPE_PRIMARY },
	{ .base = 0x00, .phy = &px30_win1_data,
	  .type = DRM_PLANE_TYPE_OVERLAY },
	{ .base = 0x00, .phy = &px30_win2_data,
	  .type = DRM_PLANE_TYPE_CURSOR },
};
static const struct vop_data px30_vop_big = {
	.version = VOP_VERSION(2, 6),
	.intr = &px30_intr,
	.feature = VOP_FEATURE_INTERNAL_RGB,
	.common = &px30_common,
	.modeset = &px30_modeset,
	.output = &px30_output,
	.win = px30_vop_big_win_data,
	.win_size = ARRAY_SIZE(px30_vop_big_win_data),
	.max_output = { 1920, 1080 },
};
static const struct vop_win_data px30_vop_lit_win_data[] = {
	{ .base = 0x00, .phy = &px30_win1_data,
	  .type = DRM_PLANE_TYPE_PRIMARY },
};
static const struct vop_data px30_vop_lit = {
	.version = VOP_VERSION(2, 5),
	.intr = &px30_intr,
	.feature = VOP_FEATURE_INTERNAL_RGB,
	.common = &px30_common,
	.modeset = &px30_modeset,
	.output = &px30_output,
	.win = px30_vop_lit_win_data,
	.win_size = ARRAY_SIZE(px30_vop_lit_win_data),
	.max_output = { 1920, 1080 },
};
/* RK3066 win0 scaler: plain 16.16-style x/y scale factors, no extension block. */
static const struct vop_scl_regs rk3066_win_scl = {
.scale_yrgb_x = VOP_REG(RK3066_WIN0_SCL_FACTOR_YRGB, 0xffff, 0x0),
.scale_yrgb_y = VOP_REG(RK3066_WIN0_SCL_FACTOR_YRGB, 0xffff, 16),
.scale_cbcr_x = VOP_REG(RK3066_WIN0_SCL_FACTOR_CBR, 0xffff, 0x0),
.scale_cbcr_y = VOP_REG(RK3066_WIN0_SCL_FACTOR_CBR, 0xffff, 16),
};
/* RK3066 win0: full-featured window (scaler + YUV), control bits in SYS_CTRL1. */
static const struct vop_win_phy rk3066_win0_data = {
.scl = &rk3066_win_scl,
.data_formats = formats_win_full,
.nformats = ARRAY_SIZE(formats_win_full),
.format_modifiers = format_modifiers_win_full,
.enable = VOP_REG(RK3066_SYS_CTRL1, 0x1, 0),
.format = VOP_REG(RK3066_SYS_CTRL1, 0x7, 4),
.rb_swap = VOP_REG(RK3066_SYS_CTRL1, 0x1, 19),
.uv_swap = VOP_REG(RK3066_SYS_CTRL1, 0x1, 22),
.act_info = VOP_REG(RK3066_WIN0_ACT_INFO, 0x1fff1fff, 0),
.dsp_info = VOP_REG(RK3066_WIN0_DSP_INFO, 0x0fff0fff, 0),
.dsp_st = VOP_REG(RK3066_WIN0_DSP_ST, 0x1fff1fff, 0),
.yrgb_mst = VOP_REG(RK3066_WIN0_YRGB_MST0, 0xffffffff, 0),
.uv_mst = VOP_REG(RK3066_WIN0_CBR_MST0, 0xffffffff, 0),
.yrgb_vir = VOP_REG(RK3066_WIN0_VIR, 0xffff, 0),
.uv_vir = VOP_REG(RK3066_WIN0_VIR, 0x1fff, 16),
.alpha_mode = VOP_REG(RK3066_DSP_CTRL0, 0x1, 21),
.alpha_en = VOP_REG(RK3066_BLEND_CTRL, 0x1, 0),
};
/* RK3066 win1: like win0 but without a scaler. */
static const struct vop_win_phy rk3066_win1_data = {
.data_formats = formats_win_full,
.nformats = ARRAY_SIZE(formats_win_full),
.format_modifiers = format_modifiers_win_full,
.enable = VOP_REG(RK3066_SYS_CTRL1, 0x1, 1),
.format = VOP_REG(RK3066_SYS_CTRL1, 0x7, 7),
.rb_swap = VOP_REG(RK3066_SYS_CTRL1, 0x1, 23),
.uv_swap = VOP_REG(RK3066_SYS_CTRL1, 0x1, 26),
.act_info = VOP_REG(RK3066_WIN1_ACT_INFO, 0x1fff1fff, 0),
.dsp_info = VOP_REG(RK3066_WIN1_DSP_INFO, 0x0fff0fff, 0),
.dsp_st = VOP_REG(RK3066_WIN1_DSP_ST, 0x1fff1fff, 0),
.yrgb_mst = VOP_REG(RK3066_WIN1_YRGB_MST, 0xffffffff, 0),
.uv_mst = VOP_REG(RK3066_WIN1_CBR_MST, 0xffffffff, 0),
.yrgb_vir = VOP_REG(RK3066_WIN1_VIR, 0xffff, 0),
.uv_vir = VOP_REG(RK3066_WIN1_VIR, 0x1fff, 16),
.alpha_mode = VOP_REG(RK3066_DSP_CTRL0, 0x1, 22),
.alpha_en = VOP_REG(RK3066_BLEND_CTRL, 0x1, 1),
};
/* RK3066 win2: "lite" RGB-only window, no act_info / UV plane. */
static const struct vop_win_phy rk3066_win2_data = {
.data_formats = formats_win_lite,
.nformats = ARRAY_SIZE(formats_win_lite),
.format_modifiers = format_modifiers_win_lite,
.enable = VOP_REG(RK3066_SYS_CTRL1, 0x1, 2),
.format = VOP_REG(RK3066_SYS_CTRL1, 0x7, 10),
.rb_swap = VOP_REG(RK3066_SYS_CTRL1, 0x1, 27),
.dsp_info = VOP_REG(RK3066_WIN2_DSP_INFO, 0x0fff0fff, 0),
.dsp_st = VOP_REG(RK3066_WIN2_DSP_ST, 0x1fff1fff, 0),
.yrgb_mst = VOP_REG(RK3066_WIN2_MST, 0xffffffff, 0),
.yrgb_vir = VOP_REG(RK3066_WIN2_VIR, 0xffff, 0),
.alpha_mode = VOP_REG(RK3066_DSP_CTRL0, 0x1, 23),
.alpha_en = VOP_REG(RK3066_BLEND_CTRL, 0x1, 2),
};
/* RK3066 display timing registers (h/v total, sync, active area). */
static const struct vop_modeset rk3066_modeset = {
.htotal_pw = VOP_REG(RK3066_DSP_HTOTAL_HS_END, 0x1fff1fff, 0),
.hact_st_end = VOP_REG(RK3066_DSP_HACT_ST_END, 0x1fff1fff, 0),
.vtotal_pw = VOP_REG(RK3066_DSP_VTOTAL_VS_END, 0x1fff1fff, 0),
.vact_st_end = VOP_REG(RK3066_DSP_VACT_ST_END, 0x1fff1fff, 0),
};
/* RK3066 output: only a single pin-polarity field, no per-encoder enables. */
static const struct vop_output rk3066_output = {
.pin_pol = VOP_REG(RK3066_DSP_CTRL0, 0x7, 4),
};
/* RK3066 common control bits (standby, dither, blanking, LUT enable). */
static const struct vop_common rk3066_common = {
.standby = VOP_REG(RK3066_SYS_CTRL0, 0x1, 1),
.out_mode = VOP_REG(RK3066_DSP_CTRL0, 0xf, 0),
.cfg_done = VOP_REG(RK3066_REG_CFG_DONE, 0x1, 0),
.dither_down_en = VOP_REG(RK3066_DSP_CTRL0, 0x1, 11),
.dither_down_mode = VOP_REG(RK3066_DSP_CTRL0, 0x1, 10),
.dsp_blank = VOP_REG(RK3066_DSP_CTRL1, 0x1, 24),
.dither_up = VOP_REG(RK3066_DSP_CTRL0, 0x1, 9),
.dsp_lut_en = VOP_REG(RK3066_SYS_CTRL1, 0x1, 31),
.data_blank = VOP_REG(RK3066_DSP_CTRL1, 0x1, 25),
};
/* RK3066 plane layout: win0 primary, win1 overlay, win2 cursor. */
static const struct vop_win_data rk3066_vop_win_data[] = {
{ .base = 0x00, .phy = &rk3066_win0_data,
.type = DRM_PLANE_TYPE_PRIMARY },
{ .base = 0x00, .phy = &rk3066_win1_data,
.type = DRM_PLANE_TYPE_OVERLAY },
{ .base = 0x00, .phy = &rk3066_win2_data,
.type = DRM_PLANE_TYPE_CURSOR },
};
/* Interrupt sources in hardware bit order for the RK3066 INT_STATUS register. */
static const int rk3066_vop_intrs[] = {
/*
* hs_start interrupt fires at frame-start, so serves
* the same purpose as dsp_hold in the driver.
*/
DSP_HOLD_VALID_INTR,
FS_INTR,
LINE_FLAG_INTR,
BUS_ERROR_INTR,
};
/* RK3066 interrupt block: status/enable/clear share one INT_STATUS register. */
static const struct vop_intr rk3066_intr = {
.intrs = rk3066_vop_intrs,
.nintrs = ARRAY_SIZE(rk3066_vop_intrs),
.line_flag_num[0] = VOP_REG(RK3066_INT_STATUS, 0xfff, 12),
.status = VOP_REG(RK3066_INT_STATUS, 0xf, 0),
.enable = VOP_REG(RK3066_INT_STATUS, 0xf, 4),
.clear = VOP_REG(RK3066_INT_STATUS, 0xf, 8),
};
/* RK3066 VOP, IP version 2.1, up to 1080p output. */
static const struct vop_data rk3066_vop = {
.version = VOP_VERSION(2, 1),
.intr = &rk3066_intr,
.common = &rk3066_common,
.modeset = &rk3066_modeset,
.output = &rk3066_output,
.win = rk3066_vop_win_data,
.win_size = ARRAY_SIZE(rk3066_vop_win_data),
.max_output = { 1920, 1080 },
};
/* RK3188 win0 scaler: plain x/y scale factors, no extension block. */
static const struct vop_scl_regs rk3188_win_scl = {
.scale_yrgb_x = VOP_REG(RK3188_WIN0_SCL_FACTOR_YRGB, 0xffff, 0x0),
.scale_yrgb_y = VOP_REG(RK3188_WIN0_SCL_FACTOR_YRGB, 0xffff, 16),
.scale_cbcr_x = VOP_REG(RK3188_WIN0_SCL_FACTOR_CBR, 0xffff, 0x0),
.scale_cbcr_y = VOP_REG(RK3188_WIN0_SCL_FACTOR_CBR, 0xffff, 16),
};
/* RK3188 win0: full-featured window (scaler + YUV), control bits in SYS_CTRL. */
static const struct vop_win_phy rk3188_win0_data = {
.scl = &rk3188_win_scl,
.data_formats = formats_win_full,
.nformats = ARRAY_SIZE(formats_win_full),
.format_modifiers = format_modifiers_win_full,
.enable = VOP_REG(RK3188_SYS_CTRL, 0x1, 0),
.format = VOP_REG(RK3188_SYS_CTRL, 0x7, 3),
.rb_swap = VOP_REG(RK3188_SYS_CTRL, 0x1, 15),
.uv_swap = VOP_REG(RK3188_SYS_CTRL, 0x1, 18),
.act_info = VOP_REG(RK3188_WIN0_ACT_INFO, 0x1fff1fff, 0),
.dsp_info = VOP_REG(RK3188_WIN0_DSP_INFO, 0x0fff0fff, 0),
.dsp_st = VOP_REG(RK3188_WIN0_DSP_ST, 0x1fff1fff, 0),
.yrgb_mst = VOP_REG(RK3188_WIN0_YRGB_MST0, 0xffffffff, 0),
.uv_mst = VOP_REG(RK3188_WIN0_CBR_MST0, 0xffffffff, 0),
.yrgb_vir = VOP_REG(RK3188_WIN_VIR, 0x1fff, 0),
.alpha_mode = VOP_REG(RK3188_DSP_CTRL0, 0x1, 18),
.alpha_en = VOP_REG(RK3188_ALPHA_CTRL, 0x1, 0),
.alpha_pre_mul = VOP_REG(RK3188_DSP_CTRL0, 0x1, 29),
};
/* RK3188 win1: "lite" RGB-only window sharing the WIN_VIR register with win0. */
static const struct vop_win_phy rk3188_win1_data = {
.data_formats = formats_win_lite,
.nformats = ARRAY_SIZE(formats_win_lite),
.format_modifiers = format_modifiers_win_lite,
.enable = VOP_REG(RK3188_SYS_CTRL, 0x1, 1),
.format = VOP_REG(RK3188_SYS_CTRL, 0x7, 6),
.rb_swap = VOP_REG(RK3188_SYS_CTRL, 0x1, 19),
/* no act_info on window1 */
.dsp_info = VOP_REG(RK3188_WIN1_DSP_INFO, 0x07ff07ff, 0),
.dsp_st = VOP_REG(RK3188_WIN1_DSP_ST, 0x0fff0fff, 0),
.yrgb_mst = VOP_REG(RK3188_WIN1_MST, 0xffffffff, 0),
.yrgb_vir = VOP_REG(RK3188_WIN_VIR, 0x1fff, 16),
.alpha_mode = VOP_REG(RK3188_DSP_CTRL0, 0x1, 19),
.alpha_en = VOP_REG(RK3188_ALPHA_CTRL, 0x1, 1),
.alpha_pre_mul = VOP_REG(RK3188_DSP_CTRL0, 0x1, 29),
};
/* RK3188 display timing registers. */
static const struct vop_modeset rk3188_modeset = {
.htotal_pw = VOP_REG(RK3188_DSP_HTOTAL_HS_END, 0x0fff0fff, 0),
.hact_st_end = VOP_REG(RK3188_DSP_HACT_ST_END, 0x0fff0fff, 0),
.vtotal_pw = VOP_REG(RK3188_DSP_VTOTAL_VS_END, 0x0fff0fff, 0),
.vact_st_end = VOP_REG(RK3188_DSP_VACT_ST_END, 0x0fff0fff, 0),
};
/* RK3188 output: a single 4-bit pin-polarity field. */
static const struct vop_output rk3188_output = {
.pin_pol = VOP_REG(RK3188_DSP_CTRL0, 0xf, 4),
};
/* RK3188 common control bits. */
static const struct vop_common rk3188_common = {
.gate_en = VOP_REG(RK3188_SYS_CTRL, 0x1, 31),
.standby = VOP_REG(RK3188_SYS_CTRL, 0x1, 30),
.out_mode = VOP_REG(RK3188_DSP_CTRL0, 0xf, 0),
.cfg_done = VOP_REG(RK3188_REG_CFG_DONE, 0x1, 0),
.dither_down_sel = VOP_REG(RK3188_DSP_CTRL0, 0x1, 27),
.dither_down_en = VOP_REG(RK3188_DSP_CTRL0, 0x1, 11),
.dither_down_mode = VOP_REG(RK3188_DSP_CTRL0, 0x1, 10),
.dsp_blank = VOP_REG(RK3188_DSP_CTRL1, 0x1, 24),
.dither_up = VOP_REG(RK3188_DSP_CTRL0, 0x1, 9),
.dsp_lut_en = VOP_REG(RK3188_SYS_CTRL, 0x1, 28),
.data_blank = VOP_REG(RK3188_DSP_CTRL1, 0x1, 25),
};
/* RK3188 plane layout: win0 primary, win1 cursor (only two windows). */
static const struct vop_win_data rk3188_vop_win_data[] = {
{ .base = 0x00, .phy = &rk3188_win0_data,
.type = DRM_PLANE_TYPE_PRIMARY },
{ .base = 0x00, .phy = &rk3188_win1_data,
.type = DRM_PLANE_TYPE_CURSOR },
};
/* Interrupt sources in hardware bit order for the RK3188 INT_STATUS register. */
static const int rk3188_vop_intrs[] = {
/*
* hs_start interrupt fires at frame-start, so serves
* the same purpose as dsp_hold in the driver.
*/
DSP_HOLD_VALID_INTR,
FS_INTR,
LINE_FLAG_INTR,
BUS_ERROR_INTR,
};
/* RK3188 interrupt block: status/enable/clear share one INT_STATUS register. */
static const struct vop_intr rk3188_vop_intr = {
.intrs = rk3188_vop_intrs,
.nintrs = ARRAY_SIZE(rk3188_vop_intrs),
.line_flag_num[0] = VOP_REG(RK3188_INT_STATUS, 0xfff, 12),
.status = VOP_REG(RK3188_INT_STATUS, 0xf, 0),
.enable = VOP_REG(RK3188_INT_STATUS, 0xf, 4),
.clear = VOP_REG(RK3188_INT_STATUS, 0xf, 8),
};
/*
 * NOTE(review): unlike the other SoC entries, no .version is set here, so the
 * VOP version reads back as 0 — confirm this is intentional for RK3188.
 */
static const struct vop_data rk3188_vop = {
.intr = &rk3188_vop_intr,
.common = &rk3188_common,
.modeset = &rk3188_modeset,
.output = &rk3188_output,
.win = rk3188_vop_win_data,
.win_size = ARRAY_SIZE(rk3188_vop_win_data),
.feature = VOP_FEATURE_INTERNAL_RGB,
.max_output = { 2048, 1536 },
};
/* RK3288 full-window scaler extension bits, all packed into WIN0_CTRL1. */
static const struct vop_scl_extension rk3288_win_full_scl_ext = {
.cbcr_vsd_mode = VOP_REG(RK3288_WIN0_CTRL1, 0x1, 31),
.cbcr_vsu_mode = VOP_REG(RK3288_WIN0_CTRL1, 0x1, 30),
.cbcr_hsd_mode = VOP_REG(RK3288_WIN0_CTRL1, 0x3, 28),
.cbcr_ver_scl_mode = VOP_REG(RK3288_WIN0_CTRL1, 0x3, 26),
.cbcr_hor_scl_mode = VOP_REG(RK3288_WIN0_CTRL1, 0x3, 24),
.yrgb_vsd_mode = VOP_REG(RK3288_WIN0_CTRL1, 0x1, 23),
.yrgb_vsu_mode = VOP_REG(RK3288_WIN0_CTRL1, 0x1, 22),
.yrgb_hsd_mode = VOP_REG(RK3288_WIN0_CTRL1, 0x3, 20),
.yrgb_ver_scl_mode = VOP_REG(RK3288_WIN0_CTRL1, 0x3, 18),
.yrgb_hor_scl_mode = VOP_REG(RK3288_WIN0_CTRL1, 0x3, 16),
.line_load_mode = VOP_REG(RK3288_WIN0_CTRL1, 0x1, 15),
.cbcr_axi_gather_num = VOP_REG(RK3288_WIN0_CTRL1, 0x7, 12),
.yrgb_axi_gather_num = VOP_REG(RK3288_WIN0_CTRL1, 0xf, 8),
.vsd_cbcr_gt2 = VOP_REG(RK3288_WIN0_CTRL1, 0x1, 7),
.vsd_cbcr_gt4 = VOP_REG(RK3288_WIN0_CTRL1, 0x1, 6),
.vsd_yrgb_gt2 = VOP_REG(RK3288_WIN0_CTRL1, 0x1, 5),
.vsd_yrgb_gt4 = VOP_REG(RK3288_WIN0_CTRL1, 0x1, 4),
.bic_coe_sel = VOP_REG(RK3288_WIN0_CTRL1, 0x3, 2),
.cbcr_axi_gather_en = VOP_REG(RK3288_WIN0_CTRL1, 0x1, 1),
.yrgb_axi_gather_en = VOP_REG(RK3288_WIN0_CTRL1, 0x1, 0),
.lb_mode = VOP_REG(RK3288_WIN0_CTRL0, 0x7, 5),
};
/* RK3288 full-window scaler: base factors plus the extension block above. */
static const struct vop_scl_regs rk3288_win_full_scl = {
.ext = &rk3288_win_full_scl_ext,
.scale_yrgb_x = VOP_REG(RK3288_WIN0_SCL_FACTOR_YRGB, 0xffff, 0x0),
.scale_yrgb_y = VOP_REG(RK3288_WIN0_SCL_FACTOR_YRGB, 0xffff, 16),
.scale_cbcr_x = VOP_REG(RK3288_WIN0_SCL_FACTOR_CBR, 0xffff, 0x0),
.scale_cbcr_y = VOP_REG(RK3288_WIN0_SCL_FACTOR_CBR, 0xffff, 16),
};
/*
 * RK3288 win0/win1: full-featured windows sharing a register layout; win1 is
 * reached via the per-window .base offset in the win_data table below.
 */
static const struct vop_win_phy rk3288_win01_data = {
.scl = &rk3288_win_full_scl,
.data_formats = formats_win_full,
.nformats = ARRAY_SIZE(formats_win_full),
.format_modifiers = format_modifiers_win_full,
.enable = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 0),
.format = VOP_REG(RK3288_WIN0_CTRL0, 0x7, 1),
.rb_swap = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 12),
.uv_swap = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 15),
.act_info = VOP_REG(RK3288_WIN0_ACT_INFO, 0x1fff1fff, 0),
.dsp_info = VOP_REG(RK3288_WIN0_DSP_INFO, 0x0fff0fff, 0),
.dsp_st = VOP_REG(RK3288_WIN0_DSP_ST, 0x1fff1fff, 0),
.yrgb_mst = VOP_REG(RK3288_WIN0_YRGB_MST, 0xffffffff, 0),
.uv_mst = VOP_REG(RK3288_WIN0_CBR_MST, 0xffffffff, 0),
.yrgb_vir = VOP_REG(RK3288_WIN0_VIR, 0x3fff, 0),
.uv_vir = VOP_REG(RK3288_WIN0_VIR, 0x3fff, 16),
.src_alpha_ctl = VOP_REG(RK3288_WIN0_SRC_ALPHA_CTRL, 0xff, 0),
.dst_alpha_ctl = VOP_REG(RK3288_WIN0_DST_ALPHA_CTRL, 0xff, 0),
.channel = VOP_REG(RK3288_WIN0_CTRL2, 0xff, 0),
};
/* RK3288 win2/win3: "lite" RGB-only windows sharing a register layout. */
static const struct vop_win_phy rk3288_win23_data = {
.data_formats = formats_win_lite,
.nformats = ARRAY_SIZE(formats_win_lite),
.format_modifiers = format_modifiers_win_lite,
.enable = VOP_REG(RK3288_WIN2_CTRL0, 0x1, 4),
.gate = VOP_REG(RK3288_WIN2_CTRL0, 0x1, 0),
.format = VOP_REG(RK3288_WIN2_CTRL0, 0x7, 1),
.rb_swap = VOP_REG(RK3288_WIN2_CTRL0, 0x1, 12),
.dsp_info = VOP_REG(RK3288_WIN2_DSP_INFO0, 0x0fff0fff, 0),
.dsp_st = VOP_REG(RK3288_WIN2_DSP_ST0, 0x1fff1fff, 0),
.yrgb_mst = VOP_REG(RK3288_WIN2_MST0, 0xffffffff, 0),
.yrgb_vir = VOP_REG(RK3288_WIN2_VIR0_1, 0x1fff, 0),
.src_alpha_ctl = VOP_REG(RK3288_WIN2_SRC_ALPHA_CTRL, 0xff, 0),
.dst_alpha_ctl = VOP_REG(RK3288_WIN2_DST_ALPHA_CTRL, 0xff, 0),
};
/* RK3288 display timing registers, including post-scaler active area. */
static const struct vop_modeset rk3288_modeset = {
.htotal_pw = VOP_REG(RK3288_DSP_HTOTAL_HS_END, 0x1fff1fff, 0),
.hact_st_end = VOP_REG(RK3288_DSP_HACT_ST_END, 0x1fff1fff, 0),
.vtotal_pw = VOP_REG(RK3288_DSP_VTOTAL_VS_END, 0x1fff1fff, 0),
.vact_st_end = VOP_REG(RK3288_DSP_VACT_ST_END, 0x1fff1fff, 0),
.hpost_st_end = VOP_REG(RK3288_POST_DSP_HACT_INFO, 0x1fff1fff, 0),
.vpost_st_end = VOP_REG(RK3288_POST_DSP_VACT_INFO, 0x1fff1fff, 0),
};
/* RK3288 output routing: per-encoder enable bits plus pin polarity. */
static const struct vop_output rk3288_output = {
.pin_pol = VOP_REG(RK3288_DSP_CTRL0, 0xf, 4),
.rgb_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 12),
.hdmi_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 13),
.edp_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 14),
.mipi_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 15),
};
/* RK3288 common control bits; standby and cfg_done take effect synchronously. */
static const struct vop_common rk3288_common = {
.standby = VOP_REG_SYNC(RK3288_SYS_CTRL, 0x1, 22),
.gate_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 23),
.mmu_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 20),
.dither_down_sel = VOP_REG(RK3288_DSP_CTRL1, 0x1, 4),
.dither_down_mode = VOP_REG(RK3288_DSP_CTRL1, 0x1, 3),
.dither_down_en = VOP_REG(RK3288_DSP_CTRL1, 0x1, 2),
.pre_dither_down = VOP_REG(RK3288_DSP_CTRL1, 0x1, 1),
.dither_up = VOP_REG(RK3288_DSP_CTRL1, 0x1, 6),
.dsp_lut_en = VOP_REG(RK3288_DSP_CTRL1, 0x1, 0),
.data_blank = VOP_REG(RK3288_DSP_CTRL0, 0x1, 19),
.dsp_blank = VOP_REG(RK3288_DSP_CTRL0, 0x3, 18),
.out_mode = VOP_REG(RK3288_DSP_CTRL0, 0xf, 0),
.cfg_done = VOP_REG_SYNC(RK3288_REG_CFG_DONE, 0x1, 0),
};
/*
 * Note: rk3288 has a dedicated 'cursor' window, however, that window requires
 * special support to get alpha blending working. For now, just use overlay
 * window 3 for the drm cursor.
 *
 */
static const struct vop_win_data rk3288_vop_win_data[] = {
{ .base = 0x00, .phy = &rk3288_win01_data,
.type = DRM_PLANE_TYPE_PRIMARY },
{ .base = 0x40, .phy = &rk3288_win01_data,
.type = DRM_PLANE_TYPE_OVERLAY },
{ .base = 0x00, .phy = &rk3288_win23_data,
.type = DRM_PLANE_TYPE_OVERLAY },
{ .base = 0x50, .phy = &rk3288_win23_data,
.type = DRM_PLANE_TYPE_CURSOR },
};
/* Interrupt sources in hardware bit order for RK3288 INTR_CTRL0. */
static const int rk3288_vop_intrs[] = {
DSP_HOLD_VALID_INTR,
FS_INTR,
LINE_FLAG_INTR,
BUS_ERROR_INTR,
};
/* RK3288 interrupt block: status/enable/clear share INTR_CTRL0. */
static const struct vop_intr rk3288_vop_intr = {
.intrs = rk3288_vop_intrs,
.nintrs = ARRAY_SIZE(rk3288_vop_intrs),
.line_flag_num[0] = VOP_REG(RK3288_INTR_CTRL0, 0x1fff, 12),
.status = VOP_REG(RK3288_INTR_CTRL0, 0xf, 0),
.enable = VOP_REG(RK3288_INTR_CTRL0, 0xf, 4),
.clear = VOP_REG(RK3288_INTR_CTRL0, 0xf, 8),
};
/* RK3288 VOP, IP version 3.1, RGB10 output, 1024-entry gamma LUT. */
static const struct vop_data rk3288_vop = {
.version = VOP_VERSION(3, 1),
.feature = VOP_FEATURE_OUTPUT_RGB10,
.intr = &rk3288_vop_intr,
.common = &rk3288_common,
.modeset = &rk3288_modeset,
.output = &rk3288_output,
.win = rk3288_vop_win_data,
.win_size = ARRAY_SIZE(rk3288_vop_win_data),
.lut_size = 1024,
/*
* This is the maximum resolution for the VOPB, the VOPL can only do
* 2560x1600, but we can't distinguish them as they have the same
* compatible.
*/
.max_output = { 3840, 2160 },
};
/*
 * Interrupt sources in hardware bit order for the RK3368-style interrupt
 * registers; zeros are reserved/unused bits.
 */
static const int rk3368_vop_intrs[] = {
FS_INTR,
0, 0,
LINE_FLAG_INTR,
0,
BUS_ERROR_INTR,
0, 0, 0, 0, 0, 0, 0,
DSP_HOLD_VALID_INTR,
};
/* RK3368 interrupt block: separate status/enable/clear registers, write-masked. */
static const struct vop_intr rk3368_vop_intr = {
.intrs = rk3368_vop_intrs,
.nintrs = ARRAY_SIZE(rk3368_vop_intrs),
.line_flag_num[0] = VOP_REG(RK3368_LINE_FLAG, 0xffff, 0),
.line_flag_num[1] = VOP_REG(RK3368_LINE_FLAG, 0xffff, 16),
.status = VOP_REG_MASK_SYNC(RK3368_INTR_STATUS, 0x3fff, 0),
.enable = VOP_REG_MASK_SYNC(RK3368_INTR_EN, 0x3fff, 0),
.clear = VOP_REG_MASK_SYNC(RK3368_INTR_CLEAR, 0x3fff, 0),
};
/* RK3368 win0/win1: full windows, adds x/y mirroring over the RK3288 layout. */
static const struct vop_win_phy rk3368_win01_data = {
.scl = &rk3288_win_full_scl,
.data_formats = formats_win_full,
.nformats = ARRAY_SIZE(formats_win_full),
.format_modifiers = format_modifiers_win_full,
.enable = VOP_REG(RK3368_WIN0_CTRL0, 0x1, 0),
.format = VOP_REG(RK3368_WIN0_CTRL0, 0x7, 1),
.rb_swap = VOP_REG(RK3368_WIN0_CTRL0, 0x1, 12),
.uv_swap = VOP_REG(RK3368_WIN0_CTRL0, 0x1, 15),
.x_mir_en = VOP_REG(RK3368_WIN0_CTRL0, 0x1, 21),
.y_mir_en = VOP_REG(RK3368_WIN0_CTRL0, 0x1, 22),
.act_info = VOP_REG(RK3368_WIN0_ACT_INFO, 0x1fff1fff, 0),
.dsp_info = VOP_REG(RK3368_WIN0_DSP_INFO, 0x0fff0fff, 0),
.dsp_st = VOP_REG(RK3368_WIN0_DSP_ST, 0x1fff1fff, 0),
.yrgb_mst = VOP_REG(RK3368_WIN0_YRGB_MST, 0xffffffff, 0),
.uv_mst = VOP_REG(RK3368_WIN0_CBR_MST, 0xffffffff, 0),
.yrgb_vir = VOP_REG(RK3368_WIN0_VIR, 0x3fff, 0),
.uv_vir = VOP_REG(RK3368_WIN0_VIR, 0x3fff, 16),
.src_alpha_ctl = VOP_REG(RK3368_WIN0_SRC_ALPHA_CTRL, 0xff, 0),
.dst_alpha_ctl = VOP_REG(RK3368_WIN0_DST_ALPHA_CTRL, 0xff, 0),
.channel = VOP_REG(RK3368_WIN0_CTRL2, 0xff, 0),
};
/* RK3368 win2/win3: "lite" windows with vertical mirroring. */
static const struct vop_win_phy rk3368_win23_data = {
.data_formats = formats_win_lite,
.nformats = ARRAY_SIZE(formats_win_lite),
.format_modifiers = format_modifiers_win_lite,
.gate = VOP_REG(RK3368_WIN2_CTRL0, 0x1, 0),
.enable = VOP_REG(RK3368_WIN2_CTRL0, 0x1, 4),
.format = VOP_REG(RK3368_WIN2_CTRL0, 0x3, 5),
.rb_swap = VOP_REG(RK3368_WIN2_CTRL0, 0x1, 20),
.y_mir_en = VOP_REG(RK3368_WIN2_CTRL1, 0x1, 15),
.dsp_info = VOP_REG(RK3368_WIN2_DSP_INFO0, 0x0fff0fff, 0),
.dsp_st = VOP_REG(RK3368_WIN2_DSP_ST0, 0x1fff1fff, 0),
.yrgb_mst = VOP_REG(RK3368_WIN2_MST0, 0xffffffff, 0),
.yrgb_vir = VOP_REG(RK3368_WIN2_VIR0_1, 0x1fff, 0),
.src_alpha_ctl = VOP_REG(RK3368_WIN2_SRC_ALPHA_CTRL, 0xff, 0),
.dst_alpha_ctl = VOP_REG(RK3368_WIN2_DST_ALPHA_CTRL, 0xff, 0),
};
/* RK3368 plane layout: two full windows, two lite windows. */
static const struct vop_win_data rk3368_vop_win_data[] = {
{ .base = 0x00, .phy = &rk3368_win01_data,
.type = DRM_PLANE_TYPE_PRIMARY },
{ .base = 0x40, .phy = &rk3368_win01_data,
.type = DRM_PLANE_TYPE_OVERLAY },
{ .base = 0x00, .phy = &rk3368_win23_data,
.type = DRM_PLANE_TYPE_OVERLAY },
{ .base = 0x50, .phy = &rk3368_win23_data,
.type = DRM_PLANE_TYPE_CURSOR },
};
/* RK3368 output: per-encoder dclk/pin polarity; enables reuse RK3288 bits. */
static const struct vop_output rk3368_output = {
.rgb_dclk_pol = VOP_REG(RK3368_DSP_CTRL1, 0x1, 19),
.hdmi_dclk_pol = VOP_REG(RK3368_DSP_CTRL1, 0x1, 23),
.edp_dclk_pol = VOP_REG(RK3368_DSP_CTRL1, 0x1, 27),
.mipi_dclk_pol = VOP_REG(RK3368_DSP_CTRL1, 0x1, 31),
.rgb_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0x7, 16),
.hdmi_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0x7, 20),
.edp_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0x7, 24),
.mipi_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0x7, 28),
.rgb_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 12),
.hdmi_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 13),
.edp_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 14),
.mipi_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 15),
};
/* RK3368 misc: global "register done" enable. */
static const struct vop_misc rk3368_misc = {
.global_regdone_en = VOP_REG(RK3368_SYS_CTRL, 0x1, 11),
};
/* RK3368 VOP, IP version 3.2, reuses RK3288 common/modeset blocks. */
static const struct vop_data rk3368_vop = {
.version = VOP_VERSION(3, 2),
.intr = &rk3368_vop_intr,
.common = &rk3288_common,
.modeset = &rk3288_modeset,
.output = &rk3368_output,
.misc = &rk3368_misc,
.win = rk3368_vop_win_data,
.win_size = ARRAY_SIZE(rk3368_vop_win_data),
.max_output = { 4096, 2160 },
};
/* RK3366 interrupt block: same sources as RK3368, different register offsets. */
static const struct vop_intr rk3366_vop_intr = {
.intrs = rk3368_vop_intrs,
.nintrs = ARRAY_SIZE(rk3368_vop_intrs),
.line_flag_num[0] = VOP_REG(RK3366_LINE_FLAG, 0xffff, 0),
.line_flag_num[1] = VOP_REG(RK3366_LINE_FLAG, 0xffff, 16),
.status = VOP_REG_MASK_SYNC(RK3366_INTR_STATUS0, 0xffff, 0),
.enable = VOP_REG_MASK_SYNC(RK3366_INTR_EN0, 0xffff, 0),
.clear = VOP_REG_MASK_SYNC(RK3366_INTR_CLEAR0, 0xffff, 0),
};
/* RK3366 VOP, IP version 3.4 — RK3368 layout with RK3366 interrupt offsets. */
static const struct vop_data rk3366_vop = {
.version = VOP_VERSION(3, 4),
.intr = &rk3366_vop_intr,
.common = &rk3288_common,
.modeset = &rk3288_modeset,
.output = &rk3368_output,
.misc = &rk3368_misc,
.win = rk3368_vop_win_data,
.win_size = ARRAY_SIZE(rk3368_vop_win_data),
.max_output = { 4096, 2160 },
};
/* RK3399 output: RK3368 routing plus a DisplayPort encoder and dual-MIPI. */
static const struct vop_output rk3399_output = {
.dp_dclk_pol = VOP_REG(RK3399_DSP_CTRL1, 0x1, 19),
.rgb_dclk_pol = VOP_REG(RK3368_DSP_CTRL1, 0x1, 19),
.hdmi_dclk_pol = VOP_REG(RK3368_DSP_CTRL1, 0x1, 23),
.edp_dclk_pol = VOP_REG(RK3368_DSP_CTRL1, 0x1, 27),
.mipi_dclk_pol = VOP_REG(RK3368_DSP_CTRL1, 0x1, 31),
.dp_pin_pol = VOP_REG(RK3399_DSP_CTRL1, 0x7, 16),
.rgb_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0x7, 16),
.hdmi_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0x7, 20),
.edp_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0x7, 24),
.mipi_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0x7, 28),
.dp_en = VOP_REG(RK3399_SYS_CTRL, 0x1, 11),
.rgb_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 12),
.hdmi_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 13),
.edp_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 14),
.mipi_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 15),
.mipi_dual_channel_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 3),
};
/* RK3399 common control bits; adds double-buffered gamma LUT handling. */
static const struct vop_common rk3399_common = {
.standby = VOP_REG_SYNC(RK3399_SYS_CTRL, 0x1, 22),
.gate_en = VOP_REG(RK3399_SYS_CTRL, 0x1, 23),
.mmu_en = VOP_REG(RK3399_SYS_CTRL, 0x1, 20),
.dither_down_sel = VOP_REG(RK3399_DSP_CTRL1, 0x1, 4),
.dither_down_mode = VOP_REG(RK3399_DSP_CTRL1, 0x1, 3),
.dither_down_en = VOP_REG(RK3399_DSP_CTRL1, 0x1, 2),
.pre_dither_down = VOP_REG(RK3399_DSP_CTRL1, 0x1, 1),
.dither_up = VOP_REG(RK3399_DSP_CTRL1, 0x1, 6),
.dsp_lut_en = VOP_REG(RK3399_DSP_CTRL1, 0x1, 0),
.update_gamma_lut = VOP_REG(RK3399_DSP_CTRL1, 0x1, 7),
.lut_buffer_index = VOP_REG(RK3399_DBG_POST_REG1, 0x1, 1),
.data_blank = VOP_REG(RK3399_DSP_CTRL0, 0x1, 19),
.dsp_blank = VOP_REG(RK3399_DSP_CTRL0, 0x3, 18),
.out_mode = VOP_REG(RK3399_DSP_CTRL0, 0xf, 0),
.cfg_done = VOP_REG_SYNC(RK3399_REG_CFG_DONE, 0x1, 0),
};
/*
 * Per-window YUV->RGB coefficient registers for win0/win1: a 3x3 matrix (nine
 * 16-bit entries) followed by three 32-bit offset words.
 */
static const struct vop_yuv2yuv_phy rk3399_yuv2yuv_win01_data = {
.y2r_coefficients = {
VOP_REG(RK3399_WIN0_YUV2YUV_Y2R + 0, 0xffff, 0),
VOP_REG(RK3399_WIN0_YUV2YUV_Y2R + 0, 0xffff, 16),
VOP_REG(RK3399_WIN0_YUV2YUV_Y2R + 4, 0xffff, 0),
VOP_REG(RK3399_WIN0_YUV2YUV_Y2R + 4, 0xffff, 16),
VOP_REG(RK3399_WIN0_YUV2YUV_Y2R + 8, 0xffff, 0),
VOP_REG(RK3399_WIN0_YUV2YUV_Y2R + 8, 0xffff, 16),
VOP_REG(RK3399_WIN0_YUV2YUV_Y2R + 12, 0xffff, 0),
VOP_REG(RK3399_WIN0_YUV2YUV_Y2R + 12, 0xffff, 16),
VOP_REG(RK3399_WIN0_YUV2YUV_Y2R + 16, 0xffff, 0),
VOP_REG(RK3399_WIN0_YUV2YUV_Y2R + 20, 0xffffffff, 0),
VOP_REG(RK3399_WIN0_YUV2YUV_Y2R + 24, 0xffffffff, 0),
VOP_REG(RK3399_WIN0_YUV2YUV_Y2R + 28, 0xffffffff, 0),
},
};
/* win2/win3 are RGB-only, so their yuv2yuv phy is intentionally empty. */
static const struct vop_yuv2yuv_phy rk3399_yuv2yuv_win23_data = { };
/* yuv2yuv per-window offsets/enables for the big RK3399 VOP. */
static const struct vop_win_yuv2yuv_data rk3399_vop_big_win_yuv2yuv_data[] = {
{ .base = 0x00, .phy = &rk3399_yuv2yuv_win01_data,
.y2r_en = VOP_REG(RK3399_YUV2YUV_WIN, 0x1, 1) },
{ .base = 0x60, .phy = &rk3399_yuv2yuv_win01_data,
.y2r_en = VOP_REG(RK3399_YUV2YUV_WIN, 0x1, 9) },
{ .base = 0xC0, .phy = &rk3399_yuv2yuv_win23_data },
{ .base = 0x120, .phy = &rk3399_yuv2yuv_win23_data },
};
/* RK3399 win0: identical to the RK3288 layout but advertises AFBC modifiers. */
static const struct vop_win_phy rk3399_win01_data = {
.scl = &rk3288_win_full_scl,
.data_formats = formats_win_full,
.nformats = ARRAY_SIZE(formats_win_full),
.format_modifiers = format_modifiers_win_full_afbc,
.enable = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 0),
.format = VOP_REG(RK3288_WIN0_CTRL0, 0x7, 1),
.rb_swap = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 12),
.uv_swap = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 15),
.x_mir_en = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 21),
.y_mir_en = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 22),
.act_info = VOP_REG(RK3288_WIN0_ACT_INFO, 0x1fff1fff, 0),
.dsp_info = VOP_REG(RK3288_WIN0_DSP_INFO, 0x0fff0fff, 0),
.dsp_st = VOP_REG(RK3288_WIN0_DSP_ST, 0x1fff1fff, 0),
.yrgb_mst = VOP_REG(RK3288_WIN0_YRGB_MST, 0xffffffff, 0),
.uv_mst = VOP_REG(RK3288_WIN0_CBR_MST, 0xffffffff, 0),
.yrgb_vir = VOP_REG(RK3288_WIN0_VIR, 0x3fff, 0),
.uv_vir = VOP_REG(RK3288_WIN0_VIR, 0x3fff, 16),
.src_alpha_ctl = VOP_REG(RK3288_WIN0_SRC_ALPHA_CTRL, 0xff, 0),
.dst_alpha_ctl = VOP_REG(RK3288_WIN0_DST_ALPHA_CTRL, 0xff, 0),
.channel = VOP_REG(RK3288_WIN0_CTRL2, 0xff, 0),
};
/*
 * rk3399 vop big windows register layout is same as rk3288, but we
 * have a separate rk3399 win data array here so that we can advertise
 * AFBC on the primary plane.
 */
static const struct vop_win_data rk3399_vop_win_data[] = {
{ .base = 0x00, .phy = &rk3399_win01_data,
.type = DRM_PLANE_TYPE_PRIMARY },
{ .base = 0x40, .phy = &rk3368_win01_data,
.type = DRM_PLANE_TYPE_OVERLAY },
{ .base = 0x00, .phy = &rk3368_win23_data,
.type = DRM_PLANE_TYPE_OVERLAY },
{ .base = 0x50, .phy = &rk3368_win23_data,
.type = DRM_PLANE_TYPE_CURSOR },
};
/* RK3399 AFBC decoder 0 control registers. */
static const struct vop_afbc rk3399_vop_afbc = {
.rstn = VOP_REG(RK3399_AFBCD0_CTRL, 0x1, 3),
.enable = VOP_REG(RK3399_AFBCD0_CTRL, 0x1, 0),
.win_sel = VOP_REG(RK3399_AFBCD0_CTRL, 0x3, 1),
.format = VOP_REG(RK3399_AFBCD0_CTRL, 0x1f, 16),
.hreg_block_split = VOP_REG(RK3399_AFBCD0_CTRL, 0x1, 21),
.hdr_ptr = VOP_REG(RK3399_AFBCD0_HDR_PTR, 0xffffffff, 0),
.pic_size = VOP_REG(RK3399_AFBCD0_PIC_SIZE, 0xffffffff, 0),
};
/* RK3399 "big" VOP (VOPB), IP version 3.5: RGB10, AFBC, 1024-entry LUT. */
static const struct vop_data rk3399_vop_big = {
.version = VOP_VERSION(3, 5),
.feature = VOP_FEATURE_OUTPUT_RGB10,
.intr = &rk3366_vop_intr,
.common = &rk3399_common,
.modeset = &rk3288_modeset,
.output = &rk3399_output,
.afbc = &rk3399_vop_afbc,
.misc = &rk3368_misc,
.win = rk3399_vop_win_data,
.win_size = ARRAY_SIZE(rk3399_vop_win_data),
.win_yuv2yuv = rk3399_vop_big_win_yuv2yuv_data,
.lut_size = 1024,
.max_output = { 4096, 2160 },
};
/* "Lit" RK3399 VOP planes: one full window, one lite cursor window. */
static const struct vop_win_data rk3399_vop_lit_win_data[] = {
{ .base = 0x00, .phy = &rk3368_win01_data,
.type = DRM_PLANE_TYPE_PRIMARY },
{ .base = 0x00, .phy = &rk3368_win23_data,
.type = DRM_PLANE_TYPE_CURSOR},
};
/* yuv2yuv per-window offsets/enables for the "lit" RK3399 VOP. */
static const struct vop_win_yuv2yuv_data rk3399_vop_lit_win_yuv2yuv_data[] = {
{ .base = 0x00, .phy = &rk3399_yuv2yuv_win01_data,
.y2r_en = VOP_REG(RK3399_YUV2YUV_WIN, 0x1, 1)},
{ .base = 0x60, .phy = &rk3399_yuv2yuv_win23_data },
};
/* RK3399 "lit" VOP (VOPL), IP version 3.6: smaller LUT, 2560x1600 max. */
static const struct vop_data rk3399_vop_lit = {
.version = VOP_VERSION(3, 6),
.intr = &rk3366_vop_intr,
.common = &rk3399_common,
.modeset = &rk3288_modeset,
.output = &rk3399_output,
.misc = &rk3368_misc,
.win = rk3399_vop_lit_win_data,
.win_size = ARRAY_SIZE(rk3399_vop_lit_win_data),
.win_yuv2yuv = rk3399_vop_lit_win_yuv2yuv_data,
.lut_size = 256,
.max_output = { 2560, 1600 },
};
/* RK3228 planes: two RK3288-style full windows (primary + cursor). */
static const struct vop_win_data rk3228_vop_win_data[] = {
{ .base = 0x00, .phy = &rk3288_win01_data,
.type = DRM_PLANE_TYPE_PRIMARY },
{ .base = 0x40, .phy = &rk3288_win01_data,
.type = DRM_PLANE_TYPE_CURSOR },
};
/* RK3228 VOP, IP version 3.7 — mostly reuses RK3288/RK3399 blocks. */
static const struct vop_data rk3228_vop = {
.version = VOP_VERSION(3, 7),
.feature = VOP_FEATURE_OUTPUT_RGB10,
.intr = &rk3366_vop_intr,
.common = &rk3288_common,
.modeset = &rk3288_modeset,
.output = &rk3399_output,
.misc = &rk3368_misc,
.win = rk3228_vop_win_data,
.win_size = ARRAY_SIZE(rk3228_vop_win_data),
.max_output = { 4096, 2160 },
};
/* RK3328 display timing registers, including post-scaler active area. */
static const struct vop_modeset rk3328_modeset = {
.htotal_pw = VOP_REG(RK3328_DSP_HTOTAL_HS_END, 0x1fff1fff, 0),
.hact_st_end = VOP_REG(RK3328_DSP_HACT_ST_END, 0x1fff1fff, 0),
.vtotal_pw = VOP_REG(RK3328_DSP_VTOTAL_VS_END, 0x1fff1fff, 0),
.vact_st_end = VOP_REG(RK3328_DSP_VACT_ST_END, 0x1fff1fff, 0),
.hpost_st_end = VOP_REG(RK3328_POST_DSP_HACT_INFO, 0x1fff1fff, 0),
.vpost_st_end = VOP_REG(RK3328_POST_DSP_VACT_INFO, 0x1fff1fff, 0),
};
/* RK3328 output routing: per-encoder enables plus dclk/pin polarity. */
static const struct vop_output rk3328_output = {
.rgb_dclk_pol = VOP_REG(RK3328_DSP_CTRL1, 0x1, 19),
.hdmi_dclk_pol = VOP_REG(RK3328_DSP_CTRL1, 0x1, 23),
.edp_dclk_pol = VOP_REG(RK3328_DSP_CTRL1, 0x1, 27),
.mipi_dclk_pol = VOP_REG(RK3328_DSP_CTRL1, 0x1, 31),
.rgb_en = VOP_REG(RK3328_SYS_CTRL, 0x1, 12),
.hdmi_en = VOP_REG(RK3328_SYS_CTRL, 0x1, 13),
.edp_en = VOP_REG(RK3328_SYS_CTRL, 0x1, 14),
.mipi_en = VOP_REG(RK3328_SYS_CTRL, 0x1, 15),
.rgb_pin_pol = VOP_REG(RK3328_DSP_CTRL1, 0x7, 16),
.hdmi_pin_pol = VOP_REG(RK3328_DSP_CTRL1, 0x7, 20),
.edp_pin_pol = VOP_REG(RK3328_DSP_CTRL1, 0x7, 24),
.mipi_pin_pol = VOP_REG(RK3328_DSP_CTRL1, 0x7, 28),
};
/* RK3328 misc: global "register done" enable. */
static const struct vop_misc rk3328_misc = {
.global_regdone_en = VOP_REG(RK3328_SYS_CTRL, 0x1, 11),
};
/* RK3328 common control bits. */
static const struct vop_common rk3328_common = {
.standby = VOP_REG_SYNC(RK3328_SYS_CTRL, 0x1, 22),
.dither_down_sel = VOP_REG(RK3328_DSP_CTRL1, 0x1, 4),
.dither_down_mode = VOP_REG(RK3328_DSP_CTRL1, 0x1, 3),
.dither_down_en = VOP_REG(RK3328_DSP_CTRL1, 0x1, 2),
.pre_dither_down = VOP_REG(RK3328_DSP_CTRL1, 0x1, 1),
.dither_up = VOP_REG(RK3328_DSP_CTRL1, 0x1, 6),
.dsp_blank = VOP_REG(RK3328_DSP_CTRL0, 0x3, 18),
.out_mode = VOP_REG(RK3328_DSP_CTRL0, 0xf, 0),
.cfg_done = VOP_REG_SYNC(RK3328_REG_CFG_DONE, 0x1, 0),
};
/* RK3328 interrupt block: RK3368 sources at RK3328 register offsets. */
static const struct vop_intr rk3328_vop_intr = {
.intrs = rk3368_vop_intrs,
.nintrs = ARRAY_SIZE(rk3368_vop_intrs),
.line_flag_num[0] = VOP_REG(RK3328_LINE_FLAG, 0xffff, 0),
.line_flag_num[1] = VOP_REG(RK3328_LINE_FLAG, 0xffff, 16),
.status = VOP_REG_MASK_SYNC(RK3328_INTR_STATUS0, 0xffff, 0),
.enable = VOP_REG_MASK_SYNC(RK3328_INTR_EN0, 0xffff, 0),
.clear = VOP_REG_MASK_SYNC(RK3328_INTR_CLEAR0, 0xffff, 0),
};
/* RK3328 planes: three RK3368-style full windows at non-zero base offsets. */
static const struct vop_win_data rk3328_vop_win_data[] = {
{ .base = 0xd0, .phy = &rk3368_win01_data,
.type = DRM_PLANE_TYPE_PRIMARY },
{ .base = 0x1d0, .phy = &rk3368_win01_data,
.type = DRM_PLANE_TYPE_OVERLAY },
{ .base = 0x2d0, .phy = &rk3368_win01_data,
.type = DRM_PLANE_TYPE_CURSOR },
};
/* RK3328 VOP, IP version 3.8. */
static const struct vop_data rk3328_vop = {
.version = VOP_VERSION(3, 8),
.feature = VOP_FEATURE_OUTPUT_RGB10,
.intr = &rk3328_vop_intr,
.common = &rk3328_common,
.modeset = &rk3328_modeset,
.output = &rk3328_output,
.misc = &rk3328_misc,
.win = rk3328_vop_win_data,
.win_size = ARRAY_SIZE(rk3328_vop_win_data),
.max_output = { 4096, 2160 },
};
/* Device-tree match table: maps each SoC compatible to its vop_data above. */
static const struct of_device_id vop_driver_dt_match[] = {
{ .compatible = "rockchip,rk3036-vop",
.data = &rk3036_vop },
{ .compatible = "rockchip,rk3126-vop",
.data = &rk3126_vop },
{ .compatible = "rockchip,px30-vop-big",
.data = &px30_vop_big },
{ .compatible = "rockchip,px30-vop-lit",
.data = &px30_vop_lit },
{ .compatible = "rockchip,rk3066-vop",
.data = &rk3066_vop },
{ .compatible = "rockchip,rk3188-vop",
.data = &rk3188_vop },
{ .compatible = "rockchip,rk3288-vop",
.data = &rk3288_vop },
{ .compatible = "rockchip,rk3368-vop",
.data = &rk3368_vop },
{ .compatible = "rockchip,rk3366-vop",
.data = &rk3366_vop },
{ .compatible = "rockchip,rk3399-vop-big",
.data = &rk3399_vop_big },
{ .compatible = "rockchip,rk3399-vop-lit",
.data = &rk3399_vop_lit },
{ .compatible = "rockchip,rk3228-vop",
.data = &rk3228_vop },
{ .compatible = "rockchip,rk3328-vop",
.data = &rk3328_vop },
{},
};
MODULE_DEVICE_TABLE(of, vop_driver_dt_match);
/*
 * Probe: register the VOP as a component of the Rockchip DRM master.
 * The device must have been instantiated from the device tree.
 */
static int vop_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;

	if (dev->of_node)
		return component_add(dev, &vop_component_ops);

	DRM_DEV_ERROR(dev, "can't find vop devices\n");
	return -ENODEV;
}
/* Remove: undo the component_add() done in vop_probe(). */
static void vop_remove(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;

	component_del(dev, &vop_component_ops);
}
/*
 * Platform driver glue; the actual DRM binding happens through the
 * component framework (see vop_probe()/vop_remove()).
 */
struct platform_driver vop_platform_driver = {
.probe = vop_probe,
.remove_new = vop_remove,
.driver = {
.name = "rockchip-vop",
.of_match_table = vop_driver_dt_match,
},
};
| linux-master | drivers/gpu/drm/rockchip/rockchip_vop_reg.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
* Author: Chris Zhong <[email protected]>
*/
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/reset.h>
#include "cdn-dp-core.h"
#include "cdn-dp-reg.h"
#define CDN_DP_SPDIF_CLK 200000000
#define FW_ALIVE_TIMEOUT_US 1000000
#define MAILBOX_RETRY_US 1000
#define MAILBOX_TIMEOUT_US 5000000
#define LINK_TRAINING_RETRY_MS 20
#define LINK_TRAINING_TIMEOUT_MS 500
/*
 * Report the firmware's reference clock rate: SW_CLK_H takes the
 * rate in MHz, so divide the Hz value down before writing.
 */
void cdn_dp_set_fw_clk(struct cdn_dp_device *dp, unsigned long clk)
{
writel(clk / 1000000, dp->regs + SW_CLK_H);
}
/*
 * Release the resets and enable the clocks of every internal block of the
 * DP controller (DPTX framer/PHY, AUX, packet, SPDIF/audio, cipher and
 * crypto domains), then unmask all APB interrupts.
 */
void cdn_dp_clock_reset(struct cdn_dp_device *dp)
{
u32 val;
/* DPTX framer, PHY data/char, AUX and VIF clock/reset bits */
val = DPTX_FRMR_DATA_CLK_RSTN_EN |
DPTX_FRMR_DATA_CLK_EN |
DPTX_PHY_DATA_RSTN_EN |
DPTX_PHY_DATA_CLK_EN |
DPTX_PHY_CHAR_RSTN_EN |
DPTX_PHY_CHAR_CLK_EN |
SOURCE_AUX_SYS_CLK_RSTN_EN |
SOURCE_AUX_SYS_CLK_EN |
DPTX_SYS_CLK_RSTN_EN |
DPTX_SYS_CLK_EN |
CFG_DPTX_VIF_CLK_RSTN_EN |
CFG_DPTX_VIF_CLK_EN;
writel(val, dp->regs + SOURCE_DPTX_CAR);
val = SOURCE_PHY_RSTN_EN | SOURCE_PHY_CLK_EN;
writel(val, dp->regs + SOURCE_PHY_CAR);
val = SOURCE_PKT_SYS_RSTN_EN |
SOURCE_PKT_SYS_CLK_EN |
SOURCE_PKT_DATA_RSTN_EN |
SOURCE_PKT_DATA_CLK_EN;
writel(val, dp->regs + SOURCE_PKT_CAR);
/* SPDIF clock-data recovery and audio interface domains */
val = SPDIF_CDR_CLK_RSTN_EN |
SPDIF_CDR_CLK_EN |
SOURCE_AIF_SYS_RSTN_EN |
SOURCE_AIF_SYS_CLK_EN |
SOURCE_AIF_CLK_RSTN_EN |
SOURCE_AIF_CLK_EN;
writel(val, dp->regs + SOURCE_AIF_CAR);
val = SOURCE_CIPHER_SYSTEM_CLK_RSTN_EN |
SOURCE_CIPHER_SYS_CLK_EN |
SOURCE_CIPHER_CHAR_CLK_RSTN_EN |
SOURCE_CIPHER_CHAR_CLK_EN;
writel(val, dp->regs + SOURCE_CIPHER_CAR);
val = SOURCE_CRYPTO_SYS_CLK_RSTN_EN |
SOURCE_CRYPTO_SYS_CLK_EN;
writel(val, dp->regs + SOURCE_CRYPTO_CAR);
/* enable Mailbox and PIF interrupt */
writel(0, dp->regs + APB_INT_MASK);
}
/*
 * Pop one byte from the firmware mailbox.
 * Returns the byte value (0-255) on success, or a negative errno if the
 * mailbox stays empty past MAILBOX_TIMEOUT_US.
 */
static int cdn_dp_mailbox_read(struct cdn_dp_device *dp)
{
	int val, ret;
	/* wait until MAILBOX_EMPTY_ADDR reads zero, i.e. data is pending */
	ret = readx_poll_timeout(readl, dp->regs + MAILBOX_EMPTY_ADDR,
				 val, !val, MAILBOX_RETRY_US,
				 MAILBOX_TIMEOUT_US);
	if (ret < 0)
		return ret;
	return readl(dp->regs + MAILBOX0_RD_DATA) & 0xff;
}
/*
 * Push one byte into the firmware mailbox once there is room.
 * Returns 0 on success or a negative errno on timeout.
 * NOTE(review): the "cdp_dp" prefix looks like a typo for "cdn_dp", but it
 * is the established internal name and is kept for consistency with callers.
 */
static int cdp_dp_mailbox_write(struct cdn_dp_device *dp, u8 val)
{
	int ret, full;
	/* wait for MAILBOX_FULL_ADDR to clear before writing */
	ret = readx_poll_timeout(readl, dp->regs + MAILBOX_FULL_ADDR,
				 full, !full, MAILBOX_RETRY_US,
				 MAILBOX_TIMEOUT_US);
	if (ret < 0)
		return ret;
	writel(val, dp->regs + MAILBOX0_WR_DATA);
	return 0;
}
/*
 * Read the 4-byte header of the next mailbox reply and check it matches
 * the expected @module_id, @opcode and payload size @req_size.
 * On a mismatch the stale payload is drained so the mailbox stays in sync,
 * and -EINVAL is returned.
 */
static int cdn_dp_mailbox_validate_receive(struct cdn_dp_device *dp,
					   u8 module_id, u8 opcode,
					   u16 req_size)
{
	u32 mbox_size, i;
	u8 header[4];
	int ret;
	/* read the header of the message */
	for (i = 0; i < 4; i++) {
		ret = cdn_dp_mailbox_read(dp);
		if (ret < 0)
			return ret;
		header[i] = ret;
	}
	/* header layout: [0]=opcode, [1]=module id, [2..3]=size, big endian */
	mbox_size = (header[2] << 8) | header[3];
	if (opcode != header[0] || module_id != header[1] ||
	    req_size != mbox_size) {
		/*
		 * If the message in mailbox is not what we want, we need to
		 * clear the mailbox by reading its contents.
		 */
		for (i = 0; i < mbox_size; i++)
			if (cdn_dp_mailbox_read(dp) < 0)
				break;
		return -EINVAL;
	}
	return 0;
}
/*
 * Drain @buff_size payload bytes of a mailbox reply into @buff.
 * Returns 0 on success or the first negative errno from the mailbox.
 */
static int cdn_dp_mailbox_read_receive(struct cdn_dp_device *dp,
				       u8 *buff, u16 buff_size)
{
	u32 idx;

	for (idx = 0; idx < buff_size; idx++) {
		int byte = cdn_dp_mailbox_read(dp);

		if (byte < 0)
			return byte;

		buff[idx] = byte;
	}

	return 0;
}
/*
 * Push a request to the firmware mailbox: a 4-byte header (opcode,
 * module id, big-endian payload size) followed by @size payload bytes.
 * Returns 0 on success or the first negative errno from the mailbox.
 */
static int cdn_dp_mailbox_send(struct cdn_dp_device *dp, u8 module_id,
			       u8 opcode, u16 size, u8 *message)
{
	const u8 header[4] = {
		opcode,
		module_id,
		(size >> 8) & 0xff,
		size & 0xff,
	};
	int i, ret;

	for (i = 0; i < sizeof(header); i++) {
		ret = cdp_dp_mailbox_write(dp, header[i]);
		if (ret)
			return ret;
	}

	for (i = 0; i < size; i++) {
		ret = cdp_dp_mailbox_write(dp, message[i]);
		if (ret)
			return ret;
	}

	return 0;
}
/* Ask the firmware to write 32-bit @val to the DPTX register at @addr. */
static int cdn_dp_reg_write(struct cdn_dp_device *dp, u16 addr, u32 val)
{
	/* payload: 16-bit address then 32-bit value, both big endian */
	u8 payload[6] = {
		(addr >> 8) & 0xff,
		addr & 0xff,
		(val >> 24) & 0xff,
		(val >> 16) & 0xff,
		(val >> 8) & 0xff,
		val & 0xff,
	};

	return cdn_dp_mailbox_send(dp, MB_MODULE_ID_DP_TX, DPTX_WRITE_REGISTER,
				   sizeof(payload), payload);
}
/*
 * Ask the firmware to update a bit field of the DPTX register at @addr:
 * @bits_no bits starting at @start_bit are set from @val.
 */
static int cdn_dp_reg_write_bit(struct cdn_dp_device *dp, u16 addr,
				u8 start_bit, u8 bits_no, u32 val)
{
	/* payload: address, bit position, width, then big-endian value */
	u8 payload[8] = {
		(addr >> 8) & 0xff,
		addr & 0xff,
		start_bit,
		bits_no,
		(val >> 24) & 0xff,
		(val >> 16) & 0xff,
		(val >> 8) & 0xff,
		val & 0xff,
	};

	return cdn_dp_mailbox_send(dp, MB_MODULE_ID_DP_TX, DPTX_WRITE_FIELD,
				   sizeof(payload), payload);
}
/*
 * Read @len bytes of DPCD starting at the 20-bit address @addr into @data,
 * via the firmware's AUX channel.  The reply carries a 5-byte echo header
 * (length + address) ahead of the payload, which is consumed into @reg and
 * discarded.
 * Returns 0 on success or a negative errno.
 * NOTE(review): unlike cdn_dp_dpcd_write() no error is logged here;
 * callers receive the raw error code.
 */
int cdn_dp_dpcd_read(struct cdn_dp_device *dp, u32 addr, u8 *data, u16 len)
{
	u8 msg[5], reg[5];
	int ret;
	/* request: big-endian length then 24-bit address */
	msg[0] = (len >> 8) & 0xff;
	msg[1] = len & 0xff;
	msg[2] = (addr >> 16) & 0xff;
	msg[3] = (addr >> 8) & 0xff;
	msg[4] = addr & 0xff;
	ret = cdn_dp_mailbox_send(dp, MB_MODULE_ID_DP_TX, DPTX_READ_DPCD,
				  sizeof(msg), msg);
	if (ret)
		goto err_dpcd_read;
	ret = cdn_dp_mailbox_validate_receive(dp, MB_MODULE_ID_DP_TX,
					      DPTX_READ_DPCD,
					      sizeof(reg) + len);
	if (ret)
		goto err_dpcd_read;
	/* skip the echoed length/address header */
	ret = cdn_dp_mailbox_read_receive(dp, reg, sizeof(reg));
	if (ret)
		goto err_dpcd_read;
	ret = cdn_dp_mailbox_read_receive(dp, data, len);
err_dpcd_read:
	return ret;
}
/*
 * Write the single byte @value to DPCD address @addr via the firmware's
 * AUX channel, then verify the firmware echoed back the same address.
 * Returns 0 on success or a negative errno (logged).
 */
int cdn_dp_dpcd_write(struct cdn_dp_device *dp, u32 addr, u8 value)
{
	u8 msg[6], reg[5];
	int ret;
	/* request: big-endian length (fixed 1) then 24-bit address + value */
	msg[0] = 0;
	msg[1] = 1;
	msg[2] = (addr >> 16) & 0xff;
	msg[3] = (addr >> 8) & 0xff;
	msg[4] = addr & 0xff;
	msg[5] = value;
	ret = cdn_dp_mailbox_send(dp, MB_MODULE_ID_DP_TX, DPTX_WRITE_DPCD,
				  sizeof(msg), msg);
	if (ret)
		goto err_dpcd_write;
	ret = cdn_dp_mailbox_validate_receive(dp, MB_MODULE_ID_DP_TX,
					      DPTX_WRITE_DPCD, sizeof(reg));
	if (ret)
		goto err_dpcd_write;
	ret = cdn_dp_mailbox_read_receive(dp, reg, sizeof(reg));
	if (ret)
		goto err_dpcd_write;
	/* reg[2..4] echo the address that was actually written */
	if (addr != (reg[2] << 16 | reg[3] << 8 | reg[4]))
		ret = -EINVAL;
err_dpcd_write:
	if (ret)
		DRM_DEV_ERROR(dp->dev, "dpcd write failed: %d\n", ret);
	return ret;
}
/*
 * Load the DP firmware into the controller's instruction (@i_mem/@i_size)
 * and data (@d_mem/@d_size) RAM, release the embedded CPU from reset and
 * wait for it to report liveness via KEEP_ALIVE.  Sizes are in bytes and
 * the images are copied one 32-bit word at a time.
 * Returns 0 on success, -EINVAL if the firmware never comes alive.
 */
int cdn_dp_load_firmware(struct cdn_dp_device *dp, const u32 *i_mem,
			 u32 i_size, const u32 *d_mem, u32 d_size)
{
	u32 reg;
	int i, ret;
	/* reset ucpu before load firmware*/
	writel(APB_IRAM_PATH | APB_DRAM_PATH | APB_XT_RESET,
	       dp->regs + APB_CTRL);
	for (i = 0; i < i_size; i += 4)
		writel(*i_mem++, dp->regs + ADDR_IMEM + i);
	for (i = 0; i < d_size; i += 4)
		writel(*d_mem++, dp->regs + ADDR_DMEM + i);
	/* un-reset ucpu */
	writel(0, dp->regs + APB_CTRL);
	/* check the keep alive register to make sure fw working */
	ret = readx_poll_timeout(readl, dp->regs + KEEP_ALIVE,
				 reg, reg, 2000, FW_ALIVE_TIMEOUT_US);
	if (ret < 0) {
		DRM_DEV_ERROR(dp->dev, "failed to loaded the FW reg = %x\n",
			      reg);
		return -EINVAL;
	}
	/* assemble the 32-bit version from four byte-wide registers */
	reg = readl(dp->regs + VER_L) & 0xff;
	dp->fw_version = reg;
	reg = readl(dp->regs + VER_H) & 0xff;
	dp->fw_version |= reg << 8;
	reg = readl(dp->regs + VER_LIB_L_ADDR) & 0xff;
	dp->fw_version |= reg << 16;
	reg = readl(dp->regs + VER_LIB_H_ADDR) & 0xff;
	dp->fw_version |= reg << 24;
	DRM_DEV_DEBUG(dp->dev, "firmware version: %x\n", dp->fw_version);
	return 0;
}
/*
 * Switch the firmware between FW_ACTIVE and FW_STANDBY.  The request is
 * written to the mailbox by hand (4-byte header + 1 payload byte) rather
 * than via cdn_dp_mailbox_send(), and the 5-byte reply is read back into
 * the same buffer without further validation.
 * Returns 0 on success or a negative errno (logged).
 */
int cdn_dp_set_firmware_active(struct cdn_dp_device *dp, bool enable)
{
	u8 msg[5];
	int ret, i;
	/* hand-rolled mailbox header: opcode, module, big-endian size (1) */
	msg[0] = GENERAL_MAIN_CONTROL;
	msg[1] = MB_MODULE_ID_GENERAL;
	msg[2] = 0;
	msg[3] = 1;
	msg[4] = enable ? FW_ACTIVE : FW_STANDBY;
	for (i = 0; i < sizeof(msg); i++) {
		ret = cdp_dp_mailbox_write(dp, msg[i]);
		if (ret)
			goto err_set_firmware_active;
	}
	/* read the firmware state */
	for (i = 0; i < sizeof(msg); i++) {
		ret = cdn_dp_mailbox_read(dp);
		if (ret < 0)
			goto err_set_firmware_active;
		msg[i] = ret;
	}
	ret = 0;
err_set_firmware_active:
	if (ret < 0)
		DRM_DEV_ERROR(dp->dev, "set firmware active failed\n");
	return ret;
}
/*
 * Report the host's DP capabilities (max link rate, @lanes, voltage swing
 * and pre-emphasis limits, lane mapping per @flip) to the firmware, then
 * set the AUX polarity inversion.
 * Returns 0 on success or a negative errno (logged).
 */
int cdn_dp_set_host_cap(struct cdn_dp_device *dp, u8 lanes, bool flip)
{
	u8 msg[8];
	int ret;
	msg[0] = CDN_DP_MAX_LINK_RATE;
	msg[1] = lanes | SCRAMBLER_EN;
	msg[2] = VOLTAGE_LEVEL_2;
	msg[3] = PRE_EMPHASIS_LEVEL_3;
	msg[4] = PTS1 | PTS2 | PTS3 | PTS4;
	msg[5] = FAST_LT_NOT_SUPPORT;
	/* lane order depends on the Type-C plug orientation */
	msg[6] = flip ? LANE_MAPPING_FLIPPED : LANE_MAPPING_NORMAL;
	msg[7] = ENHANCED;
	ret = cdn_dp_mailbox_send(dp, MB_MODULE_ID_DP_TX,
				  DPTX_SET_HOST_CAPABILITIES,
				  sizeof(msg), msg);
	if (ret)
		goto err_set_host_cap;
	ret = cdn_dp_reg_write(dp, DP_AUX_SWAP_INVERSION_CONTROL,
			       AUX_HOST_INVERT);
err_set_host_cap:
	if (ret)
		DRM_DEV_ERROR(dp->dev, "set host cap failed: %d\n", ret);
	return ret;
}
/*
 * Enable HPD and link-training event reporting in the firmware.
 * Returns 0 on success or a negative errno (logged).
 */
int cdn_dp_event_config(struct cdn_dp_device *dp)
{
	/* only msg[0] carries flags; the remaining bytes stay zero */
	u8 msg[5] = { DPTX_EVENT_ENABLE_HPD | DPTX_EVENT_ENABLE_TRAINING };
	int ret;

	ret = cdn_dp_mailbox_send(dp, MB_MODULE_ID_DP_TX, DPTX_ENABLE_EVENT,
				  sizeof(msg), msg);
	if (ret)
		DRM_DEV_ERROR(dp->dev, "set event config failed: %d\n", ret);

	return ret;
}
/* Return the raw firmware event word from SW_EVENTS0. */
u32 cdn_dp_get_event(struct cdn_dp_device *dp)
{
	return readl(dp->regs + SW_EVENTS0);
}
/*
 * Query the firmware-reported hot-plug-detect state.
 * Returns the one-byte HPD status (non-negative) on success or a negative
 * errno (logged).
 */
int cdn_dp_get_hpd_status(struct cdn_dp_device *dp)
{
	u8 status;
	int ret;
	/* DPTX_HPD_STATE takes no payload */
	ret = cdn_dp_mailbox_send(dp, MB_MODULE_ID_DP_TX, DPTX_HPD_STATE,
				  0, NULL);
	if (ret)
		goto err_get_hpd;
	ret = cdn_dp_mailbox_validate_receive(dp, MB_MODULE_ID_DP_TX,
					      DPTX_HPD_STATE, sizeof(status));
	if (ret)
		goto err_get_hpd;
	ret = cdn_dp_mailbox_read_receive(dp, &status, sizeof(status));
	if (ret)
		goto err_get_hpd;
	return status;
err_get_hpd:
	DRM_DEV_ERROR(dp->dev, "get hpd status failed: %d\n", ret);
	return ret;
}
/*
 * drm_connector get_edid_block callback: fetch one EDID block from the
 * sink via the firmware.  @block is the absolute EDID block number; the
 * firmware addresses it as a (segment, block-within-segment) pair.
 * The whole request/response exchange is retried up to four times because
 * the firmware can return a stale or mismatched reply.
 * Returns 0 on success or the last negative errno (logged).
 */
int cdn_dp_get_edid_block(void *data, u8 *edid,
			  unsigned int block, size_t length)
{
	struct cdn_dp_device *dp = data;
	u8 msg[2], reg[2], i;
	int ret;

	for (i = 0; i < 4; i++) {
		msg[0] = block / 2;	/* EDID segment (2 blocks/segment) */
		msg[1] = block % 2;	/* block within the segment */

		ret = cdn_dp_mailbox_send(dp, MB_MODULE_ID_DP_TX, DPTX_GET_EDID,
					  sizeof(msg), msg);
		if (ret)
			continue;

		ret = cdn_dp_mailbox_validate_receive(dp, MB_MODULE_ID_DP_TX,
						      DPTX_GET_EDID,
						      sizeof(reg) + length);
		if (ret)
			continue;

		ret = cdn_dp_mailbox_read_receive(dp, reg, sizeof(reg));
		if (ret)
			continue;

		ret = cdn_dp_mailbox_read_receive(dp, edid, length);
		if (ret)
			continue;

		/* reg[] echoes length and segment; accept only a match */
		if (reg[0] == length && reg[1] == block / 2)
			break;
	}

	if (ret)
		/* fix: block is unsigned, so use %u rather than %d */
		DRM_DEV_ERROR(dp->dev, "get block[%u] edid failed: %d\n",
			      block, ret);

	return ret;
}
/*
 * Kick off firmware-driven link training and poll DPTX_READ_EVENT every
 * LINK_TRAINING_RETRY_MS until the equalization phase finishes or
 * LINK_TRAINING_TIMEOUT_MS elapses.
 * Returns 0 on success, -ETIMEDOUT or a mailbox errno (logged).
 */
static int cdn_dp_training_start(struct cdn_dp_device *dp)
{
	unsigned long timeout;
	u8 msg, event[2];
	int ret;
	msg = LINK_TRAINING_RUN;
	/* start training */
	ret = cdn_dp_mailbox_send(dp, MB_MODULE_ID_DP_TX, DPTX_TRAINING_CONTROL,
				  sizeof(msg), &msg);
	if (ret)
		goto err_training_start;
	timeout = jiffies + msecs_to_jiffies(LINK_TRAINING_TIMEOUT_MS);
	while (time_before(jiffies, timeout)) {
		msleep(LINK_TRAINING_RETRY_MS);
		ret = cdn_dp_mailbox_send(dp, MB_MODULE_ID_DP_TX,
					  DPTX_READ_EVENT, 0, NULL);
		if (ret)
			goto err_training_start;
		ret = cdn_dp_mailbox_validate_receive(dp, MB_MODULE_ID_DP_TX,
						      DPTX_READ_EVENT,
						      sizeof(event));
		if (ret)
			goto err_training_start;
		ret = cdn_dp_mailbox_read_receive(dp, event, sizeof(event));
		if (ret)
			goto err_training_start;
		/* event[1] carries the training phase flags */
		if (event[1] & EQ_PHASE_FINISHED)
			return 0;
	}
	ret = -ETIMEDOUT;
err_training_start:
	DRM_DEV_ERROR(dp->dev, "training failed: %d\n", ret);
	return ret;
}
/*
 * Fetch the post-training link status from the firmware and cache the
 * negotiated parameters: status[0] is the DP bandwidth code (converted to
 * kHz in dp->max_rate), status[1] the active lane count (dp->max_lanes).
 * Returns 0 on success or a negative errno (logged).
 */
static int cdn_dp_get_training_status(struct cdn_dp_device *dp)
{
	u8 status[10];
	int ret;
	ret = cdn_dp_mailbox_send(dp, MB_MODULE_ID_DP_TX, DPTX_READ_LINK_STAT,
				  0, NULL);
	if (ret)
		goto err_get_training_status;
	ret = cdn_dp_mailbox_validate_receive(dp, MB_MODULE_ID_DP_TX,
					      DPTX_READ_LINK_STAT,
					      sizeof(status));
	if (ret)
		goto err_get_training_status;
	ret = cdn_dp_mailbox_read_receive(dp, status, sizeof(status));
	if (ret)
		goto err_get_training_status;
	dp->max_rate = drm_dp_bw_code_to_link_rate(status[0]);
	dp->max_lanes = status[1];
err_get_training_status:
	if (ret)
		DRM_DEV_ERROR(dp->dev, "get training status failed: %d\n", ret);
	return ret;
}
/*
 * Run link training and then read back the negotiated rate/lane count
 * into dp->max_rate / dp->max_lanes.
 * Returns 0 on success or a negative errno (each step logs its own error).
 */
int cdn_dp_train_link(struct cdn_dp_device *dp)
{
	int ret = cdn_dp_training_start(dp);

	if (ret) {
		DRM_DEV_ERROR(dp->dev, "Failed to start training %d\n", ret);
		return ret;
	}

	ret = cdn_dp_get_training_status(dp);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "Failed to get training stat %d\n", ret);
		return ret;
	}

	DRM_DEV_DEBUG_KMS(dp->dev, "rate:0x%x, lanes:%d\n", dp->max_rate,
			  dp->max_lanes);

	return 0;
}
/*
 * Start (@active non-zero) or stop (@active zero) the video stream.
 * Returns 0 on success or a negative errno (logged).
 */
int cdn_dp_set_video_status(struct cdn_dp_device *dp, int active)
{
	u8 msg = active ? 1 : 0;
	int ret;

	ret = cdn_dp_mailbox_send(dp, MB_MODULE_ID_DP_TX, DPTX_SET_VIDEO,
				  sizeof(msg), &msg);
	if (ret)
		DRM_DEV_ERROR(dp->dev, "set video status failed: %d\n", ret);

	return ret;
}
/*
 * Build the MSA MISC word from the colour format and depth.
 * val[0] encodes the pixel format/colorimetry, val[1] the bit depth;
 * the *2 and *32 factors below place them at their respective bit
 * positions, with bit 14 flagging Y-only output.
 * NOTE(review): @mode is currently unused, and the u32 result is returned
 * through an int - harmless for the values produced here, but worth
 * confirming if higher bits ever get used.
 */
static int cdn_dp_get_msa_misc(struct video_info *video,
			       struct drm_display_mode *mode)
{
	u32 msa_misc;
	u8 val[2] = {0};
	switch (video->color_fmt) {
	case PXL_RGB:
	case Y_ONLY:
		val[0] = 0;
		break;
	/* set YUV default color space conversion to BT601 */
	case YCBCR_4_4_4:
		val[0] = 6 + BT_601 * 8;
		break;
	case YCBCR_4_2_2:
		val[0] = 5 + BT_601 * 8;
		break;
	case YCBCR_4_2_0:
		val[0] = 5;
		break;
	}
	switch (video->color_depth) {
	case 6:
		val[1] = 0;
		break;
	case 8:
		val[1] = 1;
		break;
	case 10:
		val[1] = 2;
		break;
	case 12:
		val[1] = 3;
		break;
	case 16:
		val[1] = 4;
		break;
	}
	msa_misc = 2 * val[0] + 32 * val[1] +
		   ((video->color_fmt == Y_ONLY) ? (1 << 14) : 0);
	return msa_misc;
}
/*
 * Program the DP framer, MSA and VIF registers from dp->mode and
 * dp->video_info.  Must run after link training so that dp->max_rate and
 * dp->max_lanes hold the negotiated link parameters.
 * Returns 0 on success or a negative errno (logged).
 */
int cdn_dp_config_video(struct cdn_dp_device *dp)
{
	struct video_info *video = &dp->video_info;
	struct drm_display_mode *mode = &dp->mode;
	u64 symbol;
	u32 val, link_rate, rem;
	u8 bit_per_pix, tu_size_reg = TU_SIZE;
	int ret;

	/* YUV422 carries two components per pixel, all others three */
	bit_per_pix = (video->color_fmt == YCBCR_4_2_2) ?
		      (video->color_depth * 2) : (video->color_depth * 3);

	link_rate = dp->max_rate / 1000;

	ret = cdn_dp_reg_write(dp, BND_HSYNC2VSYNC, VIF_BYPASS_INTERLACE);
	if (ret)
		goto err_config_video;

	ret = cdn_dp_reg_write(dp, HSYNC2VSYNC_POL_CTRL, 0);
	if (ret)
		goto err_config_video;

	/*
	 * get a best tu_size and valid symbol:
	 * 1. chose Lclk freq(162Mhz, 270Mhz, 540Mhz), set TU to 32
	 * 2. calculate VS(valid symbol) = TU * Pclk * Bpp / (Lclk * Lanes)
	 * 3. if VS > *.85 or VS < *.1 or VS < 2 or TU < VS + 4, then set
	 *    TU += 2 and repeat 2nd step.
	 */
	do {
		tu_size_reg += 2;
		symbol = (u64)tu_size_reg * mode->clock * bit_per_pix;
		do_div(symbol, dp->max_lanes * link_rate * 8);
		rem = do_div(symbol, 1000);
		if (tu_size_reg > 64) {
			ret = -EINVAL;
			DRM_DEV_ERROR(dp->dev,
				      "tu error, clk:%d, lanes:%d, rate:%d\n",
				      mode->clock, dp->max_lanes, link_rate);
			goto err_config_video;
		}
	} while ((symbol <= 1) || (tu_size_reg - symbol < 4) ||
		 (rem > 850) || (rem < 100));

	val = symbol + (tu_size_reg << 8);
	val |= TU_CNT_RST_EN;
	ret = cdn_dp_reg_write(dp, DP_FRAMER_TU, val);
	if (ret)
		goto err_config_video;

	/* set the FIFO Buffer size */
	val = div_u64(mode->clock * (symbol + 1), 1000) + link_rate;
	val /= (dp->max_lanes * link_rate);
	val = div_u64(8 * (symbol + 1), bit_per_pix) - val;
	val += 2;
	ret = cdn_dp_reg_write(dp, DP_VC_TABLE(15), val);
	/* fix: this error was previously overwritten by the next write */
	if (ret)
		goto err_config_video;

	switch (video->color_depth) {
	case 6:
		val = BCS_6;
		break;
	case 8:
		val = BCS_8;
		break;
	case 10:
		val = BCS_10;
		break;
	case 12:
		val = BCS_12;
		break;
	case 16:
		val = BCS_16;
		break;
	}

	val += video->color_fmt << 8;
	ret = cdn_dp_reg_write(dp, DP_FRAMER_PXL_REPR, val);
	if (ret)
		goto err_config_video;

	val = video->h_sync_polarity ? DP_FRAMER_SP_HSP : 0;
	val |= video->v_sync_polarity ? DP_FRAMER_SP_VSP : 0;
	ret = cdn_dp_reg_write(dp, DP_FRAMER_SP, val);
	if (ret)
		goto err_config_video;

	/* horizontal front/back porch */
	val = (mode->hsync_start - mode->hdisplay) << 16;
	val |= mode->htotal - mode->hsync_end;
	ret = cdn_dp_reg_write(dp, DP_FRONT_BACK_PORCH, val);
	if (ret)
		goto err_config_video;

	val = mode->hdisplay * bit_per_pix / 8;
	ret = cdn_dp_reg_write(dp, DP_BYTE_COUNT, val);
	if (ret)
		goto err_config_video;

	/* main stream attributes: horizontal timings */
	val = mode->htotal | ((mode->htotal - mode->hsync_start) << 16);
	ret = cdn_dp_reg_write(dp, MSA_HORIZONTAL_0, val);
	if (ret)
		goto err_config_video;

	val = mode->hsync_end - mode->hsync_start;
	val |= (mode->hdisplay << 16) | (video->h_sync_polarity << 15);
	ret = cdn_dp_reg_write(dp, MSA_HORIZONTAL_1, val);
	if (ret)
		goto err_config_video;

	/* main stream attributes: vertical timings */
	val = mode->vtotal;
	val |= (mode->vtotal - mode->vsync_start) << 16;
	ret = cdn_dp_reg_write(dp, MSA_VERTICAL_0, val);
	if (ret)
		goto err_config_video;

	val = mode->vsync_end - mode->vsync_start;
	val |= (mode->vdisplay << 16) | (video->v_sync_polarity << 15);
	ret = cdn_dp_reg_write(dp, MSA_VERTICAL_1, val);
	if (ret)
		goto err_config_video;

	val = cdn_dp_get_msa_misc(video, mode);
	ret = cdn_dp_reg_write(dp, MSA_MISC, val);
	if (ret)
		goto err_config_video;

	ret = cdn_dp_reg_write(dp, STREAM_CONFIG, 1);
	if (ret)
		goto err_config_video;

	val = mode->hsync_end - mode->hsync_start;
	val |= mode->hdisplay << 16;
	ret = cdn_dp_reg_write(dp, DP_HORIZONTAL, val);
	if (ret)
		goto err_config_video;

	val = mode->vdisplay;
	val |= (mode->vtotal - mode->vsync_start) << 16;
	ret = cdn_dp_reg_write(dp, DP_VERTICAL_0, val);
	if (ret)
		goto err_config_video;

	val = mode->vtotal;
	ret = cdn_dp_reg_write(dp, DP_VERTICAL_1, val);
	if (ret)
		goto err_config_video;

	ret = cdn_dp_reg_write_bit(dp, DP_VB_ID, 2, 1, 0);

err_config_video:
	if (ret)
		DRM_DEV_ERROR(dp->dev, "config video failed: %d\n", ret);
	return ret;
}
/*
 * Stop audio: disable packetization via the firmware, then directly reset
 * the S/PDIF receiver, audio source, sample-to-packet converter and FIFO.
 * The S/PDIF clock is released only when @audio used the S/PDIF input.
 * Returns 0 on success or a negative errno (logged).
 */
int cdn_dp_audio_stop(struct cdn_dp_device *dp, struct audio_info *audio)
{
	int ret;
	ret = cdn_dp_reg_write(dp, AUDIO_PACK_CONTROL, 0);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "audio stop failed: %d\n", ret);
		return ret;
	}
	writel(0, dp->regs + SPDIF_CTRL_ADDR);
	/* clearn the audio config and reset */
	writel(0, dp->regs + AUDIO_SRC_CNTL);
	writel(0, dp->regs + AUDIO_SRC_CNFG);
	writel(AUDIO_SW_RST, dp->regs + AUDIO_SRC_CNTL);
	writel(0, dp->regs + AUDIO_SRC_CNTL);
	/* reset smpl2pckt component  */
	writel(0, dp->regs + SMPL2PKT_CNTL);
	writel(AUDIO_SW_RST, dp->regs + SMPL2PKT_CNTL);
	writel(0, dp->regs + SMPL2PKT_CNTL);
	/* reset FIFO */
	writel(AUDIO_SW_RST, dp->regs + FIFO_CNTL);
	writel(0, dp->regs + FIFO_CNTL);
	if (audio->format == AFMT_SPDIF)
		clk_disable_unprepare(dp->spdif_clk);
	return 0;
}
/*
 * Set or clear the audio mute flag (bit 4 of DP_VB_ID) via the firmware.
 * Returns 0 on success or a negative errno (logged).
 */
int cdn_dp_audio_mute(struct cdn_dp_device *dp, bool enable)
{
	int ret = cdn_dp_reg_write_bit(dp, DP_VB_ID, 4, 1, enable);

	if (ret)
		DRM_DEV_ERROR(dp->dev, "audio mute failed: %d\n", ret);

	return ret;
}
/*
 * Configure the I2S audio source, the sample-to-packet converter and the
 * per-channel status bits for LPCM input, then start the I2S decoder.
 */
static void cdn_dp_audio_config_i2s(struct cdn_dp_device *dp,
				    struct audio_info *audio)
{
	int sub_pckt_num = 1, i2s_port_en_val = 0xf, i;
	u32 val;

	if (audio->channels == 2) {
		/* a single lane needs smaller sub-packets */
		if (dp->max_lanes == 1)
			sub_pckt_num = 2;
		else
			sub_pckt_num = 4;

		i2s_port_en_val = 1;
	} else if (audio->channels == 4) {
		i2s_port_en_val = 3;
	}

	writel(0x0, dp->regs + SPDIF_CTRL_ADDR);

	writel(SYNC_WR_TO_CH_ZERO, dp->regs + FIFO_CNTL);

	val = MAX_NUM_CH(audio->channels);
	val |= NUM_OF_I2S_PORTS(audio->channels);
	val |= AUDIO_TYPE_LPCM;
	val |= CFG_SUB_PCKT_NUM(sub_pckt_num);
	writel(val, dp->regs + SMPL2PKT_CNFG);

	if (audio->sample_width == 16)
		val = 0;
	else if (audio->sample_width == 24)
		val = 1 << 9;
	else
		val = 2 << 9;

	val |= AUDIO_CH_NUM(audio->channels);
	val |= I2S_DEC_PORT_EN(i2s_port_en_val);
	val |= TRANS_SMPL_WIDTH_32;
	writel(val, dp->regs + AUDIO_SRC_CNFG);

	for (i = 0; i < (audio->channels + 1) / 2; i++) {
		if (audio->sample_width == 16)
			val = (0x02 << 8) | (0x02 << 20);
		else if (audio->sample_width == 24)
			val = (0x0b << 8) | (0x0b << 20);
		else
			/*
			 * Fix: other widths (e.g. 32 bit) previously left
			 * val uninitialized here; program just the channel
			 * numbers in that case.
			 */
			val = 0;

		val |= ((2 * i) << 4) | ((2 * i + 1) << 16);
		writel(val, dp->regs + STTS_BIT_CH(i));
	}

	switch (audio->sample_rate) {
	case 32000:
		val = SAMPLING_FREQ(3) |
		      ORIGINAL_SAMP_FREQ(0xc);
		break;
	case 44100:
		val = SAMPLING_FREQ(0) |
		      ORIGINAL_SAMP_FREQ(0xf);
		break;
	case 48000:
		val = SAMPLING_FREQ(2) |
		      ORIGINAL_SAMP_FREQ(0xd);
		break;
	case 88200:
		val = SAMPLING_FREQ(8) |
		      ORIGINAL_SAMP_FREQ(0x7);
		break;
	case 96000:
		val = SAMPLING_FREQ(0xa) |
		      ORIGINAL_SAMP_FREQ(5);
		break;
	case 176400:
		val = SAMPLING_FREQ(0xc) |
		      ORIGINAL_SAMP_FREQ(3);
		break;
	case 192000:
		val = SAMPLING_FREQ(0xe) |
		      ORIGINAL_SAMP_FREQ(1);
		break;
	default:
		/* fix: don't reuse a stale val for unlisted rates */
		val = 0;
		break;
	}
	val |= 4;
	writel(val, dp->regs + COM_CH_STTS_BITS);

	writel(SMPL2PKT_EN, dp->regs + SMPL2PKT_CNTL);
	writel(I2S_DEC_START, dp->regs + AUDIO_SRC_CNTL);
}
/*
 * Route S/PDIF input (2-channel LPCM) to the sample-to-packet converter
 * and start the S/PDIF receiver clock.
 * NOTE(review): the clock is prepared/enabled before its rate is set to
 * CDN_DP_SPDIF_CLK - confirm this ordering is intentional.
 */
static void cdn_dp_audio_config_spdif(struct cdn_dp_device *dp)
{
	u32 val;
	writel(SYNC_WR_TO_CH_ZERO, dp->regs + FIFO_CNTL);
	val = MAX_NUM_CH(2) | AUDIO_TYPE_LPCM | CFG_SUB_PCKT_NUM(4);
	writel(val, dp->regs + SMPL2PKT_CNFG);
	writel(SMPL2PKT_EN, dp->regs + SMPL2PKT_CNTL);
	val = SPDIF_ENABLE | SPDIF_AVG_SEL | SPDIF_JITTER_BYPASS;
	writel(val, dp->regs + SPDIF_CTRL_ADDR);
	clk_prepare_enable(dp->spdif_clk);
	clk_set_rate(dp->spdif_clk, CDN_DP_SPDIF_CLK);
}
/*
 * Configure the audio path for @audio (I2S or S/PDIF input) and enable
 * audio packetization.
 * Returns 0 on success or a negative errno (logged).
 */
int cdn_dp_audio_config(struct cdn_dp_device *dp, struct audio_info *audio)
{
	int ret;
	/* reset the spdif clk before config */
	if (audio->format == AFMT_SPDIF) {
		reset_control_assert(dp->spdif_rst);
		reset_control_deassert(dp->spdif_rst);
	}
	ret = cdn_dp_reg_write(dp, CM_LANE_CTRL, LANE_REF_CYC);
	if (ret)
		goto err_audio_config;
	ret = cdn_dp_reg_write(dp, CM_CTRL, 0);
	if (ret)
		goto err_audio_config;
	if (audio->format == AFMT_I2S)
		cdn_dp_audio_config_i2s(dp, audio);
	else if (audio->format == AFMT_SPDIF)
		cdn_dp_audio_config_spdif(dp);
	ret = cdn_dp_reg_write(dp, AUDIO_PACK_CONTROL, AUDIO_PACK_EN);
err_audio_config:
	if (ret)
		DRM_DEV_ERROR(dp->dev, "audio config failed: %d\n", ret);
	return ret;
}
| linux-master | drivers/gpu/drm/rockchip/cdn-dp-reg.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
* Author:Mark Yao <[email protected]>
*/
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/delay.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/overflow.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <drm/drm.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_blend.h>
#include <drm/drm_crtc.h>
#include <drm/drm_flip_work.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_self_refresh_helper.h>
#include <drm/drm_vblank.h>
#ifdef CONFIG_DRM_ANALOGIX_DP
#include <drm/bridge/analogix_dp.h>
#endif
#include "rockchip_drm_drv.h"
#include "rockchip_drm_gem.h"
#include "rockchip_drm_fb.h"
#include "rockchip_drm_vop.h"
#include "rockchip_rgb.h"
#define VOP_WIN_SET(vop, win, name, v) \
vop_reg_set(vop, &win->phy->name, win->base, ~0, v, #name)
#define VOP_SCL_SET(vop, win, name, v) \
vop_reg_set(vop, &win->phy->scl->name, win->base, ~0, v, #name)
#define VOP_SCL_SET_EXT(vop, win, name, v) \
vop_reg_set(vop, &win->phy->scl->ext->name, \
win->base, ~0, v, #name)
#define VOP_WIN_YUV2YUV_SET(vop, win_yuv2yuv, name, v) \
do { \
if (win_yuv2yuv && win_yuv2yuv->name.mask) \
vop_reg_set(vop, &win_yuv2yuv->name, 0, ~0, v, #name); \
} while (0)
#define VOP_WIN_YUV2YUV_COEFFICIENT_SET(vop, win_yuv2yuv, name, v) \
do { \
if (win_yuv2yuv && win_yuv2yuv->phy->name.mask) \
vop_reg_set(vop, &win_yuv2yuv->phy->name, win_yuv2yuv->base, ~0, v, #name); \
} while (0)
#define VOP_INTR_SET_MASK(vop, name, mask, v) \
vop_reg_set(vop, &vop->data->intr->name, 0, mask, v, #name)
#define VOP_REG_SET(vop, group, name, v) \
vop_reg_set(vop, &vop->data->group->name, 0, ~0, v, #name)
#define VOP_HAS_REG(vop, group, name) \
(!!(vop->data->group->name.mask))
#define VOP_INTR_SET_TYPE(vop, name, type, v) \
do { \
int i, reg = 0, mask = 0; \
for (i = 0; i < vop->data->intr->nintrs; i++) { \
if (vop->data->intr->intrs[i] & type) { \
reg |= (v) << i; \
mask |= 1 << i; \
} \
} \
VOP_INTR_SET_MASK(vop, name, mask, reg); \
} while (0)
#define VOP_INTR_GET_TYPE(vop, name, type) \
vop_get_intr_type(vop, &vop->data->intr->name, type)
#define VOP_WIN_GET(vop, win, name) \
vop_read_reg(vop, win->base, &win->phy->name)
#define VOP_WIN_HAS_REG(win, name) \
(!!(win->phy->name.mask))
#define VOP_WIN_GET_YRGBADDR(vop, win) \
vop_readl(vop, win->base + win->phy->yrgb_mst.offset)
#define VOP_WIN_TO_INDEX(vop_win) \
((vop_win) - (vop_win)->vop->win)
#define VOP_AFBC_SET(vop, name, v) \
do { \
if ((vop)->data->afbc) \
vop_reg_set((vop), &(vop)->data->afbc->name, \
0, ~0, v, #name); \
} while (0)
#define to_vop(x) container_of(x, struct vop, crtc)
#define to_vop_win(x) container_of(x, struct vop_win, base)
#define AFBC_FMT_RGB565 0x0
#define AFBC_FMT_U8U8U8U8 0x5
#define AFBC_FMT_U8U8U8 0x4
#define AFBC_TILE_16x16 BIT(4)
/*
* The coefficients of the following matrix are all fixed points.
* The format is S2.10 for the 3x3 part of the matrix, and S9.12 for the offsets.
* They are all represented in two's complement.
*/
static const uint32_t bt601_yuv2rgb[] = {
0x4A8, 0x0, 0x662,
0x4A8, 0x1E6F, 0x1CBF,
0x4A8, 0x812, 0x0,
0x321168, 0x0877CF, 0x2EB127
};
/* Bits used with the "pending" flip-work bitmap below. */
enum vop_pending {
	VOP_PENDING_FB_UNREF,
};
/* Per-plane state: the DRM plane plus its static window descriptors. */
struct vop_win {
	struct drm_plane base;
	const struct vop_win_data *data;
	const struct vop_win_yuv2yuv_data *yuv2yuv_data;
	struct vop *vop;
};
struct rockchip_rgb;
struct vop {
	struct drm_crtc crtc;
	struct device *dev;
	struct drm_device *drm_dev;
	bool is_enabled;
	struct completion dsp_hold_completion;
	/* bitmask of currently enabled windows, indexed like win[] */
	unsigned int win_enabled;
	/* protected by dev->event_lock */
	struct drm_pending_vblank_event *event;
	struct drm_flip_work fb_unref_work;
	unsigned long pending;
	struct completion line_flag_completion;
	const struct vop_data *data;
	/* shadow copy of the register file, see vop_reg_set() */
	uint32_t *regsbak;
	void __iomem *regs;
	void __iomem *lut_regs;
	/* physical map length of vop register */
	uint32_t len;
	/* one time only one process allowed to config the register */
	spinlock_t reg_lock;
	/* lock vop irq reg */
	spinlock_t irq_lock;
	/* protects crtc enable/disable */
	struct mutex vop_lock;
	unsigned int irq;
	/* vop AHP clk */
	struct clk *hclk;
	/* vop dclk */
	struct clk *dclk;
	/* vop share memory frequency */
	struct clk *aclk;
	/* vop dclk reset */
	struct reset_control *dclk_rst;
	/* optional internal rgb encoder */
	struct rockchip_rgb *rgb;
	struct vop_win win[];
};
/* Raw 32-bit read of a VOP register at byte @offset. */
static inline uint32_t vop_readl(struct vop *vop, uint32_t offset)
{
	return readl(vop->regs + offset);
}
/* Read the bit field described by @reg, relative to window base @base. */
static inline uint32_t vop_read_reg(struct vop *vop, uint32_t base,
				    const struct vop_reg *reg)
{
	return (vop_readl(vop, base + reg->offset) >> reg->shift) & reg->mask;
}
/*
 * Write @v into the bit field described by @reg (offset by @_offset,
 * field further restricted by @_mask).  Registers without a hardware
 * write-enable mask are read-modify-written through the regsbak shadow
 * copy, so the register itself is never read back.
 */
static void vop_reg_set(struct vop *vop, const struct vop_reg *reg,
			uint32_t _offset, uint32_t _mask, uint32_t v,
			const char *reg_name)
{
	int offset, mask, shift;
	if (!reg || !reg->mask) {
		DRM_DEV_DEBUG(vop->dev, "Warning: not support %s\n", reg_name);
		return;
	}
	offset = reg->offset + _offset;
	mask = reg->mask & _mask;
	shift = reg->shift;
	if (reg->write_mask) {
		/* hardware consumes the mask from the upper half-word */
		v = ((v << shift) & 0xffff) | (mask << (shift + 16));
	} else {
		uint32_t cached_val = vop->regsbak[offset >> 2];
		v = (cached_val & ~(mask << shift)) | ((v & mask) << shift);
		vop->regsbak[offset >> 2] = v;
	}
	if (reg->relaxed)
		writel_relaxed(v, vop->regs + offset);
	else
		writel(v, vop->regs + offset);
}
/*
 * Translate the raw interrupt register bits in @reg into the driver's
 * interrupt-type bitmask, restricted to the types requested in @type.
 */
static inline uint32_t vop_get_intr_type(struct vop *vop,
					 const struct vop_reg *reg, int type)
{
	uint32_t i, ret = 0;
	uint32_t regs = vop_read_reg(vop, 0, reg);
	for (i = 0; i < vop->data->intr->nintrs; i++) {
		if ((type & vop->data->intr->intrs[i]) && (regs & 1 << i))
			ret |= vop->data->intr->intrs[i];
	}
	return ret;
}
/* Latch all pending register writes into the hardware at next vblank. */
static inline void vop_cfg_done(struct vop *vop)
{
	VOP_REG_SET(vop, common, cfg_done, 1);
}
/* Formats whose red/blue components are swapped relative to ARGB order. */
static bool has_rb_swapped(uint32_t format)
{
	return format == DRM_FORMAT_XBGR8888 ||
	       format == DRM_FORMAT_ABGR8888 ||
	       format == DRM_FORMAT_BGR888 ||
	       format == DRM_FORMAT_BGR565;
}

/* Semi-planar YUV formats that store Cr before Cb (swapped U/V order). */
static bool has_uv_swapped(uint32_t format)
{
	return format == DRM_FORMAT_NV21 ||
	       format == DRM_FORMAT_NV61 ||
	       format == DRM_FORMAT_NV42;
}
/*
 * Map a DRM fourcc to the VOP hardware pixel-format code.
 * Returns -EINVAL (through the enum type, kernel style) for unsupported
 * formats - callers must check for a negative value.
 */
static enum vop_data_format vop_convert_format(uint32_t format)
{
	switch (format) {
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
		return VOP_FMT_ARGB8888;
	case DRM_FORMAT_RGB888:
	case DRM_FORMAT_BGR888:
		return VOP_FMT_RGB888;
	case DRM_FORMAT_RGB565:
	case DRM_FORMAT_BGR565:
		return VOP_FMT_RGB565;
	case DRM_FORMAT_NV12:
	case DRM_FORMAT_NV21:
		return VOP_FMT_YUV420SP;
	case DRM_FORMAT_NV16:
	case DRM_FORMAT_NV61:
		return VOP_FMT_YUV422SP;
	case DRM_FORMAT_NV24:
	case DRM_FORMAT_NV42:
		return VOP_FMT_YUV444SP;
	default:
		DRM_ERROR("unsupported format[%08x]\n", format);
		return -EINVAL;
	}
}
/*
 * Map a DRM fourcc to the AFBC decoder's format code.
 * Returns -EINVAL for formats the AFBC path cannot decode.
 */
static int vop_convert_afbc_format(uint32_t format)
{
	switch (format) {
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
		return AFBC_FMT_U8U8U8U8;
	case DRM_FORMAT_RGB888:
	case DRM_FORMAT_BGR888:
		return AFBC_FMT_U8U8U8;
	case DRM_FORMAT_RGB565:
	case DRM_FORMAT_BGR565:
		return AFBC_FMT_RGB565;
	default:
		DRM_DEBUG_KMS("unsupported AFBC format[%08x]\n", format);
		return -EINVAL;
	}
}
/*
 * Compute the fixed-point scale factor for one axis.
 * Horizontal: bicubic for upscale, bilinear for downscale.  Vertical
 * upscale honours @vsu_mode; vertical downscale may additionally skip
 * lines, reported through @vskiplines when the caller provides it.
 * Returns 1.0 in fixed point when no scaling is needed.
 */
static uint16_t scl_vop_cal_scale(enum scale_mode mode, uint32_t src,
				  uint32_t dst, bool is_horizontal,
				  int vsu_mode, int *vskiplines)
{
	uint16_t val = 1 << SCL_FT_DEFAULT_FIXPOINT_SHIFT;
	if (vskiplines)
		*vskiplines = 0;
	if (is_horizontal) {
		if (mode == SCALE_UP)
			val = GET_SCL_FT_BIC(src, dst);
		else if (mode == SCALE_DOWN)
			val = GET_SCL_FT_BILI_DN(src, dst);
	} else {
		if (mode == SCALE_UP) {
			if (vsu_mode == SCALE_UP_BIL)
				val = GET_SCL_FT_BILI_UP(src, dst);
			else
				val = GET_SCL_FT_BIC(src, dst);
		} else if (mode == SCALE_DOWN) {
			if (vskiplines) {
				*vskiplines = scl_get_vskiplines(src, dst);
				val = scl_get_bili_dn_vskip(src, dst,
							    *vskiplines);
			} else {
				val = GET_SCL_FT_BILI_DN(src, dst);
			}
		}
	}
	return val;
}
/*
 * Program the scaler for one window: derive the per-axis scale modes and
 * factors for the Y/RGB plane and, for YUV formats, the subsampled CbCr
 * plane, plus the line-buffer and vertical-scale-up modes.
 * Windows without the extended scaler block only get the simple factor
 * registers.  dst width is capped at 3840.
 */
static void scl_vop_cal_scl_fac(struct vop *vop, const struct vop_win_data *win,
				uint32_t src_w, uint32_t src_h, uint32_t dst_w,
				uint32_t dst_h, const struct drm_format_info *info)
{
	uint16_t yrgb_hor_scl_mode, yrgb_ver_scl_mode;
	uint16_t cbcr_hor_scl_mode = SCALE_NONE;
	uint16_t cbcr_ver_scl_mode = SCALE_NONE;
	bool is_yuv = false;
	/* chroma plane is subsampled by the format's hsub/vsub factors */
	uint16_t cbcr_src_w = src_w / info->hsub;
	uint16_t cbcr_src_h = src_h / info->vsub;
	uint16_t vsu_mode;
	uint16_t lb_mode;
	uint32_t val;
	int vskiplines;
	if (info->is_yuv)
		is_yuv = true;
	if (dst_w > 3840) {
		DRM_DEV_ERROR(vop->dev, "Maximum dst width (3840) exceeded\n");
		return;
	}
	/* simple scaler only: program the two factor registers and return */
	if (!win->phy->scl->ext) {
		VOP_SCL_SET(vop, win, scale_yrgb_x,
			    scl_cal_scale2(src_w, dst_w));
		VOP_SCL_SET(vop, win, scale_yrgb_y,
			    scl_cal_scale2(src_h, dst_h));
		if (is_yuv) {
			VOP_SCL_SET(vop, win, scale_cbcr_x,
				    scl_cal_scale2(cbcr_src_w, dst_w));
			VOP_SCL_SET(vop, win, scale_cbcr_y,
				    scl_cal_scale2(cbcr_src_h, dst_h));
		}
		return;
	}
	yrgb_hor_scl_mode = scl_get_scl_mode(src_w, dst_w);
	yrgb_ver_scl_mode = scl_get_scl_mode(src_h, dst_h);
	if (is_yuv) {
		cbcr_hor_scl_mode = scl_get_scl_mode(cbcr_src_w, dst_w);
		cbcr_ver_scl_mode = scl_get_scl_mode(cbcr_src_h, dst_h);
		if (cbcr_hor_scl_mode == SCALE_DOWN)
			lb_mode = scl_vop_cal_lb_mode(dst_w, true);
		else
			lb_mode = scl_vop_cal_lb_mode(cbcr_src_w, true);
	} else {
		if (yrgb_hor_scl_mode == SCALE_DOWN)
			lb_mode = scl_vop_cal_lb_mode(dst_w, false);
		else
			lb_mode = scl_vop_cal_lb_mode(src_w, false);
	}
	VOP_SCL_SET_EXT(vop, win, lb_mode, lb_mode);
	if (lb_mode == LB_RGB_3840X2) {
		/* the widest line-buffer mode forbids vertical scaling */
		if (yrgb_ver_scl_mode != SCALE_NONE) {
			DRM_DEV_ERROR(vop->dev, "not allow yrgb ver scale\n");
			return;
		}
		if (cbcr_ver_scl_mode != SCALE_NONE) {
			DRM_DEV_ERROR(vop->dev, "not allow cbcr ver scale\n");
			return;
		}
		vsu_mode = SCALE_UP_BIL;
	} else if (lb_mode == LB_RGB_2560X4) {
		vsu_mode = SCALE_UP_BIL;
	} else {
		vsu_mode = SCALE_UP_BIC;
	}
	val = scl_vop_cal_scale(yrgb_hor_scl_mode, src_w, dst_w,
				true, 0, NULL);
	VOP_SCL_SET(vop, win, scale_yrgb_x, val);
	val = scl_vop_cal_scale(yrgb_ver_scl_mode, src_h, dst_h,
				false, vsu_mode, &vskiplines);
	VOP_SCL_SET(vop, win, scale_yrgb_y, val);
	VOP_SCL_SET_EXT(vop, win, vsd_yrgb_gt4, vskiplines == 4);
	VOP_SCL_SET_EXT(vop, win, vsd_yrgb_gt2, vskiplines == 2);
	VOP_SCL_SET_EXT(vop, win, yrgb_hor_scl_mode, yrgb_hor_scl_mode);
	VOP_SCL_SET_EXT(vop, win, yrgb_ver_scl_mode, yrgb_ver_scl_mode);
	VOP_SCL_SET_EXT(vop, win, yrgb_hsd_mode, SCALE_DOWN_BIL);
	VOP_SCL_SET_EXT(vop, win, yrgb_vsd_mode, SCALE_DOWN_BIL);
	VOP_SCL_SET_EXT(vop, win, yrgb_vsu_mode, vsu_mode);
	if (is_yuv) {
		val = scl_vop_cal_scale(cbcr_hor_scl_mode, cbcr_src_w,
					dst_w, true, 0, NULL);
		VOP_SCL_SET(vop, win, scale_cbcr_x, val);
		val = scl_vop_cal_scale(cbcr_ver_scl_mode, cbcr_src_h,
					dst_h, false, vsu_mode, &vskiplines);
		VOP_SCL_SET(vop, win, scale_cbcr_y, val);
		VOP_SCL_SET_EXT(vop, win, vsd_cbcr_gt4, vskiplines == 4);
		VOP_SCL_SET_EXT(vop, win, vsd_cbcr_gt2, vskiplines == 2);
		VOP_SCL_SET_EXT(vop, win, cbcr_hor_scl_mode, cbcr_hor_scl_mode);
		VOP_SCL_SET_EXT(vop, win, cbcr_ver_scl_mode, cbcr_ver_scl_mode);
		VOP_SCL_SET_EXT(vop, win, cbcr_hsd_mode, SCALE_DOWN_BIL);
		VOP_SCL_SET_EXT(vop, win, cbcr_vsd_mode, SCALE_DOWN_BIL);
		VOP_SCL_SET_EXT(vop, win, cbcr_vsu_mode, vsu_mode);
	}
}
/* Clear then unmask the DSP_HOLD_VALID interrupt (irq_lock held). */
static void vop_dsp_hold_valid_irq_enable(struct vop *vop)
{
	unsigned long flags;
	if (WARN_ON(!vop->is_enabled))
		return;
	spin_lock_irqsave(&vop->irq_lock, flags);
	VOP_INTR_SET_TYPE(vop, clear, DSP_HOLD_VALID_INTR, 1);
	VOP_INTR_SET_TYPE(vop, enable, DSP_HOLD_VALID_INTR, 1);
	spin_unlock_irqrestore(&vop->irq_lock, flags);
}
/* Mask the DSP_HOLD_VALID interrupt again. */
static void vop_dsp_hold_valid_irq_disable(struct vop *vop)
{
	unsigned long flags;
	if (WARN_ON(!vop->is_enabled))
		return;
	spin_lock_irqsave(&vop->irq_lock, flags);
	VOP_INTR_SET_TYPE(vop, enable, DSP_HOLD_VALID_INTR, 0);
	spin_unlock_irqrestore(&vop->irq_lock, flags);
}
/*
* (1) each frame starts at the start of the Vsync pulse which is signaled by
* the "FRAME_SYNC" interrupt.
* (2) the active data region of each frame ends at dsp_vact_end
* (3) we should program this same number (dsp_vact_end) into dsp_line_frag_num,
* to get "LINE_FLAG" interrupt at the end of the active on screen data.
*
* VOP_INTR_CTRL0.dsp_line_frag_num = VOP_DSP_VACT_ST_END.dsp_vact_end
* Interrupts
* LINE_FLAG -------------------------------+
* FRAME_SYNC ----+ |
* | |
* v v
* | Vsync | Vbp | Vactive | Vfp |
* ^ ^ ^ ^
* | | | |
* | | | |
* dsp_vs_end ------------+ | | | VOP_DSP_VTOTAL_VS_END
* dsp_vact_start --------------+ | | VOP_DSP_VACT_ST_END
* dsp_vact_end ----------------------------+ | VOP_DSP_VACT_ST_END
* dsp_total -------------------------------------+ VOP_DSP_VTOTAL_VS_END
*/
/* Return true if the LINE_FLAG interrupt is currently unmasked. */
static bool vop_line_flag_irq_is_enabled(struct vop *vop)
{
	uint32_t line_flag_irq;
	unsigned long flags;
	spin_lock_irqsave(&vop->irq_lock, flags);
	line_flag_irq = VOP_INTR_GET_TYPE(vop, enable, LINE_FLAG_INTR);
	spin_unlock_irqrestore(&vop->irq_lock, flags);
	return !!line_flag_irq;
}
/* Clear then unmask the LINE_FLAG interrupt (see timing diagram above). */
static void vop_line_flag_irq_enable(struct vop *vop)
{
	unsigned long flags;
	if (WARN_ON(!vop->is_enabled))
		return;
	spin_lock_irqsave(&vop->irq_lock, flags);
	VOP_INTR_SET_TYPE(vop, clear, LINE_FLAG_INTR, 1);
	VOP_INTR_SET_TYPE(vop, enable, LINE_FLAG_INTR, 1);
	spin_unlock_irqrestore(&vop->irq_lock, flags);
}
/* Mask the LINE_FLAG interrupt again. */
static void vop_line_flag_irq_disable(struct vop *vop)
{
	unsigned long flags;
	if (WARN_ON(!vop->is_enabled))
		return;
	spin_lock_irqsave(&vop->irq_lock, flags);
	VOP_INTR_SET_TYPE(vop, enable, LINE_FLAG_INTR, 0);
	spin_unlock_irqrestore(&vop->irq_lock, flags);
}
/*
 * Enable the VOP core clocks (hclk, then aclk).  On failure nothing is
 * left enabled.  Returns 0 or the negative errno from clk_enable().
 */
static int vop_core_clks_enable(struct vop *vop)
{
	int ret;

	ret = clk_enable(vop->hclk);
	if (ret < 0)
		return ret;

	ret = clk_enable(vop->aclk);
	if (ret < 0) {
		/* Roll back hclk so both clocks stay balanced. */
		clk_disable(vop->hclk);
		return ret;
	}

	return 0;
}
/* Disable the core clocks in reverse order of vop_core_clks_enable(). */
static void vop_core_clks_disable(struct vop *vop)
{
	clk_disable(vop->aclk);
	clk_disable(vop->hclk);
}
/*
 * Disable one hardware window and clear its bit in the software enable
 * mask.  Caller must hold vop->reg_lock.
 */
static void vop_win_disable(struct vop *vop, const struct vop_win *vop_win)
{
	const struct vop_win_data *win = vop_win->data;

	/* Also park the extended scaler so it cannot keep fetching. */
	if (win->phy->scl && win->phy->scl->ext) {
		VOP_SCL_SET_EXT(vop, win, yrgb_hor_scl_mode, SCALE_NONE);
		VOP_SCL_SET_EXT(vop, win, yrgb_ver_scl_mode, SCALE_NONE);
		VOP_SCL_SET_EXT(vop, win, cbcr_hor_scl_mode, SCALE_NONE);
		VOP_SCL_SET_EXT(vop, win, cbcr_ver_scl_mode, SCALE_NONE);
	}

	VOP_WIN_SET(vop, win, enable, 0);
	vop->win_enabled &= ~BIT(VOP_WIN_TO_INDEX(vop_win));
}
/*
 * Power up the VOP: runtime PM, core and pixel clocks, IOMMU attach,
 * then restore the whole register file from the in-memory shadow
 * (regsbak).  The controller is left in standby; the caller
 * (vop_crtc_atomic_enable) clears standby once the mode registers are
 * programmed.  Returns 0 or a negative errno.
 */
static int vop_enable(struct drm_crtc *crtc, struct drm_crtc_state *old_state)
{
	struct vop *vop = to_vop(crtc);
	int ret, i;

	ret = pm_runtime_resume_and_get(vop->dev);
	if (ret < 0) {
		DRM_DEV_ERROR(vop->dev, "failed to get pm runtime: %d\n", ret);
		return ret;
	}

	ret = vop_core_clks_enable(vop);
	if (WARN_ON(ret < 0))
		goto err_put_pm_runtime;

	ret = clk_enable(vop->dclk);
	if (WARN_ON(ret < 0))
		goto err_disable_core;

	/*
	 * Slave iommu shares power, irq and clock with vop.  It was associated
	 * automatically with this master device via common driver code.
	 * Now that we have enabled the clock we attach it to the shared drm
	 * mapping.
	 */
	ret = rockchip_drm_dma_attach_device(vop->drm_dev, vop->dev);
	if (ret) {
		DRM_DEV_ERROR(vop->dev,
			      "failed to attach dma mapping, %d\n", ret);
		goto err_disable_dclk;
	}

	spin_lock(&vop->reg_lock);
	/* Replay the shadow copy into the (power-lost) register file. */
	for (i = 0; i < vop->len; i += 4)
		writel_relaxed(vop->regsbak[i / 4], vop->regs + i);

	/*
	 * We need to make sure that all windows are disabled before we
	 * enable the crtc.  Otherwise we might try to scan from a destroyed
	 * buffer later.
	 *
	 * In the case of enable-after-PSR, we don't need to worry about this
	 * case since the buffer is guaranteed to be valid and disabling the
	 * window will result in screen glitches on PSR exit.
	 */
	if (!old_state || !old_state->self_refresh_active) {
		for (i = 0; i < vop->data->win_size; i++) {
			struct vop_win *vop_win = &vop->win[i];

			vop_win_disable(vop, vop_win);
		}
	}

	if (vop->data->afbc) {
		struct rockchip_crtc_state *s;
		/*
		 * Disable AFBC and forget there was a vop window with AFBC
		 */
		VOP_AFBC_SET(vop, enable, 0);
		s = to_rockchip_crtc_state(crtc->state);
		s->enable_afbc = false;
	}

	vop_cfg_done(vop);

	spin_unlock(&vop->reg_lock);

	/*
	 * At here, vop clock & iommu is enable, R/W vop regs would be safe.
	 */
	vop->is_enabled = true;

	spin_lock(&vop->reg_lock);
	/* Stay in standby until the mode is fully programmed. */
	VOP_REG_SET(vop, common, standby, 1);
	spin_unlock(&vop->reg_lock);

	drm_crtc_vblank_on(crtc);

	return 0;

err_disable_dclk:
	clk_disable(vop->dclk);
err_disable_core:
	vop_core_clks_disable(vop);
err_put_pm_runtime:
	pm_runtime_put_sync(vop->dev);
	return ret;
}
/*
 * Mirror the software window-enable mask into hardware.  With
 * @enabled false every window is blanked; with @enabled true only the
 * windows recorded in vop->win_enabled are switched back on.
 */
static void rockchip_drm_set_win_enabled(struct drm_crtc *crtc, bool enabled)
{
	struct vop *vop = to_vop(crtc);
	int idx;

	spin_lock(&vop->reg_lock);

	for (idx = 0; idx < vop->data->win_size; idx++) {
		const struct vop_win_data *win = vop->win[idx].data;
		bool on = enabled && (vop->win_enabled & BIT(idx));

		VOP_WIN_SET(vop, win, enable, on);
	}

	vop_cfg_done(vop);

	spin_unlock(&vop->reg_lock);
}
/*
 * Shut the CRTC down.  For a self-refresh (PSR) transition only the
 * windows are blanked and the controller keeps scanning; for a real
 * disable we enter standby, wait for the DSP_HOLD_VALID interrupt that
 * signals the current frame finished, then detach the IOMMU and drop
 * clocks and runtime PM.
 */
static void vop_crtc_atomic_disable(struct drm_crtc *crtc,
				    struct drm_atomic_state *state)
{
	struct vop *vop = to_vop(crtc);

	WARN_ON(vop->event);

	/*
	 * Entering self refresh: blank the windows but keep the controller
	 * running so PSR exit is glitch free.  (The two back-to-back checks
	 * of self_refresh_active are folded into one guarded block.)
	 */
	if (crtc->state->self_refresh_active) {
		rockchip_drm_set_win_enabled(crtc, false);
		goto out;
	}

	mutex_lock(&vop->vop_lock);

	drm_crtc_vblank_off(crtc);

	/*
	 * Vop standby will take effect at end of current frame,
	 * if dsp hold valid irq happen, it means standby complete.
	 *
	 * we must wait standby complete when we want to disable aclk,
	 * if not, memory bus maybe dead.
	 */
	reinit_completion(&vop->dsp_hold_completion);
	vop_dsp_hold_valid_irq_enable(vop);

	spin_lock(&vop->reg_lock);

	VOP_REG_SET(vop, common, standby, 1);

	spin_unlock(&vop->reg_lock);

	if (!wait_for_completion_timeout(&vop->dsp_hold_completion,
					 msecs_to_jiffies(200)))
		WARN(1, "%s: timed out waiting for DSP hold", crtc->name);

	vop_dsp_hold_valid_irq_disable(vop);

	vop->is_enabled = false;

	/*
	 * vop standby complete, so iommu detach is safe.
	 */
	rockchip_drm_dma_detach_device(vop->drm_dev, vop->dev);

	clk_disable(vop->dclk);
	vop_core_clks_disable(vop);
	pm_runtime_put(vop->dev);

	mutex_unlock(&vop->vop_lock);

out:
	/* Complete any pending flip event ourselves: vblanks are off now. */
	if (crtc->state->event && !crtc->state->active) {
		spin_lock_irq(&crtc->dev->event_lock);
		drm_crtc_send_vblank_event(crtc, crtc->state->event);
		spin_unlock_irq(&crtc->dev->event_lock);

		crtc->state->event = NULL;
	}
}
/* drm_plane_funcs.destroy: release the DRM core plane resources. */
static void vop_plane_destroy(struct drm_plane *plane)
{
	drm_plane_cleanup(plane);
}
/* True when @modifier is the single AFBC layout this driver supports. */
static inline bool rockchip_afbc(u64 modifier)
{
	return modifier == ROCKCHIP_AFBC_MOD;
}
/*
 * drm_plane_funcs.format_mod_supported: accept LINEAR for every format
 * and the Rockchip AFBC modifier only for formats the AFBC decoder can
 * handle; reject everything else.
 */
static bool rockchip_mod_supported(struct drm_plane *plane,
				   u32 format, u64 modifier)
{
	if (modifier == DRM_FORMAT_MOD_LINEAR)
		return true;

	if (rockchip_afbc(modifier))
		return vop_convert_afbc_format(format) >= 0;

	DRM_DEBUG_KMS("Unsupported format modifier 0x%llx\n", modifier);

	return false;
}
/*
 * drm_plane_helper_funcs.atomic_check: validate scaling limits, pixel
 * format, YUV alignment/rotation constraints and AFBC restrictions for
 * the proposed plane state.  Returns 0 on success or -EINVAL.
 */
static int vop_plane_atomic_check(struct drm_plane *plane,
				  struct drm_atomic_state *state)
{
	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
										 plane);
	struct drm_crtc *crtc = new_plane_state->crtc;
	struct drm_crtc_state *crtc_state;
	struct drm_framebuffer *fb = new_plane_state->fb;
	struct vop_win *vop_win = to_vop_win(plane);
	const struct vop_win_data *win = vop_win->data;
	int ret;
	/* Windows without a scaler must be used 1:1. */
	int min_scale = win->phy->scl ? FRAC_16_16(1, 8) :
					DRM_PLANE_NO_SCALING;
	int max_scale = win->phy->scl ? FRAC_16_16(8, 1) :
					DRM_PLANE_NO_SCALING;

	/* A plane being disabled needs no further validation. */
	if (!crtc || WARN_ON(!fb))
		return 0;

	crtc_state = drm_atomic_get_existing_crtc_state(state,
							crtc);
	if (WARN_ON(!crtc_state))
		return -EINVAL;

	ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state,
						  min_scale, max_scale,
						  true, true);
	if (ret)
		return ret;

	if (!new_plane_state->visible)
		return 0;

	ret = vop_convert_format(fb->format->format);
	if (ret < 0)
		return ret;

	/*
	 * Src.x1 can be odd when do clip, but yuv plane start point
	 * need align with 2 pixel.
	 */
	if (fb->format->is_yuv && ((new_plane_state->src.x1 >> 16) % 2)) {
		DRM_DEBUG_KMS("Invalid Source: Yuv format not support odd xpos\n");
		return -EINVAL;
	}

	if (fb->format->is_yuv && new_plane_state->rotation & DRM_MODE_REFLECT_Y) {
		DRM_DEBUG_KMS("Invalid Source: Yuv format does not support this rotation\n");
		return -EINVAL;
	}

	if (rockchip_afbc(fb->modifier)) {
		struct vop *vop = to_vop(crtc);

		if (!vop->data->afbc) {
			DRM_DEBUG_KMS("vop does not support AFBC\n");
			return -EINVAL;
		}

		ret = vop_convert_afbc_format(fb->format->format);
		if (ret < 0)
			return ret;

		/* AFBC surfaces must start at the framebuffer origin. */
		if (new_plane_state->src.x1 || new_plane_state->src.y1) {
			DRM_DEBUG_KMS("AFBC does not support offset display, " \
				      "xpos=%d, ypos=%d, offset=%d\n",
				      new_plane_state->src.x1, new_plane_state->src.y1,
				      fb->offsets[0]);
			return -EINVAL;
		}

		if (new_plane_state->rotation && new_plane_state->rotation != DRM_MODE_ROTATE_0) {
			DRM_DEBUG_KMS("No rotation support in AFBC, rotation=%d\n",
				      new_plane_state->rotation);
			return -EINVAL;
		}
	}

	return 0;
}
/*
 * drm_plane_helper_funcs.atomic_disable: turn the hardware window off.
 * The vop pointer is derived only after the NULL-crtc guard so we never
 * apply container_of() arithmetic to a NULL pointer.
 */
static void vop_plane_atomic_disable(struct drm_plane *plane,
				     struct drm_atomic_state *state)
{
	struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
									   plane);
	struct vop_win *vop_win = to_vop_win(plane);
	struct vop *vop;

	/* Plane was not on any CRTC: nothing to disable. */
	if (!old_state->crtc)
		return;

	vop = to_vop(old_state->crtc);

	spin_lock(&vop->reg_lock);

	vop_win_disable(vop, vop_win);

	spin_unlock(&vop->reg_lock);
}
/*
 * drm_plane_helper_funcs.atomic_update: program one hardware window
 * from the committed plane state - scanout addresses (Y/RGB and UV),
 * geometry, format, mirroring, scaling and per-pixel alpha.  All
 * register writes happen under reg_lock; they latch on the next
 * vop_cfg_done() issued by the CRTC flush.
 */
static void vop_plane_atomic_update(struct drm_plane *plane,
				    struct drm_atomic_state *state)
{
	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
									   plane);
	struct drm_crtc *crtc = new_state->crtc;
	struct vop_win *vop_win = to_vop_win(plane);
	const struct vop_win_data *win = vop_win->data;
	const struct vop_win_yuv2yuv_data *win_yuv2yuv = vop_win->yuv2yuv_data;
	struct vop *vop = to_vop(new_state->crtc);
	struct drm_framebuffer *fb = new_state->fb;
	unsigned int actual_w, actual_h;
	unsigned int dsp_stx, dsp_sty;
	uint32_t act_info, dsp_info, dsp_st;
	struct drm_rect *src = &new_state->src;
	struct drm_rect *dest = &new_state->dst;
	struct drm_gem_object *obj, *uv_obj;
	struct rockchip_gem_object *rk_obj, *rk_uv_obj;
	unsigned long offset;
	dma_addr_t dma_addr;
	uint32_t val;
	bool rb_swap, uv_swap;
	int win_index = VOP_WIN_TO_INDEX(vop_win);
	int format;
	int is_yuv = fb->format->is_yuv;
	int i;

	/*
	 * can't update plane when vop is disabled.
	 */
	if (WARN_ON(!crtc))
		return;

	if (WARN_ON(!vop->is_enabled))
		return;

	if (!new_state->visible) {
		vop_plane_atomic_disable(plane, state);
		return;
	}

	obj = fb->obj[0];
	rk_obj = to_rockchip_obj(obj);

	/* src/dst rectangles are 16.16 fixed point; sizes are reg - 1. */
	actual_w = drm_rect_width(src) >> 16;
	actual_h = drm_rect_height(src) >> 16;
	act_info = (actual_h - 1) << 16 | ((actual_w - 1) & 0xffff);

	dsp_info = (drm_rect_height(dest) - 1) << 16;
	dsp_info |= (drm_rect_width(dest) - 1) & 0xffff;

	/* Display start is measured from the start of the blanking period. */
	dsp_stx = dest->x1 + crtc->mode.htotal - crtc->mode.hsync_start;
	dsp_sty = dest->y1 + crtc->mode.vtotal - crtc->mode.vsync_start;
	dsp_st = dsp_sty << 16 | (dsp_stx & 0xffff);

	offset = (src->x1 >> 16) * fb->format->cpp[0];
	offset += (src->y1 >> 16) * fb->pitches[0];
	dma_addr = rk_obj->dma_addr + offset + fb->offsets[0];

	/*
	 * For y-mirroring we need to move address
	 * to the beginning of the last line.
	 */
	if (new_state->rotation & DRM_MODE_REFLECT_Y)
		dma_addr += (actual_h - 1) * fb->pitches[0];

	format = vop_convert_format(fb->format->format);

	spin_lock(&vop->reg_lock);

	if (rockchip_afbc(fb->modifier)) {
		int afbc_format = vop_convert_afbc_format(fb->format->format);

		VOP_AFBC_SET(vop, format, afbc_format | AFBC_TILE_16x16);
		VOP_AFBC_SET(vop, hreg_block_split, 0);
		VOP_AFBC_SET(vop, win_sel, VOP_WIN_TO_INDEX(vop_win));
		VOP_AFBC_SET(vop, hdr_ptr, dma_addr);
		VOP_AFBC_SET(vop, pic_size, act_info);
	}

	VOP_WIN_SET(vop, win, format, format);
	/* Virtual width registers are in 32-bit words, not bytes. */
	VOP_WIN_SET(vop, win, yrgb_vir, DIV_ROUND_UP(fb->pitches[0], 4));
	VOP_WIN_SET(vop, win, yrgb_mst, dma_addr);
	VOP_WIN_YUV2YUV_SET(vop, win_yuv2yuv, y2r_en, is_yuv);
	VOP_WIN_SET(vop, win, y_mir_en,
		    (new_state->rotation & DRM_MODE_REFLECT_Y) ? 1 : 0);
	VOP_WIN_SET(vop, win, x_mir_en,
		    (new_state->rotation & DRM_MODE_REFLECT_X) ? 1 : 0);

	if (is_yuv) {
		int hsub = fb->format->hsub;
		int vsub = fb->format->vsub;
		int bpp = fb->format->cpp[1];

		uv_obj = fb->obj[1];
		rk_uv_obj = to_rockchip_obj(uv_obj);

		/* Chroma plane offset honours subsampling factors. */
		offset = (src->x1 >> 16) * bpp / hsub;
		offset += (src->y1 >> 16) * fb->pitches[1] / vsub;

		dma_addr = rk_uv_obj->dma_addr + offset + fb->offsets[1];
		VOP_WIN_SET(vop, win, uv_vir, DIV_ROUND_UP(fb->pitches[1], 4));
		VOP_WIN_SET(vop, win, uv_mst, dma_addr);

		for (i = 0; i < NUM_YUV2YUV_COEFFICIENTS; i++) {
			VOP_WIN_YUV2YUV_COEFFICIENT_SET(vop,
							win_yuv2yuv,
							y2r_coefficients[i],
							bt601_yuv2rgb[i]);
		}

		uv_swap = has_uv_swapped(fb->format->format);
		VOP_WIN_SET(vop, win, uv_swap, uv_swap);
	}

	if (win->phy->scl)
		scl_vop_cal_scl_fac(vop, win, actual_w, actual_h,
				    drm_rect_width(dest), drm_rect_height(dest),
				    fb->format);

	VOP_WIN_SET(vop, win, act_info, act_info);
	VOP_WIN_SET(vop, win, dsp_info, dsp_info);
	VOP_WIN_SET(vop, win, dsp_st, dsp_st);

	rb_swap = has_rb_swapped(fb->format->format);
	VOP_WIN_SET(vop, win, rb_swap, rb_swap);

	/*
	 * Blending win0 with the background color doesn't seem to work
	 * correctly. We only get the background color, no matter the contents
	 * of the win0 framebuffer.  However, blending pre-multiplied color
	 * with the default opaque black default background color is a no-op,
	 * so we can just disable blending to get the correct result.
	 */
	if (fb->format->has_alpha && win_index > 0) {
		VOP_WIN_SET(vop, win, dst_alpha_ctl,
			    DST_FACTOR_M0(ALPHA_SRC_INVERSE));
		val = SRC_ALPHA_EN(1) | SRC_COLOR_M0(ALPHA_SRC_PRE_MUL) |
			SRC_ALPHA_M0(ALPHA_STRAIGHT) |
			SRC_BLEND_M0(ALPHA_PER_PIX) |
			SRC_ALPHA_CAL_M0(ALPHA_NO_SATURATION) |
			SRC_FACTOR_M0(ALPHA_ONE);
		VOP_WIN_SET(vop, win, src_alpha_ctl, val);

		VOP_WIN_SET(vop, win, alpha_pre_mul, ALPHA_SRC_PRE_MUL);
		VOP_WIN_SET(vop, win, alpha_mode, ALPHA_PER_PIX);
		VOP_WIN_SET(vop, win, alpha_en, 1);
	} else {
		VOP_WIN_SET(vop, win, src_alpha_ctl, SRC_ALPHA_EN(0));
		VOP_WIN_SET(vop, win, alpha_en, 0);
	}

	VOP_WIN_SET(vop, win, enable, 1);
	vop->win_enabled |= BIT(win_index);
	spin_unlock(&vop->reg_lock);
}
/*
 * drm_plane_helper_funcs.atomic_async_check: async updates are only
 * allowed for the cursor plane, and only within the normal scaling
 * limits.  Returns 0 if the fast path may be used, -EINVAL otherwise.
 */
static int vop_plane_atomic_async_check(struct drm_plane *plane,
					struct drm_atomic_state *state)
{
	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
										 plane);
	struct vop_win *vop_win = to_vop_win(plane);
	const struct vop_win_data *win = vop_win->data;
	int min_scale = win->phy->scl ? FRAC_16_16(1, 8) :
					DRM_PLANE_NO_SCALING;
	int max_scale = win->phy->scl ? FRAC_16_16(8, 1) :
					DRM_PLANE_NO_SCALING;
	struct drm_crtc_state *crtc_state;

	/* Only the cursor plane, already bound to a CRTC/fb, is eligible. */
	if (plane != new_plane_state->crtc->cursor)
		return -EINVAL;

	if (!plane->state)
		return -EINVAL;

	if (!plane->state->fb)
		return -EINVAL;

	if (state)
		crtc_state = drm_atomic_get_existing_crtc_state(state,
								new_plane_state->crtc);
	else /* Special case for asynchronous cursor updates. */
		crtc_state = plane->crtc->state;

	return drm_atomic_helper_check_plane_state(plane->state, crtc_state,
						   min_scale, max_scale,
						   true, true);
}
/*
 * drm_plane_helper_funcs.atomic_async_update: fast cursor path.  Copies
 * the new coordinates/fb into the current plane state, reprograms the
 * window immediately, and defers the old framebuffer's unref to a flip
 * worker because the hardware may still be scanning it out.
 */
static void vop_plane_atomic_async_update(struct drm_plane *plane,
					  struct drm_atomic_state *state)
{
	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
									   plane);
	struct vop *vop = to_vop(plane->state->crtc);
	struct drm_framebuffer *old_fb = plane->state->fb;

	plane->state->crtc_x = new_state->crtc_x;
	plane->state->crtc_y = new_state->crtc_y;
	plane->state->crtc_h = new_state->crtc_h;
	plane->state->crtc_w = new_state->crtc_w;
	plane->state->src_x = new_state->src_x;
	plane->state->src_y = new_state->src_y;
	plane->state->src_h = new_state->src_h;
	plane->state->src_w = new_state->src_w;
	swap(plane->state->fb, new_state->fb);

	if (vop->is_enabled) {
		vop_plane_atomic_update(plane, state);
		spin_lock(&vop->reg_lock);
		vop_cfg_done(vop);
		spin_unlock(&vop->reg_lock);

		/*
		 * A scanout can still be occurring, so we can't drop the
		 * reference to the old framebuffer.  To solve this we get a
		 * reference to old_fb and set a worker to release it later.
		 * FIXME: if we perform 500 async_update calls before the
		 * vblank, then we can have 500 different framebuffers waiting
		 * to be released.
		 */
		if (old_fb && plane->state->fb != old_fb) {
			drm_framebuffer_get(old_fb);
			WARN_ON(drm_crtc_vblank_get(plane->state->crtc) != 0);
			drm_flip_work_queue(&vop->fb_unref_work, old_fb);
			set_bit(VOP_PENDING_FB_UNREF, &vop->pending);
		}
	}
}
/* Atomic helper hooks for VOP planes (sync and async cursor paths). */
static const struct drm_plane_helper_funcs plane_helper_funcs = {
	.atomic_check = vop_plane_atomic_check,
	.atomic_update = vop_plane_atomic_update,
	.atomic_disable = vop_plane_atomic_disable,
	.atomic_async_check = vop_plane_atomic_async_check,
	.atomic_async_update = vop_plane_atomic_async_update,
};
/* Core plane vtable; mostly stock atomic helpers plus modifier check. */
static const struct drm_plane_funcs vop_plane_funcs = {
	.update_plane	= drm_atomic_helper_update_plane,
	.disable_plane	= drm_atomic_helper_disable_plane,
	.destroy = vop_plane_destroy,
	.reset = drm_atomic_helper_plane_reset,
	.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
	.format_mod_supported = rockchip_mod_supported,
};
/*
 * drm_crtc_funcs.enable_vblank: unmask the frame-start (FS) interrupt,
 * clearing any stale status first.  Fails with -EPERM if the VOP is
 * powered down.
 */
static int vop_crtc_enable_vblank(struct drm_crtc *crtc)
{
	struct vop *vop = to_vop(crtc);
	unsigned long irqflags;

	if (WARN_ON(!vop->is_enabled))
		return -EPERM;

	spin_lock_irqsave(&vop->irq_lock, irqflags);

	VOP_INTR_SET_TYPE(vop, clear, FS_INTR, 1);
	VOP_INTR_SET_TYPE(vop, enable, FS_INTR, 1);

	spin_unlock_irqrestore(&vop->irq_lock, irqflags);

	return 0;
}
/* drm_crtc_funcs.disable_vblank: mask the frame-start (FS) interrupt. */
static void vop_crtc_disable_vblank(struct drm_crtc *crtc)
{
	struct vop *vop = to_vop(crtc);
	unsigned long irqflags;

	if (WARN_ON(!vop->is_enabled))
		return;

	spin_lock_irqsave(&vop->irq_lock, irqflags);

	VOP_INTR_SET_TYPE(vop, enable, FS_INTR, 0);

	spin_unlock_irqrestore(&vop->irq_lock, irqflags);
}
/*
 * drm_crtc_helper_funcs.mode_valid: reject modes wider than the
 * hardware maximum; max_output.width == 0 means "no limit".
 * NOTE(review): only hdisplay is validated here - presumably
 * max_output.height should bound vdisplay the same way; confirm
 * against the per-SoC vop_data tables.
 */
static enum drm_mode_status vop_crtc_mode_valid(struct drm_crtc *crtc,
						const struct drm_display_mode *mode)
{
	struct vop *vop = to_vop(crtc);

	if (vop->data->max_output.width && mode->hdisplay > vop->data->max_output.width)
		return MODE_BAD_HVALUE;

	return MODE_OK;
}
/*
 * drm_crtc_helper_funcs.mode_fixup: snap the requested pixel clock to
 * what the clock framework can actually produce, rounding up so a
 * slightly-too-high request still lands on the intended rate.
 */
static bool vop_crtc_mode_fixup(struct drm_crtc *crtc,
				const struct drm_display_mode *mode,
				struct drm_display_mode *adjusted_mode)
{
	struct vop *vop = to_vop(crtc);
	unsigned long rate;

	/*
	 * Clock craziness.
	 *
	 * Key points:
	 *
	 * - DRM works in kHz.
	 * - Clock framework works in Hz.
	 * - Rockchip's clock driver picks the clock rate that is the
	 *   same _OR LOWER_ than the one requested.
	 *
	 * Action plan:
	 *
	 * 1. Try to set the exact rate first, and confirm the clock framework
	 *    can provide it.
	 *
	 * 2. If the clock framework cannot provide the exact rate, we should
	 *    add 999 Hz to the requested rate.  That way if the clock we need
	 *    is 60000001 Hz (~60 MHz) and DRM tells us to make 60000 kHz then
	 *    the clock framework will actually give us the right clock.
	 *
	 * 3. Get the clock framework to round the rate for us to tell us
	 *    what it will actually make.
	 *
	 * 4. Store the rounded up rate so that we don't need to worry about
	 *    this in the actual clk_set_rate().
	 */
	rate = clk_round_rate(vop->dclk, adjusted_mode->clock * 1000);
	if (rate / 1000 != adjusted_mode->clock)
		rate = clk_round_rate(vop->dclk,
				      adjusted_mode->clock * 1000 + 999);
	adjusted_mode->clock = DIV_ROUND_UP(rate, 1000);

	return true;
}
/* Read back the dsp_lut_en bit (true while the gamma LUT is active). */
static bool vop_dsp_lut_is_enabled(struct vop *vop)
{
	return vop_read_reg(vop, 0, &vop->data->common->dsp_lut_en);
}
/* Read the active LUT buffer index (flips when the HW swaps buffers). */
static u32 vop_lut_buffer_index(struct vop *vop)
{
	return vop_read_reg(vop, 0, &vop->data->common->lut_buffer_index);
}
/*
 * Copy the CRTC's gamma LUT into the VOP's LUT RAM.  Each 32-bit word
 * packs one entry as R << (2*bpc) | G << bpc | B, where bpc is derived
 * from the hardware LUT depth (lut_size entries).
 */
static void vop_crtc_write_gamma_lut(struct vop *vop, struct drm_crtc *crtc)
{
	struct drm_color_lut *lut = crtc->state->gamma_lut->data;
	unsigned int i, bpc = ilog2(vop->data->lut_size);

	for (i = 0; i < crtc->gamma_size; i++) {
		u32 word;

		word = (drm_color_lut_extract(lut[i].red, bpc) << (2 * bpc)) |
		       (drm_color_lut_extract(lut[i].green, bpc) << bpc) |
			drm_color_lut_extract(lut[i].blue, bpc);
		writel(word, vop->lut_regs + i * 4);
	}
}
/*
 * Apply (or clear) the CRTC gamma LUT.  Two hardware flavours exist:
 * older VOPs require dsp_lut_en to be cleared (and confirmed by
 * polling) before the LUT RAM may be rewritten; RK3399-style VOPs can
 * update live via update_gamma_lut, confirmed by a lut_buffer_index
 * flip.  Silently returns if the VOP has no LUT registers.
 */
static void vop_crtc_gamma_set(struct vop *vop, struct drm_crtc *crtc,
			       struct drm_crtc_state *old_state)
{
	struct drm_crtc_state *state = crtc->state;
	unsigned int idle;
	u32 lut_idx, old_idx;
	int ret;

	if (!vop->lut_regs)
		return;

	if (!state->gamma_lut || !VOP_HAS_REG(vop, common, update_gamma_lut)) {
		/*
		 * To disable gamma (gamma_lut is null) or to write
		 * an update to the LUT, clear dsp_lut_en.
		 */
		spin_lock(&vop->reg_lock);
		VOP_REG_SET(vop, common, dsp_lut_en, 0);
		vop_cfg_done(vop);
		spin_unlock(&vop->reg_lock);

		/*
		 * In order to write the LUT to the internal memory,
		 * we need to first make sure the dsp_lut_en bit is cleared.
		 */
		ret = readx_poll_timeout(vop_dsp_lut_is_enabled, vop,
					 idle, !idle, 5, 30 * 1000);
		if (ret) {
			DRM_DEV_ERROR(vop->dev, "display LUT RAM enable timeout!\n");
			return;
		}

		/* Disabling gamma was the whole job: done. */
		if (!state->gamma_lut)
			return;
	} else {
		/*
		 * On RK3399 the gamma LUT can updated without clearing dsp_lut_en,
		 * by setting update_gamma_lut then waiting for lut_buffer_index change
		 */
		old_idx = vop_lut_buffer_index(vop);
	}

	spin_lock(&vop->reg_lock);
	vop_crtc_write_gamma_lut(vop, crtc);
	VOP_REG_SET(vop, common, dsp_lut_en, 1);
	VOP_REG_SET(vop, common, update_gamma_lut, 1);
	vop_cfg_done(vop);
	spin_unlock(&vop->reg_lock);

	if (VOP_HAS_REG(vop, common, update_gamma_lut)) {
		ret = readx_poll_timeout(vop_lut_buffer_index, vop,
					 lut_idx, lut_idx != old_idx, 5, 30 * 1000);
		if (ret) {
			DRM_DEV_ERROR(vop->dev, "gamma LUT update timeout!\n");
			return;
		}

		/*
		 * update_gamma_lut is auto cleared by HW, but write 0 to clear the bit
		 * in our backup of the regs.
		 */
		spin_lock(&vop->reg_lock);
		VOP_REG_SET(vop, common, update_gamma_lut, 0);
		spin_unlock(&vop->reg_lock);
	}
}
/*
 * drm_crtc_helper_funcs.atomic_begin: program an updated gamma LUT, but
 * only for commits that keep the CRTC active - enable/disable
 * transitions handle the LUT in .atomic_enable instead.
 */
static void vop_crtc_atomic_begin(struct drm_crtc *crtc,
				  struct drm_atomic_state *state)
{
	struct drm_crtc_state *new_crtc_state = drm_atomic_get_new_crtc_state(state,
									      crtc);
	struct drm_crtc_state *old_crtc_state = drm_atomic_get_old_crtc_state(state,
									      crtc);
	struct vop *vop = to_vop(crtc);

	if (!new_crtc_state->color_mgmt_changed || new_crtc_state->active_changed)
		return;

	vop_crtc_gamma_set(vop, crtc, old_crtc_state);
}
/*
 * drm_crtc_helper_funcs.atomic_enable: bring the CRTC up.  On a PSR
 * exit only the windows are re-enabled; otherwise the VOP is powered
 * via vop_enable(), the output mux/polarity and full display timings
 * are programmed, the pixel clock is set, and standby is released.
 */
static void vop_crtc_atomic_enable(struct drm_crtc *crtc,
				   struct drm_atomic_state *state)
{
	struct drm_crtc_state *old_state = drm_atomic_get_old_crtc_state(state,
									 crtc);
	struct vop *vop = to_vop(crtc);
	const struct vop_data *vop_data = vop->data;
	struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc->state);
	struct drm_display_mode *adjusted_mode = &crtc->state->adjusted_mode;
	u16 hsync_len = adjusted_mode->hsync_end - adjusted_mode->hsync_start;
	u16 hdisplay = adjusted_mode->hdisplay;
	u16 htotal = adjusted_mode->htotal;
	u16 hact_st = adjusted_mode->htotal - adjusted_mode->hsync_start;
	u16 hact_end = hact_st + hdisplay;
	u16 vdisplay = adjusted_mode->vdisplay;
	u16 vtotal = adjusted_mode->vtotal;
	u16 vsync_len = adjusted_mode->vsync_end - adjusted_mode->vsync_start;
	u16 vact_st = adjusted_mode->vtotal - adjusted_mode->vsync_start;
	u16 vact_end = vact_st + vdisplay;
	uint32_t pin_pol, val;
	int dither_bpc = s->output_bpc ? s->output_bpc : 10;
	int ret;

	/* Fast path: leaving self refresh, hardware is still configured. */
	if (old_state && old_state->self_refresh_active) {
		drm_crtc_vblank_on(crtc);
		rockchip_drm_set_win_enabled(crtc, true);
		return;
	}

	mutex_lock(&vop->vop_lock);

	WARN_ON(vop->event);

	ret = vop_enable(crtc, old_state);
	if (ret) {
		mutex_unlock(&vop->vop_lock);
		DRM_DEV_ERROR(vop->dev, "Failed to enable vop (%d)\n", ret);
		return;
	}

	pin_pol = (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) ?
		   BIT(HSYNC_POSITIVE) : 0;
	pin_pol |= (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) ?
		   BIT(VSYNC_POSITIVE) : 0;
	VOP_REG_SET(vop, output, pin_pol, pin_pol);
	VOP_REG_SET(vop, output, mipi_dual_channel_en, 0);

	/* Route the VOP to the encoder selected by the connector type. */
	switch (s->output_type) {
	case DRM_MODE_CONNECTOR_LVDS:
		VOP_REG_SET(vop, output, rgb_dclk_pol, 1);
		VOP_REG_SET(vop, output, rgb_pin_pol, pin_pol);
		VOP_REG_SET(vop, output, rgb_en, 1);
		break;
	case DRM_MODE_CONNECTOR_eDP:
		VOP_REG_SET(vop, output, edp_dclk_pol, 1);
		VOP_REG_SET(vop, output, edp_pin_pol, pin_pol);
		VOP_REG_SET(vop, output, edp_en, 1);
		break;
	case DRM_MODE_CONNECTOR_HDMIA:
		VOP_REG_SET(vop, output, hdmi_dclk_pol, 1);
		VOP_REG_SET(vop, output, hdmi_pin_pol, pin_pol);
		VOP_REG_SET(vop, output, hdmi_en, 1);
		break;
	case DRM_MODE_CONNECTOR_DSI:
		VOP_REG_SET(vop, output, mipi_dclk_pol, 1);
		VOP_REG_SET(vop, output, mipi_pin_pol, pin_pol);
		VOP_REG_SET(vop, output, mipi_en, 1);
		VOP_REG_SET(vop, output, mipi_dual_channel_en,
			    !!(s->output_flags & ROCKCHIP_OUTPUT_DSI_DUAL));
		break;
	case DRM_MODE_CONNECTOR_DisplayPort:
		VOP_REG_SET(vop, output, dp_dclk_pol, 0);
		VOP_REG_SET(vop, output, dp_pin_pol, pin_pol);
		VOP_REG_SET(vop, output, dp_en, 1);
		break;
	default:
		DRM_DEV_ERROR(vop->dev, "unsupported connector_type [%d]\n",
			      s->output_type);
	}

	/*
	 * if vop is not support RGB10 output, need force RGB10 to RGB888.
	 */
	if (s->output_mode == ROCKCHIP_OUT_MODE_AAAA &&
	    !(vop_data->feature & VOP_FEATURE_OUTPUT_RGB10))
		s->output_mode = ROCKCHIP_OUT_MODE_P888;

	if (s->output_mode == ROCKCHIP_OUT_MODE_AAAA && dither_bpc <= 8)
		VOP_REG_SET(vop, common, pre_dither_down, 1);
	else
		VOP_REG_SET(vop, common, pre_dither_down, 0);

	if (dither_bpc == 6) {
		VOP_REG_SET(vop, common, dither_down_sel, DITHER_DOWN_ALLEGRO);
		VOP_REG_SET(vop, common, dither_down_mode, RGB888_TO_RGB666);
		VOP_REG_SET(vop, common, dither_down_en, 1);
	} else {
		VOP_REG_SET(vop, common, dither_down_en, 0);
	}

	VOP_REG_SET(vop, common, out_mode, s->output_mode);

	/* Horizontal and vertical timing registers. */
	VOP_REG_SET(vop, modeset, htotal_pw, (htotal << 16) | hsync_len);
	val = hact_st << 16;
	val |= hact_end;
	VOP_REG_SET(vop, modeset, hact_st_end, val);
	VOP_REG_SET(vop, modeset, hpost_st_end, val);

	VOP_REG_SET(vop, modeset, vtotal_pw, (vtotal << 16) | vsync_len);
	val = vact_st << 16;
	val |= vact_end;
	VOP_REG_SET(vop, modeset, vact_st_end, val);
	VOP_REG_SET(vop, modeset, vpost_st_end, val);

	VOP_REG_SET(vop, intr, line_flag_num[0], vact_end);

	clk_set_rate(vop->dclk, adjusted_mode->clock * 1000);

	/* All timings are in place: leave standby and start scanning out. */
	VOP_REG_SET(vop, common, standby, 0);
	mutex_unlock(&vop->vop_lock);

	/*
	 * If we have a GAMMA LUT in the state, then let's make sure
	 * it's updated.  We might be coming out of suspend,
	 * which means the LUT internal memory needs to be re-written.
	 */
	if (crtc->state->gamma_lut)
		vop_crtc_gamma_set(vop, crtc, old_state);
}
/* True while the frame-start interrupt status bit is still raised. */
static bool vop_fs_irq_is_pending(struct vop *vop)
{
	return VOP_INTR_GET_TYPE(vop, status, FS_INTR);
}
/*
 * Wait until any in-flight frame-start interrupt has been fully
 * handled, so the caller cannot race with the vblank/flip bookkeeping
 * done in vop_isr().
 */
static void vop_wait_for_irq_handler(struct vop *vop)
{
	bool pending;
	int ret;

	/*
	 * Spin until frame start interrupt status bit goes low, which means
	 * that interrupt handler was invoked and cleared it.  The timeout of
	 * 10 msecs is really too long, but it is just a safety measure if
	 * something goes really wrong.  The wait will only happen in the very
	 * unlikely case of a vblank happening exactly at the same time and
	 * shouldn't exceed microseconds range.
	 */
	ret = readx_poll_timeout_atomic(vop_fs_irq_is_pending, vop, pending,
					!pending, 0, 10 * 1000);
	if (ret)
		DRM_DEV_ERROR(vop->dev, "VOP vblank IRQ stuck for 10 ms\n");

	/* Make sure a handler that already cleared the bit has returned. */
	synchronize_irq(vop->irq);
}
/*
 * drm_crtc_helper_funcs.atomic_check: validate the gamma LUT size and
 * count AFBC planes (the hardware decoder serves at most one).  Also
 * records whether AFBC must be enabled in the Rockchip CRTC state.
 */
static int vop_crtc_atomic_check(struct drm_crtc *crtc,
				 struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
									  crtc);
	struct vop *vop = to_vop(crtc);
	struct drm_plane *plane;
	struct drm_plane_state *plane_state;
	struct rockchip_crtc_state *s;
	int afbc_planes = 0;

	if (vop->lut_regs && crtc_state->color_mgmt_changed &&
	    crtc_state->gamma_lut) {
		unsigned int len;

		len = drm_color_lut_size(crtc_state->gamma_lut);
		if (len != crtc->gamma_size) {
			DRM_DEBUG_KMS("Invalid LUT size; got %d, expected %d\n",
				      len, crtc->gamma_size);
			return -EINVAL;
		}
	}

	drm_atomic_crtc_state_for_each_plane(plane, crtc_state) {
		plane_state =
			drm_atomic_get_plane_state(crtc_state->state, plane);
		if (IS_ERR(plane_state)) {
			DRM_DEBUG_KMS("Cannot get plane state for plane %s\n",
				      plane->name);
			return PTR_ERR(plane_state);
		}

		if (drm_is_afbc(plane_state->fb->modifier))
			++afbc_planes;
	}

	if (afbc_planes > 1) {
		DRM_DEBUG_KMS("Invalid number of AFBC planes; got %d, expected at most 1\n", afbc_planes);
		return -EINVAL;
	}

	s = to_rockchip_crtc_state(crtc_state);
	s->enable_afbc = afbc_planes > 0;

	return 0;
}
/*
 * drm_crtc_helper_funcs.atomic_flush: latch all window updates with
 * cfg_done, hand the pageflip event to the interrupt handler, and
 * defer unreffing replaced framebuffers until the next vblank (the
 * hardware may still be scanning them out).
 */
static void vop_crtc_atomic_flush(struct drm_crtc *crtc,
				  struct drm_atomic_state *state)
{
	struct drm_crtc_state *old_crtc_state = drm_atomic_get_old_crtc_state(state,
									      crtc);
	struct drm_atomic_state *old_state = old_crtc_state->state;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	struct vop *vop = to_vop(crtc);
	struct drm_plane *plane;
	struct rockchip_crtc_state *s;
	int i;

	if (WARN_ON(!vop->is_enabled))
		return;

	spin_lock(&vop->reg_lock);

	/* Enable AFBC if there is some AFBC window, disable otherwise. */
	s = to_rockchip_crtc_state(crtc->state);
	VOP_AFBC_SET(vop, enable, s->enable_afbc);
	vop_cfg_done(vop);

	spin_unlock(&vop->reg_lock);

	/*
	 * There is a (rather unlikely) possiblity that a vblank interrupt
	 * fired before we set the cfg_done bit.  To avoid spuriously
	 * signalling flip completion we need to wait for it to finish.
	 */
	vop_wait_for_irq_handler(vop);

	spin_lock_irq(&crtc->dev->event_lock);
	if (crtc->state->event) {
		WARN_ON(drm_crtc_vblank_get(crtc) != 0);
		WARN_ON(vop->event);

		vop->event = crtc->state->event;
		crtc->state->event = NULL;
	}
	spin_unlock_irq(&crtc->dev->event_lock);

	for_each_oldnew_plane_in_state(old_state, plane, old_plane_state,
				       new_plane_state, i) {
		if (!old_plane_state->fb)
			continue;

		if (old_plane_state->fb == new_plane_state->fb)
			continue;

		/* Keep the old fb alive until the flip worker runs. */
		drm_framebuffer_get(old_plane_state->fb);
		WARN_ON(drm_crtc_vblank_get(crtc) != 0);
		drm_flip_work_queue(&vop->fb_unref_work, old_plane_state->fb);
		set_bit(VOP_PENDING_FB_UNREF, &vop->pending);
	}
}
/* Atomic helper hooks for the VOP CRTC. */
static const struct drm_crtc_helper_funcs vop_crtc_helper_funcs = {
	.mode_valid = vop_crtc_mode_valid,
	.mode_fixup = vop_crtc_mode_fixup,
	.atomic_check = vop_crtc_atomic_check,
	.atomic_begin = vop_crtc_atomic_begin,
	.atomic_flush = vop_crtc_atomic_flush,
	.atomic_enable = vop_crtc_atomic_enable,
	.atomic_disable = vop_crtc_atomic_disable,
};
/* drm_crtc_funcs.destroy: release the DRM core CRTC resources. */
static void vop_crtc_destroy(struct drm_crtc *crtc)
{
	drm_crtc_cleanup(crtc);
}
/*
 * drm_crtc_funcs.atomic_duplicate_state: allocate a fresh
 * rockchip_crtc_state and let the helper copy the embedded base state.
 * Returns NULL on allocation failure or if no current state exists.
 */
static struct drm_crtc_state *vop_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct rockchip_crtc_state *copy;

	if (WARN_ON(!crtc->state))
		return NULL;

	copy = kzalloc(sizeof(*copy), GFP_KERNEL);
	if (!copy)
		return NULL;

	__drm_atomic_helper_crtc_duplicate_state(crtc, &copy->base);

	return &copy->base;
}
/*
 * drm_crtc_funcs.atomic_destroy_state: tear down the base state and
 * free the containing rockchip_crtc_state.
 */
static void vop_crtc_destroy_state(struct drm_crtc *crtc,
				   struct drm_crtc_state *state)
{
	struct rockchip_crtc_state *rk_state = to_rockchip_crtc_state(state);

	__drm_atomic_helper_crtc_destroy_state(&rk_state->base);
	kfree(rk_state);
}
static void vop_crtc_reset(struct drm_crtc *crtc)
{
struct rockchip_crtc_state *crtc_state =
kzalloc(sizeof(*crtc_state), GFP_KERNEL);
if (crtc->state)
vop_crtc_destroy_state(crtc, crtc->state);
__drm_atomic_helper_crtc_reset(crtc, &crtc_state->base);
}
#ifdef CONFIG_DRM_ANALOGIX_DP
/*
 * Find the first eDP connector registered on this DRM device, or NULL
 * if none exists.  Used by the CRC debugfs hooks below.
 */
static struct drm_connector *vop_get_edp_connector(struct vop *vop)
{
	struct drm_connector *connector, *found = NULL;
	struct drm_connector_list_iter conn_iter;

	drm_connector_list_iter_begin(vop->drm_dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
			found = connector;
			break;
		}
	}
	drm_connector_list_iter_end(&conn_iter);

	return found;
}
/*
 * drm_crtc_funcs.set_crc_source: start CRC capture on the eDP link for
 * source "auto", stop it for a NULL source; anything else is invalid.
 */
static int vop_crtc_set_crc_source(struct drm_crtc *crtc,
				   const char *source_name)
{
	struct vop *vop = to_vop(crtc);
	struct drm_connector *connector;

	connector = vop_get_edp_connector(vop);
	if (!connector)
		return -EINVAL;

	/* NULL means "turn CRC generation off". */
	if (!source_name)
		return analogix_dp_stop_crc(connector);

	if (strcmp(source_name, "auto") == 0)
		return analogix_dp_start_crc(connector);

	return -EINVAL;
}
/*
 * drm_crtc_funcs.verify_crc_source: only "auto" (or NULL) is accepted;
 * each CRC entry carries three values (one per colour channel).
 */
static int
vop_crtc_verify_crc_source(struct drm_crtc *crtc, const char *source_name,
			   size_t *values_cnt)
{
	if (source_name && strcmp(source_name, "auto") != 0)
		return -EINVAL;

	*values_cnt = 3;
	return 0;
}
#else
/* Stub when the analogix DP driver is not built: CRC is unavailable. */
static int vop_crtc_set_crc_source(struct drm_crtc *crtc,
				   const char *source_name)
{
	return -ENODEV;
}
/* Stub when the analogix DP driver is not built: CRC is unavailable. */
static int
vop_crtc_verify_crc_source(struct drm_crtc *crtc, const char *source_name,
			   size_t *values_cnt)
{
	return -ENODEV;
}
#endif
/* Core CRTC vtable: atomic helpers plus custom state and CRC hooks. */
static const struct drm_crtc_funcs vop_crtc_funcs = {
	.set_config = drm_atomic_helper_set_config,
	.page_flip = drm_atomic_helper_page_flip,
	.destroy = vop_crtc_destroy,
	.reset = vop_crtc_reset,
	.atomic_duplicate_state = vop_crtc_duplicate_state,
	.atomic_destroy_state = vop_crtc_destroy_state,
	.enable_vblank = vop_crtc_enable_vblank,
	.disable_vblank = vop_crtc_disable_vblank,
	.set_crc_source = vop_crtc_set_crc_source,
	.verify_crc_source = vop_crtc_verify_crc_source,
};
/*
 * Flip-work callback: drop the vblank and framebuffer references taken
 * when a replaced fb was queued for deferred release.
 */
static void vop_fb_unref_worker(struct drm_flip_work *work, void *val)
{
	struct vop *vop = container_of(work, struct vop, fb_unref_work);
	struct drm_framebuffer *fb = val;

	drm_crtc_vblank_put(&vop->crtc);
	drm_framebuffer_put(fb);
}
/*
 * Frame-start bookkeeping run from the ISR: deliver a pending pageflip
 * event and kick the deferred framebuffer-unref work if any is queued.
 */
static void vop_handle_vblank(struct vop *vop)
{
	struct drm_device *drm = vop->drm_dev;
	struct drm_crtc *crtc = &vop->crtc;

	spin_lock(&drm->event_lock);
	if (vop->event) {
		drm_crtc_send_vblank_event(crtc, vop->event);
		drm_crtc_vblank_put(crtc);
		vop->event = NULL;
	}
	spin_unlock(&drm->event_lock);

	if (test_and_clear_bit(VOP_PENDING_FB_UNREF, &vop->pending))
		drm_flip_work_commit(&vop->fb_unref_work, system_unbound_wq);
}
/*
 * VOP interrupt handler.  The line is shared with the IOMMU, so a
 * powered-down VOP (or an empty status register) must return IRQ_NONE
 * to let the other handler run.  Handles DSP_HOLD_VALID, LINE_FLAG and
 * frame-start (vblank) interrupts.
 */
static irqreturn_t vop_isr(int irq, void *data)
{
	struct vop *vop = data;
	struct drm_crtc *crtc = &vop->crtc;
	uint32_t active_irqs;
	int ret = IRQ_NONE;

	/*
	 * The irq is shared with the iommu.  If the runtime-pm state of the
	 * vop-device is disabled the irq has to be targeted at the iommu.
	 */
	if (!pm_runtime_get_if_in_use(vop->dev))
		return IRQ_NONE;

	if (vop_core_clks_enable(vop)) {
		DRM_DEV_ERROR_RATELIMITED(vop->dev, "couldn't enable clocks\n");
		goto out;
	}

	/*
	 * interrupt register has interrupt status, enable and clear bits, we
	 * must hold irq_lock to avoid a race with enable/disable_vblank().
	 */
	spin_lock(&vop->irq_lock);

	active_irqs = VOP_INTR_GET_TYPE(vop, status, INTR_MASK);
	/* Clear all active interrupt sources */
	if (active_irqs)
		VOP_INTR_SET_TYPE(vop, clear, active_irqs, 1);

	spin_unlock(&vop->irq_lock);

	/* This is expected for vop iommu irqs, since the irq is shared */
	if (!active_irqs)
		goto out_disable;

	if (active_irqs & DSP_HOLD_VALID_INTR) {
		/* Wakes the standby wait in vop_crtc_atomic_disable(). */
		complete(&vop->dsp_hold_completion);
		active_irqs &= ~DSP_HOLD_VALID_INTR;
		ret = IRQ_HANDLED;
	}

	if (active_irqs & LINE_FLAG_INTR) {
		complete(&vop->line_flag_completion);
		active_irqs &= ~LINE_FLAG_INTR;
		ret = IRQ_HANDLED;
	}

	if (active_irqs & FS_INTR) {
		drm_crtc_handle_vblank(crtc);
		vop_handle_vblank(vop);
		active_irqs &= ~FS_INTR;
		ret = IRQ_HANDLED;
	}

	/* Unhandled irqs are spurious. */
	if (active_irqs)
		DRM_DEV_ERROR(vop->dev, "Unknown VOP IRQs: %#02x\n",
			      active_irqs);

out_disable:
	vop_core_clks_disable(vop);
out:
	pm_runtime_put(vop->dev);
	return ret;
}
/*
 * Expose a rotation property (X/Y reflection only) on planes whose
 * window hardware actually has the corresponding mirror registers.
 */
static void vop_plane_add_properties(struct drm_plane *plane,
				     const struct vop_win_data *win_data)
{
	unsigned int supported = 0;

	if (VOP_WIN_HAS_REG(win_data, x_mir_en))
		supported |= DRM_MODE_REFLECT_X;
	if (VOP_WIN_HAS_REG(win_data, y_mir_en))
		supported |= DRM_MODE_REFLECT_Y;

	if (supported)
		drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
						   DRM_MODE_ROTATE_0 |
						   supported);
}
/*
 * Register the CRTC and all of its planes with DRM.
 *
 * Primary/cursor planes must exist before drm_crtc_init_with_planes();
 * overlay planes are created afterwards so their possible_crtcs can be
 * restricted to this CRTC.  On failure, any planes already registered
 * on the device's plane list are cleaned up.
 *
 * Returns 0 on success or a negative errno.
 */
static int vop_create_crtc(struct vop *vop)
{
	const struct vop_data *vop_data = vop->data;
	struct device *dev = vop->dev;
	struct drm_device *drm_dev = vop->drm_dev;
	struct drm_plane *primary = NULL, *cursor = NULL, *plane, *tmp;
	struct drm_crtc *crtc = &vop->crtc;
	struct device_node *port;
	int ret;
	int i;

	/*
	 * Create drm_plane for primary and cursor planes first, since we need
	 * to pass them to drm_crtc_init_with_planes, which sets the
	 * "possible_crtcs" to the newly initialized crtc.
	 */
	for (i = 0; i < vop_data->win_size; i++) {
		struct vop_win *vop_win = &vop->win[i];
		const struct vop_win_data *win_data = vop_win->data;

		if (win_data->type != DRM_PLANE_TYPE_PRIMARY &&
		    win_data->type != DRM_PLANE_TYPE_CURSOR)
			continue;

		ret = drm_universal_plane_init(vop->drm_dev, &vop_win->base,
					       0, &vop_plane_funcs,
					       win_data->phy->data_formats,
					       win_data->phy->nformats,
					       win_data->phy->format_modifiers,
					       win_data->type, NULL);
		if (ret) {
			DRM_DEV_ERROR(vop->dev, "failed to init plane %d\n",
				      ret);
			goto err_cleanup_planes;
		}

		plane = &vop_win->base;
		drm_plane_helper_add(plane, &plane_helper_funcs);
		vop_plane_add_properties(plane, win_data);
		if (plane->type == DRM_PLANE_TYPE_PRIMARY)
			primary = plane;
		else if (plane->type == DRM_PLANE_TYPE_CURSOR)
			cursor = plane;
	}

	ret = drm_crtc_init_with_planes(drm_dev, crtc, primary, cursor,
					&vop_crtc_funcs, NULL);
	if (ret)
		goto err_cleanup_planes;

	drm_crtc_helper_add(crtc, &vop_crtc_helper_funcs);
	/* Gamma/LUT support is only advertised when LUT registers exist. */
	if (vop->lut_regs) {
		drm_mode_crtc_set_gamma_size(crtc, vop_data->lut_size);
		drm_crtc_enable_color_mgmt(crtc, 0, false, vop_data->lut_size);
	}

	/*
	 * Create drm_planes for overlay windows with possible_crtcs restricted
	 * to the newly created crtc.
	 */
	for (i = 0; i < vop_data->win_size; i++) {
		struct vop_win *vop_win = &vop->win[i];
		const struct vop_win_data *win_data = vop_win->data;
		unsigned long possible_crtcs = drm_crtc_mask(crtc);

		if (win_data->type != DRM_PLANE_TYPE_OVERLAY)
			continue;

		ret = drm_universal_plane_init(vop->drm_dev, &vop_win->base,
					       possible_crtcs,
					       &vop_plane_funcs,
					       win_data->phy->data_formats,
					       win_data->phy->nformats,
					       win_data->phy->format_modifiers,
					       win_data->type, NULL);
		if (ret) {
			DRM_DEV_ERROR(vop->dev, "failed to init overlay %d\n",
				      ret);
			goto err_cleanup_crtc;
		}
		drm_plane_helper_add(&vop_win->base, &plane_helper_funcs);
		vop_plane_add_properties(&vop_win->base, win_data);
	}

	/* The OF "port" node ties this CRTC to its encoders in DT. */
	port = of_get_child_by_name(dev->of_node, "port");
	if (!port) {
		DRM_DEV_ERROR(vop->dev, "no port node found in %pOF\n",
			      dev->of_node);
		ret = -ENOENT;
		goto err_cleanup_crtc;
	}

	drm_flip_work_init(&vop->fb_unref_work, "fb_unref",
			   vop_fb_unref_worker);

	init_completion(&vop->dsp_hold_completion);
	init_completion(&vop->line_flag_completion);
	crtc->port = port;

	/* Self-refresh is optional; failure here is logged and ignored. */
	ret = drm_self_refresh_helper_init(crtc);
	if (ret)
		DRM_DEV_DEBUG_KMS(vop->dev,
			"Failed to init %s with SR helpers %d, ignoring\n",
			crtc->name, ret);

	return 0;

err_cleanup_crtc:
	drm_crtc_cleanup(crtc);
err_cleanup_planes:
	list_for_each_entry_safe(plane, tmp, &drm_dev->mode_config.plane_list,
				 head)
		drm_plane_cleanup(plane);
	return ret;
}
/*
 * Tear down the CRTC created by vop_create_crtc(), in the reverse of
 * the dependency order described in the inline comments below.
 */
static void vop_destroy_crtc(struct vop *vop)
{
	struct drm_crtc *crtc = &vop->crtc;
	struct drm_device *drm_dev = vop->drm_dev;
	struct drm_plane *plane, *tmp;

	drm_self_refresh_helper_cleanup(crtc);

	of_node_put(crtc->port);

	/*
	 * We need to cleanup the planes now.  Why?
	 *
	 * The planes are "&vop->win[i].base".  That means the memory is
	 * all part of the big "struct vop" chunk of memory.  That memory
	 * was devm allocated and associated with this component.  We need to
	 * free it ourselves before vop_unbind() finishes.
	 */
	list_for_each_entry_safe(plane, tmp, &drm_dev->mode_config.plane_list,
				 head)
		vop_plane_destroy(plane);

	/*
	 * Destroy CRTC after vop_plane_destroy() since vop_disable_plane()
	 * references the CRTC.
	 */
	drm_crtc_cleanup(crtc);
	drm_flip_work_cleanup(&vop->fb_unref_work);
}
/*
 * One-time hardware bring-up: acquire clocks, reset the block over AHB,
 * snapshot the register file into regsbak (the driver's write-through
 * shadow), put all windows into a known disabled state, then perform a
 * dclk reset to latch the configuration.  Leaves the VOP powered down
 * (clocks prepared but disabled, runtime PM reference dropped).
 *
 * Returns 0 on success or a negative errno with everything unwound.
 */
static int vop_initial(struct vop *vop)
{
	struct reset_control *ahb_rst;
	int i, ret;

	vop->hclk = devm_clk_get(vop->dev, "hclk_vop");
	if (IS_ERR(vop->hclk)) {
		DRM_DEV_ERROR(vop->dev, "failed to get hclk source\n");
		return PTR_ERR(vop->hclk);
	}
	vop->aclk = devm_clk_get(vop->dev, "aclk_vop");
	if (IS_ERR(vop->aclk)) {
		DRM_DEV_ERROR(vop->dev, "failed to get aclk source\n");
		return PTR_ERR(vop->aclk);
	}
	vop->dclk = devm_clk_get(vop->dev, "dclk_vop");
	if (IS_ERR(vop->dclk)) {
		DRM_DEV_ERROR(vop->dev, "failed to get dclk source\n");
		return PTR_ERR(vop->dclk);
	}

	ret = pm_runtime_resume_and_get(vop->dev);
	if (ret < 0) {
		DRM_DEV_ERROR(vop->dev, "failed to get pm runtime: %d\n", ret);
		return ret;
	}

	ret = clk_prepare(vop->dclk);
	if (ret < 0) {
		DRM_DEV_ERROR(vop->dev, "failed to prepare dclk\n");
		goto err_put_pm_runtime;
	}

	/* Enable both the hclk and aclk to setup the vop */
	ret = clk_prepare_enable(vop->hclk);
	if (ret < 0) {
		DRM_DEV_ERROR(vop->dev, "failed to prepare/enable hclk\n");
		goto err_unprepare_dclk;
	}

	ret = clk_prepare_enable(vop->aclk);
	if (ret < 0) {
		DRM_DEV_ERROR(vop->dev, "failed to prepare/enable aclk\n");
		goto err_disable_hclk;
	}

	/*
	 * do hclk_reset, reset all vop registers.
	 */
	ahb_rst = devm_reset_control_get(vop->dev, "ahb");
	if (IS_ERR(ahb_rst)) {
		DRM_DEV_ERROR(vop->dev, "failed to get ahb reset\n");
		ret = PTR_ERR(ahb_rst);
		goto err_disable_aclk;
	}
	reset_control_assert(ahb_rst);
	usleep_range(10, 20);
	reset_control_deassert(ahb_rst);

	VOP_INTR_SET_TYPE(vop, clear, INTR_MASK, 1);
	VOP_INTR_SET_TYPE(vop, enable, INTR_MASK, 0);

	/* Populate the register shadow from the freshly-reset hardware. */
	for (i = 0; i < vop->len; i += sizeof(u32))
		vop->regsbak[i / 4] = readl_relaxed(vop->regs + i);

	VOP_REG_SET(vop, misc, global_regdone_en, 1);
	VOP_REG_SET(vop, common, dsp_blank, 0);

	for (i = 0; i < vop->data->win_size; i++) {
		struct vop_win *vop_win = &vop->win[i];
		const struct vop_win_data *win = vop_win->data;
		int channel = i * 2 + 1;

		VOP_WIN_SET(vop, win, channel, (channel + 1) << 4 | channel);
		vop_win_disable(vop, vop_win);
		VOP_WIN_SET(vop, win, gate, 1);
	}

	vop_cfg_done(vop);

	/*
	 * do dclk_reset, let all config take affect.
	 */
	vop->dclk_rst = devm_reset_control_get(vop->dev, "dclk");
	if (IS_ERR(vop->dclk_rst)) {
		DRM_DEV_ERROR(vop->dev, "failed to get dclk reset\n");
		ret = PTR_ERR(vop->dclk_rst);
		goto err_disable_aclk;
	}
	reset_control_assert(vop->dclk_rst);
	usleep_range(10, 20);
	reset_control_deassert(vop->dclk_rst);

	/* Keep the clocks prepared; they are re-enabled on CRTC enable. */
	clk_disable(vop->hclk);
	clk_disable(vop->aclk);

	vop->is_enabled = false;

	pm_runtime_put_sync(vop->dev);

	return 0;

err_disable_aclk:
	clk_disable_unprepare(vop->aclk);
err_disable_hclk:
	clk_disable_unprepare(vop->hclk);
err_unprepare_dclk:
	clk_unprepare(vop->dclk);
err_put_pm_runtime:
	pm_runtime_put_sync(vop->dev);
	return ret;
}
/*
* Initialize the vop->win array elements.
*/
/*
 * Wire each vop_win back to its static per-SoC window description (and
 * its YUV->YUV conversion data, when the SoC provides any).
 */
static void vop_win_init(struct vop *vop)
{
	const struct vop_data *data = vop->data;
	unsigned int idx;

	for (idx = 0; idx < data->win_size; idx++) {
		struct vop_win *win = &vop->win[idx];

		win->data = &data->win[idx];
		win->vop = vop;
		if (data->win_yuv2yuv)
			win->yuv2yuv_data = &data->win_yuv2yuv[idx];
	}
}
/**
* rockchip_drm_wait_vact_end
* @crtc: CRTC to enable line flag
* @mstimeout: millisecond for timeout
*
* Wait for vact_end line flag irq or timeout.
*
* Returns:
* Zero on success, negative errno on failure.
*/
int rockchip_drm_wait_vact_end(struct drm_crtc *crtc, unsigned int mstimeout)
{
	struct vop *vop = to_vop(crtc);
	unsigned long jiffies_left;
	int ret = 0;

	if (!crtc || !vop->is_enabled)
		return -ENODEV;

	mutex_lock(&vop->vop_lock);
	/*
	 * mstimeout is unsigned, so the old "mstimeout <= 0" check was a
	 * tautological comparison that only ever caught 0; make that explicit.
	 */
	if (mstimeout == 0) {
		ret = -EINVAL;
		goto out;
	}

	/* Only one waiter at a time may own the line-flag interrupt. */
	if (vop_line_flag_irq_is_enabled(vop)) {
		ret = -EBUSY;
		goto out;
	}

	reinit_completion(&vop->line_flag_completion);
	vop_line_flag_irq_enable(vop);

	jiffies_left = wait_for_completion_timeout(&vop->line_flag_completion,
						   msecs_to_jiffies(mstimeout));
	vop_line_flag_irq_disable(vop);

	if (jiffies_left == 0) {
		DRM_DEV_ERROR(vop->dev, "Timeout waiting for IRQ\n");
		ret = -ETIMEDOUT;
	}

out:
	mutex_unlock(&vop->vop_lock);
	return ret;
}
EXPORT_SYMBOL(rockchip_drm_wait_vact_end);
/*
 * Component bind callback: map resources, create the CRTC/planes, run
 * the one-time hardware init, request the (IOMMU-shared) interrupt and
 * optionally register the internal RGB output.
 *
 * Returns 0 on success or a negative errno.
 */
static int vop_bind(struct device *dev, struct device *master, void *data)
{
	struct platform_device *pdev = to_platform_device(dev);
	const struct vop_data *vop_data;
	struct drm_device *drm_dev = data;
	struct vop *vop;
	struct resource *res;
	int ret, irq;

	vop_data = of_device_get_match_data(dev);
	if (!vop_data)
		return -ENODEV;

	/* Allocate vop struct and its vop_win array */
	vop = devm_kzalloc(dev, struct_size(vop, win, vop_data->win_size),
			   GFP_KERNEL);
	if (!vop)
		return -ENOMEM;

	vop->dev = dev;
	vop->data = vop_data;
	vop->drm_dev = drm_dev;
	dev_set_drvdata(dev, vop);

	vop_win_init(vop);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	vop->regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(vop->regs))
		return PTR_ERR(vop->regs);
	vop->len = resource_size(res);

	/* The second MEM resource (gamma LUT) is optional. */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (res) {
		if (vop_data->lut_size != 1024 && vop_data->lut_size != 256) {
			DRM_DEV_ERROR(dev, "unsupported gamma LUT size %d\n", vop_data->lut_size);
			return -EINVAL;
		}
		vop->lut_regs = devm_ioremap_resource(dev, res);
		if (IS_ERR(vop->lut_regs))
			return PTR_ERR(vop->lut_regs);
	}

	/* Shadow copy of the register file; filled in by vop_initial(). */
	vop->regsbak = devm_kzalloc(dev, vop->len, GFP_KERNEL);
	if (!vop->regsbak)
		return -ENOMEM;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		DRM_DEV_ERROR(dev, "cannot find irq for vop\n");
		return irq;
	}
	vop->irq = (unsigned int)irq;

	spin_lock_init(&vop->reg_lock);
	spin_lock_init(&vop->irq_lock);
	mutex_init(&vop->vop_lock);

	ret = vop_create_crtc(vop);
	if (ret)
		return ret;

	pm_runtime_enable(&pdev->dev);

	ret = vop_initial(vop);
	if (ret < 0) {
		DRM_DEV_ERROR(&pdev->dev,
			      "cannot initial vop dev - err %d\n", ret);
		goto err_disable_pm_runtime;
	}

	/* IRQF_SHARED: the line is also used by the IOMMU (see vop_isr). */
	ret = devm_request_irq(dev, vop->irq, vop_isr,
			       IRQF_SHARED, dev_name(dev), vop);
	if (ret)
		goto err_disable_pm_runtime;

	if (vop->data->feature & VOP_FEATURE_INTERNAL_RGB) {
		vop->rgb = rockchip_rgb_init(dev, &vop->crtc, vop->drm_dev, 0);
		if (IS_ERR(vop->rgb)) {
			ret = PTR_ERR(vop->rgb);
			goto err_disable_pm_runtime;
		}
	}

	rockchip_drm_dma_init_device(drm_dev, dev);

	return 0;

err_disable_pm_runtime:
	pm_runtime_disable(&pdev->dev);
	vop_destroy_crtc(vop);
	return ret;
}
/*
 * Component unbind callback: mirror of vop_bind()/vop_initial() — tear
 * down the RGB output and CRTC, then release the still-prepared clocks.
 */
static void vop_unbind(struct device *dev, struct device *master, void *data)
{
	struct vop *vop = dev_get_drvdata(dev);

	if (vop->rgb)
		rockchip_rgb_fini(vop->rgb);

	pm_runtime_disable(dev);
	vop_destroy_crtc(vop);

	clk_unprepare(vop->aclk);
	clk_unprepare(vop->hclk);
	clk_unprepare(vop->dclk);
}
/* Component ops used by the per-SoC VOP platform drivers. */
const struct component_ops vop_component_ops = {
	.bind = vop_bind,
	.unbind = vop_unbind,
};
EXPORT_SYMBOL_GPL(vop_component_ops);
| linux-master | drivers/gpu/drm/rockchip/rockchip_drm_vop.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
* Author: Chris Zhong <[email protected]>
*/
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/extcon.h>
#include <linux/firmware.h>
#include <linux/mfd/syscon.h>
#include <linux/phy/phy.h>
#include <linux/regmap.h>
#include <linux/reset.h>
#include <sound/hdmi-codec.h>
#include <drm/display/drm_dp_helper.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_of.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
#include "cdn-dp-core.h"
#include "cdn-dp-reg.h"
#include "rockchip_drm_vop.h"
/* Map an embedded drm_connector back to its owning cdn_dp_device. */
static inline struct cdn_dp_device *connector_to_dp(struct drm_connector *connector)
{
	return container_of(connector, struct cdn_dp_device, connector);
}
/* Map an embedded drm_encoder (via its rockchip wrapper) to the device. */
static inline struct cdn_dp_device *encoder_to_dp(struct drm_encoder *encoder)
{
	return container_of(to_rockchip_encoder(encoder),
			    struct cdn_dp_device, encoder);
}
/* GRF registers used to mux the DP output and HPD routing.
 * NOTE(review): the low half carries the value and, as the
 * "DP_SEL_VOP_LIT << 16" usage below suggests, the high half is the
 * write-enable mask — hence DPTX_HPD_SEL_MASK living at bits 29:28.
 */
#define GRF_SOC_CON9		0x6224
#define DP_SEL_VOP_LIT		BIT(12)
#define GRF_SOC_CON26		0x6268
#define DPTX_HPD_SEL		(3 << 12)
#define DPTX_HPD_DEL		(2 << 12)
#define DPTX_HPD_SEL_MASK	(3 << 28)

/* How long to keep retrying firmware load / DPCD sink-count reads. */
#define CDN_FW_TIMEOUT_MS	(64 * 1000)
#define CDN_DPCD_TIMEOUT_MS	5000
#define CDN_DP_FIRMWARE		"rockchip/dptx.bin"
MODULE_FIRMWARE(CDN_DP_FIRMWARE);
/* Per-SoC parameters; currently only the number of USB-C PHY ports. */
struct cdn_dp_data {
	u8 max_phy;
};

static struct cdn_dp_data rk3399_cdn_dp = {
	.max_phy = 2,
};

static const struct of_device_id cdn_dp_dt_ids[] = {
	{ .compatible = "rockchip,rk3399-cdn-dp",
		.data = (void *)&rk3399_cdn_dp },
	{}
};
MODULE_DEVICE_TABLE(of, cdn_dp_dt_ids);
/*
 * Write one GRF register with its clock temporarily enabled.
 * Returns 0 on success or a negative errno.
 */
static int cdn_dp_grf_write(struct cdn_dp_device *dp,
			    unsigned int reg, unsigned int val)
{
	int ret;

	ret = clk_prepare_enable(dp->grf_clk);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "Failed to prepare_enable grf clock\n");
		return ret;
	}

	ret = regmap_write(dp->grf, reg, val);
	if (ret)
		DRM_DEV_ERROR(dp->dev, "Could not write to GRF: %d\n", ret);

	clk_disable_unprepare(dp->grf_clk);

	return ret;
}
/*
 * Power up the controller: pclk + core clock, runtime PM, a full
 * core/dptx/apb reset cycle, then program the firmware clock from the
 * actual core clock rate.
 *
 * Returns 0 on success or a negative errno with all resources released.
 */
static int cdn_dp_clk_enable(struct cdn_dp_device *dp)
{
	int ret;
	unsigned long rate;

	ret = clk_prepare_enable(dp->pclk);
	if (ret < 0) {
		DRM_DEV_ERROR(dp->dev, "cannot enable dp pclk %d\n", ret);
		goto err_pclk;
	}

	ret = clk_prepare_enable(dp->core_clk);
	if (ret < 0) {
		DRM_DEV_ERROR(dp->dev, "cannot enable core_clk %d\n", ret);
		goto err_core_clk;
	}

	/*
	 * pm_runtime_get_sync() raises the usage count even when resume
	 * fails, which the error path below never dropped.  Use
	 * pm_runtime_resume_and_get() instead (as vop_initial() does) so
	 * no reference is leaked on failure.
	 */
	ret = pm_runtime_resume_and_get(dp->dev);
	if (ret < 0) {
		DRM_DEV_ERROR(dp->dev, "cannot get pm runtime %d\n", ret);
		goto err_pm_runtime_get;
	}

	reset_control_assert(dp->core_rst);
	reset_control_assert(dp->dptx_rst);
	reset_control_assert(dp->apb_rst);
	reset_control_deassert(dp->core_rst);
	reset_control_deassert(dp->dptx_rst);
	reset_control_deassert(dp->apb_rst);

	rate = clk_get_rate(dp->core_clk);
	if (!rate) {
		DRM_DEV_ERROR(dp->dev, "get clk rate failed\n");
		ret = -EINVAL;
		goto err_set_rate;
	}

	cdn_dp_set_fw_clk(dp, rate);
	cdn_dp_clock_reset(dp);

	return 0;

err_set_rate:
	pm_runtime_put(dp->dev);
err_pm_runtime_get:
	clk_disable_unprepare(dp->core_clk);
err_core_clk:
	clk_disable_unprepare(dp->pclk);
err_pclk:
	return ret;
}
/* Reverse of cdn_dp_clk_enable(): drop the PM reference, then clocks. */
static void cdn_dp_clk_disable(struct cdn_dp_device *dp)
{
	pm_runtime_put_sync(dp->dev);
	clk_disable_unprepare(dp->pclk);
	clk_disable_unprepare(dp->core_clk);
}
/*
 * Number of DP lanes currently available on a USB-C port: 0 when no DP
 * alt-mode is active, 2 when SuperSpeed USB shares the cable, 4 otherwise.
 */
static int cdn_dp_get_port_lanes(struct cdn_dp_port *port)
{
	struct extcon_dev *edev = port->extcon;
	union extcon_property_value property;

	if (extcon_get_state(edev, EXTCON_DISP_DP) <= 0)
		return 0;

	extcon_get_property(edev, EXTCON_DISP_DP,
			    EXTCON_PROP_USB_SS, &property);

	return property.intval ? 2 : 4;
}
/*
 * Read DP_SINK_COUNT over the AUX channel.  *sink_count is zero unless
 * the read succeeds.  Returns 0 on success or a negative errno.
 */
static int cdn_dp_get_sink_count(struct cdn_dp_device *dp, u8 *sink_count)
{
	u8 reg;
	int err;

	*sink_count = 0;
	err = cdn_dp_dpcd_read(dp, DP_SINK_COUNT, &reg, 1);
	if (!err)
		*sink_count = DP_GET_SINK_COUNT(reg);

	return err;
}
/* Return the first port that currently reports active DP lanes, if any. */
static struct cdn_dp_port *cdn_dp_connected_port(struct cdn_dp_device *dp)
{
	int i;

	for (i = 0; i < dp->ports; i++) {
		struct cdn_dp_port *port = dp->port[i];

		if (cdn_dp_get_port_lanes(port))
			return port;
	}

	return NULL;
}
/*
 * Poll the sink-count DPCD register on the active port until a sink
 * shows up, the cable goes away, or CDN_DPCD_TIMEOUT_MS elapses.
 */
static bool cdn_dp_check_sink_connection(struct cdn_dp_device *dp)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(CDN_DPCD_TIMEOUT_MS);
	struct cdn_dp_port *port;
	u8 sink_count = 0;

	if (dp->active_port < 0 || dp->active_port >= dp->ports) {
		DRM_DEV_ERROR(dp->dev, "active_port is wrong!\n");
		return false;
	}

	port = dp->port[dp->active_port];

	/*
	 * Attempt to read sink count, retry in case the sink may not be ready.
	 *
	 * Sinks are *supposed* to come up within 1ms from an off state, but
	 * some docks need more time to power up.
	 */
	while (time_before(jiffies, timeout)) {
		/* Bail out immediately if DP alt-mode dropped meanwhile. */
		if (!extcon_get_state(port->extcon, EXTCON_DISP_DP))
			return false;

		if (!cdn_dp_get_sink_count(dp, &sink_count))
			return sink_count ? true : false;

		usleep_range(5000, 10000);
	}

	DRM_DEV_ERROR(dp->dev, "Get sink capability timed out\n");
	return false;
}
/* Report hot-plug state from the driver's cached ->connected flag. */
static enum drm_connector_status
cdn_dp_connector_detect(struct drm_connector *connector, bool force)
{
	struct cdn_dp_device *dp = connector_to_dp(connector);
	enum drm_connector_status status;

	mutex_lock(&dp->lock);
	status = dp->connected ? connector_status_connected :
				 connector_status_disconnected;
	mutex_unlock(&dp->lock);

	return status;
}
/* Unregister from userspace before releasing the connector itself. */
static void cdn_dp_connector_destroy(struct drm_connector *connector)
{
	drm_connector_unregister(connector);
	drm_connector_cleanup(connector);
}
/* Connector vtable; state handling is delegated to the atomic helpers. */
static const struct drm_connector_funcs cdn_dp_atomic_connector_funcs = {
	.detect = cdn_dp_connector_detect,
	.destroy = cdn_dp_connector_destroy,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.reset = drm_atomic_helper_connector_reset,
	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
/*
 * Populate connector modes from the EDID cached by
 * cdn_dp_get_sink_capability(); also records whether the sink has audio.
 * Returns the number of modes added.
 */
static int cdn_dp_connector_get_modes(struct drm_connector *connector)
{
	struct cdn_dp_device *dp = connector_to_dp(connector);
	struct edid *edid;
	int ret = 0;

	mutex_lock(&dp->lock);
	edid = dp->edid;
	if (edid) {
		DRM_DEV_DEBUG_KMS(dp->dev, "got edid: width[%d] x height[%d]\n",
				  edid->width_cm, edid->height_cm);
		dp->sink_has_audio = drm_detect_monitor_audio(edid);

		drm_connector_update_edid_property(connector, edid);
		ret = drm_add_edid_modes(connector, edid);
	}
	mutex_unlock(&dp->lock);

	return ret;
}
/*
 * Reject modes whose bandwidth exceeds what the negotiated link (lane
 * count x link rate, minus ~20% overhead) can carry at the panel's bpc.
 */
static enum drm_mode_status
cdn_dp_connector_mode_valid(struct drm_connector *connector,
			    struct drm_display_mode *mode)
{
	struct cdn_dp_device *dp = connector_to_dp(connector);
	struct drm_display_info *display_info = &dp->connector.display_info;
	u32 requested, actual, rate, sink_max, source_max = 0;
	u8 lanes, bpc;

	/* If DP is disconnected, every mode is invalid */
	if (!dp->connected)
		return MODE_BAD;

	switch (display_info->bpc) {
	case 10:
		bpc = 10;
		break;
	case 6:
		bpc = 6;
		break;
	default:
		bpc = 8;
		break;
	}

	/* Required bandwidth: pixel clock (kHz) * bits per pixel / 1000. */
	requested = mode->clock * bpc * 3 / 1000;

	/* Usable lanes/rate are the min of what source and sink support. */
	source_max = dp->lanes;
	sink_max = drm_dp_max_lane_count(dp->dpcd);
	lanes = min(source_max, sink_max);

	source_max = drm_dp_bw_code_to_link_rate(CDN_DP_MAX_LINK_RATE);
	sink_max = drm_dp_max_link_rate(dp->dpcd);
	rate = min(source_max, sink_max);

	actual = rate * lanes / 100;

	/* efficiency is about 0.8 */
	actual = actual * 8 / 10;

	if (requested > actual) {
		DRM_DEV_DEBUG_KMS(dp->dev,
				  "requested=%d, actual=%d, clock=%d\n",
				  requested, actual, mode->clock);
		return MODE_CLOCK_HIGH;
	}

	return MODE_OK;
}
/* Probe helpers for the connector (mode enumeration and filtering). */
static struct drm_connector_helper_funcs cdn_dp_connector_helper_funcs = {
	.get_modes = cdn_dp_connector_get_modes,
	.mode_valid = cdn_dp_connector_mode_valid,
};
/*
 * Validate the firmware blob against its embedded header, upload the
 * IRAM/DRAM sections to the controller, activate the microcontroller
 * and enable event reporting.  Returns 0 on success or a negative errno.
 */
static int cdn_dp_firmware_init(struct cdn_dp_device *dp)
{
	int ret;
	const u32 *iram_data, *dram_data;
	const struct firmware *fw = dp->fw;
	const struct cdn_firmware_header *hdr;

	hdr = (struct cdn_firmware_header *)fw->data;
	/* Total size in the header must match the blob we were given. */
	if (fw->size != le32_to_cpu(hdr->size_bytes)) {
		DRM_DEV_ERROR(dp->dev, "firmware is invalid\n");
		return -EINVAL;
	}

	iram_data = (const u32 *)(fw->data + hdr->header_size);
	dram_data = (const u32 *)(fw->data + hdr->header_size + hdr->iram_size);

	ret = cdn_dp_load_firmware(dp, iram_data, hdr->iram_size,
				   dram_data, hdr->dram_size);
	if (ret)
		return ret;

	ret = cdn_dp_set_firmware_active(dp, true);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "active ucpu failed: %d\n", ret);
		return ret;
	}

	return cdn_dp_event_config(dp);
}
/*
 * Once a sink is confirmed present, cache its DPCD receiver caps and
 * (re)fetch its EDID, replacing any previously cached copy.
 */
static int cdn_dp_get_sink_capability(struct cdn_dp_device *dp)
{
	int ret;

	if (!cdn_dp_check_sink_connection(dp))
		return -ENODEV;

	ret = cdn_dp_dpcd_read(dp, DP_DPCD_REV, dp->dpcd,
			       DP_RECEIVER_CAP_SIZE);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "Failed to get caps %d\n", ret);
		return ret;
	}

	kfree(dp->edid);
	dp->edid = drm_do_get_edid(&dp->connector,
				   cdn_dp_get_edid_block, dp);
	return 0;
}
/*
 * Bring up one USB-C PHY for DP: power the PHY, route HPD to the DP
 * controller through the GRF, verify HPD, read the Type-C polarity and
 * program the host capabilities.  On success the port becomes the
 * active one.  On failure the PHY is powered back off and HPD routing
 * is restored.
 *
 * Returns 0 on success or a negative errno.
 */
static int cdn_dp_enable_phy(struct cdn_dp_device *dp, struct cdn_dp_port *port)
{
	union extcon_property_value property;
	int ret, phy_ret;

	if (!port->phy_enabled) {
		ret = phy_power_on(port->phy);
		if (ret) {
			DRM_DEV_ERROR(dp->dev, "phy power on failed: %d\n",
				      ret);
			goto err_phy;
		}
		port->phy_enabled = true;
	}

	ret = cdn_dp_grf_write(dp, GRF_SOC_CON26,
			       DPTX_HPD_SEL_MASK | DPTX_HPD_SEL);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "Failed to write HPD_SEL %d\n", ret);
		goto err_power_on;
	}

	ret = cdn_dp_get_hpd_status(dp);
	if (ret <= 0) {
		if (!ret)
			DRM_DEV_ERROR(dp->dev, "hpd does not exist\n");
		goto err_power_on;
	}

	ret = extcon_get_property(port->extcon, EXTCON_DISP_DP,
				  EXTCON_PROP_USB_TYPEC_POLARITY, &property);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "get property failed\n");
		goto err_power_on;
	}

	port->lanes = cdn_dp_get_port_lanes(port);
	ret = cdn_dp_set_host_cap(dp, port->lanes, property.intval);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "set host capabilities failed: %d\n",
			      ret);
		goto err_power_on;
	}

	dp->active_port = port->id;
	return 0;

err_power_on:
	/*
	 * Log phy_power_off()'s own error code; the old code printed the
	 * earlier failure's "ret" here, which was misleading.
	 */
	phy_ret = phy_power_off(port->phy);
	if (phy_ret)
		DRM_DEV_ERROR(dp->dev, "phy power off failed: %d", phy_ret);
	else
		port->phy_enabled = false;

err_phy:
	cdn_dp_grf_write(dp, GRF_SOC_CON26,
			 DPTX_HPD_SEL_MASK | DPTX_HPD_DEL);
	return ret;
}
/*
 * Power off a port's PHY (if it was on) and mark the port — and the
 * device's active-port bookkeeping — as inactive.
 */
static int cdn_dp_disable_phy(struct cdn_dp_device *dp,
			      struct cdn_dp_port *port)
{
	if (port->phy_enabled) {
		int err = phy_power_off(port->phy);

		if (err) {
			DRM_DEV_ERROR(dp->dev, "phy power off failed: %d", err);
			return err;
		}
	}

	port->phy_enabled = false;
	port->lanes = 0;
	dp->active_port = -1;
	return 0;
}
/*
 * Fully power down the controller: all PHYs off, HPD routed away from
 * DP, firmware deactivated, clocks dropped.  The cached EDID is only
 * freed when the sink is truly gone, so a re-enable keeps its modes.
 * Caller holds dp->lock.
 */
static int cdn_dp_disable(struct cdn_dp_device *dp)
{
	int ret, i;

	if (!dp->active)
		return 0;

	for (i = 0; i < dp->ports; i++)
		cdn_dp_disable_phy(dp, dp->port[i]);

	ret = cdn_dp_grf_write(dp, GRF_SOC_CON26,
			       DPTX_HPD_SEL_MASK | DPTX_HPD_DEL);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "Failed to clear hpd sel %d\n",
			      ret);
		return ret;
	}

	cdn_dp_set_firmware_active(dp, false);
	cdn_dp_clk_disable(dp);
	dp->active = false;
	dp->max_lanes = 0;
	dp->max_rate = 0;
	if (!dp->connected) {
		kfree(dp->edid);
		dp->edid = NULL;
	}

	return 0;
}
/*
 * Power up the controller and attach to the first connected port that
 * yields a usable sink: clocks + firmware first, then try each port
 * from the connected one onwards until PHY enable and sink-capability
 * read both succeed.  Caller holds dp->lock.
 *
 * Returns 0 on success or a negative errno (clocks released again).
 */
static int cdn_dp_enable(struct cdn_dp_device *dp)
{
	int ret, i, lanes;
	struct cdn_dp_port *port;

	port = cdn_dp_connected_port(dp);
	if (!port) {
		DRM_DEV_ERROR(dp->dev,
			      "Can't enable without connection\n");
		return -ENODEV;
	}

	if (dp->active)
		return 0;

	ret = cdn_dp_clk_enable(dp);
	if (ret)
		return ret;

	ret = cdn_dp_firmware_init(dp);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "firmware init failed: %d", ret);
		goto err_clk_disable;
	}

	/* only enable the port that connected with downstream device */
	for (i = port->id; i < dp->ports; i++) {
		port = dp->port[i];
		lanes = cdn_dp_get_port_lanes(port);
		if (lanes) {
			ret = cdn_dp_enable_phy(dp, port);
			if (ret)
				continue;

			ret = cdn_dp_get_sink_capability(dp);
			if (ret) {
				cdn_dp_disable_phy(dp, port);
			} else {
				dp->active = true;
				dp->lanes = port->lanes;
				return 0;
			}
		}
	}

err_clk_disable:
	cdn_dp_clk_disable(dp);
	return ret;
}
/*
 * Capture the video parameters (color depth, format, sync polarities)
 * for the adjusted mode so the enable path can program them later.
 */
static void cdn_dp_encoder_mode_set(struct drm_encoder *encoder,
				    struct drm_display_mode *mode,
				    struct drm_display_mode *adjusted)
{
	struct cdn_dp_device *dp = encoder_to_dp(encoder);
	struct drm_display_info *display_info = &dp->connector.display_info;
	struct video_info *video = &dp->video_info;

	/* Only 6 and 10 bpc are special-cased; anything else runs at 8. */
	if (display_info->bpc == 10)
		video->color_depth = 10;
	else if (display_info->bpc == 6)
		video->color_depth = 6;
	else
		video->color_depth = 8;

	video->color_fmt = PXL_RGB;
	video->v_sync_polarity = !!(mode->flags & DRM_MODE_FLAG_NVSYNC);
	video->h_sync_polarity = !!(mode->flags & DRM_MODE_FLAG_NHSYNC);

	drm_mode_copy(&dp->mode, adjusted);
}
/*
 * Return true when the link is trained and channel equalization still
 * holds on every lane in use; false forces a retrain by the caller.
 */
static bool cdn_dp_check_link_status(struct cdn_dp_device *dp)
{
	u8 link_status[DP_LINK_STATUS_SIZE];
	struct cdn_dp_port *port = cdn_dp_connected_port(dp);
	u8 sink_lanes = drm_dp_max_lane_count(dp->dpcd);

	/* No port or never trained (max_rate/max_lanes unset) -> retrain. */
	if (!port || !dp->max_rate || !dp->max_lanes)
		return false;

	if (cdn_dp_dpcd_read(dp, DP_LANE0_1_STATUS, link_status,
			     DP_LINK_STATUS_SIZE)) {
		DRM_ERROR("Failed to get link status\n");
		return false;
	}

	/* if link training is requested we should perform it always */
	return drm_dp_channel_eq_ok(link_status, min(port->lanes, sink_lanes));
}
/*
 * Notify the HDMI audio codec of hot-plug state changes.
 *
 * Caller holds dp->lock.  cdn_dp_audio_hook_plugged_cb() may be used to
 * unhook the callback (fn == NULL) while codec_dev stays set, so check
 * both pointers to avoid calling through a NULL function pointer.
 */
static void cdn_dp_audio_handle_plugged_change(struct cdn_dp_device *dp,
					       bool plugged)
{
	if (dp->plugged_cb && dp->codec_dev)
		dp->plugged_cb(dp->codec_dev, plugged);
}
/*
 * Encoder enable: select which VOP feeds the DP controller via the GRF,
 * power the link up, (re)train if equalization is stale, then push the
 * video config and unmute, finally notifying the audio codec.
 */
static void cdn_dp_encoder_enable(struct drm_encoder *encoder)
{
	struct cdn_dp_device *dp = encoder_to_dp(encoder);
	int ret, val;

	/* Endpoint id identifies the VOP: 0 = "big", nonzero = "little". */
	ret = drm_of_encoder_active_endpoint_id(dp->dev->of_node, encoder);
	if (ret < 0) {
		DRM_DEV_ERROR(dp->dev, "Could not get vop id, %d", ret);
		return;
	}

	DRM_DEV_DEBUG_KMS(dp->dev, "vop %s output to cdn-dp\n",
			  (ret) ? "LIT" : "BIG");
	/* Upper 16 bits of the GRF write act as the write-enable mask. */
	if (ret)
		val = DP_SEL_VOP_LIT | (DP_SEL_VOP_LIT << 16);
	else
		val = DP_SEL_VOP_LIT << 16;

	ret = cdn_dp_grf_write(dp, GRF_SOC_CON9, val);
	if (ret)
		return;

	mutex_lock(&dp->lock);

	ret = cdn_dp_enable(dp);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "Failed to enable encoder %d\n",
			      ret);
		goto out;
	}
	if (!cdn_dp_check_link_status(dp)) {
		ret = cdn_dp_train_link(dp);
		if (ret) {
			DRM_DEV_ERROR(dp->dev, "Failed link train %d\n", ret);
			goto out;
		}
	}

	/* Idle the stream while the video configuration is updated. */
	ret = cdn_dp_set_video_status(dp, CONTROL_VIDEO_IDLE);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "Failed to idle video %d\n", ret);
		goto out;
	}

	ret = cdn_dp_config_video(dp);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "Failed to config video %d\n", ret);
		goto out;
	}

	ret = cdn_dp_set_video_status(dp, CONTROL_VIDEO_VALID);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "Failed to valid video %d\n", ret);
		goto out;
	}

	cdn_dp_audio_handle_plugged_change(dp, true);

out:
	mutex_unlock(&dp->lock);
}
/*
 * Encoder disable: tell the audio codec, power the controller down, and
 * — if a port is still physically connected — schedule the event work
 * so the link can be brought back up (see the comment below).
 */
static void cdn_dp_encoder_disable(struct drm_encoder *encoder)
{
	struct cdn_dp_device *dp = encoder_to_dp(encoder);
	int ret;

	mutex_lock(&dp->lock);
	cdn_dp_audio_handle_plugged_change(dp, false);
	if (dp->active) {
		ret = cdn_dp_disable(dp);
		if (ret) {
			DRM_DEV_ERROR(dp->dev, "Failed to disable encoder %d\n",
				      ret);
		}
	}
	mutex_unlock(&dp->lock);

	/*
	 * In the following 2 cases, we need to run the event_work to re-enable
	 * the DP:
	 * 1. If there is not just one port device is connected, and remove one
	 *    device from a port, the DP will be disabled here, at this case,
	 *    run the event_work to re-open DP for the other port.
	 * 2. If re-training or re-config failed, the DP will be disabled here.
	 *    run the event_work to re-connect it.
	 */
	if (!dp->connected && cdn_dp_connected_port(dp))
		schedule_work(&dp->event_work);
}
/* Record this encoder's output type/mode in the rockchip CRTC state. */
static int cdn_dp_encoder_atomic_check(struct drm_encoder *encoder,
				       struct drm_crtc_state *crtc_state,
				       struct drm_connector_state *conn_state)
{
	struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc_state);

	s->output_type = DRM_MODE_CONNECTOR_DisplayPort;
	s->output_mode = ROCKCHIP_OUT_MODE_AAAA;

	return 0;
}
/* Encoder helper vtable wiring the modeset callbacks above. */
static const struct drm_encoder_helper_funcs cdn_dp_encoder_helper_funcs = {
	.mode_set = cdn_dp_encoder_mode_set,
	.enable = cdn_dp_encoder_enable,
	.disable = cdn_dp_encoder_disable,
	.atomic_check = cdn_dp_encoder_atomic_check,
};
/*
 * Gather all DT-described resources: the GRF regmap, register window,
 * four clocks and four reset lines.  All getters are devm-managed, so
 * nothing needs explicit unwinding on failure.
 *
 * Returns 0 on success or the first getter's negative errno.
 */
static int cdn_dp_parse_dt(struct cdn_dp_device *dp)
{
	struct device *dev = dp->dev;
	struct device_node *np = dev->of_node;
	struct platform_device *pdev = to_platform_device(dev);

	dp->grf = syscon_regmap_lookup_by_phandle(np, "rockchip,grf");
	if (IS_ERR(dp->grf)) {
		DRM_DEV_ERROR(dev, "cdn-dp needs rockchip,grf property\n");
		return PTR_ERR(dp->grf);
	}

	dp->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(dp->regs)) {
		DRM_DEV_ERROR(dev, "ioremap reg failed\n");
		return PTR_ERR(dp->regs);
	}

	dp->core_clk = devm_clk_get(dev, "core-clk");
	if (IS_ERR(dp->core_clk)) {
		DRM_DEV_ERROR(dev, "cannot get core_clk_dp\n");
		return PTR_ERR(dp->core_clk);
	}

	dp->pclk = devm_clk_get(dev, "pclk");
	if (IS_ERR(dp->pclk)) {
		DRM_DEV_ERROR(dev, "cannot get pclk\n");
		return PTR_ERR(dp->pclk);
	}

	dp->spdif_clk = devm_clk_get(dev, "spdif");
	if (IS_ERR(dp->spdif_clk)) {
		DRM_DEV_ERROR(dev, "cannot get spdif_clk\n");
		return PTR_ERR(dp->spdif_clk);
	}

	dp->grf_clk = devm_clk_get(dev, "grf");
	if (IS_ERR(dp->grf_clk)) {
		DRM_DEV_ERROR(dev, "cannot get grf clk\n");
		return PTR_ERR(dp->grf_clk);
	}

	dp->spdif_rst = devm_reset_control_get(dev, "spdif");
	if (IS_ERR(dp->spdif_rst)) {
		DRM_DEV_ERROR(dev, "no spdif reset control found\n");
		return PTR_ERR(dp->spdif_rst);
	}

	dp->dptx_rst = devm_reset_control_get(dev, "dptx");
	if (IS_ERR(dp->dptx_rst)) {
		DRM_DEV_ERROR(dev, "no uphy reset control found\n");
		return PTR_ERR(dp->dptx_rst);
	}

	dp->core_rst = devm_reset_control_get(dev, "core");
	if (IS_ERR(dp->core_rst)) {
		DRM_DEV_ERROR(dev, "no core reset control found\n");
		return PTR_ERR(dp->core_rst);
	}

	dp->apb_rst = devm_reset_control_get(dev, "apb");
	if (IS_ERR(dp->apb_rst)) {
		DRM_DEV_ERROR(dev, "no apb reset control found\n");
		return PTR_ERR(dp->apb_rst);
	}

	return 0;
}
/*
 * hdmi-codec .hw_params callback: translate the DAI format/params into
 * the controller's audio_info and program it.  Fails with -ENODEV when
 * the link is down; the accepted config is cached on success.
 */
static int cdn_dp_audio_hw_params(struct device *dev,  void *data,
				  struct hdmi_codec_daifmt *daifmt,
				  struct hdmi_codec_params *params)
{
	struct cdn_dp_device *dp = dev_get_drvdata(dev);
	struct audio_info audio = {
		.sample_width = params->sample_width,
		.sample_rate = params->sample_rate,
		.channels = params->channels,
	};
	int ret;

	mutex_lock(&dp->lock);
	if (!dp->active) {
		ret = -ENODEV;
		goto out;
	}

	switch (daifmt->fmt) {
	case HDMI_I2S:
		audio.format = AFMT_I2S;
		break;
	case HDMI_SPDIF:
		audio.format = AFMT_SPDIF;
		break;
	default:
		DRM_DEV_ERROR(dev, "Invalid format %d\n", daifmt->fmt);
		ret = -EINVAL;
		goto out;
	}

	ret = cdn_dp_audio_config(dp, &audio);
	if (!ret)
		dp->audio_info = audio;

out:
	mutex_unlock(&dp->lock);
	return ret;
}
/*
 * hdmi-codec .audio_shutdown callback: stop the audio stream and mark
 * the cached config unused.  No-op when the link is already down.
 */
static void cdn_dp_audio_shutdown(struct device *dev, void *data)
{
	struct cdn_dp_device *dp = dev_get_drvdata(dev);

	mutex_lock(&dp->lock);
	if (dp->active) {
		if (!cdn_dp_audio_stop(dp, &dp->audio_info))
			dp->audio_info.format = AFMT_UNUSED;
	}
	mutex_unlock(&dp->lock);
}
/*
 * hdmi-codec .mute_stream callback: (un)mute audio on the link.
 * Returns -ENODEV when the link is down.
 */
static int cdn_dp_audio_mute_stream(struct device *dev, void *data,
				    bool enable, int direction)
{
	struct cdn_dp_device *dp = dev_get_drvdata(dev);
	int ret;

	mutex_lock(&dp->lock);
	if (dp->active)
		ret = cdn_dp_audio_mute(dp, enable);
	else
		ret = -ENODEV;
	mutex_unlock(&dp->lock);

	return ret;
}
/* hdmi-codec .get_eld callback: copy the connector's ELD, truncated to len. */
static int cdn_dp_audio_get_eld(struct device *dev, void *data,
				u8 *buf, size_t len)
{
	struct cdn_dp_device *dp = dev_get_drvdata(dev);

	memcpy(buf, dp->connector.eld, min(sizeof(dp->connector.eld), len));

	return 0;
}
/*
 * hdmi-codec .hook_plugged_cb callback: record the codec's hot-plug
 * notifier and immediately report the current connection state.
 */
static int cdn_dp_audio_hook_plugged_cb(struct device *dev, void *data,
					hdmi_codec_plugged_cb fn,
					struct device *codec_dev)
{
	struct cdn_dp_device *dp = dev_get_drvdata(dev);

	mutex_lock(&dp->lock);
	dp->plugged_cb = fn;
	dp->codec_dev = codec_dev;
	cdn_dp_audio_handle_plugged_change(dp, dp->connected);
	mutex_unlock(&dp->lock);

	return 0;
}
/* Ops handed to the hdmi-codec platform device. */
static const struct hdmi_codec_ops audio_codec_ops = {
	.hw_params = cdn_dp_audio_hw_params,
	.audio_shutdown = cdn_dp_audio_shutdown,
	.mute_stream = cdn_dp_audio_mute_stream,
	.get_eld = cdn_dp_audio_get_eld,
	.hook_plugged_cb = cdn_dp_audio_hook_plugged_cb,
	.no_capture_mute = 1,
};
/*
 * Register the hdmi-codec child device (I2S + SPDIF, up to 8 channels)
 * that exposes DP audio to ALSA.  Returns 0 or a negative errno.
 */
static int cdn_dp_audio_codec_init(struct cdn_dp_device *dp,
				   struct device *dev)
{
	struct hdmi_codec_pdata codec_data = {
		.i2s = 1,
		.spdif = 1,
		.ops = &audio_codec_ops,
		.max_i2s_channels = 8,
	};

	dp->audio_pdev = platform_device_register_data(
			 dev, HDMI_CODEC_DRV_NAME, PLATFORM_DEVID_AUTO,
			 &codec_data, sizeof(codec_data));

	return PTR_ERR_OR_ZERO(dp->audio_pdev);
}
/*
 * Load the CDN DP firmware blob, retrying with exponential backoff while
 * the rootfs may not be mounted yet (-ENOENT), up to CDN_FW_TIMEOUT_MS.
 *
 * Must be entered with dp->lock held; the lock is dropped for the
 * duration of request_firmware() so boot is not blocked, and re-taken
 * before returning.  Idempotent once dp->fw_loaded is set.
 */
static int cdn_dp_request_firmware(struct cdn_dp_device *dp)
{
	int ret;
	unsigned long timeout = jiffies + msecs_to_jiffies(CDN_FW_TIMEOUT_MS);
	unsigned long sleep = 1000;
	WARN_ON(!mutex_is_locked(&dp->lock));
	if (dp->fw_loaded)
		return 0;
	/* Drop the lock before getting the firmware to avoid blocking boot */
	mutex_unlock(&dp->lock);
	while (time_before(jiffies, timeout)) {
		ret = request_firmware(&dp->fw, CDN_DP_FIRMWARE, dp->dev);
		if (ret == -ENOENT) {
			/* Blob not there yet: back off and retry. */
			msleep(sleep);
			sleep *= 2;
			continue;
		} else if (ret) {
			DRM_DEV_ERROR(dp->dev,
				      "failed to request firmware: %d\n", ret);
			goto out;
		}
		dp->fw_loaded = true;
		ret = 0;
		goto out;
	}
	DRM_DEV_ERROR(dp->dev, "Timed out trying to load firmware\n");
	ret = -ETIMEDOUT;
out:
	mutex_lock(&dp->lock);
	return ret;
}
/*
 * Deferred hot-plug handler (scheduled from the extcon notifier and on
 * bind/resume).  Re-evaluates the connect state, enables or retrains the
 * controller as needed, then reports any connector status change to
 * userspace via a hotplug uevent.
 */
static void cdn_dp_pd_event_work(struct work_struct *work)
{
	struct cdn_dp_device *dp = container_of(work, struct cdn_dp_device,
						event_work);
	struct drm_connector *connector = &dp->connector;
	enum drm_connector_status old_status;
	int ret;
	mutex_lock(&dp->lock);
	if (dp->suspended)
		goto out;
	ret = cdn_dp_request_firmware(dp);
	if (ret)
		goto out;
	dp->connected = true;
	/* Not connected, notify userspace to disable the block */
	if (!cdn_dp_connected_port(dp)) {
		DRM_DEV_INFO(dp->dev, "Not connected. Disabling cdn\n");
		dp->connected = false;
	/* Connected but not enabled, enable the block */
	} else if (!dp->active) {
		DRM_DEV_INFO(dp->dev, "Connected, not enabled. Enabling cdn\n");
		ret = cdn_dp_enable(dp);
		if (ret) {
			DRM_DEV_ERROR(dp->dev, "Enable dp failed %d\n", ret);
			dp->connected = false;
		}
	/* Enabled and connected to a dongle without a sink, notify userspace */
	} else if (!cdn_dp_check_sink_connection(dp)) {
		DRM_DEV_INFO(dp->dev, "Connected without sink. Assert hpd\n");
		dp->connected = false;
	/* Enabled and connected with a sink, re-train if requested */
	} else if (!cdn_dp_check_link_status(dp)) {
		unsigned int rate = dp->max_rate;
		unsigned int lanes = dp->max_lanes;
		struct drm_display_mode *mode = &dp->mode;
		DRM_DEV_INFO(dp->dev, "Connected with sink. Re-train link\n");
		ret = cdn_dp_train_link(dp);
		if (ret) {
			dp->connected = false;
			DRM_DEV_ERROR(dp->dev, "Train link failed %d\n", ret);
			goto out;
		}
		/* If training result is changed, update the video config */
		if (mode->clock &&
		    (rate != dp->max_rate || lanes != dp->max_lanes)) {
			ret = cdn_dp_config_video(dp);
			if (ret) {
				dp->connected = false;
				DRM_DEV_ERROR(dp->dev,
					      "Failed to config video %d\n",
					      ret);
			}
		}
	}
out:
	mutex_unlock(&dp->lock);
	/* Detect outside the lock; only fire a uevent on a real change. */
	old_status = connector->status;
	connector->status = connector->funcs->detect(connector, false);
	if (old_status != connector->status)
		drm_kms_helper_hotplug_event(dp->drm_dev);
}
/*
 * Extcon (EXTCON_DISP_DP) notifier: runs in notifier context, so the
 * real handling is punted to the event worker.
 */
static int cdn_dp_pd_event(struct notifier_block *nb,
			   unsigned long event, void *priv)
{
	struct cdn_dp_port *port = container_of(nb, struct cdn_dp_port,
						event_nb);
	struct cdn_dp_device *dp = port->dp;
	/*
	 * It would be nice to be able to just do the work inline right here.
	 * However, we need to make a bunch of calls that might sleep in order
	 * to turn on the block/phy, so use a worker instead.
	 */
	schedule_work(&dp->event_work);
	return NOTIFY_DONE;
}
/*
 * Component bind: create the DRM encoder + DisplayPort connector, hook
 * the extcon hot-plug notifiers for every port, enable runtime PM and
 * kick an initial detect pass.  On error all DRM objects created here
 * are torn down again.
 */
static int cdn_dp_bind(struct device *dev, struct device *master, void *data)
{
	struct cdn_dp_device *dp = dev_get_drvdata(dev);
	struct drm_encoder *encoder;
	struct drm_connector *connector;
	struct cdn_dp_port *port;
	struct drm_device *drm_dev = data;
	int ret, i;
	ret = cdn_dp_parse_dt(dp);
	if (ret < 0)
		return ret;
	/* Start from a clean software state; firmware loads lazily. */
	dp->drm_dev = drm_dev;
	dp->connected = false;
	dp->active = false;
	dp->active_port = -1;
	dp->fw_loaded = false;
	INIT_WORK(&dp->event_work, cdn_dp_pd_event_work);
	encoder = &dp->encoder.encoder;
	encoder->possible_crtcs = drm_of_find_possible_crtcs(drm_dev,
							     dev->of_node);
	DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs);
	ret = drm_simple_encoder_init(drm_dev, encoder,
				      DRM_MODE_ENCODER_TMDS);
	if (ret) {
		DRM_ERROR("failed to initialize encoder with drm\n");
		return ret;
	}
	drm_encoder_helper_add(encoder, &cdn_dp_encoder_helper_funcs);
	connector = &dp->connector;
	connector->polled = DRM_CONNECTOR_POLL_HPD;
	connector->dpms = DRM_MODE_DPMS_OFF;
	ret = drm_connector_init(drm_dev, connector,
				 &cdn_dp_atomic_connector_funcs,
				 DRM_MODE_CONNECTOR_DisplayPort);
	if (ret) {
		DRM_ERROR("failed to initialize connector with drm\n");
		goto err_free_encoder;
	}
	drm_connector_helper_add(connector, &cdn_dp_connector_helper_funcs);
	ret = drm_connector_attach_encoder(connector, encoder);
	if (ret) {
		DRM_ERROR("failed to attach connector and encoder\n");
		goto err_free_connector;
	}
	/* One notifier per USB-C port so either port can deliver DP. */
	for (i = 0; i < dp->ports; i++) {
		port = dp->port[i];
		port->event_nb.notifier_call = cdn_dp_pd_event;
		ret = devm_extcon_register_notifier(dp->dev, port->extcon,
						    EXTCON_DISP_DP,
						    &port->event_nb);
		if (ret) {
			DRM_DEV_ERROR(dev,
				      "register EXTCON_DISP_DP notifier err\n");
			goto err_free_connector;
		}
	}
	pm_runtime_enable(dev);
	/* Evaluate whatever is already plugged in at bind time. */
	schedule_work(&dp->event_work);
	return 0;
err_free_connector:
	drm_connector_cleanup(connector);
err_free_encoder:
	drm_encoder_cleanup(encoder);
	return ret;
}
/*
 * Component unbind: stop the event worker, power the encoder down, and
 * destroy the DRM objects, runtime PM state, firmware reference and
 * cached EDID created during bind/operation.
 */
static void cdn_dp_unbind(struct device *dev, struct device *master, void *data)
{
	struct cdn_dp_device *dp = dev_get_drvdata(dev);
	struct drm_encoder *encoder = &dp->encoder.encoder;
	struct drm_connector *connector = &dp->connector;
	cancel_work_sync(&dp->event_work);
	cdn_dp_encoder_disable(encoder);
	encoder->funcs->destroy(encoder);
	connector->funcs->destroy(connector);
	pm_runtime_disable(dev);
	if (dp->fw_loaded)
		release_firmware(dp->fw);
	kfree(dp->edid);
	dp->edid = NULL;
}
/* Component framework hooks; bound/unbound by the rockchip DRM master. */
static const struct component_ops cdn_dp_component_ops = {
	.bind = cdn_dp_bind,
	.unbind = cdn_dp_unbind,
};
/*
 * System suspend (also reused by remove/shutdown): disable the
 * controller if active and flag the device suspended so the event
 * worker becomes a no-op until resume.
 */
static int cdn_dp_suspend(struct device *dev)
{
	struct cdn_dp_device *dp = dev_get_drvdata(dev);
	int ret = 0;
	mutex_lock(&dp->lock);
	if (dp->active)
		ret = cdn_dp_disable(dp);
	dp->suspended = true;
	mutex_unlock(&dp->lock);
	return ret;
}
/*
 * System resume: clear the suspended flag and, if firmware was already
 * loaded, re-run the event worker to restore the pre-suspend state.
 * (__maybe_unused: only referenced when CONFIG_PM_SLEEP is enabled.)
 */
static __maybe_unused int cdn_dp_resume(struct device *dev)
{
	struct cdn_dp_device *dp = dev_get_drvdata(dev);
	mutex_lock(&dp->lock);
	dp->suspended = false;
	if (dp->fw_loaded)
		schedule_work(&dp->event_work);
	mutex_unlock(&dp->lock);
	return 0;
}
/*
 * Platform probe: discover up to max_phy (extcon, phy) port pairs from
 * the DT, register the audio codec child device and add ourselves to
 * the component framework.
 *
 * Fixes two error-handling defects in the original: the return value of
 * cdn_dp_audio_codec_init() was silently ignored, and a failing
 * component_add() leaked the registered audio platform device.
 */
static int cdn_dp_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	const struct of_device_id *match;
	struct cdn_dp_data *dp_data;
	struct cdn_dp_port *port;
	struct cdn_dp_device *dp;
	struct extcon_dev *extcon;
	struct phy *phy;
	int ret;
	int i;
	dp = devm_kzalloc(dev, sizeof(*dp), GFP_KERNEL);
	if (!dp)
		return -ENOMEM;
	dp->dev = dev;
	match = of_match_node(cdn_dp_dt_ids, pdev->dev.of_node);
	dp_data = (struct cdn_dp_data *)match->data;
	for (i = 0; i < dp_data->max_phy; i++) {
		extcon = extcon_get_edev_by_phandle(dev, i);
		phy = devm_of_phy_get_by_index(dev, dev->of_node, i);
		/* Defer the whole probe if either half is not ready yet. */
		if (PTR_ERR(extcon) == -EPROBE_DEFER ||
		    PTR_ERR(phy) == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		/* A port may legitimately be absent; just skip it. */
		if (IS_ERR(extcon) || IS_ERR(phy))
			continue;
		port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
		if (!port)
			return -ENOMEM;
		port->extcon = extcon;
		port->phy = phy;
		port->dp = dp;
		port->id = i;
		dp->port[dp->ports++] = port;
	}
	if (!dp->ports) {
		DRM_DEV_ERROR(dev, "missing extcon or phy\n");
		return -EINVAL;
	}
	mutex_init(&dp->lock);
	dev_set_drvdata(dev, dp);
	ret = cdn_dp_audio_codec_init(dp, dev);
	if (ret)
		return ret;
	ret = component_add(dev, &cdn_dp_component_ops);
	if (ret)
		goto err_audio_deinit;
	return 0;
err_audio_deinit:
	platform_device_unregister(dp->audio_pdev);
	return ret;
}
/*
 * Platform remove: tear down the audio child device, power the
 * controller down (reusing the suspend path) and leave the component
 * framework.
 */
static void cdn_dp_remove(struct platform_device *pdev)
{
	struct cdn_dp_device *dp = platform_get_drvdata(pdev);
	platform_device_unregister(dp->audio_pdev);
	cdn_dp_suspend(dp->dev);
	component_del(&pdev->dev, &cdn_dp_component_ops);
}
/* Power the controller down cleanly on system shutdown/reboot. */
static void cdn_dp_shutdown(struct platform_device *pdev)
{
	struct cdn_dp_device *dp = platform_get_drvdata(pdev);
	cdn_dp_suspend(dp->dev);
}
/* System sleep callbacks (suspend/resume only; no runtime PM ops here). */
static const struct dev_pm_ops cdn_dp_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(cdn_dp_suspend,
				cdn_dp_resume)
};
/* Platform driver, registered by the rockchip DRM core driver. */
struct platform_driver cdn_dp_driver = {
	.probe = cdn_dp_probe,
	.remove_new = cdn_dp_remove,
	.shutdown = cdn_dp_shutdown,
	.driver = {
		   .name = "cdn-dp",
		   .owner = THIS_MODULE,
		   .of_match_table = of_match_ptr(cdn_dp_dt_ids),
		   .pm = &cdn_dp_pm_ops,
	},
};
| linux-master | drivers/gpu/drm/rockchip/cdn-dp-core.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
* Zheng Yang <[email protected]>
*/
#include <drm/drm_edid.h>
#include <drm/drm_of.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
#include <linux/clk.h>
#include <linux/mfd/syscon.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include "rk3066_hdmi.h"
#include "rockchip_drm_drv.h"
#include "rockchip_drm_vop.h"
#define DEFAULT_PLLA_RATE 30000000
/* Per-mode HDMI stream parameters derived in rk3066_hdmi_setup(). */
struct hdmi_data_info {
	int vic; /* The CEA Video ID (VIC) of the current drm display mode. */
	unsigned int enc_out_format; /* HDMI_COLORSPACE_* output format. */
	unsigned int colorimetry;    /* HDMI_COLORIMETRY_* for the AVI frame. */
};
/* State for the built-in DDC/EDID i2c adapter. */
struct rk3066_hdmi_i2c {
	struct i2c_adapter adap;
	u8 ddc_addr;     /* Last EDID word address written (0x00/0x80). */
	u8 segment_addr; /* Last EDID segment pointer written. */
	u8 stat;         /* Interrupt status latched by the hard IRQ. */
	struct mutex i2c_lock; /* For i2c operation. */
	struct completion cmpltn; /* Signalled when an EDID IRQ arrives. */
};
/* Driver instance state for the RK3066 HDMI TX. */
struct rk3066_hdmi {
	struct device *dev;
	struct drm_device *drm_dev;
	struct regmap *grf_regmap; /* GRF syscon, for the VOP mux. */
	int irq;
	struct clk *hclk; /* Bus clock for register access. */
	void __iomem *regs;
	struct drm_connector connector;
	struct rockchip_encoder encoder;
	struct rk3066_hdmi_i2c *i2c;
	struct i2c_adapter *ddc; /* The adapter embedded in @i2c. */
	unsigned int tmdsclk; /* Current TMDS clock in Hz (drives DDC divider). */
	struct hdmi_data_info hdmi_data;
	struct drm_display_mode previous_mode; /* Saved for re-enable after HPD/DPMS. */
};
/* Map a drm_encoder back to its owning rk3066_hdmi instance. */
static struct rk3066_hdmi *encoder_to_rk3066_hdmi(struct drm_encoder *encoder)
{
	struct rockchip_encoder *rkencoder = to_rockchip_encoder(encoder);
	return container_of(rkencoder, struct rk3066_hdmi, encoder);
}
/* Map a drm_connector back to its owning rk3066_hdmi instance. */
static struct rk3066_hdmi *connector_to_rk3066_hdmi(struct drm_connector *connector)
{
	return container_of(connector, struct rk3066_hdmi, connector);
}
/*
 * Read one HDMI register.  Registers are 32-bit apertures holding 8-bit
 * values, so the readl result is truncated to u8 on return.
 */
static inline u8 hdmi_readb(struct rk3066_hdmi *hdmi, u16 offset)
{
	return readl_relaxed(hdmi->regs + offset);
}
/* Write one (8-bit valued) HDMI register via its 32-bit aperture. */
static inline void hdmi_writeb(struct rk3066_hdmi *hdmi, u16 offset, u32 val)
{
	writel_relaxed(val, hdmi->regs + offset);
}
/* Read-modify-write: replace the bits selected by @msk with @val. */
static inline void hdmi_modb(struct rk3066_hdmi *hdmi, u16 offset,
			     u32 msk, u32 val)
{
	u8 temp = hdmi_readb(hdmi, offset) & ~msk;
	temp |= val & msk;
	hdmi_writeb(hdmi, offset, temp);
}
/*
 * Program the DDC clock divider from the current TMDS rate (DDC source
 * clock is tmdsclk/4) and quiesce the EDID interrupt.
 */
static void rk3066_hdmi_i2c_init(struct rk3066_hdmi *hdmi)
{
	int ddc_bus_freq;
	ddc_bus_freq = (hdmi->tmdsclk >> 2) / HDMI_SCL_RATE;
	hdmi_writeb(hdmi, HDMI_DDC_BUS_FREQ_L, ddc_bus_freq & 0xFF);
	hdmi_writeb(hdmi, HDMI_DDC_BUS_FREQ_H, (ddc_bus_freq >> 8) & 0xFF);
	/* Clear the EDID interrupt flag and mute the interrupt. */
	hdmi_modb(hdmi, HDMI_INTR_MASK1, HDMI_INTR_EDID_MASK, 0);
	hdmi_writeb(hdmi, HDMI_INTR_STATUS1, HDMI_INTR_EDID_MASK);
}
/* Return the current HDMI_SYS_POWER_MODE_* field from HDMI_SYS_CTRL. */
static inline u8 rk3066_hdmi_get_power_mode(struct rk3066_hdmi *hdmi)
{
	return hdmi_readb(hdmi, HDMI_SYS_CTRL) & HDMI_SYS_POWER_MODE_MASK;
}
/*
 * Walk the power state ladder one step at a time toward @mode.  The
 * power-mode values form a power-of-two sequence (A..E), so halving or
 * doubling the current value moves exactly one state; entering mode D
 * additionally requires the documented PLL reset pulse sequence.
 * The loop is capped at 5 steps as a hardware-misbehavior safety net.
 */
static void rk3066_hdmi_set_power_mode(struct rk3066_hdmi *hdmi, int mode)
{
	u8 current_mode, next_mode;
	u8 i = 0;
	current_mode = rk3066_hdmi_get_power_mode(hdmi);
	DRM_DEV_DEBUG(hdmi->dev, "mode :%d\n", mode);
	DRM_DEV_DEBUG(hdmi->dev, "current_mode :%d\n", current_mode);
	if (current_mode == mode)
		return;
	do {
		if (current_mode > mode) {
			next_mode = current_mode / 2;
		} else {
			if (current_mode < HDMI_SYS_POWER_MODE_A)
				next_mode = HDMI_SYS_POWER_MODE_A;
			else
				next_mode = current_mode * 2;
		}
		DRM_DEV_DEBUG(hdmi->dev, "%d: next_mode :%d\n", i, next_mode);
		if (next_mode != HDMI_SYS_POWER_MODE_D) {
			hdmi_modb(hdmi, HDMI_SYS_CTRL,
				  HDMI_SYS_POWER_MODE_MASK, next_mode);
		} else {
			/* Mode D needs PLL reset, then PLLB reset, then run. */
			hdmi_writeb(hdmi, HDMI_SYS_CTRL,
				    HDMI_SYS_POWER_MODE_D |
				    HDMI_SYS_PLL_RESET_MASK);
			usleep_range(90, 100);
			hdmi_writeb(hdmi, HDMI_SYS_CTRL,
				    HDMI_SYS_POWER_MODE_D |
				    HDMI_SYS_PLLB_RESET);
			usleep_range(90, 100);
			hdmi_writeb(hdmi, HDMI_SYS_CTRL,
				    HDMI_SYS_POWER_MODE_D);
		}
		current_mode = next_mode;
		i = i + 1;
	} while ((next_mode != mode) && (i < 5));
	/*
	 * When the IP controller isn't configured with accurate video timing,
	 * DDC_CLK should be equal to the PLLA frequency, which is 30MHz,
	 * so we need to init the TMDS rate to the PCLK rate and reconfigure
	 * the DDC clock.
	 */
	if (mode < HDMI_SYS_POWER_MODE_D)
		hdmi->tmdsclk = DEFAULT_PLLA_RATE;
}
/*
 * Pack an infoframe and write it into the control-packet buffer
 * selected by @frame_index.  Auto-send is disabled (@disable) before
 * the upload and re-enabled (@enable) afterwards when @mask is set.
 * @setup_rc carries the caller's frame-construction status: a negative
 * value skips the upload and is simply passed back.
 */
static int
rk3066_hdmi_upload_frame(struct rk3066_hdmi *hdmi, int setup_rc,
			 union hdmi_infoframe *frame, u32 frame_index,
			 u32 mask, u32 disable, u32 enable)
{
	if (mask)
		hdmi_modb(hdmi, HDMI_CP_AUTO_SEND_CTRL, mask, disable);
	hdmi_writeb(hdmi, HDMI_CP_BUF_INDEX, frame_index);
	if (setup_rc >= 0) {
		u8 packed_frame[HDMI_MAXIMUM_INFO_FRAME_SIZE];
		ssize_t rc, i;
		rc = hdmi_infoframe_pack(frame, packed_frame,
					 sizeof(packed_frame));
		if (rc < 0)
			return rc;
		/* Packet buffer registers are spaced 4 bytes apart. */
		for (i = 0; i < rc; i++)
			hdmi_writeb(hdmi, HDMI_CP_BUF_ACC_HB0 + i * 4,
				    packed_frame[i]);
		if (mask)
			hdmi_modb(hdmi, HDMI_CP_AUTO_SEND_CTRL, mask, enable);
	}
	return setup_rc;
}
/*
 * Build the AVI infoframe for @mode (colorspace/colorimetry from
 * hdmi_data) and upload it to the hardware packet buffer.
 */
static int rk3066_hdmi_config_avi(struct rk3066_hdmi *hdmi,
				  struct drm_display_mode *mode)
{
	union hdmi_infoframe frame;
	int rc;
	rc = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi,
						      &hdmi->connector, mode);
	if (hdmi->hdmi_data.enc_out_format == HDMI_COLORSPACE_YUV444)
		frame.avi.colorspace = HDMI_COLORSPACE_YUV444;
	else if (hdmi->hdmi_data.enc_out_format == HDMI_COLORSPACE_YUV422)
		frame.avi.colorspace = HDMI_COLORSPACE_YUV422;
	else
		frame.avi.colorspace = HDMI_COLORSPACE_RGB;
	frame.avi.colorimetry = hdmi->hdmi_data.colorimetry;
	frame.avi.scan_mode = HDMI_SCAN_MODE_NONE;
	return rk3066_hdmi_upload_frame(hdmi, rc, &frame,
					HDMI_INFOFRAME_AVI, 0, 0, 0);
}
/*
 * Program the "external" video timing registers from the DRM mode:
 * sync polarities, interlace flag, and the horizontal/vertical
 * total/blank/delay/duration values (split into low/high byte
 * registers where the field is wider than 8 bits).
 * VICs 2/3 (480p) get a 6-line vsync offset fixup.
 */
static int rk3066_hdmi_config_video_timing(struct rk3066_hdmi *hdmi,
					   struct drm_display_mode *mode)
{
	int value, vsync_offset;
	/* Set the details for the external polarity and interlace mode. */
	value = HDMI_EXT_VIDEO_SET_EN;
	value |= mode->flags & DRM_MODE_FLAG_PHSYNC ?
		 HDMI_VIDEO_HSYNC_ACTIVE_HIGH : HDMI_VIDEO_HSYNC_ACTIVE_LOW;
	value |= mode->flags & DRM_MODE_FLAG_PVSYNC ?
		 HDMI_VIDEO_VSYNC_ACTIVE_HIGH : HDMI_VIDEO_VSYNC_ACTIVE_LOW;
	value |= mode->flags & DRM_MODE_FLAG_INTERLACE ?
		 HDMI_VIDEO_MODE_INTERLACE : HDMI_VIDEO_MODE_PROGRESSIVE;
	if (hdmi->hdmi_data.vic == 2 || hdmi->hdmi_data.vic == 3)
		vsync_offset = 6;
	else
		vsync_offset = 0;
	value |= vsync_offset << HDMI_VIDEO_VSYNC_OFFSET_SHIFT;
	hdmi_writeb(hdmi, HDMI_EXT_VIDEO_PARA, value);
	/* Set the details for the external video timing. */
	value = mode->htotal;
	hdmi_writeb(hdmi, HDMI_EXT_HTOTAL_L, value & 0xFF);
	hdmi_writeb(hdmi, HDMI_EXT_HTOTAL_H, (value >> 8) & 0xFF);
	value = mode->htotal - mode->hdisplay;
	hdmi_writeb(hdmi, HDMI_EXT_HBLANK_L, value & 0xFF);
	hdmi_writeb(hdmi, HDMI_EXT_HBLANK_H, (value >> 8) & 0xFF);
	value = mode->htotal - mode->hsync_start;
	hdmi_writeb(hdmi, HDMI_EXT_HDELAY_L, value & 0xFF);
	hdmi_writeb(hdmi, HDMI_EXT_HDELAY_H, (value >> 8) & 0xFF);
	value = mode->hsync_end - mode->hsync_start;
	hdmi_writeb(hdmi, HDMI_EXT_HDURATION_L, value & 0xFF);
	hdmi_writeb(hdmi, HDMI_EXT_HDURATION_H, (value >> 8) & 0xFF);
	value = mode->vtotal;
	hdmi_writeb(hdmi, HDMI_EXT_VTOTAL_L, value & 0xFF);
	hdmi_writeb(hdmi, HDMI_EXT_VTOTAL_H, (value >> 8) & 0xFF);
	value = mode->vtotal - mode->vdisplay;
	hdmi_writeb(hdmi, HDMI_EXT_VBLANK_L, value & 0xFF);
	value = mode->vtotal - mode->vsync_start + vsync_offset;
	hdmi_writeb(hdmi, HDMI_EXT_VDELAY, value & 0xFF);
	value = mode->vsync_end - mode->vsync_start;
	hdmi_writeb(hdmi, HDMI_EXT_VDURATION, value & 0xFF);
	return 0;
}
/*
 * Write one PHY register, then pulse the PLL reset so the new value is
 * latched (reset held ~100us, then ~1ms settle after release).
 */
static void
rk3066_hdmi_phy_write(struct rk3066_hdmi *hdmi, u16 offset, u8 value)
{
	hdmi_writeb(hdmi, offset, value);
	hdmi_modb(hdmi, HDMI_SYS_CTRL,
		  HDMI_SYS_PLL_RESET_MASK, HDMI_SYS_PLL_RESET);
	usleep_range(90, 100);
	hdmi_modb(hdmi, HDMI_SYS_CTRL, HDMI_SYS_PLL_RESET_MASK, 0);
	usleep_range(900, 1000);
}
/*
 * Configure the TMDS PHY for the current pixel clock, choosing one of
 * three vendor-provided register sets by TMDS frequency band
 * (>100 MHz, >50 MHz, else).
 */
static void rk3066_hdmi_config_phy(struct rk3066_hdmi *hdmi)
{
	/* TMDS uses the same frequency as dclk. */
	hdmi_writeb(hdmi, HDMI_DEEP_COLOR_MODE, 0x22);
	/*
	 * The semi-public documentation does not describe the hdmi registers
	 * used by the function rk3066_hdmi_phy_write(), so we keep using
	 * these magic values for now.
	 */
	if (hdmi->tmdsclk > 100000000) {
		rk3066_hdmi_phy_write(hdmi, 0x158, 0x0E);
		rk3066_hdmi_phy_write(hdmi, 0x15c, 0x00);
		rk3066_hdmi_phy_write(hdmi, 0x160, 0x60);
		rk3066_hdmi_phy_write(hdmi, 0x164, 0x00);
		rk3066_hdmi_phy_write(hdmi, 0x168, 0xDA);
		rk3066_hdmi_phy_write(hdmi, 0x16c, 0xA1);
		rk3066_hdmi_phy_write(hdmi, 0x170, 0x0e);
		rk3066_hdmi_phy_write(hdmi, 0x174, 0x22);
		rk3066_hdmi_phy_write(hdmi, 0x178, 0x00);
	} else if (hdmi->tmdsclk > 50000000) {
		rk3066_hdmi_phy_write(hdmi, 0x158, 0x06);
		rk3066_hdmi_phy_write(hdmi, 0x15c, 0x00);
		rk3066_hdmi_phy_write(hdmi, 0x160, 0x60);
		rk3066_hdmi_phy_write(hdmi, 0x164, 0x00);
		rk3066_hdmi_phy_write(hdmi, 0x168, 0xCA);
		rk3066_hdmi_phy_write(hdmi, 0x16c, 0xA3);
		rk3066_hdmi_phy_write(hdmi, 0x170, 0x0e);
		rk3066_hdmi_phy_write(hdmi, 0x174, 0x20);
		rk3066_hdmi_phy_write(hdmi, 0x178, 0x00);
	} else {
		rk3066_hdmi_phy_write(hdmi, 0x158, 0x02);
		rk3066_hdmi_phy_write(hdmi, 0x15c, 0x00);
		rk3066_hdmi_phy_write(hdmi, 0x160, 0x60);
		rk3066_hdmi_phy_write(hdmi, 0x164, 0x00);
		rk3066_hdmi_phy_write(hdmi, 0x168, 0xC2);
		rk3066_hdmi_phy_write(hdmi, 0x16c, 0xA2);
		rk3066_hdmi_phy_write(hdmi, 0x170, 0x0e);
		rk3066_hdmi_phy_write(hdmi, 0x174, 0x20);
		rk3066_hdmi_phy_write(hdmi, 0x178, 0x00);
	}
}
/*
 * Full modeset sequence for @mode: derive VIC/colorimetry, mute output,
 * drop to power mode B, program video format and timing, upload the
 * AVI frame (HDMI sinks only), configure the PHY, power up to mode E,
 * reinit the DDC clock for the new TMDS rate, then unmute video.
 */
static int rk3066_hdmi_setup(struct rk3066_hdmi *hdmi,
			     struct drm_display_mode *mode)
{
	struct drm_display_info *display = &hdmi->connector.display_info;
	hdmi->hdmi_data.vic = drm_match_cea_mode(mode);
	hdmi->hdmi_data.enc_out_format = HDMI_COLORSPACE_RGB;
	/* SD-resolution VICs use ITU-601 colorimetry, others ITU-709. */
	if (hdmi->hdmi_data.vic == 6 || hdmi->hdmi_data.vic == 7 ||
	    hdmi->hdmi_data.vic == 21 || hdmi->hdmi_data.vic == 22 ||
	    hdmi->hdmi_data.vic == 2 || hdmi->hdmi_data.vic == 3 ||
	    hdmi->hdmi_data.vic == 17 || hdmi->hdmi_data.vic == 18)
		hdmi->hdmi_data.colorimetry = HDMI_COLORIMETRY_ITU_601;
	else
		hdmi->hdmi_data.colorimetry = HDMI_COLORIMETRY_ITU_709;
	hdmi->tmdsclk = mode->clock * 1000;
	/* Mute video and audio output. */
	hdmi_modb(hdmi, HDMI_VIDEO_CTRL2, HDMI_VIDEO_AUDIO_DISABLE_MASK,
		  HDMI_AUDIO_DISABLE | HDMI_VIDEO_DISABLE);
	/* Set power state to mode B. */
	if (rk3066_hdmi_get_power_mode(hdmi) != HDMI_SYS_POWER_MODE_B)
		rk3066_hdmi_set_power_mode(hdmi, HDMI_SYS_POWER_MODE_B);
	/* Input video mode is RGB 24 bit. Use external data enable signal. */
	hdmi_modb(hdmi, HDMI_AV_CTRL1,
		  HDMI_VIDEO_DE_MASK, HDMI_VIDEO_EXTERNAL_DE);
	hdmi_writeb(hdmi, HDMI_VIDEO_CTRL1,
		    HDMI_VIDEO_OUTPUT_RGB444 |
		    HDMI_VIDEO_INPUT_DATA_DEPTH_8BIT |
		    HDMI_VIDEO_INPUT_COLOR_RGB);
	hdmi_writeb(hdmi, HDMI_DEEP_COLOR_MODE, 0x20);
	rk3066_hdmi_config_video_timing(hdmi, mode);
	if (display->is_hdmi) {
		hdmi_modb(hdmi, HDMI_HDCP_CTRL, HDMI_VIDEO_MODE_MASK,
			  HDMI_VIDEO_MODE_HDMI);
		rk3066_hdmi_config_avi(hdmi, mode);
	} else {
		/* DVI sink: plain video mode, no infoframes. */
		hdmi_modb(hdmi, HDMI_HDCP_CTRL, HDMI_VIDEO_MODE_MASK, 0);
	}
	rk3066_hdmi_config_phy(hdmi);
	rk3066_hdmi_set_power_mode(hdmi, HDMI_SYS_POWER_MODE_E);
	/*
	 * When the IP controller is configured with accurate video
	 * timing, the TMDS clock source should be switched to
	 * DCLK_LCDC, so we need to init the TMDS rate to the pixel mode
	 * clock rate and reconfigure the DDC clock.
	 */
	rk3066_hdmi_i2c_init(hdmi);
	/* Unmute video output. */
	hdmi_modb(hdmi, HDMI_VIDEO_CTRL2,
		  HDMI_VIDEO_AUDIO_DISABLE_MASK, HDMI_AUDIO_DISABLE);
	return 0;
}
/* Encoder .mode_set: only cache the adjusted mode for later re-enable. */
static void
rk3066_hdmi_encoder_mode_set(struct drm_encoder *encoder,
			     struct drm_display_mode *mode,
			     struct drm_display_mode *adj_mode)
{
	struct rk3066_hdmi *hdmi = encoder_to_rk3066_hdmi(encoder);
	/* Store the display mode for plugin/DPMS poweron events. */
	drm_mode_copy(&hdmi->previous_mode, adj_mode);
}
/*
 * Encoder .enable: select which VOP (big/little) feeds HDMI via the GRF
 * mux (hi-word write-enable format), then run the full mode setup with
 * the cached mode.
 */
static void rk3066_hdmi_encoder_enable(struct drm_encoder *encoder)
{
	struct rk3066_hdmi *hdmi = encoder_to_rk3066_hdmi(encoder);
	int mux, val;
	mux = drm_of_encoder_active_endpoint_id(hdmi->dev->of_node, encoder);
	if (mux)
		val = (HDMI_VIDEO_SEL << 16) | HDMI_VIDEO_SEL;
	else
		val = HDMI_VIDEO_SEL << 16;
	regmap_write(hdmi->grf_regmap, GRF_SOC_CON0, val);
	DRM_DEV_DEBUG(hdmi->dev, "hdmi encoder enable select: vop%s\n",
		      (mux) ? "1" : "0");
	rk3066_hdmi_setup(hdmi, &hdmi->previous_mode);
}
/*
 * Encoder .disable: if the block is fully powered (mode E), mute
 * audio/video and reset the audio capture logic before dropping the
 * power state back to mode A.
 */
static void rk3066_hdmi_encoder_disable(struct drm_encoder *encoder)
{
	struct rk3066_hdmi *hdmi = encoder_to_rk3066_hdmi(encoder);
	DRM_DEV_DEBUG(hdmi->dev, "hdmi encoder disable\n");
	if (rk3066_hdmi_get_power_mode(hdmi) == HDMI_SYS_POWER_MODE_E) {
		hdmi_writeb(hdmi, HDMI_VIDEO_CTRL2,
			    HDMI_VIDEO_AUDIO_DISABLE_MASK);
		hdmi_modb(hdmi, HDMI_VIDEO_CTRL2,
			  HDMI_AUDIO_CP_LOGIC_RESET_MASK,
			  HDMI_AUDIO_CP_LOGIC_RESET);
		usleep_range(500, 510);
	}
	rk3066_hdmi_set_power_mode(hdmi, HDMI_SYS_POWER_MODE_A);
}
/* Encoder .mode_fixup: no adjustment needed, accept every mode as-is. */
static bool
rk3066_hdmi_encoder_mode_fixup(struct drm_encoder *encoder,
			       const struct drm_display_mode *mode,
			       struct drm_display_mode *adj_mode)
{
	return true;
}
/*
 * Encoder .atomic_check: record into the rockchip CRTC state that this
 * output is HDMI using parallel RGB888.
 */
static int
rk3066_hdmi_encoder_atomic_check(struct drm_encoder *encoder,
				 struct drm_crtc_state *crtc_state,
				 struct drm_connector_state *conn_state)
{
	struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc_state);
	s->output_mode = ROCKCHIP_OUT_MODE_P888;
	s->output_type = DRM_MODE_CONNECTOR_HDMIA;
	return 0;
}
/* DRM encoder helper callbacks for the RK3066 HDMI encoder. */
static const
struct drm_encoder_helper_funcs rk3066_hdmi_encoder_helper_funcs = {
	.enable     = rk3066_hdmi_encoder_enable,
	.disable    = rk3066_hdmi_encoder_disable,
	.mode_fixup = rk3066_hdmi_encoder_mode_fixup,
	.mode_set   = rk3066_hdmi_encoder_mode_set,
	.atomic_check = rk3066_hdmi_encoder_atomic_check,
};
/*
 * Connector .detect: sample the hot-plug level from the HPG/MSENS
 * status register.
 */
static enum drm_connector_status
rk3066_hdmi_connector_detect(struct drm_connector *connector, bool force)
{
	struct rk3066_hdmi *hdmi = connector_to_rk3066_hdmi(connector);
	u8 status = hdmi_readb(hdmi, HDMI_HPG_MENS_STA);

	if (status & HDMI_HPG_IN_STATUS_HIGH)
		return connector_status_connected;

	return connector_status_disconnected;
}
/*
 * Connector .get_modes: read the EDID over the built-in DDC adapter and
 * populate the connector's mode list.  Returns the number of modes
 * added (0 when no DDC adapter or no EDID).
 */
static int rk3066_hdmi_connector_get_modes(struct drm_connector *connector)
{
	struct rk3066_hdmi *hdmi = connector_to_rk3066_hdmi(connector);
	struct edid *edid;
	int ret = 0;
	if (!hdmi->ddc)
		return 0;
	edid = drm_get_edid(connector, hdmi->ddc);
	if (edid) {
		drm_connector_update_edid_property(connector, edid);
		ret = drm_add_edid_modes(connector, edid);
		kfree(edid);
	}
	return ret;
}
/*
 * Connector .mode_valid: only CEA modes with VIC > 1 are supported
 * (VIC 1, 640x480, and non-CEA modes are rejected).
 */
static enum drm_mode_status
rk3066_hdmi_connector_mode_valid(struct drm_connector *connector,
				 struct drm_display_mode *mode)
{
	return (drm_match_cea_mode(mode) > 1) ? MODE_OK : MODE_BAD;
}
/* Connector .best_encoder: there is exactly one encoder — ours. */
static struct drm_encoder *
rk3066_hdmi_connector_best_encoder(struct drm_connector *connector)
{
	struct rk3066_hdmi *hdmi = connector_to_rk3066_hdmi(connector);
	return &hdmi->encoder.encoder;
}
/*
 * Connector .fill_modes: probe modes with the resolution clamped to the
 * hardware maximum of 1920x1080.
 */
static int
rk3066_hdmi_probe_single_connector_modes(struct drm_connector *connector,
					 uint32_t maxX, uint32_t maxY)
{
	uint32_t clamped_x = (maxX > 1920) ? 1920 : maxX;
	uint32_t clamped_y = (maxY > 1080) ? 1080 : maxY;

	return drm_helper_probe_single_connector_modes(connector,
						       clamped_x, clamped_y);
}
/* Connector .destroy: unregister from sysfs, then free DRM state. */
static void rk3066_hdmi_connector_destroy(struct drm_connector *connector)
{
	drm_connector_unregister(connector);
	drm_connector_cleanup(connector);
}
/* Connector callbacks (atomic helpers for state management). */
static const struct drm_connector_funcs rk3066_hdmi_connector_funcs = {
	.fill_modes = rk3066_hdmi_probe_single_connector_modes,
	.detect = rk3066_hdmi_connector_detect,
	.destroy = rk3066_hdmi_connector_destroy,
	.reset = drm_atomic_helper_connector_reset,
	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
/* Connector probe helpers (mode enumeration/validation). */
static const
struct drm_connector_helper_funcs rk3066_hdmi_connector_helper_funcs = {
	.get_modes = rk3066_hdmi_connector_get_modes,
	.mode_valid = rk3066_hdmi_connector_mode_valid,
	.best_encoder = rk3066_hdmi_connector_best_encoder,
};
/*
 * Create and wire up the DRM encoder and HDMI connector.
 *
 * Fixes the original's unchecked drm_simple_encoder_init(),
 * drm_connector_init_with_ddc() and drm_connector_attach_encoder()
 * calls: a failure in any of them previously left a half-initialized
 * encoder/connector while still reporting success.  Partially created
 * objects are now cleaned up on the error paths.
 */
static int
rk3066_hdmi_register(struct drm_device *drm, struct rk3066_hdmi *hdmi)
{
	struct drm_encoder *encoder = &hdmi->encoder.encoder;
	struct device *dev = hdmi->dev;
	int ret;

	encoder->possible_crtcs =
		drm_of_find_possible_crtcs(drm, dev->of_node);
	/*
	 * If we failed to find the CRTC(s) which this encoder is
	 * supposed to be connected to, it's because the CRTC has
	 * not been registered yet. Defer probing, and hope that
	 * the required CRTC is added later.
	 */
	if (encoder->possible_crtcs == 0)
		return -EPROBE_DEFER;

	ret = drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS);
	if (ret)
		return ret;
	drm_encoder_helper_add(encoder, &rk3066_hdmi_encoder_helper_funcs);

	hdmi->connector.polled = DRM_CONNECTOR_POLL_HPD;
	drm_connector_helper_add(&hdmi->connector,
				 &rk3066_hdmi_connector_helper_funcs);
	ret = drm_connector_init_with_ddc(drm, &hdmi->connector,
					  &rk3066_hdmi_connector_funcs,
					  DRM_MODE_CONNECTOR_HDMIA,
					  hdmi->ddc);
	if (ret)
		goto err_cleanup_encoder;

	ret = drm_connector_attach_encoder(&hdmi->connector, encoder);
	if (ret)
		goto err_cleanup_connector;

	return 0;

err_cleanup_connector:
	drm_connector_cleanup(&hdmi->connector);
err_cleanup_encoder:
	drm_encoder_cleanup(encoder);
	return ret;
}
/*
 * Hard IRQ handler: acknowledge pending interrupts, complete a waiting
 * EDID transfer, and wake the threaded handler on hot-plug/sense
 * changes.  Registers are only readable from power mode B upward, so
 * the block is bumped out of mode A first.
 */
static irqreturn_t rk3066_hdmi_hardirq(int irq, void *dev_id)
{
	struct rk3066_hdmi *hdmi = dev_id;
	irqreturn_t ret = IRQ_NONE;
	u8 interrupt;
	if (rk3066_hdmi_get_power_mode(hdmi) == HDMI_SYS_POWER_MODE_A)
		hdmi_writeb(hdmi, HDMI_SYS_CTRL, HDMI_SYS_POWER_MODE_B);
	interrupt = hdmi_readb(hdmi, HDMI_INTR_STATUS1);
	if (interrupt)
		hdmi_writeb(hdmi, HDMI_INTR_STATUS1, interrupt);
	if (interrupt & HDMI_INTR_EDID_MASK) {
		hdmi->i2c->stat = interrupt;
		complete(&hdmi->i2c->cmpltn);
	}
	if (interrupt & (HDMI_INTR_HOTPLUG | HDMI_INTR_MSENS))
		ret = IRQ_WAKE_THREAD;
	return ret;
}
/* Threaded IRQ handler: forward hot-plug changes to the DRM core. */
static irqreturn_t rk3066_hdmi_irq(int irq, void *dev_id)
{
	struct rk3066_hdmi *hdmi = dev_id;
	drm_helper_hpd_irq_event(hdmi->connector.dev);
	return IRQ_HANDLED;
}
/*
 * DDC read: wait (<=100ms) for the EDID-done interrupt signalled by the
 * hard IRQ, then drain the hardware EDID FIFO into the message buffer.
 * Returns -EAGAIN on timeout or a hardware-reported EDID error.
 */
static int rk3066_hdmi_i2c_read(struct rk3066_hdmi *hdmi, struct i2c_msg *msgs)
{
	int length = msgs->len;
	u8 *buf = msgs->buf;
	int ret;
	ret = wait_for_completion_timeout(&hdmi->i2c->cmpltn, HZ / 10);
	if (!ret || hdmi->i2c->stat & HDMI_INTR_EDID_ERR)
		return -EAGAIN;
	while (length--)
		*buf++ = hdmi_readb(hdmi, HDMI_DDC_READ_FIFO_ADDR);
	return 0;
}
/*
 * DDC "write": the controller can only fetch EDID, so a write is
 * interpreted as setting the EDID word address (DDC_ADDR) or segment
 * pointer (DDC_SEGMENT_ADDR).  Programming these registers kicks off
 * the hardware EDID fetch that the subsequent read waits on.
 */
static int rk3066_hdmi_i2c_write(struct rk3066_hdmi *hdmi, struct i2c_msg *msgs)
{
	/*
	 * The DDC module only supports read EDID message, so
	 * we assume that each word write to this i2c adapter
	 * should be the offset of the EDID word address.
	 */
	if (msgs->len != 1 ||
	    (msgs->addr != DDC_ADDR && msgs->addr != DDC_SEGMENT_ADDR))
		return -EINVAL;
	reinit_completion(&hdmi->i2c->cmpltn);
	if (msgs->addr == DDC_SEGMENT_ADDR)
		hdmi->i2c->segment_addr = msgs->buf[0];
	if (msgs->addr == DDC_ADDR)
		hdmi->i2c->ddc_addr = msgs->buf[0];
	/* Set edid fifo first address. */
	hdmi_writeb(hdmi, HDMI_EDID_FIFO_ADDR, 0x00);
	/* Set edid word address 0x00/0x80. */
	hdmi_writeb(hdmi, HDMI_EDID_WORD_ADDR, hdmi->i2c->ddc_addr);
	/* Set edid segment pointer. */
	hdmi_writeb(hdmi, HDMI_EDID_SEGMENT_POINTER, hdmi->i2c->segment_addr);
	return 0;
}
/*
 * i2c .master_xfer: serialize DDC transfers, reprogram the DDC clock,
 * unmask the EDID interrupt for the duration of the transfer, and
 * dispatch each message to the read/write helper.  Returns the number
 * of messages processed or a negative error.
 */
static int rk3066_hdmi_i2c_xfer(struct i2c_adapter *adap,
				struct i2c_msg *msgs, int num)
{
	struct rk3066_hdmi *hdmi = i2c_get_adapdata(adap);
	struct rk3066_hdmi_i2c *i2c = hdmi->i2c;
	int i, ret = 0;
	mutex_lock(&i2c->i2c_lock);
	rk3066_hdmi_i2c_init(hdmi);
	/* Unmute HDMI EDID interrupt. */
	hdmi_modb(hdmi, HDMI_INTR_MASK1,
		  HDMI_INTR_EDID_MASK, HDMI_INTR_EDID_MASK);
	i2c->stat = 0;
	for (i = 0; i < num; i++) {
		DRM_DEV_DEBUG(hdmi->dev,
			      "xfer: num: %d/%d, len: %d, flags: %#x\n",
			      i + 1, num, msgs[i].len, msgs[i].flags);
		if (msgs[i].flags & I2C_M_RD)
			ret = rk3066_hdmi_i2c_read(hdmi, &msgs[i]);
		else
			ret = rk3066_hdmi_i2c_write(hdmi, &msgs[i]);
		if (ret < 0)
			break;
	}
	if (!ret)
		ret = num;
	/* Mute HDMI EDID interrupt. */
	hdmi_modb(hdmi, HDMI_INTR_MASK1, HDMI_INTR_EDID_MASK, 0);
	mutex_unlock(&i2c->i2c_lock);
	return ret;
}
/* i2c .functionality: plain I2C plus SMBus emulation. */
static u32 rk3066_hdmi_i2c_func(struct i2c_adapter *adapter)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}
/* i2c algorithm for the internal DDC adapter. */
static const struct i2c_algorithm rk3066_hdmi_algorithm = {
	.master_xfer = rk3066_hdmi_i2c_xfer,
	.functionality = rk3066_hdmi_i2c_func,
};
/*
 * Allocate and register the built-in DDC i2c adapter.  On success the
 * adapter is stored in hdmi->i2c and returned; on failure the devm
 * allocation is released and an ERR_PTR is returned.
 */
static struct i2c_adapter *rk3066_hdmi_i2c_adapter(struct rk3066_hdmi *hdmi)
{
	struct i2c_adapter *adap;
	struct rk3066_hdmi_i2c *i2c;
	int ret;
	i2c = devm_kzalloc(hdmi->dev, sizeof(*i2c), GFP_KERNEL);
	if (!i2c)
		return ERR_PTR(-ENOMEM);
	mutex_init(&i2c->i2c_lock);
	init_completion(&i2c->cmpltn);
	adap = &i2c->adap;
	adap->class = I2C_CLASS_DDC;
	adap->owner = THIS_MODULE;
	adap->dev.parent = hdmi->dev;
	adap->dev.of_node = hdmi->dev->of_node;
	adap->algo = &rk3066_hdmi_algorithm;
	strscpy(adap->name, "RK3066 HDMI", sizeof(adap->name));
	i2c_set_adapdata(adap, hdmi);
	ret = i2c_add_adapter(adap);
	if (ret) {
		DRM_DEV_ERROR(hdmi->dev, "cannot add %s I2C adapter\n",
			      adap->name);
		devm_kfree(hdmi->dev, i2c);
		return ERR_PTR(ret);
	}
	hdmi->i2c = i2c;
	DRM_DEV_DEBUG(hdmi->dev, "registered %s I2C bus driver\n", adap->name);
	return adap;
}
/*
 * Component bind: map registers, enable the bus clock, look up the GRF
 * syscon, register the DDC adapter, configure interrupt masks with the
 * block parked in mode A, register the DRM encoder/connector, and
 * finally request the (shared, threaded) HDMI interrupt.  Each error
 * path unwinds everything acquired before it.
 */
static int rk3066_hdmi_bind(struct device *dev, struct device *master,
			    void *data)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct drm_device *drm = data;
	struct rk3066_hdmi *hdmi;
	int irq;
	int ret;
	hdmi = devm_kzalloc(dev, sizeof(*hdmi), GFP_KERNEL);
	if (!hdmi)
		return -ENOMEM;
	hdmi->dev = dev;
	hdmi->drm_dev = drm;
	hdmi->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(hdmi->regs))
		return PTR_ERR(hdmi->regs);
	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;
	hdmi->hclk = devm_clk_get(dev, "hclk");
	if (IS_ERR(hdmi->hclk)) {
		DRM_DEV_ERROR(dev, "unable to get HDMI hclk clock\n");
		return PTR_ERR(hdmi->hclk);
	}
	ret = clk_prepare_enable(hdmi->hclk);
	if (ret) {
		DRM_DEV_ERROR(dev, "cannot enable HDMI hclk clock: %d\n", ret);
		return ret;
	}
	hdmi->grf_regmap = syscon_regmap_lookup_by_phandle(dev->of_node,
							   "rockchip,grf");
	if (IS_ERR(hdmi->grf_regmap)) {
		DRM_DEV_ERROR(dev, "unable to get rockchip,grf\n");
		ret = PTR_ERR(hdmi->grf_regmap);
		goto err_disable_hclk;
	}
	/* internal hclk = hdmi_hclk / 25 */
	hdmi_writeb(hdmi, HDMI_INTERNAL_CLK_DIVIDER, 25);
	hdmi->ddc = rk3066_hdmi_i2c_adapter(hdmi);
	if (IS_ERR(hdmi->ddc)) {
		ret = PTR_ERR(hdmi->ddc);
		hdmi->ddc = NULL;
		goto err_disable_hclk;
	}
	/* Registers require at least mode B; set masks, then park in A. */
	rk3066_hdmi_set_power_mode(hdmi, HDMI_SYS_POWER_MODE_B);
	usleep_range(999, 1000);
	hdmi_writeb(hdmi, HDMI_INTR_MASK1, HDMI_INTR_HOTPLUG);
	hdmi_writeb(hdmi, HDMI_INTR_MASK2, 0);
	hdmi_writeb(hdmi, HDMI_INTR_MASK3, 0);
	hdmi_writeb(hdmi, HDMI_INTR_MASK4, 0);
	rk3066_hdmi_set_power_mode(hdmi, HDMI_SYS_POWER_MODE_A);
	ret = rk3066_hdmi_register(drm, hdmi);
	if (ret)
		goto err_disable_i2c;
	dev_set_drvdata(dev, hdmi);
	ret = devm_request_threaded_irq(dev, irq, rk3066_hdmi_hardirq,
					rk3066_hdmi_irq, IRQF_SHARED,
					dev_name(dev), hdmi);
	if (ret) {
		DRM_DEV_ERROR(dev, "failed to request hdmi irq: %d\n", ret);
		goto err_cleanup_hdmi;
	}
	return 0;
err_cleanup_hdmi:
	hdmi->connector.funcs->destroy(&hdmi->connector);
	hdmi->encoder.encoder.funcs->destroy(&hdmi->encoder.encoder);
err_disable_i2c:
	i2c_put_adapter(hdmi->ddc);
err_disable_hclk:
	clk_disable_unprepare(hdmi->hclk);
	return ret;
}
/*
 * Component unbind: destroy the DRM objects, drop the DDC adapter
 * reference and disable the bus clock (mirror of the bind path).
 */
static void rk3066_hdmi_unbind(struct device *dev, struct device *master,
			       void *data)
{
	struct rk3066_hdmi *hdmi = dev_get_drvdata(dev);
	hdmi->connector.funcs->destroy(&hdmi->connector);
	hdmi->encoder.encoder.funcs->destroy(&hdmi->encoder.encoder);
	i2c_put_adapter(hdmi->ddc);
	clk_disable_unprepare(hdmi->hclk);
}
/* Component framework hooks; bound/unbound by the rockchip DRM master. */
static const struct component_ops rk3066_hdmi_ops = {
	.bind   = rk3066_hdmi_bind,
	.unbind = rk3066_hdmi_unbind,
};
/* Platform probe: defer all setup to the component bind callback. */
static int rk3066_hdmi_probe(struct platform_device *pdev)
{
	return component_add(&pdev->dev, &rk3066_hdmi_ops);
}
/* Platform remove: leave the component framework (unbind does the rest). */
static void rk3066_hdmi_remove(struct platform_device *pdev)
{
	component_del(&pdev->dev, &rk3066_hdmi_ops);
}
/* Device-tree match table. */
static const struct of_device_id rk3066_hdmi_dt_ids[] = {
	{ .compatible = "rockchip,rk3066-hdmi" },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, rk3066_hdmi_dt_ids);
/* Platform driver, registered by the rockchip DRM core driver. */
struct platform_driver rk3066_hdmi_driver = {
	.probe  = rk3066_hdmi_probe,
	.remove_new = rk3066_hdmi_remove,
	.driver = {
		.name = "rockchip-rk3066-hdmi",
		.of_match_table = rk3066_hdmi_dt_ids,
	},
};
| linux-master | drivers/gpu/drm/rockchip/rk3066_hdmi.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (c) 2014, Fuzhou Rockchip Electronics Co., Ltd
*/
#include <linux/clk.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/phy/phy.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <drm/bridge/dw_hdmi.h>
#include <drm/drm_edid.h>
#include <drm/drm_of.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
#include "rockchip_drm_drv.h"
#include "rockchip_drm_vop.h"
#define RK3228_GRF_SOC_CON2 0x0408
#define RK3228_HDMI_SDAIN_MSK BIT(14)
#define RK3228_HDMI_SCLIN_MSK BIT(13)
#define RK3228_GRF_SOC_CON6 0x0418
#define RK3228_HDMI_HPD_VSEL BIT(6)
#define RK3228_HDMI_SDA_VSEL BIT(5)
#define RK3228_HDMI_SCL_VSEL BIT(4)
#define RK3288_GRF_SOC_CON6 0x025C
#define RK3288_HDMI_LCDC_SEL BIT(4)
#define RK3328_GRF_SOC_CON2 0x0408
#define RK3328_HDMI_SDAIN_MSK BIT(11)
#define RK3328_HDMI_SCLIN_MSK BIT(10)
#define RK3328_HDMI_HPD_IOE BIT(2)
#define RK3328_GRF_SOC_CON3 0x040c
/* need to be unset if hdmi or i2c should control voltage */
#define RK3328_HDMI_SDA5V_GRF BIT(15)
#define RK3328_HDMI_SCL5V_GRF BIT(14)
#define RK3328_HDMI_HPD5V_GRF BIT(13)
#define RK3328_HDMI_CEC5V_GRF BIT(12)
#define RK3328_GRF_SOC_CON4 0x0410
#define RK3328_HDMI_HPD_SARADC BIT(13)
#define RK3328_HDMI_CEC_5V BIT(11)
#define RK3328_HDMI_SDA_5V BIT(10)
#define RK3328_HDMI_SCL_5V BIT(9)
#define RK3328_HDMI_HPD_5V BIT(8)
#define RK3399_GRF_SOC_CON20 0x6250
#define RK3399_HDMI_LCDC_SEL BIT(6)
#define RK3568_GRF_VO_CON1 0x0364
#define RK3568_HDMI_SDAIN_MSK BIT(15)
#define RK3568_HDMI_SCLIN_MSK BIT(14)
/*
 * Rockchip GRF registers use a write-mask in the high 16 bits: a bit is
 * only updated when its mirror bit in the top half is set.  Parenthesize
 * both macro arguments so compound expressions expand correctly.
 */
#define HIWORD_UPDATE(val, mask)	((val) | ((mask) << 16))
/**
 * struct rockchip_hdmi_chip_data - split out the grf settings of the chips
 * @lcdsel_grf_reg: grf register offset of lcdc select (-1 if the chip has
 *                  only one VOP and no mux)
 * @lcdsel_big: reg value of selecting vop big for HDMI
 * @lcdsel_lit: reg value of selecting vop little for HDMI
 */
struct rockchip_hdmi_chip_data {
	int	lcdsel_grf_reg;
	u32	lcdsel_big;
	u32	lcdsel_lit;
};

/* Per-device driver state, embedding the DRM encoder. */
struct rockchip_hdmi {
	struct device *dev;
	struct regmap *regmap;		/* GRF syscon */
	struct rockchip_encoder encoder;
	const struct rockchip_hdmi_chip_data *chip_data;
	const struct dw_hdmi_plat_data *plat_data;
	struct clk *ref_clk;		/* PHY reference clock ("ref"/"vpll") */
	struct clk *grf_clk;		/* optional clock gating GRF access */
	struct dw_hdmi *hdmi;		/* handle from dw_hdmi_bind() */
	struct regulator *avdd_0v9;
	struct regulator *avdd_1v8;
	struct phy *phy;		/* optional external HDMI PHY */
};
/* Recover the driver state from the embedded encoder. */
static struct rockchip_hdmi *to_rockchip_hdmi(struct drm_encoder *encoder)
{
	return container_of(to_rockchip_encoder(encoder),
			    struct rockchip_hdmi, encoder);
}
/*
 * MPLL configuration for the Synopsys HDMI PHY, indexed by the maximum
 * pixel clock each entry supports.  Each row carries three register
 * pairs (one per colour depth); the ~0UL row is the catch-all terminator.
 */
static const struct dw_hdmi_mpll_config rockchip_mpll_cfg[] = {
	{
		27000000, {
			{ 0x00b3, 0x0000},
			{ 0x2153, 0x0000},
			{ 0x40f3, 0x0000}
		},
	}, {
		36000000, {
			{ 0x00b3, 0x0000},
			{ 0x2153, 0x0000},
			{ 0x40f3, 0x0000}
		},
	}, {
		40000000, {
			{ 0x00b3, 0x0000},
			{ 0x2153, 0x0000},
			{ 0x40f3, 0x0000}
		},
	}, {
		54000000, {
			{ 0x0072, 0x0001},
			{ 0x2142, 0x0001},
			{ 0x40a2, 0x0001},
		},
	}, {
		65000000, {
			{ 0x0072, 0x0001},
			{ 0x2142, 0x0001},
			{ 0x40a2, 0x0001},
		},
	}, {
		66000000, {
			{ 0x013e, 0x0003},
			{ 0x217e, 0x0002},
			{ 0x4061, 0x0002}
		},
	}, {
		74250000, {
			{ 0x0072, 0x0001},
			{ 0x2145, 0x0002},
			{ 0x4061, 0x0002}
		},
	}, {
		/*
		 * NOTE(review): only the 8-bit entry is provided here; the
		 * 10/12-bit pairs are zero-initialized — confirm intended.
		 */
		83500000, {
			{ 0x0072, 0x0001},
		},
	}, {
		/*
		 * NOTE(review): the 108 MHz row precedes the 106.5 MHz one,
		 * so the table is not strictly sorted — confirm intended.
		 */
		108000000, {
			{ 0x0051, 0x0002},
			{ 0x2145, 0x0002},
			{ 0x4061, 0x0002}
		},
	}, {
		106500000, {
			{ 0x0051, 0x0002},
			{ 0x2145, 0x0002},
			{ 0x4061, 0x0002}
		},
	}, {
		146250000, {
			{ 0x0051, 0x0002},
			{ 0x2145, 0x0002},
			{ 0x4061, 0x0002}
		},
	}, {
		148500000, {
			{ 0x0051, 0x0003},
			{ 0x214c, 0x0003},
			{ 0x4064, 0x0003}
		},
	}, {
		340000000, {
			{ 0x0040, 0x0003 },
			{ 0x3b4c, 0x0003 },
			{ 0x5a64, 0x0003 },
		},
	}, {
		~0UL, {
			{ 0x00a0, 0x000a },
			{ 0x2001, 0x000f },
			{ 0x4002, 0x000f },
		},
	}
};

/* PHY current control, again indexed by maximum pixel clock. */
static const struct dw_hdmi_curr_ctrl rockchip_cur_ctr[] = {
	/*      pixelclk    bpp8    bpp10   bpp12 */
	{
		40000000, { 0x0018, 0x0018, 0x0018 },
	}, {
		65000000, { 0x0028, 0x0028, 0x0028 },
	}, {
		66000000, { 0x0038, 0x0038, 0x0038 },
	}, {
		74250000, { 0x0028, 0x0038, 0x0038 },
	}, {
		83500000, { 0x0028, 0x0038, 0x0038 },
	}, {
		146250000, { 0x0038, 0x0038, 0x0038 },
	}, {
		148500000, { 0x0000, 0x0038, 0x0038 },
	}, {
		600000000, { 0x0000, 0x0000, 0x0000 },
	}, {
		~0UL, { 0x0000, 0x0000, 0x0000},
	}
};

/* Symbol/termination/voltage-level settings per maximum pixel clock. */
static const struct dw_hdmi_phy_config rockchip_phy_config[] = {
	/*pixelclk   symbol   term   vlev*/
	{ 74250000,  0x8009, 0x0004, 0x0272},
	{ 148500000, 0x802b, 0x0004, 0x028d},
	{ 297000000, 0x8039, 0x0005, 0x028d},
	{ ~0UL,	     0x0000, 0x0000, 0x0000}
};
/*
 * Fetch the GRF regmap, the (optional) reference and GRF clocks and the
 * analog supply regulators from the device tree.  Returns 0 or a
 * negative errno; -EPROBE_DEFER when a dependency is not ready yet.
 */
static int rockchip_hdmi_parse_dt(struct rockchip_hdmi *hdmi)
{
	struct device_node *np = hdmi->dev->of_node;

	hdmi->regmap = syscon_regmap_lookup_by_phandle(np, "rockchip,grf");
	if (IS_ERR(hdmi->regmap)) {
		DRM_DEV_ERROR(hdmi->dev, "Unable to get rockchip,grf\n");
		return PTR_ERR(hdmi->regmap);
	}

	/* Try the "ref" clock name first, then fall back to "vpll". */
	hdmi->ref_clk = devm_clk_get_optional(hdmi->dev, "ref");
	if (!hdmi->ref_clk)
		hdmi->ref_clk = devm_clk_get_optional(hdmi->dev, "vpll");

	if (PTR_ERR(hdmi->ref_clk) == -EPROBE_DEFER) {
		return -EPROBE_DEFER;
	} else if (IS_ERR(hdmi->ref_clk)) {
		DRM_DEV_ERROR(hdmi->dev, "failed to get reference clock\n");
		return PTR_ERR(hdmi->ref_clk);
	}

	/* The GRF clock is optional: -ENOENT simply means "not wired up". */
	hdmi->grf_clk = devm_clk_get(hdmi->dev, "grf");
	if (PTR_ERR(hdmi->grf_clk) == -ENOENT) {
		hdmi->grf_clk = NULL;
	} else if (PTR_ERR(hdmi->grf_clk) == -EPROBE_DEFER) {
		return -EPROBE_DEFER;
	} else if (IS_ERR(hdmi->grf_clk)) {
		DRM_DEV_ERROR(hdmi->dev, "failed to get grf clock\n");
		return PTR_ERR(hdmi->grf_clk);
	}

	hdmi->avdd_0v9 = devm_regulator_get(hdmi->dev, "avdd-0v9");
	if (IS_ERR(hdmi->avdd_0v9))
		return PTR_ERR(hdmi->avdd_0v9);

	hdmi->avdd_1v8 = devm_regulator_get(hdmi->dev, "avdd-1v8");
	if (IS_ERR(hdmi->avdd_1v8))
		return PTR_ERR(hdmi->avdd_1v8);

	return 0;
}
/*
 * Filter display modes against the clock tree and the MPLL table.
 * A mode is rejected if the reference clock cannot get within 0.1%
 * of the required pixel clock, or if no MPLL entry covers it.
 */
static enum drm_mode_status
dw_hdmi_rockchip_mode_valid(struct dw_hdmi *dw_hdmi, void *data,
			    const struct drm_display_info *info,
			    const struct drm_display_mode *mode)
{
	struct rockchip_hdmi *hdmi = data;
	const struct dw_hdmi_mpll_config *mpll_cfg = rockchip_mpll_cfg;
	int pclk = mode->clock * 1000;
	bool exact_match = hdmi->plat_data->phy_force_vendor;
	int i;

	if (hdmi->ref_clk) {
		int rpclk = clk_round_rate(hdmi->ref_clk, pclk);

		/* 0.1% tolerance between requested and achievable rate */
		if (abs(rpclk - pclk) > pclk / 1000)
			return MODE_NOCLOCK;
	}

	for (i = 0; mpll_cfg[i].mpixelclock != (~0UL); i++) {
		/*
		 * For vendor specific phys force an exact match of the pixelclock
		 * to preserve the original behaviour of the driver.
		 */
		if (exact_match && pclk == mpll_cfg[i].mpixelclock)
			return MODE_OK;
		/*
		 * The Synopsys phy can work with pixelclocks up to the value given
		 * in the corresponding mpll_cfg entry.
		 */
		if (!exact_match && pclk <= mpll_cfg[i].mpixelclock)
			return MODE_OK;
	}

	return MODE_BAD;
}
/* Intentionally empty: no Rockchip-specific disable sequence is needed. */
static void dw_hdmi_rockchip_encoder_disable(struct drm_encoder *encoder)
{
}

/* Accept every mode unchanged; validation happens in mode_valid(). */
static bool
dw_hdmi_rockchip_encoder_mode_fixup(struct drm_encoder *encoder,
				    const struct drm_display_mode *mode,
				    struct drm_display_mode *adj_mode)
{
	return true;
}
/* Keep the HDMI reference clock in step with the adjusted pixel clock. */
static void dw_hdmi_rockchip_encoder_mode_set(struct drm_encoder *encoder,
					      struct drm_display_mode *mode,
					      struct drm_display_mode *adj_mode)
{
	struct rockchip_hdmi *hdmi = to_rockchip_hdmi(encoder);
	unsigned long rate = adj_mode->clock * 1000;

	clk_set_rate(hdmi->ref_clk, rate);
}
/*
 * Select which VOP (big or little) feeds the HDMI encoder by writing
 * the chip-specific GRF mux register.  No-op on SoCs without a mux.
 *
 * Fix: the final debug message previously tested `ret`, which by that
 * point held the regmap_write() result (0 on success), so it always
 * printed "BIG".  Keep the endpoint id in its own variable instead.
 */
static void dw_hdmi_rockchip_encoder_enable(struct drm_encoder *encoder)
{
	struct rockchip_hdmi *hdmi = to_rockchip_hdmi(encoder);
	u32 val;
	int mux;
	int ret;

	if (hdmi->chip_data->lcdsel_grf_reg < 0)
		return;

	/* Non-zero endpoint id selects the little VOP. */
	mux = drm_of_encoder_active_endpoint_id(hdmi->dev->of_node, encoder);
	if (mux)
		val = hdmi->chip_data->lcdsel_lit;
	else
		val = hdmi->chip_data->lcdsel_big;

	/* GRF access may be gated by an optional clock. */
	ret = clk_prepare_enable(hdmi->grf_clk);
	if (ret < 0) {
		DRM_DEV_ERROR(hdmi->dev, "failed to enable grfclk %d\n", ret);
		return;
	}

	ret = regmap_write(hdmi->regmap, hdmi->chip_data->lcdsel_grf_reg, val);
	if (ret != 0)
		DRM_DEV_ERROR(hdmi->dev, "Could not write to GRF: %d\n", ret);

	clk_disable_unprepare(hdmi->grf_clk);
	DRM_DEV_DEBUG(hdmi->dev, "vop %s output to hdmi\n",
		      mux ? "LIT" : "BIG");
}
/* Tell the CRTC it is driving an HDMI connector in AAAA output mode. */
static int
dw_hdmi_rockchip_encoder_atomic_check(struct drm_encoder *encoder,
				      struct drm_crtc_state *crtc_state,
				      struct drm_connector_state *conn_state)
{
	struct rockchip_crtc_state *state;

	state = to_rockchip_crtc_state(crtc_state);
	state->output_mode = ROCKCHIP_OUT_MODE_AAAA;
	state->output_type = DRM_MODE_CONNECTOR_HDMIA;

	return 0;
}
/* Encoder callbacks shared by all chip variants. */
static const struct drm_encoder_helper_funcs dw_hdmi_rockchip_encoder_helper_funcs = {
	.mode_fixup = dw_hdmi_rockchip_encoder_mode_fixup,
	.mode_set   = dw_hdmi_rockchip_encoder_mode_set,
	.enable     = dw_hdmi_rockchip_encoder_enable,
	.disable    = dw_hdmi_rockchip_encoder_disable,
	.atomic_check = dw_hdmi_rockchip_encoder_atomic_check,
};
/* Power the external (generic PHY framework) HDMI PHY on. */
static int dw_hdmi_rockchip_genphy_init(struct dw_hdmi *dw_hdmi, void *data,
					const struct drm_display_info *display,
					const struct drm_display_mode *mode)
{
	struct rockchip_hdmi *hdmi = data;

	return phy_power_on(hdmi->phy);
}

/* Power the external HDMI PHY off again. */
static void dw_hdmi_rockchip_genphy_disable(struct dw_hdmi *dw_hdmi, void *data)
{
	struct rockchip_hdmi *hdmi = data;

	phy_power_off(hdmi->phy);
}
/*
 * RK3228: after the generic HPD setup, select the HDMI voltage domain
 * for the HPD/SDA/SCL pads and the controller as DDC input source
 * (per the *_VSEL / *IN_MSK GRF bit names — confirm against the TRM).
 */
static void dw_hdmi_rk3228_setup_hpd(struct dw_hdmi *dw_hdmi, void *data)
{
	struct rockchip_hdmi *hdmi = (struct rockchip_hdmi *)data;

	dw_hdmi_phy_setup_hpd(dw_hdmi, data);

	regmap_write(hdmi->regmap,
		RK3228_GRF_SOC_CON6,
		HIWORD_UPDATE(RK3228_HDMI_HPD_VSEL | RK3228_HDMI_SDA_VSEL |
			      RK3228_HDMI_SCL_VSEL,
			      RK3228_HDMI_HPD_VSEL | RK3228_HDMI_SDA_VSEL |
			      RK3228_HDMI_SCL_VSEL));

	regmap_write(hdmi->regmap,
		RK3228_GRF_SOC_CON2,
		HIWORD_UPDATE(RK3228_HDMI_SDAIN_MSK | RK3228_HDMI_SCLIN_MSK,
			      RK3228_HDMI_SDAIN_MSK | RK3228_HDMI_SCLIN_MSK));
}
/*
 * RK3328: read HPD from the PHY and mirror the detect state into the
 * GRF 5V bits for the DDC pads (set when connected, cleared otherwise).
 */
static enum drm_connector_status
dw_hdmi_rk3328_read_hpd(struct dw_hdmi *dw_hdmi, void *data)
{
	struct rockchip_hdmi *hdmi = (struct rockchip_hdmi *)data;
	enum drm_connector_status status;

	status = dw_hdmi_phy_read_hpd(dw_hdmi, data);

	if (status == connector_status_connected)
		regmap_write(hdmi->regmap,
			RK3328_GRF_SOC_CON4,
			HIWORD_UPDATE(RK3328_HDMI_SDA_5V | RK3328_HDMI_SCL_5V,
				      RK3328_HDMI_SDA_5V | RK3328_HDMI_SCL_5V));
	else
		regmap_write(hdmi->regmap,
			RK3328_GRF_SOC_CON4,
			HIWORD_UPDATE(0, RK3328_HDMI_SDA_5V |
					 RK3328_HDMI_SCL_5V));
	return status;
}
/*
 * RK3328: one-time HPD pad setup — clear the 5V/SARADC routing bits,
 * hand the 5V pads to grf control, and mux SDA/SCL to the controller.
 */
static void dw_hdmi_rk3328_setup_hpd(struct dw_hdmi *dw_hdmi, void *data)
{
	struct rockchip_hdmi *hdmi = (struct rockchip_hdmi *)data;

	dw_hdmi_phy_setup_hpd(dw_hdmi, data);

	/* Enable and map pins to 3V grf-controlled io-voltage */
	regmap_write(hdmi->regmap,
		RK3328_GRF_SOC_CON4,
		HIWORD_UPDATE(0, RK3328_HDMI_HPD_SARADC | RK3328_HDMI_CEC_5V |
				 RK3328_HDMI_SDA_5V | RK3328_HDMI_SCL_5V |
				 RK3328_HDMI_HPD_5V));
	regmap_write(hdmi->regmap,
		RK3328_GRF_SOC_CON3,
		HIWORD_UPDATE(0, RK3328_HDMI_SDA5V_GRF | RK3328_HDMI_SCL5V_GRF |
				 RK3328_HDMI_HPD5V_GRF |
				 RK3328_HDMI_CEC5V_GRF));
	regmap_write(hdmi->regmap,
		RK3328_GRF_SOC_CON2,
		HIWORD_UPDATE(RK3328_HDMI_SDAIN_MSK | RK3328_HDMI_SCLIN_MSK,
			      RK3328_HDMI_SDAIN_MSK | RK3328_HDMI_SCLIN_MSK |
			      RK3328_HDMI_HPD_IOE));
}
/* ---- Per-SoC PHY ops, GRF data and dw-hdmi platform data ---- */

static const struct dw_hdmi_phy_ops rk3228_hdmi_phy_ops = {
	.init		= dw_hdmi_rockchip_genphy_init,
	.disable	= dw_hdmi_rockchip_genphy_disable,
	.read_hpd	= dw_hdmi_phy_read_hpd,
	.update_hpd	= dw_hdmi_phy_update_hpd,
	.setup_hpd	= dw_hdmi_rk3228_setup_hpd,
};

/* RK3228 has a single VOP, so no lcdsel mux register. */
static struct rockchip_hdmi_chip_data rk3228_chip_data = {
	.lcdsel_grf_reg = -1,
};

static const struct dw_hdmi_plat_data rk3228_hdmi_drv_data = {
	.mode_valid = dw_hdmi_rockchip_mode_valid,
	.mpll_cfg = rockchip_mpll_cfg,
	.cur_ctr = rockchip_cur_ctr,
	.phy_config = rockchip_phy_config,
	.phy_data = &rk3228_chip_data,
	.phy_ops = &rk3228_hdmi_phy_ops,
	.phy_name = "inno_dw_hdmi_phy2",
	.phy_force_vendor = true,
};

/* RK3288: big/little VOP selected via GRF_SOC_CON6 bit 4. */
static struct rockchip_hdmi_chip_data rk3288_chip_data = {
	.lcdsel_grf_reg = RK3288_GRF_SOC_CON6,
	.lcdsel_big = HIWORD_UPDATE(0, RK3288_HDMI_LCDC_SEL),
	.lcdsel_lit = HIWORD_UPDATE(RK3288_HDMI_LCDC_SEL, RK3288_HDMI_LCDC_SEL),
};

static const struct dw_hdmi_plat_data rk3288_hdmi_drv_data = {
	.mode_valid = dw_hdmi_rockchip_mode_valid,
	.mpll_cfg   = rockchip_mpll_cfg,
	.cur_ctr    = rockchip_cur_ctr,
	.phy_config = rockchip_phy_config,
	.phy_data = &rk3288_chip_data,
};

static const struct dw_hdmi_phy_ops rk3328_hdmi_phy_ops = {
	.init		= dw_hdmi_rockchip_genphy_init,
	.disable	= dw_hdmi_rockchip_genphy_disable,
	.read_hpd	= dw_hdmi_rk3328_read_hpd,
	.update_hpd	= dw_hdmi_phy_update_hpd,
	.setup_hpd	= dw_hdmi_rk3328_setup_hpd,
};

static struct rockchip_hdmi_chip_data rk3328_chip_data = {
	.lcdsel_grf_reg = -1,
};

static const struct dw_hdmi_plat_data rk3328_hdmi_drv_data = {
	.mode_valid = dw_hdmi_rockchip_mode_valid,
	.mpll_cfg = rockchip_mpll_cfg,
	.cur_ctr = rockchip_cur_ctr,
	.phy_config = rockchip_phy_config,
	.phy_data = &rk3328_chip_data,
	.phy_ops = &rk3328_hdmi_phy_ops,
	.phy_name = "inno_dw_hdmi_phy2",
	.phy_force_vendor = true,
	.use_drm_infoframe = true,
};

/* RK3399: big/little VOP selected via GRF_SOC_CON20 bit 6. */
static struct rockchip_hdmi_chip_data rk3399_chip_data = {
	.lcdsel_grf_reg = RK3399_GRF_SOC_CON20,
	.lcdsel_big = HIWORD_UPDATE(0, RK3399_HDMI_LCDC_SEL),
	.lcdsel_lit = HIWORD_UPDATE(RK3399_HDMI_LCDC_SEL, RK3399_HDMI_LCDC_SEL),
};

static const struct dw_hdmi_plat_data rk3399_hdmi_drv_data = {
	.mode_valid = dw_hdmi_rockchip_mode_valid,
	.mpll_cfg   = rockchip_mpll_cfg,
	.cur_ctr    = rockchip_cur_ctr,
	.phy_config = rockchip_phy_config,
	.phy_data = &rk3399_chip_data,
	.use_drm_infoframe = true,
};

static struct rockchip_hdmi_chip_data rk3568_chip_data = {
	.lcdsel_grf_reg = -1,
};

static const struct dw_hdmi_plat_data rk3568_hdmi_drv_data = {
	.mode_valid = dw_hdmi_rockchip_mode_valid,
	.mpll_cfg   = rockchip_mpll_cfg,
	.cur_ctr    = rockchip_cur_ctr,
	.phy_config = rockchip_phy_config,
	.phy_data = &rk3568_chip_data,
	.use_drm_infoframe = true,
};

static const struct of_device_id dw_hdmi_rockchip_dt_ids[] = {
	{ .compatible = "rockchip,rk3228-dw-hdmi",
	  .data = &rk3228_hdmi_drv_data
	},
	{ .compatible = "rockchip,rk3288-dw-hdmi",
	  .data = &rk3288_hdmi_drv_data
	},
	{ .compatible = "rockchip,rk3328-dw-hdmi",
	  .data = &rk3328_hdmi_drv_data
	},
	{ .compatible = "rockchip,rk3399-dw-hdmi",
	  .data = &rk3399_hdmi_drv_data
	},
	{ .compatible = "rockchip,rk3568-dw-hdmi",
	  .data = &rk3568_hdmi_drv_data
	},
	{},
};
MODULE_DEVICE_TABLE(of, dw_hdmi_rockchip_dt_ids);
static int dw_hdmi_rockchip_bind(struct device *dev, struct device *master,
void *data)
{
struct platform_device *pdev = to_platform_device(dev);
struct dw_hdmi_plat_data *plat_data;
const struct of_device_id *match;
struct drm_device *drm = data;
struct drm_encoder *encoder;
struct rockchip_hdmi *hdmi;
int ret;
if (!pdev->dev.of_node)
return -ENODEV;
hdmi = devm_kzalloc(&pdev->dev, sizeof(*hdmi), GFP_KERNEL);
if (!hdmi)
return -ENOMEM;
match = of_match_node(dw_hdmi_rockchip_dt_ids, pdev->dev.of_node);
plat_data = devm_kmemdup(&pdev->dev, match->data,
sizeof(*plat_data), GFP_KERNEL);
if (!plat_data)
return -ENOMEM;
hdmi->dev = &pdev->dev;
hdmi->plat_data = plat_data;
hdmi->chip_data = plat_data->phy_data;
plat_data->phy_data = hdmi;
plat_data->priv_data = hdmi;
encoder = &hdmi->encoder.encoder;
encoder->possible_crtcs = drm_of_find_possible_crtcs(drm, dev->of_node);
rockchip_drm_encoder_set_crtc_endpoint_id(&hdmi->encoder,
dev->of_node, 0, 0);
/*
* If we failed to find the CRTC(s) which this encoder is
* supposed to be connected to, it's because the CRTC has
* not been registered yet. Defer probing, and hope that
* the required CRTC is added later.
*/
if (encoder->possible_crtcs == 0)
return -EPROBE_DEFER;
ret = rockchip_hdmi_parse_dt(hdmi);
if (ret) {
if (ret != -EPROBE_DEFER)
DRM_DEV_ERROR(hdmi->dev, "Unable to parse OF data\n");
return ret;
}
hdmi->phy = devm_phy_optional_get(dev, "hdmi");
if (IS_ERR(hdmi->phy)) {
ret = PTR_ERR(hdmi->phy);
if (ret != -EPROBE_DEFER)
DRM_DEV_ERROR(hdmi->dev, "failed to get phy\n");
return ret;
}
ret = regulator_enable(hdmi->avdd_0v9);
if (ret) {
DRM_DEV_ERROR(hdmi->dev, "failed to enable avdd0v9: %d\n", ret);
goto err_avdd_0v9;
}
ret = regulator_enable(hdmi->avdd_1v8);
if (ret) {
DRM_DEV_ERROR(hdmi->dev, "failed to enable avdd1v8: %d\n", ret);
goto err_avdd_1v8;
}
ret = clk_prepare_enable(hdmi->ref_clk);
if (ret) {
DRM_DEV_ERROR(hdmi->dev, "Failed to enable HDMI reference clock: %d\n",
ret);
goto err_clk;
}
if (hdmi->chip_data == &rk3568_chip_data) {
regmap_write(hdmi->regmap, RK3568_GRF_VO_CON1,
HIWORD_UPDATE(RK3568_HDMI_SDAIN_MSK |
RK3568_HDMI_SCLIN_MSK,
RK3568_HDMI_SDAIN_MSK |
RK3568_HDMI_SCLIN_MSK));
}
drm_encoder_helper_add(encoder, &dw_hdmi_rockchip_encoder_helper_funcs);
drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS);
platform_set_drvdata(pdev, hdmi);
hdmi->hdmi = dw_hdmi_bind(pdev, encoder, plat_data);
/*
* If dw_hdmi_bind() fails we'll never call dw_hdmi_unbind(),
* which would have called the encoder cleanup. Do it manually.
*/
if (IS_ERR(hdmi->hdmi)) {
ret = PTR_ERR(hdmi->hdmi);
goto err_bind;
}
return 0;
err_bind:
drm_encoder_cleanup(encoder);
clk_disable_unprepare(hdmi->ref_clk);
err_clk:
regulator_disable(hdmi->avdd_1v8);
err_avdd_1v8:
regulator_disable(hdmi->avdd_0v9);
err_avdd_0v9:
return ret;
}
/* Component unbind: tear down in reverse order of bind. */
static void dw_hdmi_rockchip_unbind(struct device *dev, struct device *master,
				    void *data)
{
	struct rockchip_hdmi *hdmi = dev_get_drvdata(dev);

	dw_hdmi_unbind(hdmi->hdmi);
	drm_encoder_cleanup(&hdmi->encoder.encoder);
	clk_disable_unprepare(hdmi->ref_clk);

	regulator_disable(hdmi->avdd_1v8);
	regulator_disable(hdmi->avdd_0v9);
}
/* Component framework glue. */
static const struct component_ops dw_hdmi_rockchip_ops = {
	.bind	= dw_hdmi_rockchip_bind,
	.unbind	= dw_hdmi_rockchip_unbind,
};

/* Probe only registers the component; real setup happens in bind. */
static int dw_hdmi_rockchip_probe(struct platform_device *pdev)
{
	return component_add(&pdev->dev, &dw_hdmi_rockchip_ops);
}

static void dw_hdmi_rockchip_remove(struct platform_device *pdev)
{
	component_del(&pdev->dev, &dw_hdmi_rockchip_ops);
}

/* System resume: let the dw-hdmi core restore its state. */
static int __maybe_unused dw_hdmi_rockchip_resume(struct device *dev)
{
	struct rockchip_hdmi *hdmi = dev_get_drvdata(dev);

	dw_hdmi_resume(hdmi->hdmi);

	return 0;
}

/* No suspend hook is needed; only resume is wired up. */
static const struct dev_pm_ops dw_hdmi_rockchip_pm = {
	SET_SYSTEM_SLEEP_PM_OPS(NULL, dw_hdmi_rockchip_resume)
};

struct platform_driver dw_hdmi_rockchip_pltfm_driver = {
	.probe  = dw_hdmi_rockchip_probe,
	.remove_new = dw_hdmi_rockchip_remove,
	.driver = {
		.name = "dwhdmi-rockchip",
		.pm = &dw_hdmi_rockchip_pm,
		.of_match_table = dw_hdmi_rockchip_dt_ids,
	},
};
| linux-master | drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
* Author:
* Mark Yao <[email protected]>
* Sandy Huang <[email protected]>
*/
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/mfd/syscon.h>
#include <linux/of_graph.h>
#include <linux/phy/phy.h>
#include <linux/pinctrl/devinfo.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/reset.h>
#include <drm/display/drm_dp_helper.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_bridge_connector.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
#include "rockchip_drm_drv.h"
#include "rockchip_drm_vop.h"
#include "rockchip_lvds.h"
/* Values for rockchip_lvds.output, parsed from "rockchip,output". */
#define DISPLAY_OUTPUT_RGB		0
#define DISPLAY_OUTPUT_LVDS		1
#define DISPLAY_OUTPUT_DUAL_LVDS	2

struct rockchip_lvds;

/**
 * struct rockchip_lvds_soc_data - rockchip lvds Soc private data
 * @probe: LVDS platform probe function
 * @helper_funcs: LVDS connector helper functions
 */
struct rockchip_lvds_soc_data {
	int (*probe)(struct platform_device *pdev, struct rockchip_lvds *lvds);
	const struct drm_encoder_helper_funcs *helper_funcs;
};

/* Per-device driver state, embedding the connector and encoder. */
struct rockchip_lvds {
	struct device *dev;
	void __iomem *regs;		/* LVDS TX registers (RK3288 only) */
	struct regmap *grf;		/* GRF syscon */
	struct clk *pclk;		/* LVDS peripheral clock (RK3288) */
	struct phy *dphy;		/* DPHY used for LVDS on PX30 */
	const struct rockchip_lvds_soc_data *soc_data;
	int output; /* rgb lvds or dual lvds output */
	int format; /* vesa or jeida format */
	struct drm_device *drm_dev;
	struct drm_panel *panel;
	struct drm_bridge *bridge;
	struct drm_connector connector;
	struct rockchip_encoder encoder;
	struct dev_pin_info *pins;	/* optional "lcdc" pinctrl (RGB mode) */
};
/* Recover the driver state from the embedded connector. */
static inline struct rockchip_lvds *connector_to_lvds(struct drm_connector *connector)
{
	return container_of(connector, struct rockchip_lvds, connector);
}
/* Recover the driver state from the embedded encoder. */
static inline struct rockchip_lvds *encoder_to_lvds(struct drm_encoder *encoder)
{
	return container_of(to_rockchip_encoder(encoder),
			    struct rockchip_lvds, encoder);
}
/*
 * Write an LVDS TX register on channel 0, mirroring the write to
 * channel 1 unless running in single-channel LVDS mode.
 */
static inline void rk3288_writel(struct rockchip_lvds *lvds, u32 offset,
				 u32 val)
{
	writel_relaxed(val, lvds->regs + offset);

	if (lvds->output != DISPLAY_OUTPUT_LVDS)
		writel_relaxed(val,
			       lvds->regs + offset + RK3288_LVDS_CH1_OFFSET);
}
/*
 * Map the DT "data-mapping" string to an LVDS format constant.
 *
 * Fix: match the binding value exactly instead of by prefix — the old
 * strncmp() calls silently accepted strings with trailing garbage
 * (e.g. "vesa-249").  Returns -EINVAL for unknown values.
 */
static inline int rockchip_lvds_name_to_format(const char *s)
{
	if (strcmp(s, "jeida-18") == 0)
		return LVDS_JEIDA_18;
	else if (strcmp(s, "jeida-24") == 0)
		return LVDS_JEIDA_24;
	else if (strcmp(s, "vesa-24") == 0)
		return LVDS_VESA_24;

	return -EINVAL;
}
/*
 * Map the DT "rockchip,output" string to a DISPLAY_OUTPUT_* constant.
 *
 * Fix: match the binding value exactly instead of by prefix — the old
 * strncmp() calls silently accepted strings with trailing garbage.
 * Returns -EINVAL for unknown values.
 */
static inline int rockchip_lvds_name_to_output(const char *s)
{
	if (strcmp(s, "rgb") == 0)
		return DISPLAY_OUTPUT_RGB;
	else if (strcmp(s, "lvds") == 0)
		return DISPLAY_OUTPUT_LVDS;
	else if (strcmp(s, "duallvds") == 0)
		return DISPLAY_OUTPUT_DUAL_LVDS;

	return -EINVAL;
}
/* Stock atomic connector ops; no custom detect (panel is always there). */
static const struct drm_connector_funcs rockchip_lvds_connector_funcs = {
	.fill_modes = drm_helper_probe_single_connector_modes,
	.destroy = drm_connector_cleanup,
	.reset = drm_atomic_helper_connector_reset,
	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
/* Modes come straight from the attached panel. */
static int rockchip_lvds_connector_get_modes(struct drm_connector *connector)
{
	struct rockchip_lvds *lvds = connector_to_lvds(connector);

	return drm_panel_get_modes(lvds->panel, connector);
}
/* Only get_modes is needed; everything else uses the defaults. */
static const
struct drm_connector_helper_funcs rockchip_lvds_connector_helper_funcs = {
	.get_modes = rockchip_lvds_connector_get_modes,
};
/* Tell the CRTC it is driving an LVDS connector in P888 output mode. */
static int
rockchip_lvds_encoder_atomic_check(struct drm_encoder *encoder,
				   struct drm_crtc_state *crtc_state,
				   struct drm_connector_state *conn_state)
{
	struct rockchip_crtc_state *state;

	state = to_rockchip_crtc_state(crtc_state);
	state->output_mode = ROCKCHIP_OUT_MODE_P888;
	state->output_type = DRM_MODE_CONNECTOR_LVDS;

	return 0;
}
/*
 * RK3288: power the LVDS TX up and program the analog lanes for either
 * TTL (RGB) or LVDS operation, then enable the PLL and transmitter.
 * Returns 0 or a negative errno from clk/pm enabling.
 */
static int rk3288_lvds_poweron(struct rockchip_lvds *lvds)
{
	int ret;
	u32 val;

	ret = clk_enable(lvds->pclk);
	if (ret < 0) {
		DRM_DEV_ERROR(lvds->dev, "failed to enable lvds pclk %d\n", ret);
		return ret;
	}
	ret = pm_runtime_resume_and_get(lvds->dev);
	if (ret < 0) {
		DRM_DEV_ERROR(lvds->dev, "failed to get pm runtime: %d\n", ret);
		clk_disable(lvds->pclk);
		return ret;
	}

	/* All data lanes are enabled in both modes. */
	val = RK3288_LVDS_CH0_REG0_LANE4_EN | RK3288_LVDS_CH0_REG0_LANE3_EN |
		RK3288_LVDS_CH0_REG0_LANE2_EN | RK3288_LVDS_CH0_REG0_LANE1_EN |
		RK3288_LVDS_CH0_REG0_LANE0_EN;
	if (lvds->output == DISPLAY_OUTPUT_RGB) {
		/* TTL (parallel RGB) lane configuration */
		val |= RK3288_LVDS_CH0_REG0_TTL_EN |
			RK3288_LVDS_CH0_REG0_LANECK_EN;
		rk3288_writel(lvds, RK3288_LVDS_CH0_REG0, val);
		rk3288_writel(lvds, RK3288_LVDS_CH0_REG2,
			      RK3288_LVDS_PLL_FBDIV_REG2(0x46));
		rk3288_writel(lvds, RK3288_LVDS_CH0_REG4,
			      RK3288_LVDS_CH0_REG4_LANECK_TTL_MODE |
			      RK3288_LVDS_CH0_REG4_LANE4_TTL_MODE |
			      RK3288_LVDS_CH0_REG4_LANE3_TTL_MODE |
			      RK3288_LVDS_CH0_REG4_LANE2_TTL_MODE |
			      RK3288_LVDS_CH0_REG4_LANE1_TTL_MODE |
			      RK3288_LVDS_CH0_REG4_LANE0_TTL_MODE);
		rk3288_writel(lvds, RK3288_LVDS_CH0_REG5,
			      RK3288_LVDS_CH0_REG5_LANECK_TTL_DATA |
			      RK3288_LVDS_CH0_REG5_LANE4_TTL_DATA |
			      RK3288_LVDS_CH0_REG5_LANE3_TTL_DATA |
			      RK3288_LVDS_CH0_REG5_LANE2_TTL_DATA |
			      RK3288_LVDS_CH0_REG5_LANE1_TTL_DATA |
			      RK3288_LVDS_CH0_REG5_LANE0_TTL_DATA);
	} else {
		/* Differential LVDS lane configuration */
		val |= RK3288_LVDS_CH0_REG0_LVDS_EN |
			RK3288_LVDS_CH0_REG0_LANECK_EN;
		rk3288_writel(lvds, RK3288_LVDS_CH0_REG0, val);
		rk3288_writel(lvds, RK3288_LVDS_CH0_REG1,
			      RK3288_LVDS_CH0_REG1_LANECK_BIAS |
			      RK3288_LVDS_CH0_REG1_LANE4_BIAS |
			      RK3288_LVDS_CH0_REG1_LANE3_BIAS |
			      RK3288_LVDS_CH0_REG1_LANE2_BIAS |
			      RK3288_LVDS_CH0_REG1_LANE1_BIAS |
			      RK3288_LVDS_CH0_REG1_LANE0_BIAS);
		rk3288_writel(lvds, RK3288_LVDS_CH0_REG2,
			      RK3288_LVDS_CH0_REG2_RESERVE_ON |
			      RK3288_LVDS_CH0_REG2_LANECK_LVDS_MODE |
			      RK3288_LVDS_CH0_REG2_LANE4_LVDS_MODE |
			      RK3288_LVDS_CH0_REG2_LANE3_LVDS_MODE |
			      RK3288_LVDS_CH0_REG2_LANE2_LVDS_MODE |
			      RK3288_LVDS_CH0_REG2_LANE1_LVDS_MODE |
			      RK3288_LVDS_CH0_REG2_LANE0_LVDS_MODE |
			      RK3288_LVDS_PLL_FBDIV_REG2(0x46));
		rk3288_writel(lvds, RK3288_LVDS_CH0_REG4, 0x00);
		rk3288_writel(lvds, RK3288_LVDS_CH0_REG5, 0x00);
	}
	/* Common PLL dividers, then enable PLL and transmitter. */
	rk3288_writel(lvds, RK3288_LVDS_CH0_REG3,
		      RK3288_LVDS_PLL_FBDIV_REG3(0x46));
	rk3288_writel(lvds, RK3288_LVDS_CH0_REGD,
		      RK3288_LVDS_PLL_PREDIV_REGD(0x0a));
	rk3288_writel(lvds, RK3288_LVDS_CH0_REG20,
		      RK3288_LVDS_CH0_REG20_LSB);

	rk3288_writel(lvds, RK3288_LVDS_CFG_REGC,
		      RK3288_LVDS_CFG_REGC_PLL_ENABLE);
	rk3288_writel(lvds, RK3288_LVDS_CFG_REG21,
		      RK3288_LVDS_CFG_REG21_TX_ENABLE);

	return 0;
}
/*
 * RK3288: disable the transmitter/PLL and power the block down via the
 * GRF, then release the runtime PM reference and the clock.
 * NOTE(review): REG21/REGC are written with the same *_ENABLE values as
 * in poweron — presumably these act as "write strobes"; confirm vs TRM.
 */
static void rk3288_lvds_poweroff(struct rockchip_lvds *lvds)
{
	int ret;
	u32 val;

	rk3288_writel(lvds, RK3288_LVDS_CFG_REG21,
		      RK3288_LVDS_CFG_REG21_TX_ENABLE);
	rk3288_writel(lvds, RK3288_LVDS_CFG_REGC,
		      RK3288_LVDS_CFG_REGC_PLL_ENABLE);
	/* HIWORD-masked write: power the LVDS interface down. */
	val = LVDS_DUAL | LVDS_TTL_EN | LVDS_CH0_EN | LVDS_CH1_EN | LVDS_PWRDN;
	val |= val << 16;
	ret = regmap_write(lvds->grf, RK3288_LVDS_GRF_SOC_CON7, val);
	if (ret != 0)
		DRM_DEV_ERROR(lvds->dev, "Could not write to GRF: %d\n", ret);

	pm_runtime_put(lvds->dev);
	clk_disable(lvds->pclk);
}
/*
 * RK3288: program the GRF output-mode bits (format, TTL/dual-channel,
 * sync polarities) for the current mode.  Returns the regmap result.
 */
static int rk3288_lvds_grf_config(struct drm_encoder *encoder,
				  struct drm_display_mode *mode)
{
	struct rockchip_lvds *lvds = encoder_to_lvds(encoder);
	u8 pin_hsync = (mode->flags & DRM_MODE_FLAG_PHSYNC) ? 1 : 0;
	/* NOTE(review): PCSYNC (composite sync) flag drives "dclk" — confirm */
	u8 pin_dclk = (mode->flags & DRM_MODE_FLAG_PCSYNC) ? 1 : 0;
	u32 val;
	int ret;

	/* iomux to LCD data/sync mode */
	if (lvds->output == DISPLAY_OUTPUT_RGB)
		if (lvds->pins && !IS_ERR(lvds->pins->default_state))
			pinctrl_select_state(lvds->pins->p,
					     lvds->pins->default_state);
	val = lvds->format | LVDS_CH0_EN;
	if (lvds->output == DISPLAY_OUTPUT_RGB)
		val |= LVDS_TTL_EN | LVDS_CH1_EN;
	else if (lvds->output == DISPLAY_OUTPUT_DUAL_LVDS)
		val |= LVDS_DUAL | LVDS_CH1_EN;

	/* Compensate an odd back-porch width with a phase reset. */
	if ((mode->htotal - mode->hsync_start) & 0x01)
		val |= LVDS_START_PHASE_RST_1;

	val |= (pin_dclk << 8) | (pin_hsync << 9);
	val |= (0xffff << 16);	/* HIWORD mask: update all low bits */
	ret = regmap_write(lvds->grf, RK3288_LVDS_GRF_SOC_CON7, val);
	if (ret)
		DRM_DEV_ERROR(lvds->dev, "Could not write to GRF: %d\n", ret);

	return ret;
}
/*
 * Select which VOP feeds the LVDS TX based on the active endpoint;
 * a non-zero endpoint id selects the "LIT" VOP (per the GRF bit name).
 */
static int rk3288_lvds_set_vop_source(struct rockchip_lvds *lvds,
				      struct drm_encoder *encoder)
{
	u32 val = RK3288_LVDS_SOC_CON6_SEL_VOP_LIT << 16;
	int endpoint;
	int ret;

	endpoint = drm_of_encoder_active_endpoint_id(lvds->dev->of_node,
						     encoder);
	if (endpoint < 0)
		return endpoint;

	if (endpoint)
		val |= RK3288_LVDS_SOC_CON6_SEL_VOP_LIT;

	ret = regmap_write(lvds->grf, RK3288_LVDS_GRF_SOC_CON6, val);
	if (ret < 0)
		return ret;

	return 0;
}
/*
 * RK3288 enable path: panel prepare -> TX poweron -> GRF config ->
 * VOP mux -> panel enable.  On any failure the panel is unprepared
 * again and the encoder is left disabled.
 */
static void rk3288_lvds_encoder_enable(struct drm_encoder *encoder)
{
	struct rockchip_lvds *lvds = encoder_to_lvds(encoder);
	struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
	int ret;

	drm_panel_prepare(lvds->panel);

	ret = rk3288_lvds_poweron(lvds);
	if (ret < 0) {
		DRM_DEV_ERROR(lvds->dev, "failed to power on LVDS: %d\n", ret);
		drm_panel_unprepare(lvds->panel);
		return;
	}

	ret = rk3288_lvds_grf_config(encoder, mode);
	if (ret) {
		DRM_DEV_ERROR(lvds->dev, "failed to configure LVDS: %d\n", ret);
		drm_panel_unprepare(lvds->panel);
		return;
	}

	ret = rk3288_lvds_set_vop_source(lvds, encoder);
	if (ret) {
		DRM_DEV_ERROR(lvds->dev, "failed to set VOP source: %d\n", ret);
		drm_panel_unprepare(lvds->panel);
		return;
	}

	drm_panel_enable(lvds->panel);
}
/* RK3288 disable path: mirror of the enable sequence, in reverse. */
static void rk3288_lvds_encoder_disable(struct drm_encoder *encoder)
{
	struct rockchip_lvds *lvds = encoder_to_lvds(encoder);

	drm_panel_disable(lvds->panel);
	rk3288_lvds_poweroff(lvds);
	drm_panel_unprepare(lvds->panel);
}
/*
 * PX30: take a runtime PM reference and switch the block into LVDS
 * mode via the GRF.  Drops the PM reference again on failure.
 */
static int px30_lvds_poweron(struct rockchip_lvds *lvds)
{
	int ret;

	ret = pm_runtime_resume_and_get(lvds->dev);
	if (ret < 0) {
		DRM_DEV_ERROR(lvds->dev, "failed to get pm runtime: %d\n", ret);
		return ret;
	}

	/* Enable LVDS mode */
	ret = regmap_update_bits(lvds->grf, PX30_LVDS_GRF_PD_VO_CON1,
				 PX30_LVDS_MODE_EN(1) | PX30_LVDS_P2S_EN(1),
				 PX30_LVDS_MODE_EN(1) | PX30_LVDS_P2S_EN(1));
	if (ret)
		pm_runtime_put(lvds->dev);

	return ret;
}

/* PX30: leave LVDS mode and release the runtime PM reference. */
static void px30_lvds_poweroff(struct rockchip_lvds *lvds)
{
	regmap_update_bits(lvds->grf, PX30_LVDS_GRF_PD_VO_CON1,
			   PX30_LVDS_MODE_EN(1) | PX30_LVDS_P2S_EN(1),
			   PX30_LVDS_MODE_EN(0) | PX30_LVDS_P2S_EN(0));

	pm_runtime_put(lvds->dev);
}
/*
 * PX30: program the LVDS data format into the GRF.  Only plain LVDS
 * output is supported on this SoC; RGB/dual-LVDS are rejected.
 */
static int px30_lvds_grf_config(struct drm_encoder *encoder,
				struct drm_display_mode *mode)
{
	struct rockchip_lvds *lvds = encoder_to_lvds(encoder);

	if (lvds->output != DISPLAY_OUTPUT_LVDS) {
		DRM_DEV_ERROR(lvds->dev, "Unsupported display output %d\n",
			      lvds->output);
		return -EINVAL;
	}

	/* Set format */
	return regmap_update_bits(lvds->grf, PX30_LVDS_GRF_PD_VO_CON1,
				  PX30_LVDS_FORMAT(lvds->format),
				  PX30_LVDS_FORMAT(lvds->format));
}
/* Route the active VOP (by endpoint id) to the LVDS block via the GRF. */
static int px30_lvds_set_vop_source(struct rockchip_lvds *lvds,
				    struct drm_encoder *encoder)
{
	int endpoint;

	endpoint = drm_of_encoder_active_endpoint_id(lvds->dev->of_node,
						     encoder);
	if (endpoint < 0)
		return endpoint;

	return regmap_update_bits(lvds->grf, PX30_LVDS_GRF_PD_VO_CON1,
				  PX30_LVDS_VOP_SEL(1),
				  PX30_LVDS_VOP_SEL(endpoint));
}
/*
 * PX30 enable path: panel prepare -> poweron -> GRF format config ->
 * VOP mux -> panel enable.  On any failure the panel is unprepared.
 */
static void px30_lvds_encoder_enable(struct drm_encoder *encoder)
{
	struct rockchip_lvds *lvds = encoder_to_lvds(encoder);
	struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
	int ret;

	drm_panel_prepare(lvds->panel);

	ret = px30_lvds_poweron(lvds);
	if (ret) {
		DRM_DEV_ERROR(lvds->dev, "failed to power on LVDS: %d\n", ret);
		drm_panel_unprepare(lvds->panel);
		return;
	}

	ret = px30_lvds_grf_config(encoder, mode);
	if (ret) {
		DRM_DEV_ERROR(lvds->dev, "failed to configure LVDS: %d\n", ret);
		drm_panel_unprepare(lvds->panel);
		return;
	}

	ret = px30_lvds_set_vop_source(lvds, encoder);
	if (ret) {
		DRM_DEV_ERROR(lvds->dev, "failed to set VOP source: %d\n", ret);
		drm_panel_unprepare(lvds->panel);
		return;
	}

	drm_panel_enable(lvds->panel);
}

/* PX30 disable path: mirror of the enable sequence, in reverse. */
static void px30_lvds_encoder_disable(struct drm_encoder *encoder)
{
	struct rockchip_lvds *lvds = encoder_to_lvds(encoder);

	drm_panel_disable(lvds->panel);
	px30_lvds_poweroff(lvds);
	drm_panel_unprepare(lvds->panel);
}
/* Per-SoC encoder callbacks; atomic_check is shared. */
static const
struct drm_encoder_helper_funcs rk3288_lvds_encoder_helper_funcs = {
	.enable = rk3288_lvds_encoder_enable,
	.disable = rk3288_lvds_encoder_disable,
	.atomic_check = rockchip_lvds_encoder_atomic_check,
};

static const
struct drm_encoder_helper_funcs px30_lvds_encoder_helper_funcs = {
	.enable = px30_lvds_encoder_enable,
	.disable = px30_lvds_encoder_disable,
	.atomic_check = rockchip_lvds_encoder_atomic_check,
};
/*
 * RK3288 platform probe: map the TX registers, grab the peripheral
 * clock and, best-effort, the "lcdc" pinctrl state used for RGB mode
 * (pinctrl failures are non-fatal — lvds->pins is simply left NULL).
 */
static int rk3288_lvds_probe(struct platform_device *pdev,
			     struct rockchip_lvds *lvds)
{
	int ret;

	lvds->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(lvds->regs))
		return PTR_ERR(lvds->regs);

	lvds->pclk = devm_clk_get(lvds->dev, "pclk_lvds");
	if (IS_ERR(lvds->pclk)) {
		DRM_DEV_ERROR(lvds->dev, "could not get pclk_lvds\n");
		return PTR_ERR(lvds->pclk);
	}

	lvds->pins = devm_kzalloc(lvds->dev, sizeof(*lvds->pins),
				  GFP_KERNEL);
	if (!lvds->pins)
		return -ENOMEM;

	lvds->pins->p = devm_pinctrl_get(lvds->dev);
	if (IS_ERR(lvds->pins->p)) {
		DRM_DEV_ERROR(lvds->dev, "no pinctrl handle\n");
		devm_kfree(lvds->dev, lvds->pins);
		lvds->pins = NULL;
	} else {
		lvds->pins->default_state =
			pinctrl_lookup_state(lvds->pins->p, "lcdc");
		if (IS_ERR(lvds->pins->default_state)) {
			DRM_DEV_ERROR(lvds->dev, "no default pinctrl state\n");
			devm_kfree(lvds->dev, lvds->pins);
			lvds->pins = NULL;
		}
	}

	/* Prepare once here; enable/disable happen in poweron/poweroff. */
	ret = clk_prepare(lvds->pclk);
	if (ret < 0) {
		DRM_DEV_ERROR(lvds->dev, "failed to prepare pclk_lvds\n");
		return ret;
	}

	return 0;
}
/*
 * PX30 platform probe: set the MSB select bit in the GRF and bring up
 * the DPHY (init, LVDS mode, power on) used as the LVDS transmitter.
 */
static int px30_lvds_probe(struct platform_device *pdev,
			   struct rockchip_lvds *lvds)
{
	int ret;

	/* MSB */
	ret = regmap_update_bits(lvds->grf, PX30_LVDS_GRF_PD_VO_CON1,
				 PX30_LVDS_MSBSEL(1),
				 PX30_LVDS_MSBSEL(1));
	if (ret)
		return ret;

	/* PHY */
	lvds->dphy = devm_phy_get(&pdev->dev, "dphy");
	if (IS_ERR(lvds->dphy))
		return PTR_ERR(lvds->dphy);

	ret = phy_init(lvds->dphy);
	if (ret)
		return ret;

	ret = phy_set_mode(lvds->dphy, PHY_MODE_LVDS);
	if (ret)
		return ret;

	return phy_power_on(lvds->dphy);
}
/* Per-SoC probe routine and encoder callbacks. */
static const struct rockchip_lvds_soc_data rk3288_lvds_data = {
	.probe = rk3288_lvds_probe,
	.helper_funcs = &rk3288_lvds_encoder_helper_funcs,
};

static const struct rockchip_lvds_soc_data px30_lvds_data = {
	.probe = px30_lvds_probe,
	.helper_funcs = &px30_lvds_encoder_helper_funcs,
};

static const struct of_device_id rockchip_lvds_dt_ids[] = {
	{
		.compatible = "rockchip,rk3288-lvds",
		.data = &rk3288_lvds_data
	},
	{
		.compatible = "rockchip,px30-lvds",
		.data = &px30_lvds_data
	},
	{}
};
MODULE_DEVICE_TABLE(of, rockchip_lvds_dt_ids);
/*
 * Component bind: locate the downstream panel or bridge on OF-graph
 * port 1, then create and wire up the LVDS encoder and connector.
 *
 * Returns 0 on success or a negative errno; a failed panel/bridge
 * lookup is mapped to -EPROBE_DEFER so binding is retried later.
 */
static int rockchip_lvds_bind(struct device *dev, struct device *master,
			      void *data)
{
	struct rockchip_lvds *lvds = dev_get_drvdata(dev);
	struct drm_device *drm_dev = data;
	struct drm_encoder *encoder;
	struct drm_connector *connector;
	struct device_node *remote = NULL;
	struct device_node *port, *endpoint;
	int ret = 0, child_count = 0;
	const char *name;
	u32 endpoint_id = 0;

	lvds->drm_dev = drm_dev;
	/* Port 1 carries the output endpoint(s) towards the panel/bridge. */
	port = of_graph_get_port_by_id(dev->of_node, 1);
	if (!port) {
		DRM_DEV_ERROR(dev,
			      "can't found port point, please init lvds panel port!\n");
		return -EINVAL;
	}
	/* Try every endpoint until one resolves to a panel or bridge. */
	for_each_child_of_node(port, endpoint) {
		child_count++;
		of_property_read_u32(endpoint, "reg", &endpoint_id);
		ret = drm_of_find_panel_or_bridge(dev->of_node, 1, endpoint_id,
						  &lvds->panel, &lvds->bridge);
		if (!ret) {
			/* Drop the iterator's reference before breaking out. */
			of_node_put(endpoint);
			break;
		}
	}
	if (!child_count) {
		DRM_DEV_ERROR(dev, "lvds port does not have any children\n");
		ret = -EINVAL;
		goto err_put_port;
	} else if (ret) {
		/* Remote device may not have probed yet: defer, don't fail. */
		DRM_DEV_ERROR(dev, "failed to find panel and bridge node\n");
		ret = -EPROBE_DEFER;
		goto err_put_port;
	}
	if (lvds->panel)
		remote = lvds->panel->dev->of_node;
	else
		remote = lvds->bridge->of_node;
	if (of_property_read_string(dev->of_node, "rockchip,output", &name))
		/* default set it as output rgb */
		lvds->output = DISPLAY_OUTPUT_RGB;
	else
		lvds->output = rockchip_lvds_name_to_output(name);
	if (lvds->output < 0) {
		DRM_DEV_ERROR(dev, "invalid output type [%s]\n", name);
		ret = lvds->output;
		goto err_put_remote;
	}
	/* The pixel format is described on the remote (panel/bridge) node. */
	if (of_property_read_string(remote, "data-mapping", &name))
		/* default set it as format vesa 18 */
		lvds->format = LVDS_VESA_18;
	else
		lvds->format = rockchip_lvds_name_to_format(name);
	if (lvds->format < 0) {
		DRM_DEV_ERROR(dev, "invalid data-mapping format [%s]\n", name);
		ret = lvds->format;
		goto err_put_remote;
	}
	encoder = &lvds->encoder.encoder;
	encoder->possible_crtcs = drm_of_find_possible_crtcs(drm_dev,
							     dev->of_node);
	ret = drm_simple_encoder_init(drm_dev, encoder, DRM_MODE_ENCODER_LVDS);
	if (ret < 0) {
		DRM_DEV_ERROR(drm_dev->dev,
			      "failed to initialize encoder: %d\n", ret);
		goto err_put_remote;
	}
	drm_encoder_helper_add(encoder, lvds->soc_data->helper_funcs);
	connector = &lvds->connector;
	if (lvds->panel) {
		/* Direct panel: the driver owns the connector. */
		connector->dpms = DRM_MODE_DPMS_OFF;
		ret = drm_connector_init(drm_dev, connector,
					 &rockchip_lvds_connector_funcs,
					 DRM_MODE_CONNECTOR_LVDS);
		if (ret < 0) {
			DRM_DEV_ERROR(drm_dev->dev,
				      "failed to initialize connector: %d\n", ret);
			goto err_free_encoder;
		}
		drm_connector_helper_add(connector,
					 &rockchip_lvds_connector_helper_funcs);
	} else {
		/* Bridge chain: let the bridge connector helper do the work. */
		ret = drm_bridge_attach(encoder, lvds->bridge, NULL,
					DRM_BRIDGE_ATTACH_NO_CONNECTOR);
		if (ret)
			goto err_free_encoder;
		connector = drm_bridge_connector_init(lvds->drm_dev, encoder);
		if (IS_ERR(connector)) {
			DRM_DEV_ERROR(drm_dev->dev,
				      "failed to initialize bridge connector: %pe\n",
				      connector);
			ret = PTR_ERR(connector);
			goto err_free_encoder;
		}
	}
	ret = drm_connector_attach_encoder(connector, encoder);
	if (ret < 0) {
		DRM_DEV_ERROR(drm_dev->dev,
			      "failed to attach encoder: %d\n", ret);
		goto err_free_connector;
	}
	pm_runtime_enable(dev);
	of_node_put(remote);
	of_node_put(port);
	return 0;
err_free_connector:
	drm_connector_cleanup(connector);
err_free_encoder:
	drm_encoder_cleanup(encoder);
err_put_remote:
	of_node_put(remote);
err_put_port:
	of_node_put(port);
	return ret;
}
/*
 * Component unbind: disable the encoder via the SoC-specific helper
 * before tearing down runtime PM and the KMS objects created in bind.
 */
static void rockchip_lvds_unbind(struct device *dev, struct device *master,
				 void *data)
{
	struct rockchip_lvds *lvds = dev_get_drvdata(dev);
	const struct drm_encoder_helper_funcs *encoder_funcs;

	encoder_funcs = lvds->soc_data->helper_funcs;
	encoder_funcs->disable(&lvds->encoder.encoder);
	pm_runtime_disable(dev);
	drm_connector_cleanup(&lvds->connector);
	drm_encoder_cleanup(&lvds->encoder.encoder);
}

static const struct component_ops rockchip_lvds_component_ops = {
	.bind = rockchip_lvds_bind,
	.unbind = rockchip_lvds_unbind,
};
/*
 * Platform probe: allocate driver state, resolve the per-SoC match
 * data and GRF regmap, run the SoC-specific setup and register the
 * device as a component.
 */
static int rockchip_lvds_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct rockchip_lvds *lvds;
	const struct of_device_id *match;
	int err;

	if (!dev->of_node)
		return -ENODEV;

	lvds = devm_kzalloc(dev, sizeof(*lvds), GFP_KERNEL);
	if (!lvds)
		return -ENOMEM;

	lvds->dev = dev;

	match = of_match_node(rockchip_lvds_dt_ids, dev->of_node);
	if (!match)
		return -ENODEV;
	lvds->soc_data = match->data;

	lvds->grf = syscon_regmap_lookup_by_phandle(dev->of_node,
						    "rockchip,grf");
	if (IS_ERR(lvds->grf)) {
		DRM_DEV_ERROR(dev, "missing rockchip,grf property\n");
		return PTR_ERR(lvds->grf);
	}

	err = lvds->soc_data->probe(pdev, lvds);
	if (err) {
		DRM_DEV_ERROR(dev, "Platform initialization failed\n");
		return err;
	}

	dev_set_drvdata(dev, lvds);

	err = component_add(dev, &rockchip_lvds_component_ops);
	if (err < 0) {
		/* Undo the clk_prepare() done by the SoC probe hook. */
		DRM_DEV_ERROR(dev, "failed to add component\n");
		clk_unprepare(lvds->pclk);
	}

	return err;
}
/* Platform remove: deregister the component and release the clock. */
static void rockchip_lvds_remove(struct platform_device *pdev)
{
	struct rockchip_lvds *lvds = platform_get_drvdata(pdev);

	component_del(&pdev->dev, &rockchip_lvds_component_ops);
	clk_unprepare(lvds->pclk);
}

struct platform_driver rockchip_lvds_driver = {
	.probe = rockchip_lvds_probe,
	.remove_new = rockchip_lvds_remove,
	.driver = {
		.name = "rockchip-lvds",
		.of_match_table = of_match_ptr(rockchip_lvds_dt_ids),
	},
};
| linux-master | drivers/gpu/drm/rockchip/rockchip_lvds.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
* Author:Mark Yao <[email protected]>
*/
#include <linux/kernel.h>
#include <drm/drm.h>
#include <drm/drm_atomic.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_probe_helper.h>
#include "rockchip_drm_drv.h"
#include "rockchip_drm_fb.h"
#include "rockchip_drm_gem.h"
/* Framebuffer ops: plain GEM-backed fb with atomic dirtyfb support. */
static const struct drm_framebuffer_funcs rockchip_drm_fb_funcs = {
	.destroy = drm_gem_fb_destroy,
	.create_handle = drm_gem_fb_create_handle,
	.dirty = drm_atomic_helper_dirtyfb,
};

/* Commit tail that handles runtime-PM-managed CRTCs. */
static const struct drm_mode_config_helper_funcs rockchip_mode_config_helpers = {
	.atomic_commit_tail = drm_atomic_helper_commit_tail_rpm,
};
/*
 * .fb_create implementation.
 *
 * Allocates a drm_afbc_framebuffer so the same object can describe
 * either a linear or an AFBC (ARM Framebuffer Compression) buffer;
 * when the modifier requests AFBC, the AFBC layout is validated and
 * filled in as well.
 *
 * Fix: the inner error path declared `int ret, i;`, shadowing the
 * outer `ret` (-Wshadow hazard); only `i` is scoped locally now.
 *
 * Returns the new framebuffer or an ERR_PTR on failure.
 */
static struct drm_framebuffer *
rockchip_fb_create(struct drm_device *dev, struct drm_file *file,
		   const struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct drm_afbc_framebuffer *afbc_fb;
	const struct drm_format_info *info;
	int ret;

	info = drm_get_format_info(dev, mode_cmd);
	if (!info)
		return ERR_PTR(-ENOMEM);

	afbc_fb = kzalloc(sizeof(*afbc_fb), GFP_KERNEL);
	if (!afbc_fb)
		return ERR_PTR(-ENOMEM);

	ret = drm_gem_fb_init_with_funcs(dev, &afbc_fb->base, file, mode_cmd,
					 &rockchip_drm_fb_funcs);
	if (ret) {
		kfree(afbc_fb);
		return ERR_PTR(ret);
	}

	if (drm_is_afbc(mode_cmd->modifier[0])) {
		int i;

		ret = drm_gem_fb_afbc_init(dev, mode_cmd, afbc_fb);
		if (ret) {
			struct drm_gem_object **obj = afbc_fb->base.obj;

			/* Drop the GEM references taken by the fb init. */
			for (i = 0; i < info->num_planes; ++i)
				drm_gem_object_put(obj[i]);

			kfree(afbc_fb);
			return ERR_PTR(ret);
		}
	}

	return &afbc_fb->base;
}
/* Mode-config ops: custom fb_create, stock atomic check/commit. */
static const struct drm_mode_config_funcs rockchip_drm_mode_config_funcs = {
	.fb_create = rockchip_fb_create,
	.atomic_check = drm_atomic_helper_check,
	.atomic_commit = drm_atomic_helper_commit,
};
void rockchip_drm_mode_config_init(struct drm_device *dev)
{
dev->mode_config.min_width = 0;
dev->mode_config.min_height = 0;
/*
* set max width and height as default value(4096x4096).
* this value would be used to check framebuffer size limitation
* at drm_mode_addfb().
*/
dev->mode_config.max_width = 4096;
dev->mode_config.max_height = 4096;
dev->mode_config.funcs = &rockchip_drm_mode_config_funcs;
dev->mode_config.helper_private = &rockchip_mode_config_helpers;
dev->mode_config.normalize_zpos = true;
}
| linux-master | drivers/gpu/drm/rockchip/rockchip_drm_fb.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) Rockchip Electronics Co.Ltd
* Author: Andy Yan <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/component.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_plane.h>
#include <drm/drm_print.h>
#include "rockchip_drm_vop2.h"
/* Formats for full-featured windows (cluster wins): RGB plus NV1x YUV. */
static const uint32_t formats_win_full_10bit[] = {
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ABGR8888,
	DRM_FORMAT_RGB888,
	DRM_FORMAT_BGR888,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_BGR565,
	DRM_FORMAT_NV12,
	DRM_FORMAT_NV16,
	DRM_FORMAT_NV24,
};

/* Same as above plus packed YUYV-family formats (used by esmart wins). */
static const uint32_t formats_win_full_10bit_yuyv[] = {
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ABGR8888,
	DRM_FORMAT_RGB888,
	DRM_FORMAT_BGR888,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_BGR565,
	DRM_FORMAT_NV12,
	DRM_FORMAT_NV16,
	DRM_FORMAT_NV24,
	DRM_FORMAT_YVYU,
	DRM_FORMAT_VYUY,
};

/* RGB-only format set for the lightweight (smart) windows. */
static const uint32_t formats_win_lite[] = {
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ABGR8888,
	DRM_FORMAT_RGB888,
	DRM_FORMAT_BGR888,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_BGR565,
};

/* Windows without a decompressor only accept linear buffers. */
static const uint64_t format_modifiers[] = {
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID,
};

/* AFBC-capable windows: every supported 16x16-block AFBC variant. */
static const uint64_t format_modifiers_afbc[] = {
	DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_16x16),
	DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_16x16 |
				AFBC_FORMAT_MOD_SPARSE),
	DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_16x16 |
				AFBC_FORMAT_MOD_YTR),
	DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_16x16 |
				AFBC_FORMAT_MOD_CBR),
	DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_16x16 |
				AFBC_FORMAT_MOD_YTR |
				AFBC_FORMAT_MOD_SPARSE),
	DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_16x16 |
				AFBC_FORMAT_MOD_CBR |
				AFBC_FORMAT_MOD_SPARSE),
	DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_16x16 |
				AFBC_FORMAT_MOD_YTR |
				AFBC_FORMAT_MOD_CBR),
	DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_16x16 |
				AFBC_FORMAT_MOD_YTR |
				AFBC_FORMAT_MOD_CBR |
				AFBC_FORMAT_MOD_SPARSE),
	/* SPLIT mandates SPARSE, RGB modes mandates YTR */
	DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_16x16 |
				AFBC_FORMAT_MOD_YTR |
				AFBC_FORMAT_MOD_SPARSE |
				AFBC_FORMAT_MOD_SPLIT),
	DRM_FORMAT_MOD_INVALID,
};
/*
 * Capabilities of the three RK3568 VOP2 video ports; only VP0 carries
 * the 10-bit output feature and the cubic LUT.
 */
static const struct vop2_video_port_data rk3568_vop_video_ports[] = {
	{
		.id = 0,
		.feature = VOP_FEATURE_OUTPUT_10BIT,
		.gamma_lut_len = 1024,
		.cubic_lut_len = 9 * 9 * 9,
		.max_output = { 4096, 2304 },
		.pre_scan_max_dly = { 69, 53, 53, 42 },
		.offset = 0xc00,
	}, {
		.id = 1,
		.gamma_lut_len = 1024,
		.max_output = { 2048, 1536 },
		.pre_scan_max_dly = { 40, 40, 40, 40 },
		.offset = 0xd00,
	}, {
		.id = 2,
		.gamma_lut_len = 1024,
		.max_output = { 1920, 1080 },
		.pre_scan_max_dly = { 40, 40, 40, 40 },
		.offset = 0xe00,
	},
};
/*
* rk3568 vop with 2 cluster, 2 esmart win, 2 smart win.
* Every cluster can work as 4K win or split into two win.
* All win in cluster support AFBCD.
*
* Every esmart win and smart win support 4 Multi-region.
*
* Scale filter mode:
*
* * Cluster: bicubic for horizontal scale up, others use bilinear
* * ESmart:
* * nearest-neighbor/bilinear/bicubic for scale up
* * nearest-neighbor/bilinear/average for scale down
*
*
 * @TODO describe the win topology, similar to the cpu-map dt nodes;
*/
/* Per-window capabilities for the RK3568 VOP2 (see comment above). */
static const struct vop2_win_data rk3568_vop_win_data[] = {
	{
		.name = "Smart0-win0",
		.phys_id = ROCKCHIP_VOP2_SMART0,
		.base = 0x1c00,
		.formats = formats_win_lite,
		.nformats = ARRAY_SIZE(formats_win_lite),
		.format_modifiers = format_modifiers,
		.layer_sel_id = 3,
		.supported_rotations = DRM_MODE_REFLECT_Y,
		.type = DRM_PLANE_TYPE_PRIMARY,
		.max_upscale_factor = 8,
		.max_downscale_factor = 8,
		.dly = { 20, 47, 41 },
	}, {
		.name = "Smart1-win0",
		.phys_id = ROCKCHIP_VOP2_SMART1,
		.formats = formats_win_lite,
		.nformats = ARRAY_SIZE(formats_win_lite),
		.format_modifiers = format_modifiers,
		.base = 0x1e00,
		.layer_sel_id = 7,
		.supported_rotations = DRM_MODE_REFLECT_Y,
		.type = DRM_PLANE_TYPE_PRIMARY,
		.max_upscale_factor = 8,
		.max_downscale_factor = 8,
		.dly = { 20, 47, 41 },
	}, {
		.name = "Esmart1-win0",
		.phys_id = ROCKCHIP_VOP2_ESMART1,
		.formats = formats_win_full_10bit_yuyv,
		.nformats = ARRAY_SIZE(formats_win_full_10bit_yuyv),
		.format_modifiers = format_modifiers,
		.base = 0x1a00,
		.layer_sel_id = 6,
		.supported_rotations = DRM_MODE_REFLECT_Y,
		.type = DRM_PLANE_TYPE_PRIMARY,
		.max_upscale_factor = 8,
		.max_downscale_factor = 8,
		.dly = { 20, 47, 41 },
	}, {
		.name = "Esmart0-win0",
		.phys_id = ROCKCHIP_VOP2_ESMART0,
		.formats = formats_win_full_10bit_yuyv,
		.nformats = ARRAY_SIZE(formats_win_full_10bit_yuyv),
		.format_modifiers = format_modifiers,
		.base = 0x1800,
		.layer_sel_id = 2,
		.supported_rotations = DRM_MODE_REFLECT_Y,
		.type = DRM_PLANE_TYPE_PRIMARY,
		.max_upscale_factor = 8,
		.max_downscale_factor = 8,
		.dly = { 20, 47, 41 },
	}, {
		/* Cluster windows additionally support AFBC and rotation. */
		.name = "Cluster0-win0",
		.phys_id = ROCKCHIP_VOP2_CLUSTER0,
		.base = 0x1000,
		.formats = formats_win_full_10bit,
		.nformats = ARRAY_SIZE(formats_win_full_10bit),
		.format_modifiers = format_modifiers_afbc,
		.layer_sel_id = 0,
		.supported_rotations = DRM_MODE_ROTATE_90 | DRM_MODE_ROTATE_270 |
				       DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y,
		.max_upscale_factor = 4,
		.max_downscale_factor = 4,
		.dly = { 0, 27, 21 },
		.type = DRM_PLANE_TYPE_OVERLAY,
		.feature = WIN_FEATURE_AFBDC | WIN_FEATURE_CLUSTER,
	}, {
		.name = "Cluster1-win0",
		.phys_id = ROCKCHIP_VOP2_CLUSTER1,
		.base = 0x1200,
		.formats = formats_win_full_10bit,
		.nformats = ARRAY_SIZE(formats_win_full_10bit),
		.format_modifiers = format_modifiers_afbc,
		.layer_sel_id = 1,
		.supported_rotations = DRM_MODE_ROTATE_90 | DRM_MODE_ROTATE_270 |
				       DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y,
		.type = DRM_PLANE_TYPE_OVERLAY,
		.max_upscale_factor = 4,
		.max_downscale_factor = 4,
		.dly = { 0, 27, 21 },
		.feature = WIN_FEATURE_AFBDC | WIN_FEATURE_CLUSTER,
	},
};
/* RK3566 and RK3568 share ports/windows; only the soc_id differs. */
static const struct vop2_data rk3566_vop = {
	.nr_vps = 3,
	.max_input = { 4096, 2304 },
	.max_output = { 4096, 2304 },
	.vp = rk3568_vop_video_ports,
	.win = rk3568_vop_win_data,
	.win_size = ARRAY_SIZE(rk3568_vop_win_data),
	.soc_id = 3566,
};

static const struct vop2_data rk3568_vop = {
	.nr_vps = 3,
	.max_input = { 4096, 2304 },
	.max_output = { 4096, 2304 },
	.vp = rk3568_vop_video_ports,
	.win = rk3568_vop_win_data,
	.win_size = ARRAY_SIZE(rk3568_vop_win_data),
	.soc_id = 3568,
};

static const struct of_device_id vop2_dt_match[] = {
	{
		.compatible = "rockchip,rk3566-vop",
		.data = &rk3566_vop,
	}, {
		.compatible = "rockchip,rk3568-vop",
		.data = &rk3568_vop,
	}, {
	},
};
MODULE_DEVICE_TABLE(of, vop2_dt_match);
/* Platform probe: just register as a component of the DRM device. */
static int vop2_probe(struct platform_device *pdev)
{
	return component_add(&pdev->dev, &vop2_component_ops);
}
/* Platform remove: drop the component registration. */
static void vop2_remove(struct platform_device *pdev)
{
	component_del(&pdev->dev, &vop2_component_ops);
}

struct platform_driver vop2_platform_driver = {
	.probe = vop2_probe,
	.remove_new = vop2_remove,
	.driver = {
		.name = "rockchip-vop2",
		.of_match_table = of_match_ptr(vop2_dt_match),
	},
};
| linux-master | drivers/gpu/drm/rockchip/rockchip_vop2_reg.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Rockchip SoC DP (Display Port) interface driver.
*
* Copyright (C) Fuzhou Rockchip Electronics Co., Ltd.
* Author: Andy Yan <[email protected]>
* Yakir Yang <[email protected]>
* Jeff Chen <[email protected]>
*/
#include <linux/component.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/of_graph.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/reset.h>
#include <linux/clk.h>
#include <video/of_videomode.h>
#include <video/videomode.h>
#include <drm/display/drm_dp_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/bridge/analogix_dp.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
#include "rockchip_drm_drv.h"
#include "rockchip_drm_vop.h"
/* GRF registers/bits selecting which VOP drives the eDP controller. */
#define RK3288_GRF_SOC_CON6 0x25c
#define RK3288_EDP_LCDC_SEL BIT(5)
#define RK3399_GRF_SOC_CON20 0x6250
#define RK3399_EDP_LCDC_SEL BIT(5)
/* GRF registers take a write-enable mask in the upper 16 bits. */
#define HIWORD_UPDATE(val, mask) (val | (mask) << 16)
#define PSR_WAIT_LINE_FLAG_TIMEOUT_MS 100
/**
 * struct rockchip_dp_chip_data - split the grf setting of kind of chips
 * @lcdsel_grf_reg: grf register offset of lcdc select
 * @lcdsel_big: reg value of selecting vop big for eDP
 * @lcdsel_lit: reg value of selecting vop little for eDP
 * @chip_type: specific chip type
 */
struct rockchip_dp_chip_data {
	u32 lcdsel_grf_reg;
	u32 lcdsel_big;
	u32 lcdsel_lit;
	u32 chip_type;
};

/* Driver state: one instance per eDP controller. */
struct rockchip_dp_device {
	struct drm_device *drm_dev;
	struct device *dev;
	struct rockchip_encoder encoder;
	struct drm_display_mode mode;
	struct clk *pclk;		/* APB bus clock */
	struct clk *grfclk;		/* optional clock gating GRF access */
	struct regmap *grf;
	struct reset_control *rst;
	const struct rockchip_dp_chip_data *data;
	struct analogix_dp_device *adp;	/* handle into the analogix core */
	struct analogix_dp_plat_data plat_data;
};
/* Map a drm_encoder back to its owning rockchip_dp_device. */
static struct rockchip_dp_device *encoder_to_dp(struct drm_encoder *encoder)
{
	return container_of(to_rockchip_encoder(encoder),
			    struct rockchip_dp_device, encoder);
}

/* Map the analogix platform data back to its rockchip_dp_device. */
static struct rockchip_dp_device *pdata_encoder_to_dp(struct analogix_dp_plat_data *plat_data)
{
	return container_of(plat_data, struct rockchip_dp_device, plat_data);
}
/* Pulse the controller reset to bring it to a known state. */
static int rockchip_dp_pre_init(struct rockchip_dp_device *dp)
{
	reset_control_assert(dp->rst);
	usleep_range(10, 20);
	reset_control_deassert(dp->rst);
	return 0;
}
/*
 * analogix power_on_start hook: enable the APB clock and pulse the
 * controller reset before the core continues powering up.
 */
static int rockchip_dp_poweron_start(struct analogix_dp_plat_data *plat_data)
{
	struct rockchip_dp_device *dp = pdata_encoder_to_dp(plat_data);
	int ret;

	ret = clk_prepare_enable(dp->pclk);
	if (ret < 0) {
		DRM_DEV_ERROR(dp->dev, "failed to enable pclk %d\n", ret);
		return ret;
	}

	ret = rockchip_dp_pre_init(dp);
	if (ret < 0) {
		DRM_DEV_ERROR(dp->dev, "failed to dp pre init %d\n", ret);
		goto err_disable_pclk;
	}

	return 0;

err_disable_pclk:
	clk_disable_unprepare(dp->pclk);
	return ret;
}
/* analogix power_off hook: release the APB clock taken at power-on. */
static int rockchip_dp_powerdown(struct analogix_dp_plat_data *plat_data)
{
	struct rockchip_dp_device *dp = pdata_encoder_to_dp(plat_data);

	clk_disable_unprepare(dp->pclk);
	return 0;
}
/*
 * analogix get_modes hook.
 *
 * The VOP cannot feed YUV to the eDP controller, so strip any YCbCr
 * color formats advertised by the sink and fall back to RGB at 8 bpc.
 */
static int rockchip_dp_get_modes(struct analogix_dp_plat_data *plat_data,
				 struct drm_connector *connector)
{
	struct drm_display_info *di = &connector->display_info;
	u32 mask = DRM_COLOR_FORMAT_YCBCR444 | DRM_COLOR_FORMAT_YCBCR422;

	/* Nothing to do when the sink only advertises RGB. */
	if (!(di->color_formats & mask))
		return 0;

	DRM_DEBUG_KMS("Swapping display color format from YUV to RGB\n");
	di->color_formats &= ~mask;
	di->color_formats |= DRM_COLOR_FORMAT_RGB444;
	di->bpc = 8;

	return 0;
}
/* No-op mode_fixup: the analogix core handles mode validation. */
static bool
rockchip_dp_drm_encoder_mode_fixup(struct drm_encoder *encoder,
				   const struct drm_display_mode *mode,
				   struct drm_display_mode *adjusted_mode)
{
	/* do nothing */
	return true;
}

/* No-op mode_set: timing programming is done by the analogix core. */
static void rockchip_dp_drm_encoder_mode_set(struct drm_encoder *encoder,
					     struct drm_display_mode *mode,
					     struct drm_display_mode *adjusted)
{
	/* do nothing */
}
/*
 * Find the CRTC this encoder will be driving in the new atomic state,
 * or NULL if the encoder has no connector/state there.
 */
static struct drm_crtc *
rockchip_dp_drm_get_new_crtc(struct drm_encoder *encoder,
			     struct drm_atomic_state *state)
{
	struct drm_connector_state *conn_state;
	struct drm_connector *connector;

	connector = drm_atomic_get_new_connector_for_encoder(state, encoder);
	if (!connector)
		return NULL;

	conn_state = drm_atomic_get_new_connector_state(state, connector);

	return conn_state ? conn_state->crtc : NULL;
}
/*
 * Encoder enable: program the GRF mux so the correct VOP ("big" or
 * "lit") feeds the eDP controller, based on the active OF-graph
 * endpoint. Skipped when resuming from PSR (self refresh).
 */
static void rockchip_dp_drm_encoder_enable(struct drm_encoder *encoder,
					   struct drm_atomic_state *state)
{
	struct rockchip_dp_device *dp = encoder_to_dp(encoder);
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state;
	int ret;
	u32 val;

	crtc = rockchip_dp_drm_get_new_crtc(encoder, state);
	if (!crtc)
		return;

	old_crtc_state = drm_atomic_get_old_crtc_state(state, crtc);
	/* Coming back from self refresh, nothing to do */
	if (old_crtc_state && old_crtc_state->self_refresh_active)
		return;

	/* Endpoint id selects the source VOP. */
	ret = drm_of_encoder_active_endpoint_id(dp->dev->of_node, encoder);
	if (ret < 0)
		return;

	if (ret)
		val = dp->data->lcdsel_lit;
	else
		val = dp->data->lcdsel_big;

	DRM_DEV_DEBUG(dp->dev, "vop %s output to dp\n", (ret) ? "LIT" : "BIG");

	/* Some SoCs gate GRF access behind an optional clock. */
	ret = clk_prepare_enable(dp->grfclk);
	if (ret < 0) {
		DRM_DEV_ERROR(dp->dev, "failed to enable grfclk %d\n", ret);
		return;
	}

	ret = regmap_write(dp->grf, dp->data->lcdsel_grf_reg, val);
	if (ret != 0)
		DRM_DEV_ERROR(dp->dev, "Could not write to GRF: %d\n", ret);

	clk_disable_unprepare(dp->grfclk);
}
/*
 * Encoder disable: when entering PSR (self refresh), wait for the
 * vertical-active end so the transition happens at a safe point.
 * A full shutdown needs no special handling here.
 */
static void rockchip_dp_drm_encoder_disable(struct drm_encoder *encoder,
					    struct drm_atomic_state *state)
{
	struct rockchip_dp_device *dp = encoder_to_dp(encoder);
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_crtc_state = NULL;
	int ret;

	crtc = rockchip_dp_drm_get_new_crtc(encoder, state);
	/* No crtc means we're doing a full shutdown */
	if (!crtc)
		return;

	new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
	/* If we're not entering self-refresh, no need to wait for vact */
	if (!new_crtc_state || !new_crtc_state->self_refresh_active)
		return;

	ret = rockchip_drm_wait_vact_end(crtc, PSR_WAIT_LINE_FLAG_TIMEOUT_MS);
	if (ret)
		DRM_DEV_ERROR(dp->dev, "line flag irq timed out\n");
}
/*
 * Encoder atomic_check: force the VOP output mode/type expected by
 * the eDP controller into the Rockchip CRTC state.
 */
static int
rockchip_dp_drm_encoder_atomic_check(struct drm_encoder *encoder,
				      struct drm_crtc_state *crtc_state,
				      struct drm_connector_state *conn_state)
{
	struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc_state);
	struct drm_display_info *di = &conn_state->connector->display_info;

	/*
	 * The hardware IC designed that VOP must output the RGB10 video
	 * format to eDP controller, and if eDP panel only support RGB8,
	 * then eDP controller should cut down the video data, not via VOP
	 * controller, that's why we need to hardcode the VOP output mode
	 * to RGA10 here.
	 */
	s->output_mode = ROCKCHIP_OUT_MODE_AAAA;
	s->output_type = DRM_MODE_CONNECTOR_eDP;
	s->output_bpc = di->bpc;

	return 0;
}
static struct drm_encoder_helper_funcs rockchip_dp_encoder_helper_funcs = {
.mode_fixup = rockchip_dp_drm_encoder_mode_fixup,
.mode_set = rockchip_dp_drm_encoder_mode_set,
.atomic_enable = rockchip_dp_drm_encoder_enable,
.atomic_disable = rockchip_dp_drm_encoder_disable,
.atomic_check = rockchip_dp_drm_encoder_atomic_check,
};
/*
 * Parse the device-tree resources: GRF regmap, optional GRF clock,
 * APB clock and controller reset line.
 */
static int rockchip_dp_of_probe(struct rockchip_dp_device *dp)
{
	struct device *dev = dp->dev;
	struct device_node *np = dev->of_node;

	dp->grf = syscon_regmap_lookup_by_phandle(np, "rockchip,grf");
	if (IS_ERR(dp->grf)) {
		DRM_DEV_ERROR(dev, "failed to get rockchip,grf property\n");
		return PTR_ERR(dp->grf);
	}

	/* The GRF clock is optional; -ENOENT simply means "not present". */
	dp->grfclk = devm_clk_get(dev, "grf");
	if (PTR_ERR(dp->grfclk) == -ENOENT) {
		dp->grfclk = NULL;
	} else if (PTR_ERR(dp->grfclk) == -EPROBE_DEFER) {
		return -EPROBE_DEFER;
	} else if (IS_ERR(dp->grfclk)) {
		DRM_DEV_ERROR(dev, "failed to get grf clock\n");
		return PTR_ERR(dp->grfclk);
	}

	dp->pclk = devm_clk_get(dev, "pclk");
	if (IS_ERR(dp->pclk)) {
		DRM_DEV_ERROR(dev, "failed to get pclk property\n");
		return PTR_ERR(dp->pclk);
	}

	dp->rst = devm_reset_control_get(dev, "dp");
	if (IS_ERR(dp->rst)) {
		DRM_DEV_ERROR(dev, "failed to get dp reset control\n");
		return PTR_ERR(dp->rst);
	}

	return 0;
}
/*
 * Create and register the DRM encoder, restricting possible_crtcs to
 * the CRTCs wired up in the device tree.
 */
static int rockchip_dp_drm_create_encoder(struct rockchip_dp_device *dp)
{
	struct drm_encoder *encoder = &dp->encoder.encoder;
	struct drm_device *drm_dev = dp->drm_dev;
	struct device *dev = dp->dev;
	int ret;

	encoder->possible_crtcs = drm_of_find_possible_crtcs(drm_dev,
							     dev->of_node);
	DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs);

	ret = drm_simple_encoder_init(drm_dev, encoder,
				      DRM_MODE_ENCODER_TMDS);
	if (ret) {
		DRM_ERROR("failed to initialize encoder with drm\n");
		return ret;
	}

	drm_encoder_helper_add(encoder, &rockchip_dp_encoder_helper_funcs);

	return 0;
}
/*
 * Component bind: create the encoder and hand it to the analogix
 * core, which registers the DP connector. On failure the encoder is
 * destroyed again.
 */
static int rockchip_dp_bind(struct device *dev, struct device *master,
			    void *data)
{
	struct rockchip_dp_device *dp = dev_get_drvdata(dev);
	struct drm_device *drm_dev = data;
	int ret;

	dp->drm_dev = drm_dev;

	ret = rockchip_dp_drm_create_encoder(dp);
	if (ret) {
		DRM_ERROR("failed to create drm encoder\n");
		return ret;
	}

	dp->plat_data.encoder = &dp->encoder.encoder;

	ret = analogix_dp_bind(dp->adp, drm_dev);
	if (ret)
		goto err_cleanup_encoder;

	return 0;
err_cleanup_encoder:
	dp->encoder.encoder.funcs->destroy(&dp->encoder.encoder);
	return ret;
}

/* Component unbind: undo analogix_dp_bind() and drop the encoder. */
static void rockchip_dp_unbind(struct device *dev, struct device *master,
			       void *data)
{
	struct rockchip_dp_device *dp = dev_get_drvdata(dev);

	analogix_dp_unbind(dp->adp);
	dp->encoder.encoder.funcs->destroy(&dp->encoder.encoder);
}
/* Component framework callbacks for the display master device. */
static const struct component_ops rockchip_dp_component_ops = {
	.bind = rockchip_dp_bind,
	.unbind = rockchip_dp_unbind,
};
/*
 * Platform probe: look up chip data and the attached panel, parse DT
 * resources, pre-create the analogix device and register as a
 * component. DRM registration itself happens later in bind.
 */
static int rockchip_dp_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	const struct rockchip_dp_chip_data *dp_data;
	struct drm_panel *panel = NULL;
	struct rockchip_dp_device *dp;
	int ret;

	dp_data = of_device_get_match_data(dev);
	if (!dp_data)
		return -ENODEV;

	/* May defer until the panel driver has probed. */
	ret = drm_of_find_panel_or_bridge(dev->of_node, 1, 0, &panel, NULL);
	if (ret < 0)
		return ret;

	dp = devm_kzalloc(dev, sizeof(*dp), GFP_KERNEL);
	if (!dp)
		return -ENOMEM;

	dp->dev = dev;
	/* Marked invalid until analogix_dp_probe() succeeds (see suspend). */
	dp->adp = ERR_PTR(-ENODEV);
	dp->data = dp_data;
	dp->plat_data.panel = panel;
	dp->plat_data.dev_type = dp->data->chip_type;
	dp->plat_data.power_on_start = rockchip_dp_poweron_start;
	dp->plat_data.power_off = rockchip_dp_powerdown;
	dp->plat_data.get_modes = rockchip_dp_get_modes;

	ret = rockchip_dp_of_probe(dp);
	if (ret < 0)
		return ret;

	platform_set_drvdata(pdev, dp);

	dp->adp = analogix_dp_probe(dev, &dp->plat_data);
	if (IS_ERR(dp->adp))
		return PTR_ERR(dp->adp);

	ret = component_add(dev, &rockchip_dp_component_ops);
	if (ret)
		goto err_dp_remove;

	return 0;

err_dp_remove:
	analogix_dp_remove(dp->adp);
	return ret;
}
/* Platform remove: deregister the component and the analogix device. */
static void rockchip_dp_remove(struct platform_device *pdev)
{
	struct rockchip_dp_device *dp = platform_get_drvdata(pdev);

	component_del(&pdev->dev, &rockchip_dp_component_ops);
	analogix_dp_remove(dp->adp);
}
#ifdef CONFIG_PM_SLEEP
/* System suspend: forward to the analogix core if it was probed. */
static int rockchip_dp_suspend(struct device *dev)
{
	struct rockchip_dp_device *dp = dev_get_drvdata(dev);

	/* adp is an ERR_PTR until analogix_dp_probe() has succeeded. */
	if (IS_ERR(dp->adp))
		return 0;

	return analogix_dp_suspend(dp->adp);
}

/* System resume counterpart of rockchip_dp_suspend(). */
static int rockchip_dp_resume(struct device *dev)
{
	struct rockchip_dp_device *dp = dev_get_drvdata(dev);

	if (IS_ERR(dp->adp))
		return 0;

	return analogix_dp_resume(dp->adp);
}
#endif

/* Late suspend / early resume so the DRM core quiesces first. */
static const struct dev_pm_ops rockchip_dp_pm_ops = {
#ifdef CONFIG_PM_SLEEP
	.suspend_late = rockchip_dp_suspend,
	.resume_early = rockchip_dp_resume,
#endif
};
/* Per-chip GRF mux settings (see struct rockchip_dp_chip_data). */
static const struct rockchip_dp_chip_data rk3399_edp = {
	.lcdsel_grf_reg = RK3399_GRF_SOC_CON20,
	.lcdsel_big = HIWORD_UPDATE(0, RK3399_EDP_LCDC_SEL),
	.lcdsel_lit = HIWORD_UPDATE(RK3399_EDP_LCDC_SEL, RK3399_EDP_LCDC_SEL),
	.chip_type = RK3399_EDP,
};

static const struct rockchip_dp_chip_data rk3288_dp = {
	.lcdsel_grf_reg = RK3288_GRF_SOC_CON6,
	.lcdsel_big = HIWORD_UPDATE(0, RK3288_EDP_LCDC_SEL),
	.lcdsel_lit = HIWORD_UPDATE(RK3288_EDP_LCDC_SEL, RK3288_EDP_LCDC_SEL),
	.chip_type = RK3288_DP,
};

static const struct of_device_id rockchip_dp_dt_ids[] = {
	{.compatible = "rockchip,rk3288-dp", .data = &rk3288_dp },
	{.compatible = "rockchip,rk3399-edp", .data = &rk3399_edp },
	{}
};
MODULE_DEVICE_TABLE(of, rockchip_dp_dt_ids);

struct platform_driver rockchip_dp_driver = {
	.probe = rockchip_dp_probe,
	.remove_new = rockchip_dp_remove,
	.driver = {
		   .name = "rockchip-dp",
		   .pm = &rockchip_dp_pm_ops,
		   .of_match_table = rockchip_dp_dt_ids,
	},
};
| linux-master | drivers/gpu/drm/rockchip/analogix_dp-rockchip.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
* Author:
* Chris Zhong <[email protected]>
* Nickey Yang <[email protected]>
*/
#include <linux/clk.h>
#include <linux/iopoll.h>
#include <linux/math64.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <video/mipi_display.h>
#include <drm/bridge/dw_mipi_dsi.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_of.h>
#include <drm/drm_simple_kms_helper.h>
#include "rockchip_drm_drv.h"
#include "rockchip_drm_vop.h"
/* DSI controller registers driving the D-PHY (offsets into the IP). */
#define DSI_PHY_RSTZ			0xa0
#define PHY_DISFORCEPLL			0
#define PHY_ENFORCEPLL			BIT(3)
#define PHY_DISABLECLK			0
#define PHY_ENABLECLK			BIT(2)
#define PHY_RSTZ			0
#define PHY_UNRSTZ			BIT(1)
#define PHY_SHUTDOWNZ			0
#define PHY_UNSHUTDOWNZ			BIT(0)

#define DSI_PHY_IF_CFG			0xa4
#define N_LANES(n)			((((n) - 1) & 0x3) << 0)
#define PHY_STOP_WAIT_TIME(cycle)	(((cycle) & 0xff) << 8)

#define DSI_PHY_STATUS			0xb0
#define LOCK				BIT(0)
#define STOP_STATE_CLK_LANE		BIT(2)

/* PHY test interface used to program the vendor PHY registers below. */
#define DSI_PHY_TST_CTRL0		0xb4
#define PHY_TESTCLK			BIT(1)
#define PHY_UNTESTCLK			0
#define PHY_TESTCLR			BIT(0)
#define PHY_UNTESTCLR			0

#define DSI_PHY_TST_CTRL1		0xb8
#define PHY_TESTEN			BIT(16)
#define PHY_UNTESTEN			0
#define PHY_TESTDOUT(n)			(((n) & 0xff) << 8)
#define PHY_TESTDIN(n)			(((n) & 0xff) << 0)

#define DSI_INT_ST0			0xbc
#define DSI_INT_ST1			0xc0
#define DSI_INT_MSK0			0xc4
#define DSI_INT_MSK1			0xc8

#define PHY_STATUS_TIMEOUT_US		10000
#define CMD_PKT_STATUS_TIMEOUT_US	20000

/* Bitfields of the vendor PHY registers (accessed via the test i/f). */
#define BYPASS_VCO_RANGE	BIT(7)
#define VCO_RANGE_CON_SEL(val)	(((val) & 0x7) << 3)
#define VCO_IN_CAP_CON_DEFAULT	(0x0 << 1)
#define VCO_IN_CAP_CON_LOW	(0x1 << 1)
#define VCO_IN_CAP_CON_HIGH	(0x2 << 1)
#define REF_BIAS_CUR_SEL	BIT(0)

#define CP_CURRENT_3UA	0x1
#define CP_CURRENT_4_5UA	0x2
#define CP_CURRENT_7_5UA	0x6
#define CP_CURRENT_6UA	0x9
#define CP_CURRENT_12UA	0xb
#define CP_CURRENT_SEL(val)	((val) & 0xf)
#define CP_PROGRAM_EN		BIT(7)

#define LPF_RESISTORS_15_5KOHM	0x1
#define LPF_RESISTORS_13KOHM	0x2
#define LPF_RESISTORS_11_5KOHM	0x4
#define LPF_RESISTORS_10_5KOHM	0x8
#define LPF_RESISTORS_8KOHM	0x10
#define LPF_PROGRAM_EN		BIT(6)
#define LPF_RESISTORS_SEL(val)	((val) & 0x3f)

#define HSFREQRANGE_SEL(val)	(((val) & 0x3f) << 1)

#define INPUT_DIVIDER(val)	(((val) - 1) & 0x7f)
#define LOW_PROGRAM_EN		0
#define HIGH_PROGRAM_EN		BIT(7)
#define LOOP_DIV_LOW_SEL(val)	(((val) - 1) & 0x1f)
#define LOOP_DIV_HIGH_SEL(val)	((((val) - 1) >> 5) & 0xf)
#define PLL_LOOP_DIV_EN		BIT(5)
#define PLL_INPUT_DIV_EN	BIT(4)

#define POWER_CONTROL		BIT(6)
#define INTERNAL_REG_CURRENT	BIT(3)
#define BIAS_BLOCK_ON		BIT(2)
#define BANDGAP_ON		BIT(0)

#define TER_RESISTOR_HIGH	BIT(7)
#define	TER_RESISTOR_LOW	0
#define LEVEL_SHIFTERS_ON	BIT(6)
#define TER_CAL_DONE		BIT(5)
#define SETRD_MAX		(0x7 << 2)
#define POWER_MANAGE		BIT(1)
#define	TER_RESISTORS_ON	BIT(0)

#define BIASEXTR_SEL(val)	((val) & 0x7)
#define BANDGAP_SEL(val)	((val) & 0x7)
#define TLP_PROGRAM_EN		BIT(7)
#define THS_PRE_PROGRAM_EN	BIT(7)
#define THS_ZERO_PROGRAM_EN	BIT(6)

/* Addresses of the vendor PHY registers (test-interface address space). */
#define PLL_BIAS_CUR_SEL_CAP_VCO_CONTROL		0x10
#define PLL_CP_CONTROL_PLL_LOCK_BYPASS			0x11
#define PLL_LPF_AND_CP_CONTROL				0x12
#define PLL_INPUT_DIVIDER_RATIO				0x17
#define PLL_LOOP_DIVIDER_RATIO				0x18
#define PLL_INPUT_AND_LOOP_DIVIDER_RATIOS_CONTROL	0x19
#define BANDGAP_AND_BIAS_CONTROL			0x20
#define TERMINATION_RESISTER_CONTROL			0x21
#define AFE_BIAS_BANDGAP_ANALOG_PROGRAMMABILITY		0x22
#define HS_RX_CONTROL_OF_LANE_CLK			0x34
#define HS_RX_CONTROL_OF_LANE_0				0x44
#define HS_RX_CONTROL_OF_LANE_1				0x54
#define HS_TX_CLOCK_LANE_REQUEST_STATE_TIME_CONTROL	0x60
#define HS_TX_CLOCK_LANE_PREPARE_STATE_TIME_CONTROL	0x61
#define HS_TX_CLOCK_LANE_HS_ZERO_STATE_TIME_CONTROL	0x62
#define HS_TX_CLOCK_LANE_TRAIL_STATE_TIME_CONTROL	0x63
#define HS_TX_CLOCK_LANE_EXIT_STATE_TIME_CONTROL	0x64
#define HS_TX_CLOCK_LANE_POST_TIME_CONTROL		0x65
#define HS_TX_DATA_LANE_REQUEST_STATE_TIME_CONTROL	0x70
#define HS_TX_DATA_LANE_PREPARE_STATE_TIME_CONTROL	0x71
#define HS_TX_DATA_LANE_HS_ZERO_STATE_TIME_CONTROL	0x72
#define HS_TX_DATA_LANE_TRAIL_STATE_TIME_CONTROL	0x73
#define HS_TX_DATA_LANE_EXIT_STATE_TIME_CONTROL		0x74
#define HS_RX_DATA_LANE_THS_SETTLE_CONTROL		0x75
#define HS_RX_CONTROL_OF_LANE_2				0x84
#define HS_RX_CONTROL_OF_LANE_3				0x94
#define DW_MIPI_NEEDS_PHY_CFG_CLK BIT(0)
#define DW_MIPI_NEEDS_GRF_CLK BIT(1)
#define PX30_GRF_PD_VO_CON1 0x0438
#define PX30_DSI_FORCETXSTOPMODE (0xf << 7)
#define PX30_DSI_FORCERXMODE BIT(6)
#define PX30_DSI_TURNDISABLE BIT(5)
#define PX30_DSI_LCDC_SEL BIT(0)
#define RK3288_GRF_SOC_CON6 0x025c
#define RK3288_DSI0_LCDC_SEL BIT(6)
#define RK3288_DSI1_LCDC_SEL BIT(9)
#define RK3399_GRF_SOC_CON20 0x6250
#define RK3399_DSI0_LCDC_SEL BIT(0)
#define RK3399_DSI1_LCDC_SEL BIT(4)
#define RK3399_GRF_SOC_CON22 0x6258
#define RK3399_DSI0_TURNREQUEST (0xf << 12)
#define RK3399_DSI0_TURNDISABLE (0xf << 8)
#define RK3399_DSI0_FORCETXSTOPMODE (0xf << 4)
#define RK3399_DSI0_FORCERXMODE (0xf << 0)
#define RK3399_GRF_SOC_CON23 0x625c
#define RK3399_DSI1_TURNDISABLE (0xf << 12)
#define RK3399_DSI1_FORCETXSTOPMODE (0xf << 8)
#define RK3399_DSI1_FORCERXMODE (0xf << 4)
#define RK3399_DSI1_ENABLE (0xf << 0)
#define RK3399_GRF_SOC_CON24 0x6260
#define RK3399_TXRX_MASTERSLAVEZ BIT(7)
#define RK3399_TXRX_ENABLECLK BIT(6)
#define RK3399_TXRX_BASEDIR BIT(5)
#define RK3399_TXRX_SRC_SEL_ISP0 BIT(4)
#define RK3399_TXRX_TURNREQUEST GENMASK(3, 0)
#define RK3568_GRF_VO_CON2 0x0368
#define RK3568_DSI0_SKEWCALHS (0x1f << 11)
#define RK3568_DSI0_FORCETXSTOPMODE (0xf << 4)
#define RK3568_DSI0_TURNDISABLE BIT(2)
#define RK3568_DSI0_FORCERXMODE BIT(0)
/*
* Note these registers do not appear in the datasheet, they are
* however present in the BSP driver which is where these values
* come from. Name GRF_VO_CON3 is assumed.
*/
#define RK3568_GRF_VO_CON3 0x36c
#define RK3568_DSI1_SKEWCALHS (0x1f << 11)
#define RK3568_DSI1_FORCETXSTOPMODE (0xf << 4)
#define RK3568_DSI1_TURNDISABLE BIT(2)
#define RK3568_DSI1_FORCERXMODE BIT(0)
/*
 * Rockchip GRF registers carry a write-enable mask in the upper 16 bits:
 * only bits whose mask bit is set are updated by the write.
 * Fully parenthesized so expression arguments (e.g. ternaries) expand
 * correctly — the previous form left "val" unparenthesized.
 */
#define HIWORD_UPDATE(val, mask)	((val) | ((mask) << 16))
/* Exclusive usage modes of the block; guarded by usage_mutex. */
enum {
	DW_DSI_USAGE_IDLE,
	DW_DSI_USAGE_DSI,
	DW_DSI_USAGE_PHY,
};
/*
 * Bandgap trim selectors for BANDGAP_SEL(); the names presumably encode
 * a percentage of the nominal reference (e.g. BANDGAP_97_07 = 97.07%).
 * The numeric order is hardware-defined — do not reorder.
 */
enum {
	BANDGAP_97_07,
	BANDGAP_98_05,
	BANDGAP_99_02,
	BANDGAP_100_00,
	BANDGAP_93_17,
	BANDGAP_94_15,
	BANDGAP_95_12,
	BANDGAP_96_10,
};
/* Bias extension selectors for BIASEXTR_SEL(); same naming scheme. */
enum {
	BIASEXTR_87_1,
	BIASEXTR_91_5,
	BIASEXTR_95_9,
	BIASEXTR_100,
	BIASEXTR_105_94,
	BIASEXTR_111_88,
	BIASEXTR_118_8,
	BIASEXTR_127_7,
};
/*
 * Per-SoC description of one DSI instance: its MMIO base (matched
 * against the probed resource) plus the GRF registers and values that
 * mux and configure it. A zero register offset means "not present".
 */
struct rockchip_dw_dsi_chip_data {
	u32 reg;		/* physical base address, matched in probe */
	u32 lcdsel_grf_reg;	/* GRF reg selecting the driving VOP */
	u32 lcdsel_big;		/* value for the "big" VOP */
	u32 lcdsel_lit;		/* value for the "little" VOP */
	u32 enable_grf_reg;
	u32 enable;
	u32 lanecfg1_grf_reg;
	u32 lanecfg1;
	u32 lanecfg2_grf_reg;
	u32 lanecfg2;
	/* optional hooks for using the block as a raw (RX) D-PHY */
	int (*dphy_rx_init)(struct phy *phy);
	int (*dphy_rx_power_on)(struct phy *phy);
	int (*dphy_rx_power_off)(struct phy *phy);
	unsigned int flags;	/* DW_MIPI_NEEDS_* clock requirements */
	unsigned int max_data_lanes;
};
/* Driver state for one Rockchip DW MIPI DSI controller instance. */
struct dw_mipi_dsi_rockchip {
	struct device *dev;
	struct rockchip_encoder encoder;
	void __iomem *base;		/* DSI controller registers */
	struct regmap *grf_regmap;	/* "general register file" syscon */
	struct clk *pclk;
	struct clk *pllref_clk;		/* NULL when an external phy is used */
	struct clk *grf_clk;		/* required around GRF writes on some SoCs */
	struct clk *phy_cfg_clk;	/* clocks the PHY test interface */
	/* dual-channel */
	bool is_slave;
	struct dw_mipi_dsi_rockchip *slave;
	/* optional external dphy */
	struct phy *phy;
	union phy_configure_opts phy_opts;
	/* being a phy for other mipi hosts */
	unsigned int usage_mode;	/* DW_DSI_USAGE_*; guarded by usage_mutex */
	struct mutex usage_mutex;
	struct phy *dphy;		/* the phy we expose when in PHY mode */
	struct phy_configure_opts_mipi_dphy dphy_config;
	unsigned int lane_mbps; /* per lane */
	u16 input_div;			/* internal PLL pre-divider N */
	u16 feedback_div;		/* internal PLL feedback divider M */
	u32 format;			/* MIPI_DSI_FMT_* negotiated format */
	struct dw_mipi_dsi *dmd;	/* shared Synopsys bridge instance */
	const struct rockchip_dw_dsi_chip_data *cdata;
	struct dw_mipi_dsi_plat_data pdata;
	bool dsi_bound;			/* set once component bind completed */
};
/* Map a DRM encoder back to the Rockchip DSI state embedding it. */
static struct dw_mipi_dsi_rockchip *to_dsi(struct drm_encoder *encoder)
{
	return container_of(to_rockchip_encoder(encoder),
			    struct dw_mipi_dsi_rockchip, encoder);
}
/* PLL tuning parameters for the internal D-PHY, keyed by lane bitrate. */
struct dphy_pll_parameter_map {
	unsigned int max_mbps;	/* upper bound (Mbps) this row covers */
	u8 hsfreqrange;		/* HSFREQRANGE_SEL field value */
	u8 icpctrl;		/* charge-pump current selector */
	u8 lpfctrl;		/* loop-filter resistor selector */
};
/* The table is based on 27MHz DPHY pll reference clock. */
static const struct dphy_pll_parameter_map dppa_map[] = {
	{  89, 0x00, CP_CURRENT_3UA, LPF_RESISTORS_13KOHM },
	{  99, 0x10, CP_CURRENT_3UA, LPF_RESISTORS_13KOHM },
	{ 109, 0x20, CP_CURRENT_3UA, LPF_RESISTORS_13KOHM },
	{ 129, 0x01, CP_CURRENT_3UA, LPF_RESISTORS_15_5KOHM },
	{ 139, 0x11, CP_CURRENT_3UA, LPF_RESISTORS_15_5KOHM },
	{ 149, 0x21, CP_CURRENT_3UA, LPF_RESISTORS_15_5KOHM },
	{ 169, 0x02, CP_CURRENT_6UA, LPF_RESISTORS_13KOHM },
	{ 179, 0x12, CP_CURRENT_6UA, LPF_RESISTORS_13KOHM },
	{ 199, 0x22, CP_CURRENT_6UA, LPF_RESISTORS_13KOHM },
	{ 219, 0x03, CP_CURRENT_4_5UA, LPF_RESISTORS_13KOHM },
	{ 239, 0x13, CP_CURRENT_4_5UA, LPF_RESISTORS_13KOHM },
	{ 249, 0x23, CP_CURRENT_4_5UA, LPF_RESISTORS_13KOHM },
	{ 269, 0x04, CP_CURRENT_6UA, LPF_RESISTORS_11_5KOHM },
	{ 299, 0x14, CP_CURRENT_6UA, LPF_RESISTORS_11_5KOHM },
	{ 329, 0x05, CP_CURRENT_3UA, LPF_RESISTORS_15_5KOHM },
	{ 359, 0x15, CP_CURRENT_3UA, LPF_RESISTORS_15_5KOHM },
	{ 399, 0x25, CP_CURRENT_3UA, LPF_RESISTORS_15_5KOHM },
	{ 449, 0x06, CP_CURRENT_7_5UA, LPF_RESISTORS_11_5KOHM },
	{ 499, 0x16, CP_CURRENT_7_5UA, LPF_RESISTORS_11_5KOHM },
	{ 549, 0x07, CP_CURRENT_7_5UA, LPF_RESISTORS_10_5KOHM },
	{ 599, 0x17, CP_CURRENT_7_5UA, LPF_RESISTORS_10_5KOHM },
	{ 649, 0x08, CP_CURRENT_7_5UA, LPF_RESISTORS_11_5KOHM },
	{ 699, 0x18, CP_CURRENT_7_5UA, LPF_RESISTORS_11_5KOHM },
	{ 749, 0x09, CP_CURRENT_7_5UA, LPF_RESISTORS_11_5KOHM },
	{ 799, 0x19, CP_CURRENT_7_5UA, LPF_RESISTORS_11_5KOHM },
	{ 849, 0x29, CP_CURRENT_7_5UA, LPF_RESISTORS_11_5KOHM },
	{ 899, 0x39, CP_CURRENT_7_5UA, LPF_RESISTORS_11_5KOHM },
	{ 949, 0x0a, CP_CURRENT_12UA, LPF_RESISTORS_8KOHM },
	{ 999, 0x1a, CP_CURRENT_12UA, LPF_RESISTORS_8KOHM },
	{1049, 0x2a, CP_CURRENT_12UA, LPF_RESISTORS_8KOHM },
	{1099, 0x3a, CP_CURRENT_12UA, LPF_RESISTORS_8KOHM },
	{1149, 0x0b, CP_CURRENT_12UA, LPF_RESISTORS_10_5KOHM },
	{1199, 0x1b, CP_CURRENT_12UA, LPF_RESISTORS_10_5KOHM },
	{1249, 0x2b, CP_CURRENT_12UA, LPF_RESISTORS_10_5KOHM },
	{1299, 0x3b, CP_CURRENT_12UA, LPF_RESISTORS_10_5KOHM },
	{1349, 0x0c, CP_CURRENT_12UA, LPF_RESISTORS_10_5KOHM },
	{1399, 0x1c, CP_CURRENT_12UA, LPF_RESISTORS_10_5KOHM },
	{1449, 0x2c, CP_CURRENT_12UA, LPF_RESISTORS_10_5KOHM },
	{1500, 0x3c, CP_CURRENT_12UA, LPF_RESISTORS_10_5KOHM }
};
/*
 * Return the index of the first dppa_map row whose max_mbps covers the
 * requested rate, or -EINVAL when the rate exceeds the table.
 */
static int max_mbps_to_parameter(unsigned int max_mbps)
{
	int idx;

	for (idx = 0; idx < ARRAY_SIZE(dppa_map); idx++) {
		if (max_mbps <= dppa_map[idx].max_mbps)
			return idx;
	}

	return -EINVAL;
}
/* Write a 32-bit value to one DSI controller register. */
static inline void dsi_write(struct dw_mipi_dsi_rockchip *dsi, u32 reg, u32 val)
{
	void __iomem *addr = dsi->base + reg;

	writel(val, addr);
}
/*
 * Write one byte to an internal D-PHY test register through the
 * TESTCLK/TESTEN/TESTDIN interface. The sequence is strictly
 * edge-ordered; do not reorder these writes.
 */
static void dw_mipi_dsi_phy_write(struct dw_mipi_dsi_rockchip *dsi,
				  u8 test_code,
				  u8 test_data)
{
	/*
	 * With the falling edge on TESTCLK, the TESTDIN[7:0] signal content
	 * is latched internally as the current test code. Test data is
	 * programmed internally by rising edge on TESTCLK.
	 */
	dsi_write(dsi, DSI_PHY_TST_CTRL0, PHY_TESTCLK | PHY_UNTESTCLR);
	dsi_write(dsi, DSI_PHY_TST_CTRL1, PHY_TESTEN | PHY_TESTDOUT(0) |
					  PHY_TESTDIN(test_code));
	dsi_write(dsi, DSI_PHY_TST_CTRL0, PHY_UNTESTCLK | PHY_UNTESTCLR);
	dsi_write(dsi, DSI_PHY_TST_CTRL1, PHY_UNTESTEN | PHY_TESTDOUT(0) |
					  PHY_TESTDIN(test_data));
	dsi_write(dsi, DSI_PHY_TST_CTRL0, PHY_TESTCLK | PHY_UNTESTCLR);
}
/*
 * ns2bc - convert a duration in nanoseconds to lane byte-clock cycles,
 * rounding up (the byte clock runs at lane_mbps / 8 MHz).
 */
static inline unsigned int ns2bc(struct dw_mipi_dsi_rockchip *dsi, int ns)
{
	unsigned int byte_mhz = dsi->lane_mbps / 8;

	return DIV_ROUND_UP(ns * byte_mhz, 1000);
}
/*
 * ns2ui - convert a duration in nanoseconds to HS unit intervals,
 * rounding up (one UI per bit at lane_mbps MHz).
 */
static inline unsigned int ns2ui(struct dw_mipi_dsi_rockchip *dsi, int ns)
{
	unsigned int mbps = dsi->lane_mbps;

	return DIV_ROUND_UP(ns * mbps, 1000);
}
/*
 * Program the internal Synopsys D-PHY through its test interface:
 * PLL range and dividers, charge pump / loop filter (from dppa_map),
 * analog bias and bandgap, line termination, and HS timing parameters
 * scaled from the previously computed lane_mbps. The write order
 * follows the IP programming sequence and must be preserved.
 */
static int dw_mipi_dsi_phy_init(void *priv_data)
{
	struct dw_mipi_dsi_rockchip *dsi = priv_data;
	int ret, i, vco;
	/* An external D-PHY does its own initialization. */
	if (dsi->phy)
		return 0;
	/*
	 * Get vco from frequency(lane_mbps)
	 * vco	frequency table
	 * 000 - between   80 and  200 MHz
	 * 001 - between  200 and  300 MHz
	 * 010 - between  300 and  500 MHz
	 * 011 - between  500 and  700 MHz
	 * 100 - between  700 and  900 MHz
	 * 101 - between  900 and 1100 MHz
	 * 110 - between 1100 and 1300 MHz
	 * 111 - between 1300 and 1500 MHz
	 */
	vco = (dsi->lane_mbps < 200) ? 0 : (dsi->lane_mbps + 100) / 200;
	i = max_mbps_to_parameter(dsi->lane_mbps);
	if (i < 0) {
		DRM_DEV_ERROR(dsi->dev,
			      "failed to get parameter for %dmbps clock\n",
			      dsi->lane_mbps);
		return i;
	}
	/* The PHY test interface is clocked by phy_cfg_clk where present. */
	ret = clk_prepare_enable(dsi->phy_cfg_clk);
	if (ret) {
		DRM_DEV_ERROR(dsi->dev, "Failed to enable phy_cfg_clk\n");
		return ret;
	}
	dw_mipi_dsi_phy_write(dsi, PLL_BIAS_CUR_SEL_CAP_VCO_CONTROL,
			      BYPASS_VCO_RANGE |
			      VCO_RANGE_CON_SEL(vco) |
			      VCO_IN_CAP_CON_LOW |
			      REF_BIAS_CUR_SEL);
	dw_mipi_dsi_phy_write(dsi, PLL_CP_CONTROL_PLL_LOCK_BYPASS,
			      CP_CURRENT_SEL(dppa_map[i].icpctrl));
	dw_mipi_dsi_phy_write(dsi, PLL_LPF_AND_CP_CONTROL,
			      CP_PROGRAM_EN | LPF_PROGRAM_EN |
			      LPF_RESISTORS_SEL(dppa_map[i].lpfctrl));
	dw_mipi_dsi_phy_write(dsi, HS_RX_CONTROL_OF_LANE_0,
			      HSFREQRANGE_SEL(dppa_map[i].hsfreqrange));
	dw_mipi_dsi_phy_write(dsi, PLL_INPUT_DIVIDER_RATIO,
			      INPUT_DIVIDER(dsi->input_div));
	/* Feedback divider M is split: low bits first, then high bits. */
	dw_mipi_dsi_phy_write(dsi, PLL_LOOP_DIVIDER_RATIO,
			      LOOP_DIV_LOW_SEL(dsi->feedback_div) |
			      LOW_PROGRAM_EN);
	/*
	 * We need set PLL_INPUT_AND_LOOP_DIVIDER_RATIOS_CONTROL immediately
	 * to make the configured LSB effective according to IP simulation
	 * and lab test results.
	 * Only in this way can we get correct mipi phy pll frequency.
	 */
	dw_mipi_dsi_phy_write(dsi, PLL_INPUT_AND_LOOP_DIVIDER_RATIOS_CONTROL,
			      PLL_LOOP_DIV_EN | PLL_INPUT_DIV_EN);
	dw_mipi_dsi_phy_write(dsi, PLL_LOOP_DIVIDER_RATIO,
			      LOOP_DIV_HIGH_SEL(dsi->feedback_div) |
			      HIGH_PROGRAM_EN);
	dw_mipi_dsi_phy_write(dsi, PLL_INPUT_AND_LOOP_DIVIDER_RATIOS_CONTROL,
			      PLL_LOOP_DIV_EN | PLL_INPUT_DIV_EN);
	dw_mipi_dsi_phy_write(dsi, AFE_BIAS_BANDGAP_ANALOG_PROGRAMMABILITY,
			      LOW_PROGRAM_EN | BIASEXTR_SEL(BIASEXTR_127_7));
	dw_mipi_dsi_phy_write(dsi, AFE_BIAS_BANDGAP_ANALOG_PROGRAMMABILITY,
			      HIGH_PROGRAM_EN | BANDGAP_SEL(BANDGAP_96_10));
	dw_mipi_dsi_phy_write(dsi, BANDGAP_AND_BIAS_CONTROL,
			      POWER_CONTROL | INTERNAL_REG_CURRENT |
			      BIAS_BLOCK_ON | BANDGAP_ON);
	dw_mipi_dsi_phy_write(dsi, TERMINATION_RESISTER_CONTROL,
			      TER_RESISTOR_LOW | TER_CAL_DONE |
			      SETRD_MAX | TER_RESISTORS_ON);
	dw_mipi_dsi_phy_write(dsi, TERMINATION_RESISTER_CONTROL,
			      TER_RESISTOR_HIGH | LEVEL_SHIFTERS_ON |
			      SETRD_MAX | POWER_MANAGE |
			      TER_RESISTORS_ON);
	/* HS timings: ns2bc()/ns2ui() scale spec times to hardware units. */
	dw_mipi_dsi_phy_write(dsi, HS_TX_CLOCK_LANE_REQUEST_STATE_TIME_CONTROL,
			      TLP_PROGRAM_EN | ns2bc(dsi, 500));
	dw_mipi_dsi_phy_write(dsi, HS_TX_CLOCK_LANE_PREPARE_STATE_TIME_CONTROL,
			      THS_PRE_PROGRAM_EN | ns2ui(dsi, 40));
	dw_mipi_dsi_phy_write(dsi, HS_TX_CLOCK_LANE_HS_ZERO_STATE_TIME_CONTROL,
			      THS_ZERO_PROGRAM_EN | ns2bc(dsi, 300));
	dw_mipi_dsi_phy_write(dsi, HS_TX_CLOCK_LANE_TRAIL_STATE_TIME_CONTROL,
			      THS_PRE_PROGRAM_EN | ns2ui(dsi, 100));
	dw_mipi_dsi_phy_write(dsi, HS_TX_CLOCK_LANE_EXIT_STATE_TIME_CONTROL,
			      BIT(5) | ns2bc(dsi, 100));
	dw_mipi_dsi_phy_write(dsi, HS_TX_CLOCK_LANE_POST_TIME_CONTROL,
			      BIT(5) | (ns2bc(dsi, 60) + 7));
	dw_mipi_dsi_phy_write(dsi, HS_TX_DATA_LANE_REQUEST_STATE_TIME_CONTROL,
			      TLP_PROGRAM_EN | ns2bc(dsi, 500));
	dw_mipi_dsi_phy_write(dsi, HS_TX_DATA_LANE_PREPARE_STATE_TIME_CONTROL,
			      THS_PRE_PROGRAM_EN | (ns2ui(dsi, 50) + 20));
	dw_mipi_dsi_phy_write(dsi, HS_TX_DATA_LANE_HS_ZERO_STATE_TIME_CONTROL,
			      THS_ZERO_PROGRAM_EN | (ns2bc(dsi, 140) + 2));
	dw_mipi_dsi_phy_write(dsi, HS_TX_DATA_LANE_TRAIL_STATE_TIME_CONTROL,
			      THS_PRE_PROGRAM_EN | (ns2ui(dsi, 60) + 8));
	dw_mipi_dsi_phy_write(dsi, HS_TX_DATA_LANE_EXIT_STATE_TIME_CONTROL,
			      BIT(5) | ns2bc(dsi, 100));
	clk_disable_unprepare(dsi->phy_cfg_clk);
	return ret;
}
/*
 * Power on the external D-PHY with the options computed in
 * dw_mipi_dsi_get_lane_mbps(). The callback is void, so failures can
 * only be reported; previously the return values of phy_configure()
 * and phy_power_on() were silently discarded, leaving a dead link
 * undiagnosable.
 */
static void dw_mipi_dsi_phy_power_on(void *priv_data)
{
	struct dw_mipi_dsi_rockchip *dsi = priv_data;
	int ret;

	ret = phy_set_mode(dsi->phy, PHY_MODE_MIPI_DPHY);
	if (ret) {
		DRM_DEV_ERROR(dsi->dev, "failed to set phy mode: %d\n", ret);
		return;
	}

	ret = phy_configure(dsi->phy, &dsi->phy_opts);
	if (ret) {
		DRM_DEV_ERROR(dsi->dev, "failed to configure phy: %d\n", ret);
		return;
	}

	ret = phy_power_on(dsi->phy);
	if (ret)
		DRM_DEV_ERROR(dsi->dev, "failed to power on phy: %d\n", ret);
}
/* Power down the external D-PHY; counterpart of phy_power_on above. */
static void dw_mipi_dsi_phy_power_off(void *priv_data)
{
	struct dw_mipi_dsi_rockchip *dsi = priv_data;
	phy_power_off(dsi->phy);
}
/*
 * Compute the per-lane HS bitrate for a mode and, for the internal PHY,
 * search for the PLL pre-divider (N) and feedback divider (M) that best
 * approximate it. The target is pixel clock * bpp / lanes with a 25%
 * margin (the 10/8 factor) so HS bandwidth exceeds the RGB bandwidth.
 */
static int
dw_mipi_dsi_get_lane_mbps(void *priv_data, const struct drm_display_mode *mode,
			  unsigned long mode_flags, u32 lanes, u32 format,
			  unsigned int *lane_mbps)
{
	struct dw_mipi_dsi_rockchip *dsi = priv_data;
	int bpp;
	unsigned long mpclk, tmp;
	unsigned int target_mbps = 1000;
	unsigned int max_mbps = dppa_map[ARRAY_SIZE(dppa_map) - 1].max_mbps;
	unsigned long best_freq = 0;
	unsigned long fvco_min, fvco_max, fin, fout;
	unsigned int min_prediv, max_prediv;
	unsigned int _prediv, best_prediv;
	unsigned long _fbdiv, best_fbdiv;
	unsigned long min_delta = ULONG_MAX;
	dsi->format = format;
	bpp = mipi_dsi_pixel_format_to_bpp(dsi->format);
	if (bpp < 0) {
		DRM_DEV_ERROR(dsi->dev,
			      "failed to get bpp for pixel format %d\n",
			      dsi->format);
		return bpp;
	}
	/* mode->clock is in kHz, so mpclk is the pixel clock in MHz. */
	mpclk = DIV_ROUND_UP(mode->clock, MSEC_PER_SEC);
	if (mpclk) {
		/* take 1 / 0.8, since mbps must big than bandwidth of RGB */
		tmp = mpclk * (bpp / lanes) * 10 / 8;
		if (tmp < max_mbps)
			target_mbps = tmp;
		else
			/* falls back to the default 1000 Mbps target */
			DRM_DEV_ERROR(dsi->dev,
				      "DPHY clock frequency is out of range\n");
	}
	/* for external phy only a the mipi_dphy_config is necessary */
	if (dsi->phy) {
		phy_mipi_dphy_get_default_config(mode->clock * 1000 * 10 / 8,
						 bpp, lanes,
						 &dsi->phy_opts.mipi_dphy);
		dsi->lane_mbps = target_mbps;
		*lane_mbps = dsi->lane_mbps;
		return 0;
	}
	fin = clk_get_rate(dsi->pllref_clk);
	fout = target_mbps * USEC_PER_SEC;
	/* constraint: 5Mhz <= Fref / N <= 40MHz */
	min_prediv = DIV_ROUND_UP(fin, 40 * USEC_PER_SEC);
	max_prediv = fin / (5 * USEC_PER_SEC);
	/* constraint: 80MHz <= Fvco <= 1500Mhz */
	fvco_min = 80 * USEC_PER_SEC;
	fvco_max = 1500 * USEC_PER_SEC;
	for (_prediv = min_prediv; _prediv <= max_prediv; _prediv++) {
		u64 tmp;
		u32 delta;
		/* Fvco = Fref * M / N */
		tmp = (u64)fout * _prediv;
		do_div(tmp, fin);
		_fbdiv = tmp;
		/*
		 * Due to the use of a "by 2 pre-scaler," the range of the
		 * feedback multiplication value M is limited to even division
		 * numbers, and m must be greater than 6, not bigger than 512.
		 */
		if (_fbdiv < 6 || _fbdiv > 512)
			continue;
		_fbdiv += _fbdiv % 2;
		tmp = (u64)_fbdiv * fin;
		do_div(tmp, _prediv);
		if (tmp < fvco_min || tmp > fvco_max)
			continue;
		/*
		 * NOTE(review): fout - tmp is unsigned arithmetic, so when
		 * tmp > fout the difference wraps before abs() sees it and
		 * is truncated into the u32 delta — presumably still larger
		 * than any genuine delta, but worth confirming.
		 */
		delta = abs(fout - tmp);
		if (delta < min_delta) {
			best_prediv = _prediv;
			best_fbdiv = _fbdiv;
			min_delta = delta;
			best_freq = tmp;
		}
	}
	if (best_freq) {
		dsi->lane_mbps = DIV_ROUND_UP(best_freq, USEC_PER_SEC);
		*lane_mbps = dsi->lane_mbps;
		dsi->input_div = best_prediv;
		dsi->feedback_div = best_fbdiv;
	} else {
		DRM_DEV_ERROR(dsi->dev, "Can not find best_freq for DPHY\n");
		return -EINVAL;
	}
	return 0;
}
/* One row of high-speed transition timings, valid up to maxfreq Mbps. */
struct hstt {
	unsigned int maxfreq;
	struct dw_mipi_dsi_dphy_timing timing;
};
/* Build one hstt row; arguments are in lane byte-clock cycles. */
#define HSTT(_maxfreq, _c_lp2hs, _c_hs2lp, _d_lp2hs, _d_hs2lp)	\
{					\
	.maxfreq = _maxfreq,		\
	.timing = {			\
		.clk_lp2hs = _c_lp2hs,	\
		.clk_hs2lp = _c_hs2lp,	\
		.data_lp2hs = _d_lp2hs,	\
		.data_hs2lp = _d_hs2lp,	\
	}				\
}
/* Table A-3 High-Speed Transition Times */
static struct hstt hstt_table[] = {
	HSTT(  90,  32, 20,  26, 13),
	HSTT( 100,  35, 23,  28, 14),
	HSTT( 110,  32, 22,  26, 13),
	HSTT( 130,  31, 20,  27, 13),
	HSTT( 140,  33, 22,  26, 14),
	HSTT( 150,  33, 21,  26, 14),
	HSTT( 170,  32, 20,  27, 13),
	HSTT( 180,  36, 23,  30, 15),
	HSTT( 200,  40, 22,  33, 15),
	HSTT( 220,  40, 22,  33, 15),
	HSTT( 240,  44, 24,  36, 16),
	HSTT( 250,  48, 24,  38, 17),
	HSTT( 270,  48, 24,  38, 17),
	HSTT( 300,  50, 27,  41, 18),
	HSTT( 330,  56, 28,  45, 18),
	HSTT( 360,  59, 28,  48, 19),
	HSTT( 400,  61, 30,  50, 20),
	HSTT( 450,  67, 31,  55, 21),
	HSTT( 500,  73, 31,  59, 22),
	HSTT( 550,  79, 36,  63, 24),
	HSTT( 600,  83, 37,  68, 25),
	HSTT( 650,  90, 38,  73, 27),
	HSTT( 700,  95, 40,  77, 28),
	HSTT( 750, 102, 40,  84, 28),
	HSTT( 800, 106, 42,  87, 30),
	HSTT( 850, 113, 44,  93, 31),
	HSTT( 900, 118, 47,  98, 32),
	HSTT( 950, 124, 47, 102, 34),
	HSTT(1000, 130, 49, 107, 35),
	HSTT(1050, 135, 51, 111, 37),
	HSTT(1100, 139, 51, 114, 38),
	HSTT(1150, 146, 54, 120, 40),
	HSTT(1200, 153, 57, 125, 41),
	HSTT(1250, 158, 58, 130, 42),
	HSTT(1300, 163, 58, 135, 44),
	HSTT(1350, 168, 60, 140, 45),
	HSTT(1400, 172, 64, 144, 47),
	HSTT(1450, 176, 65, 148, 48),
	HSTT(1500, 181, 66, 153, 50)
};
/*
 * Look up HS transition timings for the given lane rate: the first
 * hstt_table row whose maxfreq exceeds lane_mbps, clamping to the last
 * row for rates at or beyond the end of the table.
 */
static int
dw_mipi_dsi_phy_get_timing(void *priv_data, unsigned int lane_mbps,
			   struct dw_mipi_dsi_dphy_timing *timing)
{
	int idx = 0;

	while (idx < ARRAY_SIZE(hstt_table) &&
	       lane_mbps >= hstt_table[idx].maxfreq)
		idx++;

	if (idx == ARRAY_SIZE(hstt_table))
		idx = ARRAY_SIZE(hstt_table) - 1;

	*timing = hstt_table[idx].timing;

	return 0;
}
/* PHY callbacks handed to the shared dw-mipi-dsi bridge driver. */
static const struct dw_mipi_dsi_phy_ops dw_mipi_dsi_rockchip_phy_ops = {
	.init = dw_mipi_dsi_phy_init,
	.power_on = dw_mipi_dsi_phy_power_on,
	.power_off = dw_mipi_dsi_phy_power_off,
	.get_lane_mbps = dw_mipi_dsi_get_lane_mbps,
	.get_timing = dw_mipi_dsi_phy_get_timing,
};
static void dw_mipi_dsi_rockchip_config(struct dw_mipi_dsi_rockchip *dsi)
{
if (dsi->cdata->lanecfg1_grf_reg)
regmap_write(dsi->grf_regmap, dsi->cdata->lanecfg1_grf_reg,
dsi->cdata->lanecfg1);
if (dsi->cdata->lanecfg2_grf_reg)
regmap_write(dsi->grf_regmap, dsi->cdata->lanecfg2_grf_reg,
dsi->cdata->lanecfg2);
if (dsi->cdata->enable_grf_reg)
regmap_write(dsi->grf_regmap, dsi->cdata->enable_grf_reg,
dsi->cdata->enable);
}
static void dw_mipi_dsi_rockchip_set_lcdsel(struct dw_mipi_dsi_rockchip *dsi,
int mux)
{
if (dsi->cdata->lcdsel_grf_reg)
regmap_write(dsi->grf_regmap, dsi->cdata->lcdsel_grf_reg,
mux ? dsi->cdata->lcdsel_lit : dsi->cdata->lcdsel_big);
}
/*
 * Atomic check: translate the negotiated DSI pixel format into the
 * matching VOP output mode and flag dual-DSI operation on the CRTC
 * state.
 */
static int
dw_mipi_dsi_encoder_atomic_check(struct drm_encoder *encoder,
				 struct drm_crtc_state *crtc_state,
				 struct drm_connector_state *conn_state)
{
	struct dw_mipi_dsi_rockchip *dsi = to_dsi(encoder);
	struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc_state);

	if (dsi->format == MIPI_DSI_FMT_RGB888) {
		s->output_mode = ROCKCHIP_OUT_MODE_P888;
	} else if (dsi->format == MIPI_DSI_FMT_RGB666) {
		s->output_mode = ROCKCHIP_OUT_MODE_P666;
	} else if (dsi->format == MIPI_DSI_FMT_RGB565) {
		s->output_mode = ROCKCHIP_OUT_MODE_P565;
	} else {
		/* No VOP mode exists for any other DSI format. */
		WARN_ON(1);
		return -EINVAL;
	}

	s->output_type = DRM_MODE_CONNECTOR_DSI;
	if (dsi->slave)
		s->output_flags = ROCKCHIP_OUTPUT_DSI_DUAL;

	return 0;
}
/*
 * Encoder enable: point the GRF mux at whichever VOP is driving us,
 * for both controllers in a dual-DSI pair.
 */
static void dw_mipi_dsi_encoder_enable(struct drm_encoder *encoder)
{
	struct dw_mipi_dsi_rockchip *dsi = to_dsi(encoder);
	int mux, ret;

	/* The active endpoint id tells us which VOP is connected. */
	mux = drm_of_encoder_active_endpoint_id(dsi->dev->of_node,
						&dsi->encoder.encoder);
	if (mux < 0)
		return;

	/*
	 * For the RK3399, the clk of grf must be enabled before writing grf
	 * register. And for RK3288 or other soc, this grf_clk must be NULL,
	 * the clk_prepare_enable return true directly.
	 */
	ret = clk_prepare_enable(dsi->grf_clk);
	if (ret) {
		DRM_DEV_ERROR(dsi->dev, "Failed to enable grf_clk: %d\n", ret);
		return;
	}

	dw_mipi_dsi_rockchip_set_lcdsel(dsi, mux);
	if (dsi->slave)
		dw_mipi_dsi_rockchip_set_lcdsel(dsi->slave, mux);

	clk_disable_unprepare(dsi->grf_clk);
}
/* DRM encoder helper callbacks for the DSI encoder. */
static const struct drm_encoder_helper_funcs
dw_mipi_dsi_encoder_helper_funcs = {
	.atomic_check = dw_mipi_dsi_encoder_atomic_check,
	.enable = dw_mipi_dsi_encoder_enable,
};
/*
 * Create and register the DRM encoder for this DSI instance,
 * restricting it to the CRTCs wired up in the devicetree graph.
 */
static int rockchip_dsi_drm_create_encoder(struct dw_mipi_dsi_rockchip *dsi,
					   struct drm_device *drm_dev)
{
	struct drm_encoder *enc = &dsi->encoder.encoder;
	int ret;

	enc->possible_crtcs = drm_of_find_possible_crtcs(drm_dev,
							 dsi->dev->of_node);

	ret = drm_simple_encoder_init(drm_dev, enc, DRM_MODE_ENCODER_DSI);
	if (ret) {
		DRM_ERROR("Failed to initialize encoder with drm\n");
		return ret;
	}

	drm_encoder_helper_add(enc, &dw_mipi_dsi_encoder_helper_funcs);

	return 0;
}
/*
 * Locate the partner controller of a dual-channel (dual-DSI) setup:
 * both controllers attach to the same display device on port 1,
 * endpoint 0, so walk all compatible nodes and return the one whose
 * port1-ep0 remote matches ours.
 *
 * Returns NULL when no partner exists, ERR_PTR(-EPROBE_DEFER) when the
 * partner has not probed yet, or the partner's struct device.
 * NOTE(review): on success the reference taken by
 * of_find_device_by_node() travels with the returned device — callers
 * appear responsible for put_device(); verify each call site.
 */
static struct device
*dw_mipi_dsi_rockchip_find_second(struct dw_mipi_dsi_rockchip *dsi)
{
	const struct of_device_id *match;
	struct device_node *node = NULL, *local;
	/* our own remote display node on port 1, endpoint 0 */
	match = of_match_device(dsi->dev->driver->of_match_table, dsi->dev);
	local = of_graph_get_remote_node(dsi->dev->of_node, 1, 0);
	if (!local)
		return NULL;
	while ((node = of_find_compatible_node(node, NULL,
					       match->compatible))) {
		struct device_node *remote;
		/* found ourself */
		if (node == dsi->dev->of_node)
			continue;
		remote = of_graph_get_remote_node(node, 1, 0);
		if (!remote)
			continue;
		/* same display device in port1-ep0 for both */
		if (remote == local) {
			struct dw_mipi_dsi_rockchip *dsi2;
			struct platform_device *pdev;
			pdev = of_find_device_by_node(node);
			/*
			 * we have found the second, so will either return it
			 * or return with an error. In any case won't need the
			 * nodes anymore nor continue the loop.
			 */
			of_node_put(remote);
			of_node_put(node);
			of_node_put(local);
			if (!pdev)
				return ERR_PTR(-EPROBE_DEFER);
			dsi2 = platform_get_drvdata(pdev);
			if (!dsi2) {
				/* partner device exists but not probed yet */
				platform_device_put(pdev);
				return ERR_PTR(-EPROBE_DEFER);
			}
			return &pdev->dev;
		}
		of_node_put(remote);
	}
	of_node_put(local);
	return NULL;
}
/*
 * Component bind: resolve the dual-DSI topology, power the block(s) up,
 * write the static GRF lane configuration, create the encoder and hand
 * control to the shared dw-mipi-dsi bridge driver. The slave of a
 * dual-DSI pair only marks itself and returns; its master drives it.
 */
static int dw_mipi_dsi_rockchip_bind(struct device *dev,
				     struct device *master,
				     void *data)
{
	struct dw_mipi_dsi_rockchip *dsi = dev_get_drvdata(dev);
	struct drm_device *drm_dev = data;
	struct device *second;
	bool master1, master2;
	int ret;
	second = dw_mipi_dsi_rockchip_find_second(dsi);
	if (IS_ERR(second))
		return PTR_ERR(second);
	if (second) {
		/*
		 * Exactly one of the two controllers must be clock-master.
		 * NOTE(review): the device reference from find_second() is
		 * only dropped on the master success path below; the early
		 * error returns appear to leak it — verify.
		 */
		master1 = of_property_read_bool(dsi->dev->of_node,
						"clock-master");
		master2 = of_property_read_bool(second->of_node,
						"clock-master");
		if (master1 && master2) {
			DRM_DEV_ERROR(dsi->dev, "only one clock-master allowed\n");
			return -EINVAL;
		}
		if (!master1 && !master2) {
			DRM_DEV_ERROR(dsi->dev, "no clock-master defined\n");
			return -EINVAL;
		}
		/* we are the slave in dual-DSI */
		if (!master1) {
			dsi->is_slave = true;
			return 0;
		}
		dsi->slave = dev_get_drvdata(second);
		if (!dsi->slave) {
			DRM_DEV_ERROR(dev, "could not get slaves data\n");
			return -ENODEV;
		}
		dsi->slave->is_slave = true;
		dw_mipi_dsi_set_slave(dsi->dmd, dsi->slave->dmd);
		put_device(second);
	}
	pm_runtime_get_sync(dsi->dev);
	if (dsi->slave)
		pm_runtime_get_sync(dsi->slave->dev);
	ret = clk_prepare_enable(dsi->pllref_clk);
	if (ret) {
		DRM_DEV_ERROR(dev, "Failed to enable pllref_clk: %d\n", ret);
		goto out_pm_runtime;
	}
	/*
	 * With the GRF clock running, write lane and dual-mode configurations
	 * that won't change immediately. If we waited until enable() to do
	 * this, things like panel preparation would not be able to send
	 * commands over DSI.
	 */
	ret = clk_prepare_enable(dsi->grf_clk);
	if (ret) {
		DRM_DEV_ERROR(dsi->dev, "Failed to enable grf_clk: %d\n", ret);
		goto out_pll_clk;
	}
	dw_mipi_dsi_rockchip_config(dsi);
	if (dsi->slave)
		dw_mipi_dsi_rockchip_config(dsi->slave);
	clk_disable_unprepare(dsi->grf_clk);
	ret = rockchip_dsi_drm_create_encoder(dsi, drm_dev);
	if (ret) {
		DRM_DEV_ERROR(dev, "Failed to create drm encoder\n");
		goto out_pll_clk;
	}
	rockchip_drm_encoder_set_crtc_endpoint_id(&dsi->encoder,
						  dev->of_node, 0, 0);
	ret = dw_mipi_dsi_bind(dsi->dmd, &dsi->encoder.encoder);
	if (ret) {
		DRM_DEV_ERROR(dev, "Failed to bind: %d\n", ret);
		goto out_pll_clk;
	}
	/* resume() consults this flag before re-writing GRF state */
	dsi->dsi_bound = true;
	return 0;
out_pll_clk:
	clk_disable_unprepare(dsi->pllref_clk);
out_pm_runtime:
	pm_runtime_put(dsi->dev);
	if (dsi->slave)
		pm_runtime_put(dsi->slave->dev);
	return ret;
}
/*
 * Component unbind: the slave of a dual-DSI pair is handled through
 * its master and bails out; the master undoes bind() in reverse order.
 */
static void dw_mipi_dsi_rockchip_unbind(struct device *dev,
					struct device *master,
					void *data)
{
	struct dw_mipi_dsi_rockchip *dsi = dev_get_drvdata(dev);
	if (dsi->is_slave)
		return;
	/* resume() checks dsi_bound before touching GRF state */
	dsi->dsi_bound = false;
	dw_mipi_dsi_unbind(dsi->dmd);
	clk_disable_unprepare(dsi->pllref_clk);
	pm_runtime_put(dsi->dev);
	if (dsi->slave)
		pm_runtime_put(dsi->slave->dev);
}
/* Component callbacks used when the block acts as a DSI host. */
static const struct component_ops dw_mipi_dsi_rockchip_ops = {
	.bind	= dw_mipi_dsi_rockchip_bind,
	.unbind	= dw_mipi_dsi_rockchip_unbind,
};
/*
 * DSI host attach: claim the block for DSI-host duty (it may instead
 * be in use as a raw D-PHY) and register the component(s) — also the
 * partner controller's, in a dual-DSI setup.
 *
 * Fix: the original error paths after the first component_add()
 * succeeded reset usage_mode to IDLE but left that component
 * registered; unwind it with component_del() before bailing out.
 */
static int dw_mipi_dsi_rockchip_host_attach(void *priv_data,
					    struct mipi_dsi_device *device)
{
	struct dw_mipi_dsi_rockchip *dsi = priv_data;
	struct device *second;
	int ret;

	mutex_lock(&dsi->usage_mutex);
	if (dsi->usage_mode != DW_DSI_USAGE_IDLE) {
		DRM_DEV_ERROR(dsi->dev, "dsi controller already in use\n");
		mutex_unlock(&dsi->usage_mutex);
		return -EBUSY;
	}
	dsi->usage_mode = DW_DSI_USAGE_DSI;
	mutex_unlock(&dsi->usage_mutex);

	ret = component_add(dsi->dev, &dw_mipi_dsi_rockchip_ops);
	if (ret) {
		DRM_DEV_ERROR(dsi->dev, "Failed to register component: %d\n",
			      ret);
		goto out;
	}

	second = dw_mipi_dsi_rockchip_find_second(dsi);
	if (IS_ERR(second)) {
		ret = PTR_ERR(second);
		goto out_del;
	}
	if (second) {
		ret = component_add(second, &dw_mipi_dsi_rockchip_ops);
		if (ret) {
			DRM_DEV_ERROR(second,
				      "Failed to register component: %d\n",
				      ret);
			goto out_del;
		}
	}

	return 0;

out_del:
	component_del(dsi->dev, &dw_mipi_dsi_rockchip_ops);
out:
	mutex_lock(&dsi->usage_mutex);
	dsi->usage_mode = DW_DSI_USAGE_IDLE;
	mutex_unlock(&dsi->usage_mutex);
	return ret;
}
/*
 * DSI host detach: drop the component registration(s) added in
 * host_attach and release the block back to the idle usage mode.
 */
static int dw_mipi_dsi_rockchip_host_detach(void *priv_data,
					    struct mipi_dsi_device *device)
{
	struct dw_mipi_dsi_rockchip *dsi = priv_data;
	struct device *second;

	/* Tear down the partner's component first, if dual-DSI. */
	second = dw_mipi_dsi_rockchip_find_second(dsi);
	if (!IS_ERR_OR_NULL(second))
		component_del(second, &dw_mipi_dsi_rockchip_ops);

	component_del(dsi->dev, &dw_mipi_dsi_rockchip_ops);

	mutex_lock(&dsi->usage_mutex);
	dsi->usage_mode = DW_DSI_USAGE_IDLE;
	mutex_unlock(&dsi->usage_mutex);

	return 0;
}
/* Host callbacks handed to the shared dw-mipi-dsi bridge driver. */
static const struct dw_mipi_dsi_host_ops dw_mipi_dsi_rockchip_host_ops = {
	.attach = dw_mipi_dsi_rockchip_host_attach,
	.detach = dw_mipi_dsi_rockchip_host_detach,
};
/* Stub bind used while the block serves as a D-PHY for another host. */
static int dw_mipi_dsi_rockchip_dphy_bind(struct device *dev,
					  struct device *master,
					  void *data)
{
	/*
	 * Nothing to do when used as a dphy.
	 * Just make the rest of Rockchip-DRM happy
	 * by being here.
	 */
	return 0;
}
/* Stub unbind matching dw_mipi_dsi_rockchip_dphy_bind(). */
static void dw_mipi_dsi_rockchip_dphy_unbind(struct device *dev,
					     struct device *master,
					     void *data)
{
	/* Nothing to do when used as a dphy. */
}
/* Component callbacks used when the block acts as a raw D-PHY. */
static const struct component_ops dw_mipi_dsi_rockchip_dphy_ops = {
	.bind	= dw_mipi_dsi_rockchip_dphy_bind,
	.unbind	= dw_mipi_dsi_rockchip_dphy_unbind,
};
/*
 * phy_ops .init: claim the block for raw D-PHY use, register a stub
 * component so rockchip-drm binding still completes, then run the
 * SoC-specific RX init hook (if any) with pclk and grf_clk enabled.
 */
static int dw_mipi_dsi_dphy_init(struct phy *phy)
{
	struct dw_mipi_dsi_rockchip *dsi = phy_get_drvdata(phy);
	int ret;
	/* The block can be a DSI host or a PHY, never both at once. */
	mutex_lock(&dsi->usage_mutex);
	if (dsi->usage_mode != DW_DSI_USAGE_IDLE) {
		DRM_DEV_ERROR(dsi->dev, "dsi controller already in use\n");
		mutex_unlock(&dsi->usage_mutex);
		return -EBUSY;
	}
	dsi->usage_mode = DW_DSI_USAGE_PHY;
	mutex_unlock(&dsi->usage_mutex);
	ret = component_add(dsi->dev, &dw_mipi_dsi_rockchip_dphy_ops);
	if (ret < 0)
		goto err_graph;
	if (dsi->cdata->dphy_rx_init) {
		ret = clk_prepare_enable(dsi->pclk);
		if (ret < 0)
			goto err_init;
		ret = clk_prepare_enable(dsi->grf_clk);
		if (ret) {
			clk_disable_unprepare(dsi->pclk);
			goto err_init;
		}
		ret = dsi->cdata->dphy_rx_init(phy);
		/* clocks only needed for the init hook itself */
		clk_disable_unprepare(dsi->grf_clk);
		clk_disable_unprepare(dsi->pclk);
		if (ret < 0)
			goto err_init;
	}
	return 0;
err_init:
	component_del(dsi->dev, &dw_mipi_dsi_rockchip_dphy_ops);
err_graph:
	mutex_lock(&dsi->usage_mutex);
	dsi->usage_mode = DW_DSI_USAGE_IDLE;
	mutex_unlock(&dsi->usage_mutex);
	return ret;
}
/* phy_ops .exit: undo dphy_init — drop the component, free the block. */
static int dw_mipi_dsi_dphy_exit(struct phy *phy)
{
	struct dw_mipi_dsi_rockchip *dsi = phy_get_drvdata(phy);

	component_del(dsi->dev, &dw_mipi_dsi_rockchip_dphy_ops);

	mutex_lock(&dsi->usage_mutex);
	dsi->usage_mode = DW_DSI_USAGE_IDLE;
	mutex_unlock(&dsi->usage_mutex);

	return 0;
}
static int dw_mipi_dsi_dphy_configure(struct phy *phy, union phy_configure_opts *opts)
{
struct phy_configure_opts_mipi_dphy *config = &opts->mipi_dphy;
struct dw_mipi_dsi_rockchip *dsi = phy_get_drvdata(phy);
int ret;
ret = phy_mipi_dphy_config_validate(&opts->mipi_dphy);
if (ret)
return ret;
dsi->dphy_config = *config;
dsi->lane_mbps = div_u64(config->hs_clk_rate, 1000 * 1000 * 1);
return 0;
}
/*
 * phy_ops .power_on: bring the block up as a D-PHY using the lane rate
 * cached by dw_mipi_dsi_dphy_configure(), run the SoC-specific bringup
 * hook, and program hsfreqrange through the PHY test interface. The
 * error labels unwind in strict reverse order of acquisition.
 */
static int dw_mipi_dsi_dphy_power_on(struct phy *phy)
{
	struct dw_mipi_dsi_rockchip *dsi = phy_get_drvdata(phy);
	int i, ret;
	DRM_DEV_DEBUG(dsi->dev, "lanes %d - data_rate_mbps %u\n",
		      dsi->dphy_config.lanes, dsi->lane_mbps);
	i = max_mbps_to_parameter(dsi->lane_mbps);
	if (i < 0) {
		DRM_DEV_ERROR(dsi->dev, "failed to get parameter for %dmbps clock\n",
			      dsi->lane_mbps);
		return i;
	}
	ret = pm_runtime_resume_and_get(dsi->dev);
	if (ret < 0) {
		DRM_DEV_ERROR(dsi->dev, "failed to enable device: %d\n", ret);
		return ret;
	}
	ret = clk_prepare_enable(dsi->pclk);
	if (ret) {
		DRM_DEV_ERROR(dsi->dev, "Failed to enable pclk: %d\n", ret);
		goto err_pclk;
	}
	ret = clk_prepare_enable(dsi->grf_clk);
	if (ret) {
		DRM_DEV_ERROR(dsi->dev, "Failed to enable grf_clk: %d\n", ret);
		goto err_grf_clk;
	}
	ret = clk_prepare_enable(dsi->phy_cfg_clk);
	if (ret) {
		DRM_DEV_ERROR(dsi->dev, "Failed to enable phy_cfg_clk: %d\n", ret);
		goto err_phy_cfg_clk;
	}
	/* do soc-variant specific init */
	if (dsi->cdata->dphy_rx_power_on) {
		ret = dsi->cdata->dphy_rx_power_on(phy);
		if (ret < 0) {
			DRM_DEV_ERROR(dsi->dev, "hardware-specific phy bringup failed: %d\n", ret);
			goto err_pwr_on;
		}
	}
	/*
	 * Configure hsfreqrange according to frequency values
	 * Set clock lane and hsfreqrange by lane0(test code 0x44)
	 */
	dw_mipi_dsi_phy_write(dsi, HS_RX_CONTROL_OF_LANE_CLK, 0);
	dw_mipi_dsi_phy_write(dsi, HS_RX_CONTROL_OF_LANE_0,
			      HSFREQRANGE_SEL(dppa_map[i].hsfreqrange));
	dw_mipi_dsi_phy_write(dsi, HS_RX_CONTROL_OF_LANE_1, 0);
	dw_mipi_dsi_phy_write(dsi, HS_RX_CONTROL_OF_LANE_2, 0);
	dw_mipi_dsi_phy_write(dsi, HS_RX_CONTROL_OF_LANE_3, 0);
	/* Normal operation */
	dw_mipi_dsi_phy_write(dsi, 0x0, 0);
	/* pclk stays enabled while powered; dropped in dphy_power_off() */
	clk_disable_unprepare(dsi->phy_cfg_clk);
	clk_disable_unprepare(dsi->grf_clk);
	return ret;
err_pwr_on:
	clk_disable_unprepare(dsi->phy_cfg_clk);
err_phy_cfg_clk:
	clk_disable_unprepare(dsi->grf_clk);
err_grf_clk:
	clk_disable_unprepare(dsi->pclk);
err_pclk:
	pm_runtime_put(dsi->dev);
	return ret;
}
/*
 * phy_ops .power_off: run the SoC-specific shutdown hook under the GRF
 * clock, then release the pclk enable and runtime-PM reference that
 * dw_mipi_dsi_dphy_power_on() left held across the powered period.
 */
static int dw_mipi_dsi_dphy_power_off(struct phy *phy)
{
	struct dw_mipi_dsi_rockchip *dsi = phy_get_drvdata(phy);
	int ret;
	ret = clk_prepare_enable(dsi->grf_clk);
	if (ret) {
		DRM_DEV_ERROR(dsi->dev, "Failed to enable grf_clk: %d\n", ret);
		return ret;
	}
	if (dsi->cdata->dphy_rx_power_off) {
		ret = dsi->cdata->dphy_rx_power_off(phy);
		if (ret < 0)
			/* best-effort: continue releasing resources anyway */
			DRM_DEV_ERROR(dsi->dev, "hardware-specific phy shutdown failed: %d\n", ret);
	}
	clk_disable_unprepare(dsi->grf_clk);
	/* matches the enable left pending by dphy_power_on() */
	clk_disable_unprepare(dsi->pclk);
	pm_runtime_put(dsi->dev);
	return ret;
}
/* Generic PHY operations exposed when the block serves as a D-PHY. */
static const struct phy_ops dw_mipi_dsi_dphy_ops = {
	.configure	= dw_mipi_dsi_dphy_configure,
	.power_on	= dw_mipi_dsi_dphy_power_on,
	.power_off	= dw_mipi_dsi_dphy_power_off,
	.init		= dw_mipi_dsi_dphy_init,
	.exit		= dw_mipi_dsi_dphy_exit,
};
/*
 * System resume: restore the GRF lane/enable configuration lost over
 * suspend, but only if bind() completed beforehand. This must run
 * before rockchip_drm_drv tries to re-enable() any panels.
 */
static int __maybe_unused dw_mipi_dsi_rockchip_resume(struct device *dev)
{
	struct dw_mipi_dsi_rockchip *dsi = dev_get_drvdata(dev);
	int ret;

	if (!dsi->dsi_bound)
		return 0;

	ret = clk_prepare_enable(dsi->grf_clk);
	if (ret) {
		DRM_DEV_ERROR(dsi->dev, "Failed to enable grf_clk: %d\n", ret);
		return ret;
	}

	dw_mipi_dsi_rockchip_config(dsi);
	if (dsi->slave)
		dw_mipi_dsi_rockchip_config(dsi->slave);

	clk_disable_unprepare(dsi->grf_clk);

	return 0;
}
/* Late resume only: GRF state must come back before panels re-enable. */
static const struct dev_pm_ops dw_mipi_dsi_rockchip_pm_ops = {
	SET_LATE_SYSTEM_SLEEP_PM_OPS(NULL, dw_mipi_dsi_rockchip_resume)
};
/*
 * Probe: map registers, match this instance against the per-SoC config
 * table by MMIO base address, acquire clocks and the GRF regmap, then
 * register the block both as a PHY provider and as a dw-mipi-dsi host.
 */
static int dw_mipi_dsi_rockchip_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	struct dw_mipi_dsi_rockchip *dsi;
	struct phy_provider *phy_provider;
	struct resource *res;
	const struct rockchip_dw_dsi_chip_data *cdata =
				of_device_get_match_data(dev);
	int ret, i;
	dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL);
	if (!dsi)
		return -ENOMEM;
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	dsi->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(dsi->base)) {
		DRM_DEV_ERROR(dev, "Unable to get dsi registers\n");
		return PTR_ERR(dsi->base);
	}
	/* find the chip-data row whose .reg matches our MMIO base */
	i = 0;
	while (cdata[i].reg) {
		if (cdata[i].reg == res->start) {
			dsi->cdata = &cdata[i];
			break;
		}
		i++;
	}
	if (!dsi->cdata) {
		DRM_DEV_ERROR(dev, "no dsi-config for %s node\n", np->name);
		return -EINVAL;
	}
	/* try to get a possible external dphy */
	dsi->phy = devm_phy_optional_get(dev, "dphy");
	if (IS_ERR(dsi->phy)) {
		ret = PTR_ERR(dsi->phy);
		DRM_DEV_ERROR(dev, "failed to get mipi dphy: %d\n", ret);
		return ret;
	}
	dsi->pclk = devm_clk_get(dev, "pclk");
	if (IS_ERR(dsi->pclk)) {
		ret = PTR_ERR(dsi->pclk);
		DRM_DEV_ERROR(dev, "Unable to get pclk: %d\n", ret);
		return ret;
	}
	dsi->pllref_clk = devm_clk_get(dev, "ref");
	if (IS_ERR(dsi->pllref_clk)) {
		if (dsi->phy) {
			/*
			 * if external phy is present, pll will be
			 * generated there.
			 */
			dsi->pllref_clk = NULL;
		} else {
			ret = PTR_ERR(dsi->pllref_clk);
			DRM_DEV_ERROR(dev,
				      "Unable to get pll reference clock: %d\n",
				      ret);
			return ret;
		}
	}
	/* optional clocks, required only by some SoC variants */
	if (dsi->cdata->flags & DW_MIPI_NEEDS_PHY_CFG_CLK) {
		dsi->phy_cfg_clk = devm_clk_get(dev, "phy_cfg");
		if (IS_ERR(dsi->phy_cfg_clk)) {
			ret = PTR_ERR(dsi->phy_cfg_clk);
			DRM_DEV_ERROR(dev,
				      "Unable to get phy_cfg_clk: %d\n", ret);
			return ret;
		}
	}
	if (dsi->cdata->flags & DW_MIPI_NEEDS_GRF_CLK) {
		dsi->grf_clk = devm_clk_get(dev, "grf");
		if (IS_ERR(dsi->grf_clk)) {
			ret = PTR_ERR(dsi->grf_clk);
			DRM_DEV_ERROR(dev, "Unable to get grf_clk: %d\n", ret);
			return ret;
		}
	}
	dsi->grf_regmap = syscon_regmap_lookup_by_phandle(np, "rockchip,grf");
	if (IS_ERR(dsi->grf_regmap)) {
		DRM_DEV_ERROR(dev, "Unable to get rockchip,grf\n");
		return PTR_ERR(dsi->grf_regmap);
	}
	dsi->dev = dev;
	dsi->pdata.base = dsi->base;
	dsi->pdata.max_data_lanes = dsi->cdata->max_data_lanes;
	dsi->pdata.phy_ops = &dw_mipi_dsi_rockchip_phy_ops;
	dsi->pdata.host_ops = &dw_mipi_dsi_rockchip_host_ops;
	dsi->pdata.priv_data = dsi;
	platform_set_drvdata(pdev, dsi);
	mutex_init(&dsi->usage_mutex);
	/* expose the block as a generic PHY as well */
	dsi->dphy = devm_phy_create(dev, NULL, &dw_mipi_dsi_dphy_ops);
	if (IS_ERR(dsi->dphy)) {
		DRM_DEV_ERROR(&pdev->dev, "failed to create PHY\n");
		return PTR_ERR(dsi->dphy);
	}
	phy_set_drvdata(dsi->dphy, dsi);
	phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
	if (IS_ERR(phy_provider))
		return PTR_ERR(phy_provider);
	dsi->dmd = dw_mipi_dsi_probe(pdev, &dsi->pdata);
	if (IS_ERR(dsi->dmd)) {
		ret = PTR_ERR(dsi->dmd);
		/* -EPROBE_DEFER is routine; don't spam the log for it */
		if (ret != -EPROBE_DEFER)
			DRM_DEV_ERROR(dev,
				      "Failed to probe dw_mipi_dsi: %d\n", ret);
		return ret;
	}
	return 0;
}
/* Platform remove: tear down the shared dw-mipi-dsi bridge instance. */
static void dw_mipi_dsi_rockchip_remove(struct platform_device *pdev)
{
	struct dw_mipi_dsi_rockchip *dsi = platform_get_drvdata(pdev);
	dw_mipi_dsi_remove(dsi->dmd);
}
/* PX30: one DSI at 0xff450000; VOP mux and lane config share PD_VO_CON1. */
static const struct rockchip_dw_dsi_chip_data px30_chip_data[] = {
	{
		.reg = 0xff450000,
		.lcdsel_grf_reg = PX30_GRF_PD_VO_CON1,
		.lcdsel_big = HIWORD_UPDATE(0, PX30_DSI_LCDC_SEL),
		.lcdsel_lit = HIWORD_UPDATE(PX30_DSI_LCDC_SEL,
					    PX30_DSI_LCDC_SEL),
		.lanecfg1_grf_reg = PX30_GRF_PD_VO_CON1,
		.lanecfg1 = HIWORD_UPDATE(0, PX30_DSI_TURNDISABLE |
					     PX30_DSI_FORCERXMODE |
					     PX30_DSI_FORCETXSTOPMODE),
		.max_data_lanes = 4,
	},
	{ /* sentinel */ }
};
/*
 * RK3288: two DSI controllers; only the CRTC mux needs GRF setup,
 * lane configuration registers are not present on this SoC.
 */
static const struct rockchip_dw_dsi_chip_data rk3288_chip_data[] = {
	{
		.reg = 0xff960000,
		.lcdsel_grf_reg = RK3288_GRF_SOC_CON6,
		.lcdsel_big = HIWORD_UPDATE(0, RK3288_DSI0_LCDC_SEL),
		.lcdsel_lit = HIWORD_UPDATE(RK3288_DSI0_LCDC_SEL, RK3288_DSI0_LCDC_SEL),
		.max_data_lanes = 4,
	},
	{
		.reg = 0xff964000,
		.lcdsel_grf_reg = RK3288_GRF_SOC_CON6,
		.lcdsel_big = HIWORD_UPDATE(0, RK3288_DSI1_LCDC_SEL),
		.lcdsel_lit = HIWORD_UPDATE(RK3288_DSI1_LCDC_SEL, RK3288_DSI1_LCDC_SEL),
		.max_data_lanes = 4,
	},
	{ /* sentinel */ }
};
/*
 * One-time GRF setup for using the RK3399 TX1RX1 D-PHY in RX mode.
 * Clears source-select/master-slave/basedir muxing and disables the DSI1
 * lanes until a consumer powers the PHY on.  Always returns 0.
 */
static int rk3399_dphy_tx1rx1_init(struct phy *phy)
{
	struct dw_mipi_dsi_rockchip *dsi = phy_get_drvdata(phy);
	/*
	 * Set TX1RX1 source to isp1.
	 * Assume ISP0 is supplied by the RX0 dphy.
	 */
	regmap_write(dsi->grf_regmap, RK3399_GRF_SOC_CON24,
		     HIWORD_UPDATE(0, RK3399_TXRX_SRC_SEL_ISP0));
	regmap_write(dsi->grf_regmap, RK3399_GRF_SOC_CON24,
		     HIWORD_UPDATE(0, RK3399_TXRX_MASTERSLAVEZ));
	regmap_write(dsi->grf_regmap, RK3399_GRF_SOC_CON24,
		     HIWORD_UPDATE(0, RK3399_TXRX_BASEDIR));
	regmap_write(dsi->grf_regmap, RK3399_GRF_SOC_CON23,
		     HIWORD_UPDATE(0, RK3399_DSI1_ENABLE));
	return 0;
}
/*
 * Power-on sequence for the TX1RX1 D-PHY in receive direction.
 * NOTE(review): the ordering and 100-150us settle delays follow the
 * vendor sequence; do not reorder the register writes.  Always returns 0.
 */
static int rk3399_dphy_tx1rx1_power_on(struct phy *phy)
{
	struct dw_mipi_dsi_rockchip *dsi = phy_get_drvdata(phy);
	/* tester reset pulse */
	dsi_write(dsi, DSI_PHY_TST_CTRL0, PHY_TESTCLK | PHY_TESTCLR);
	usleep_range(100, 150);
	/* Slave role, BASEDIR=1 selects receive direction for this PHY. */
	regmap_write(dsi->grf_regmap, RK3399_GRF_SOC_CON24,
		     HIWORD_UPDATE(0, RK3399_TXRX_MASTERSLAVEZ));
	regmap_write(dsi->grf_regmap, RK3399_GRF_SOC_CON24,
		     HIWORD_UPDATE(RK3399_TXRX_BASEDIR, RK3399_TXRX_BASEDIR));
	regmap_write(dsi->grf_regmap, RK3399_GRF_SOC_CON23,
		     HIWORD_UPDATE(0, RK3399_DSI1_FORCERXMODE));
	regmap_write(dsi->grf_regmap, RK3399_GRF_SOC_CON23,
		     HIWORD_UPDATE(0, RK3399_DSI1_FORCETXSTOPMODE));
	/* Disable lane turn around, which is ignored in receive mode */
	regmap_write(dsi->grf_regmap, RK3399_GRF_SOC_CON24,
		     HIWORD_UPDATE(0, RK3399_TXRX_TURNREQUEST));
	regmap_write(dsi->grf_regmap, RK3399_GRF_SOC_CON23,
		     HIWORD_UPDATE(RK3399_DSI1_TURNDISABLE,
				   RK3399_DSI1_TURNDISABLE));
	usleep_range(100, 150);
	/* Release the tester reset. */
	dsi_write(dsi, DSI_PHY_TST_CTRL0, PHY_TESTCLK | PHY_UNTESTCLR);
	usleep_range(100, 150);
	/* Enable dphy lanes */
	regmap_write(dsi->grf_regmap, RK3399_GRF_SOC_CON23,
		     HIWORD_UPDATE(GENMASK(dsi->dphy_config.lanes - 1, 0),
				   RK3399_DSI1_ENABLE));
	usleep_range(100, 150);
	return 0;
}
/* Power-off: disable all DSI1 lane enables in the GRF.  Always returns 0. */
static int rk3399_dphy_tx1rx1_power_off(struct phy *phy)
{
	struct dw_mipi_dsi_rockchip *dsi = phy_get_drvdata(phy);
	regmap_write(dsi->grf_regmap, RK3399_GRF_SOC_CON23,
		     HIWORD_UPDATE(0, RK3399_DSI1_ENABLE));
	return 0;
}
/*
 * RK3399: two DSI controllers.  The second instance can also act as a
 * D-PHY RX for the ISP, hence the extra lanecfg2/enable registers and
 * the dphy_rx_* callbacks.
 */
static const struct rockchip_dw_dsi_chip_data rk3399_chip_data[] = {
	{
		.reg = 0xff960000,
		.lcdsel_grf_reg = RK3399_GRF_SOC_CON20,
		.lcdsel_big = HIWORD_UPDATE(0, RK3399_DSI0_LCDC_SEL),
		.lcdsel_lit = HIWORD_UPDATE(RK3399_DSI0_LCDC_SEL,
					    RK3399_DSI0_LCDC_SEL),
		.lanecfg1_grf_reg = RK3399_GRF_SOC_CON22,
		.lanecfg1 = HIWORD_UPDATE(0, RK3399_DSI0_TURNREQUEST |
					     RK3399_DSI0_TURNDISABLE |
					     RK3399_DSI0_FORCETXSTOPMODE |
					     RK3399_DSI0_FORCERXMODE),
		.flags = DW_MIPI_NEEDS_PHY_CFG_CLK | DW_MIPI_NEEDS_GRF_CLK,
		.max_data_lanes = 4,
	},
	{
		.reg = 0xff968000,
		.lcdsel_grf_reg = RK3399_GRF_SOC_CON20,
		.lcdsel_big = HIWORD_UPDATE(0, RK3399_DSI1_LCDC_SEL),
		.lcdsel_lit = HIWORD_UPDATE(RK3399_DSI1_LCDC_SEL,
					    RK3399_DSI1_LCDC_SEL),
		.lanecfg1_grf_reg = RK3399_GRF_SOC_CON23,
		.lanecfg1 = HIWORD_UPDATE(0, RK3399_DSI1_TURNDISABLE |
					     RK3399_DSI1_FORCETXSTOPMODE |
					     RK3399_DSI1_FORCERXMODE |
					     RK3399_DSI1_ENABLE),
		.lanecfg2_grf_reg = RK3399_GRF_SOC_CON24,
		.lanecfg2 = HIWORD_UPDATE(RK3399_TXRX_MASTERSLAVEZ |
					  RK3399_TXRX_ENABLECLK,
					  RK3399_TXRX_MASTERSLAVEZ |
					  RK3399_TXRX_ENABLECLK |
					  RK3399_TXRX_BASEDIR),
		.enable_grf_reg = RK3399_GRF_SOC_CON23,
		.enable = HIWORD_UPDATE(RK3399_DSI1_ENABLE, RK3399_DSI1_ENABLE),
		.flags = DW_MIPI_NEEDS_PHY_CFG_CLK | DW_MIPI_NEEDS_GRF_CLK,
		.max_data_lanes = 4,
		.dphy_rx_init = rk3399_dphy_tx1rx1_init,
		.dphy_rx_power_on = rk3399_dphy_tx1rx1_power_on,
		.dphy_rx_power_off = rk3399_dphy_tx1rx1_power_off,
	},
	{ /* sentinel */ }
};
/*
 * RK3568: two DSI controllers; lane config only (SKEWCALHS added on this
 * generation), no lcdsel mux in the GRF.
 */
static const struct rockchip_dw_dsi_chip_data rk3568_chip_data[] = {
	{
		.reg = 0xfe060000,
		.lanecfg1_grf_reg = RK3568_GRF_VO_CON2,
		.lanecfg1 = HIWORD_UPDATE(0, RK3568_DSI0_SKEWCALHS |
					  RK3568_DSI0_FORCETXSTOPMODE |
					  RK3568_DSI0_TURNDISABLE |
					  RK3568_DSI0_FORCERXMODE),
		.max_data_lanes = 4,
	},
	{
		.reg = 0xfe070000,
		.lanecfg1_grf_reg = RK3568_GRF_VO_CON3,
		.lanecfg1 = HIWORD_UPDATE(0, RK3568_DSI1_SKEWCALHS |
					  RK3568_DSI1_FORCETXSTOPMODE |
					  RK3568_DSI1_TURNDISABLE |
					  RK3568_DSI1_FORCERXMODE),
		.max_data_lanes = 4,
	},
	{ /* sentinel */ }
};
/* DT match table: each compatible maps to its per-SoC chip-data array. */
static const struct of_device_id dw_mipi_dsi_rockchip_dt_ids[] = {
	{
	 .compatible = "rockchip,px30-mipi-dsi",
	 .data = &px30_chip_data,
	}, {
	 .compatible = "rockchip,rk3288-mipi-dsi",
	 .data = &rk3288_chip_data,
	}, {
	 .compatible = "rockchip,rk3399-mipi-dsi",
	 .data = &rk3399_chip_data,
	}, {
	 .compatible = "rockchip,rk3568-mipi-dsi",
	 .data = &rk3568_chip_data,
	},
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, dw_mipi_dsi_rockchip_dt_ids);
/* Platform driver; registered by the rockchip_drm core, hence non-static. */
struct platform_driver dw_mipi_dsi_rockchip_driver = {
	.probe		= dw_mipi_dsi_rockchip_probe,
	.remove_new	= dw_mipi_dsi_rockchip_remove,
	.driver		= {
		.of_match_table = dw_mipi_dsi_rockchip_dt_ids,
		.pm	= &dw_mipi_dsi_rockchip_pm_ops,
		.name	= "dw-mipi-dsi-rockchip",
		/*
		 * For dual-DSI display, one DSI pokes at the other DSI's
		 * drvdata in dw_mipi_dsi_rockchip_find_second(). This is not
		 * safe for asynchronous probe.
		 */
		.probe_type = PROBE_FORCE_SYNCHRONOUS,
	},
};
| linux-master | drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright © 2006-2011 Intel Corporation
*
* Authors:
* Eric Anholt <[email protected]>
*/
#include <linux/delay.h>
#include <linux/i2c.h>
#include <drm/drm_modeset_helper.h>
#include <drm/drm_modeset_helper_vtables.h>
#include "framebuffer.h"
#include "gem.h"
#include "gma_display.h"
#include "power.h"
#include "psb_drv.h"
#include "psb_intel_drv.h"
#include "psb_intel_reg.h"
#define INTEL_LIMIT_I9XX_SDVO_DAC 0
#define INTEL_LIMIT_I9XX_LVDS 1
/*
 * PLL parameter limits for the Poulsbo display clocks, indexed by the
 * INTEL_LIMIT_I9XX_* constants above.  Frequencies are in kHz.
 */
static const struct gma_limit_t psb_intel_limits[] = {
	{			/* INTEL_LIMIT_I9XX_SDVO_DAC */
	 .dot = {.min = 20000, .max = 400000},
	 .vco = {.min = 1400000, .max = 2800000},
	 .n = {.min = 1, .max = 6},
	 .m = {.min = 70, .max = 120},
	 .m1 = {.min = 8, .max = 18},
	 .m2 = {.min = 3, .max = 7},
	 .p = {.min = 5, .max = 80},
	 .p1 = {.min = 1, .max = 8},
	 .p2 = {.dot_limit = 200000, .p2_slow = 10, .p2_fast = 5},
	 .find_pll = gma_find_best_pll,
	 },
	{			/* INTEL_LIMIT_I9XX_LVDS */
	 .dot = {.min = 20000, .max = 400000},
	 .vco = {.min = 1400000, .max = 2800000},
	 .n = {.min = 1, .max = 6},
	 .m = {.min = 70, .max = 120},
	 .m1 = {.min = 8, .max = 18},
	 .m2 = {.min = 3, .max = 7},
	 .p = {.min = 7, .max = 98},
	 .p1 = {.min = 1, .max = 8},
	 /* The single-channel range is 25-112Mhz, and dual-channel
	  * is 80-224Mhz.  Prefer single channel as much as possible.
	  */
	 .p2 = {.dot_limit = 112000, .p2_slow = 14, .p2_fast = 7},
	 .find_pll = gma_find_best_pll,
	 },
};
/*
 * Pick the PLL limit table for @crtc: LVDS outputs use the LVDS limits,
 * everything else the SDVO/DAC limits.  @refclk is unused on Poulsbo.
 */
static const struct gma_limit_t *psb_intel_limit(struct drm_crtc *crtc,
						 int refclk)
{
	if (gma_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
		return &psb_intel_limits[INTEL_LIMIT_I9XX_LVDS];

	return &psb_intel_limits[INTEL_LIMIT_I9XX_SDVO_DAC];
}
/*
 * Derive the effective divider products and resulting clocks (in the same
 * units as @refclk, i.e. kHz) from the raw m1/m2/n/p1/p2 PLL fields.
 */
static void psb_intel_clock(int refclk, struct gma_clock_t *clock)
{
	clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
	clock->p = clock->p1 * clock->p2;
	clock->vco = refclk * clock->m / (clock->n + 2);
	clock->dot = clock->vco / clock->p;
}
/*
 * Return the pipe currently connected to the panel fitter,
 * or -1 if the panel fitter is not present or not in use
 */
static int psb_intel_panel_fitter_pipe(struct drm_device *dev)
{
	u32 pfit_control;
	pfit_control = REG_READ(PFIT_CONTROL);
	/* See if the panel fitter is in use */
	if ((pfit_control & PFIT_ENABLE) == 0)
		return -1;
	/* Must be on PIPE 1 for PSB */
	return 1;
}
/*
 * Program PLL, pipe timing and plane registers of @crtc for @adjusted_mode.
 *
 * Returns 0 in all cases: also when no framebuffer is attached (only the
 * scan-out base is updated) or when no valid PLL setting could be found
 * (an error is logged and the mode set is abandoned).
 */
static int psb_intel_crtc_mode_set(struct drm_crtc *crtc,
			       struct drm_display_mode *mode,
			       struct drm_display_mode *adjusted_mode,
			       int x, int y,
			       struct drm_framebuffer *old_fb)
{
	struct drm_device *dev = crtc->dev;
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
	const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
	int pipe = gma_crtc->pipe;
	const struct psb_offset *map = &dev_priv->regmap[pipe];
	int refclk;
	struct gma_clock_t clock;
	u32 dpll = 0, fp = 0, dspcntr, pipeconf;
	bool ok, is_sdvo = false;
	bool is_lvds = false, is_tv = false;
	struct drm_connector_list_iter conn_iter;
	struct drm_connector *connector;
	const struct gma_limit_t *limit;
	/* No scan out no play */
	if (crtc->primary->fb == NULL) {
		crtc_funcs->mode_set_base(crtc, x, y, old_fb);
		return 0;
	}
	/* Classify the (single) output attached to this CRTC. */
	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
		if (!connector->encoder
		    || connector->encoder->crtc != crtc)
			continue;
		switch (gma_encoder->type) {
		case INTEL_OUTPUT_LVDS:
			is_lvds = true;
			break;
		case INTEL_OUTPUT_SDVO:
			is_sdvo = true;
			break;
		case INTEL_OUTPUT_TVOUT:
			is_tv = true;
			break;
		}
		break;
	}
	drm_connector_list_iter_end(&conn_iter);
	/* Fixed 96 MHz reference clock on this hardware. */
	refclk = 96000;
	limit = gma_crtc->clock_funcs->limit(crtc, refclk);
	ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk,
			     &clock);
	if (!ok) {
		DRM_ERROR("Couldn't find PLL settings for mode! target: %d, actual: %d",
			  adjusted_mode->clock, clock.dot);
		return 0;
	}
	fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
	dpll = DPLL_VGA_MODE_DIS;
	if (is_lvds) {
		dpll |= DPLLB_MODE_LVDS;
		dpll |= DPLL_DVO_HIGH_SPEED;
	} else
		dpll |= DPLLB_MODE_DAC_SERIAL;
	if (is_sdvo) {
		int sdvo_pixel_multiply =
		    adjusted_mode->clock / mode->clock;
		dpll |= DPLL_DVO_HIGH_SPEED;
		dpll |=
		    (sdvo_pixel_multiply - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
	}
	/* compute bitmask from p1 value */
	dpll |= (1 << (clock.p1 - 1)) << 16;
	switch (clock.p2) {
	case 5:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
		break;
	case 7:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
		break;
	case 10:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
		break;
	case 14:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
		break;
	}
	if (is_tv) {
		/* XXX: just matching BIOS for now */
/*	dpll |= PLL_REF_INPUT_TVCLKINBC; */
		dpll |= 3;
	}
	dpll |= PLL_REF_INPUT_DREFCLK;
	/* setup pipeconf */
	pipeconf = REG_READ(map->conf);
	/* Set up the display plane register */
	dspcntr = DISPPLANE_GAMMA_ENABLE;
	if (pipe == 0)
		dspcntr |= DISPPLANE_SEL_PIPE_A;
	else
		dspcntr |= DISPPLANE_SEL_PIPE_B;
	dspcntr |= DISPLAY_PLANE_ENABLE;
	pipeconf |= PIPEACONF_ENABLE;
	dpll |= DPLL_VCO_ENABLE;
	/* Disable the panel fitter if it was on our pipe */
	if (psb_intel_panel_fitter_pipe(dev) == pipe)
		REG_WRITE(PFIT_CONTROL, 0);
	drm_mode_debug_printmodeline(mode);
	/* Write the dividers first with VCO disabled, then let it settle. */
	if (dpll & DPLL_VCO_ENABLE) {
		REG_WRITE(map->fp0, fp);
		REG_WRITE(map->dpll, dpll & ~DPLL_VCO_ENABLE);
		REG_READ(map->dpll);
		udelay(150);
	}
	/* The LVDS pin pair needs to be on before the DPLLs are enabled.
	 * This is an exception to the general rule that mode_set doesn't turn
	 * things on.
	 */
	if (is_lvds) {
		u32 lvds = REG_READ(LVDS);
		lvds &= ~LVDS_PIPEB_SELECT;
		if (pipe == 1)
			lvds |= LVDS_PIPEB_SELECT;
		lvds |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
		/* Set the B0-B3 data pairs corresponding to
		 * whether we're going to
		 * set the DPLLs for dual-channel mode or not.
		 */
		lvds &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
		if (clock.p2 == 7)
			lvds |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
		/* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
		 * appropriately here, but we need to look more
		 * thoroughly into how panels behave in the two modes.
		 */
		REG_WRITE(LVDS, lvds);
		REG_READ(LVDS);
	}
	REG_WRITE(map->fp0, fp);
	REG_WRITE(map->dpll, dpll);
	REG_READ(map->dpll);
	/* Wait for the clocks to stabilize. */
	udelay(150);
	/* write it again -- the BIOS does, after all */
	REG_WRITE(map->dpll, dpll);
	REG_READ(map->dpll);
	/* Wait for the clocks to stabilize. */
	udelay(150);
	/* Program horizontal/vertical timings (hw values are 0-based). */
	REG_WRITE(map->htotal, (adjusted_mode->crtc_hdisplay - 1) |
		  ((adjusted_mode->crtc_htotal - 1) << 16));
	REG_WRITE(map->hblank, (adjusted_mode->crtc_hblank_start - 1) |
		  ((adjusted_mode->crtc_hblank_end - 1) << 16));
	REG_WRITE(map->hsync, (adjusted_mode->crtc_hsync_start - 1) |
		  ((adjusted_mode->crtc_hsync_end - 1) << 16));
	REG_WRITE(map->vtotal, (adjusted_mode->crtc_vdisplay - 1) |
		  ((adjusted_mode->crtc_vtotal - 1) << 16));
	REG_WRITE(map->vblank, (adjusted_mode->crtc_vblank_start - 1) |
		  ((adjusted_mode->crtc_vblank_end - 1) << 16));
	REG_WRITE(map->vsync, (adjusted_mode->crtc_vsync_start - 1) |
		  ((adjusted_mode->crtc_vsync_end - 1) << 16));
	/* pipesrc and dspsize control the size that is scaled from,
	 * which should always be the user's requested size.
	 */
	REG_WRITE(map->size,
		  ((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1));
	REG_WRITE(map->pos, 0);
	REG_WRITE(map->src,
		  ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
	REG_WRITE(map->conf, pipeconf);
	REG_READ(map->conf);
	gma_wait_for_vblank(dev);
	REG_WRITE(map->cntr, dspcntr);
	/* Flush the plane changes */
	crtc_funcs->mode_set_base(crtc, x, y, old_fb);
	gma_wait_for_vblank(dev);
	return 0;
}
/* Returns the clock of the currently programmed mode of the given pipe. */
static int psb_intel_crtc_clock_get(struct drm_device *dev,
				struct drm_crtc *crtc)
{
	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	int pipe = gma_crtc->pipe;
	const struct psb_offset *map = &dev_priv->regmap[pipe];
	u32 dpll;
	u32 fp;
	struct gma_clock_t clock;
	bool is_lvds;
	struct psb_pipe *p = &dev_priv->regs.pipe[pipe];
	/* Read live registers if the device is powered, else saved state. */
	if (gma_power_begin(dev, false)) {
		dpll = REG_READ(map->dpll);
		if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
			fp = REG_READ(map->fp0);
		else
			fp = REG_READ(map->fp1);
		is_lvds = (pipe == 1) && (REG_READ(LVDS) & LVDS_PORT_EN);
		gma_power_end(dev);
	} else {
		dpll = p->dpll;
		if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
			fp = p->fp0;
		else
			fp = p->fp1;
		is_lvds = (pipe == 1) && (dev_priv->regs.psb.saveLVDS &
					  LVDS_PORT_EN);
	}
	/* Decode the raw divider fields back out of the FP register. */
	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
	clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
	clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
	if (is_lvds) {
		clock.p1 =
		    ffs((dpll &
			 DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
			DPLL_FPA01_P1_POST_DIV_SHIFT);
		clock.p2 = 14;
		if ((dpll & PLL_REF_INPUT_MASK) ==
		    PLLB_REF_INPUT_SPREADSPECTRUMIN) {
			/* XXX: might not be 66MHz */
			psb_intel_clock(66000, &clock);
		} else
			psb_intel_clock(48000, &clock);
	} else {
		if (dpll & PLL_P1_DIVIDE_BY_TWO)
			clock.p1 = 2;
		else {
			clock.p1 =
			    ((dpll &
			      DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
			     DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
		}
		if (dpll & PLL_P2_DIVIDE_BY_4)
			clock.p2 = 4;
		else
			clock.p2 = 2;
		psb_intel_clock(48000, &clock);
	}
	/* XXX: It would be nice to validate the clocks, but we can't reuse
	 * i830PllIsValid() because it relies on the xf86_config connector
	 * configuration being accurate, which it isn't necessarily.
	 */
	return clock.dot;
}
/** Returns the currently programmed mode of the given pipe.
 *
 * The returned mode is kzalloc'd; the caller owns it and must free it.
 * Returns NULL on allocation failure.
 */
struct drm_display_mode *psb_intel_crtc_mode_get(struct drm_device *dev,
					     struct drm_crtc *crtc)
{
	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
	int pipe = gma_crtc->pipe;
	struct drm_display_mode *mode;
	int htot;
	int hsync;
	int vtot;
	int vsync;
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct psb_pipe *p = &dev_priv->regs.pipe[pipe];
	const struct psb_offset *map = &dev_priv->regmap[pipe];
	/* Read live timing registers if powered, else the saved copies. */
	if (gma_power_begin(dev, false)) {
		htot = REG_READ(map->htotal);
		hsync = REG_READ(map->hsync);
		vtot = REG_READ(map->vtotal);
		vsync = REG_READ(map->vsync);
		gma_power_end(dev);
	} else {
		htot = p->htotal;
		hsync = p->hsync;
		vtot = p->vtotal;
		vsync = p->vsync;
	}
	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
	if (!mode)
		return NULL;
	/* Hardware stores 0-based values; convert back to 1-based. */
	mode->clock = psb_intel_crtc_clock_get(dev, crtc);
	mode->hdisplay = (htot & 0xffff) + 1;
	mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
	mode->hsync_start = (hsync & 0xffff) + 1;
	mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
	mode->vdisplay = (vtot & 0xffff) + 1;
	mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
	mode->vsync_start = (vsync & 0xffff) + 1;
	mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;
	drm_mode_set_name(mode);
	drm_mode_set_crtcinfo(mode, 0);
	return mode;
}
/* CRTC helper vtable for Poulsbo; mode_set is the only chip-specific hook. */
const struct drm_crtc_helper_funcs psb_intel_helper_funcs = {
	.dpms = gma_crtc_dpms,
	.mode_set = psb_intel_crtc_mode_set,
	.mode_set_base = gma_pipe_set_base,
	.prepare = gma_crtc_prepare,
	.commit = gma_crtc_commit,
	.disable = gma_crtc_disable,
};
/* PLL calculation hooks installed into dev_priv->ops for Poulsbo. */
const struct gma_clock_funcs psb_clock_funcs = {
	.clock = psb_intel_clock,
	.limit = psb_intel_limit,
	.pll_is_valid = gma_pll_is_valid,
};
/*
 * Set the default value of cursor control and base register
 * to zero. This is a workaround for h/w defect on Oaktrail.
 *
 * Also pre-allocates stolen memory for the hardware cursor when the
 * chip requires a physical cursor address; allocation failure is
 * tolerated (cursor_pobj stays NULL) and only disables the hw cursor.
 */
static void psb_intel_cursor_init(struct drm_device *dev,
				  struct gma_crtc *gma_crtc)
{
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	u32 control[3] = { CURACNTR, CURBCNTR, CURCCNTR };
	u32 base[3] = { CURABASE, CURBBASE, CURCBASE };
	struct psb_gem_object *cursor_pobj;
	if (dev_priv->ops->cursor_needs_phys) {
		/* Allocate 4 pages of stolen mem for a hardware cursor. That
		 * is enough for the 64 x 64 ARGB cursors we support.
		 */
		cursor_pobj = psb_gem_create(dev, 4 * PAGE_SIZE, "cursor", true, PAGE_SIZE);
		if (IS_ERR(cursor_pobj)) {
			gma_crtc->cursor_pobj = NULL;
			goto out;
		}
		gma_crtc->cursor_pobj = cursor_pobj;
		gma_crtc->cursor_addr = dev_priv->stolen_base + cursor_pobj->offset;
	} else {
		gma_crtc->cursor_pobj = NULL;
	}
out:
	REG_WRITE(control[gma_crtc->pipe], 0);
	REG_WRITE(base[gma_crtc->pipe], 0);
}
/*
 * Allocate and register one CRTC for @pipe, wiring up the chip-specific
 * clock/helper vtables and the pipe/plane lookup tables.  Allocation
 * failures are silent (the function simply returns without a CRTC).
 */
void psb_intel_crtc_init(struct drm_device *dev, int pipe,
		     struct psb_intel_mode_device *mode_dev)
{
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct gma_crtc *gma_crtc;
	int i;
	/* We allocate a extra array of drm_connector pointers
	 * for fbdev after the crtc */
	gma_crtc = kzalloc(sizeof(struct gma_crtc) +
			(INTELFB_CONN_LIMIT * sizeof(struct drm_connector *)),
			GFP_KERNEL);
	if (gma_crtc == NULL)
		return;
	gma_crtc->crtc_state =
		kzalloc(sizeof(struct psb_intel_crtc_state), GFP_KERNEL);
	if (!gma_crtc->crtc_state) {
		dev_err(dev->dev, "Crtc state error: No memory\n");
		kfree(gma_crtc);
		return;
	}
	drm_crtc_init(dev, &gma_crtc->base, &gma_crtc_funcs);
	/* Set the CRTC clock functions from chip specific data */
	gma_crtc->clock_funcs = dev_priv->ops->clock_funcs;
	drm_mode_crtc_set_gamma_size(&gma_crtc->base, 256);
	gma_crtc->pipe = pipe;
	gma_crtc->plane = pipe;
	for (i = 0; i < 256; i++)
		gma_crtc->lut_adj[i] = 0;
	gma_crtc->mode_dev = mode_dev;
	gma_crtc->cursor_addr = 0;
	drm_crtc_helper_add(&gma_crtc->base,
			    dev_priv->ops->crtc_helper);
	/* Setup the array of drm_connector pointer array */
	gma_crtc->mode_set.crtc = &gma_crtc->base;
	BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
	       dev_priv->plane_to_crtc_mapping[gma_crtc->plane] != NULL);
	dev_priv->plane_to_crtc_mapping[gma_crtc->plane] = &gma_crtc->base;
	dev_priv->pipe_to_crtc_mapping[gma_crtc->pipe] = &gma_crtc->base;
	/* Connector pointer storage lives directly after the crtc struct. */
	gma_crtc->mode_set.connectors = (struct drm_connector **)(gma_crtc + 1);
	gma_crtc->mode_set.num_connectors = 0;
	psb_intel_cursor_init(dev, gma_crtc);
	/* Set to true so that the pipe is forced off on initial config. */
	gma_crtc->active = true;
}
/* Look up the CRTC that drives @pipe; NULL if none matches. */
struct drm_crtc *psb_intel_get_crtc_from_pipe(struct drm_device *dev, int pipe)
{
	struct drm_crtc *tmp;

	list_for_each_entry(tmp, &dev->mode_config.crtc_list, head) {
		if (to_gma_crtc(tmp)->pipe == pipe)
			return tmp;
	}

	return NULL;
}
/*
 * Build a bitmask of connector list positions whose encoder type is in
 * @type_mask; used to fill in possible_clones for encoders.
 */
int gma_connector_clones(struct drm_device *dev, int type_mask)
{
	struct drm_connector_list_iter conn_iter;
	struct drm_connector *connector;
	int index_mask = 0;
	int entry = 0;
	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
		if (type_mask & (1 << gma_encoder->type))
			index_mask |= (1 << entry);
		entry++;
	}
	drm_connector_list_iter_end(&conn_iter);
	return index_mask;
}
| linux-master | drivers/gpu/drm/gma500/psb_intel_display.c |
/*
* Copyright © 2012 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Authors:
* Keith Packard <[email protected]>
*
*/
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <drm/display/drm_dp_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_simple_kms_helper.h>
#include "gma_display.h"
#include "psb_drv.h"
#include "psb_intel_drv.h"
#include "psb_intel_reg.h"
/**
 * struct i2c_algo_dp_aux_data - driver interface structure for i2c over dp
 * 				 aux algorithm
 * @running: set by the algo indicating whether an i2c is ongoing or whether
 * 	     the i2c bus is quiescent
 * @address: i2c target address for the currently ongoing transfer
 * @aux_ch: driver callback to transfer a single byte of the i2c payload;
 *	    @mode is a MODE_I2C_* start/read/write/stop combination
 */
struct i2c_algo_dp_aux_data {
	bool running;
	u16 address;
	int (*aux_ch) (struct i2c_adapter *adapter,
		       int mode, uint8_t write_byte,
		       uint8_t *read_byte);
};
/* Run a single AUX_CH I2C transaction, writing/reading data as necessary */
static int
i2c_algo_dp_aux_transaction(struct i2c_adapter *adapter, int mode,
			    uint8_t write_byte, uint8_t *read_byte)
{
	struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;

	/* Delegate the raw byte transfer to the driver-supplied callback. */
	return algo_data->aux_ch(adapter, mode, write_byte, read_byte);
}
/*
* I2C over AUX CH
*/
/*
 * Send the target address.  If an I2C transfer is already running this
 * 'restarts' the connection at the new address, which is how a DDC-style
 * write-followed-by-read is expressed over the AUX channel.
 */
static int
i2c_algo_dp_aux_address(struct i2c_adapter *adapter, u16 address, bool reading)
{
	struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
	int mode = MODE_I2C_START |
		   (reading ? MODE_I2C_READ : MODE_I2C_WRITE);

	algo_data->address = address;
	algo_data->running = true;
	return i2c_algo_dp_aux_transaction(adapter, mode, 0, NULL);
}
/*
 * Stop the I2C transaction.  Sends a bare address packet with the MOT bit
 * turned off; a no-op when no transfer is in flight.
 */
static void
i2c_algo_dp_aux_stop(struct i2c_adapter *adapter, bool reading)
{
	struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
	int mode = MODE_I2C_STOP |
		   (reading ? MODE_I2C_READ : MODE_I2C_WRITE);

	if (!algo_data->running)
		return;

	(void) i2c_algo_dp_aux_transaction(adapter, mode, 0, NULL);
	algo_data->running = false;
}
/*
 * Write a single byte to the current I2C address.  The I2C link must be
 * running (an address has been sent), otherwise -EIO is returned.
 */
static int
i2c_algo_dp_aux_put_byte(struct i2c_adapter *adapter, u8 byte)
{
	struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;

	return algo_data->running ?
		i2c_algo_dp_aux_transaction(adapter, MODE_I2C_WRITE, byte, NULL) :
		-EIO;
}
/*
 * Read a single byte from the current I2C address.  The I2C link must be
 * running (an address has been sent), otherwise -EIO is returned.
 */
static int
i2c_algo_dp_aux_get_byte(struct i2c_adapter *adapter, u8 *byte_ret)
{
	struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;

	return algo_data->running ?
		i2c_algo_dp_aux_transaction(adapter, MODE_I2C_READ, 0, byte_ret) :
		-EIO;
}
/*
 * i2c_algorithm.master_xfer implementation: run @num I2C messages over the
 * AUX channel as address/byte/stop transactions.  Returns @num on success
 * or the first negative error; the bus is always stopped on exit.
 */
static int
i2c_algo_dp_aux_xfer(struct i2c_adapter *adapter,
		     struct i2c_msg *msgs,
		     int num)
{
	int ret = 0;
	bool reading = false;
	int m;
	int b;
	for (m = 0; m < num; m++) {
		u16 len = msgs[m].len;
		u8 *buf = msgs[m].buf;
		reading = (msgs[m].flags & I2C_M_RD) != 0;
		/* (Re)start at this message's address, then move the bytes. */
		ret = i2c_algo_dp_aux_address(adapter, msgs[m].addr, reading);
		if (ret < 0)
			break;
		if (reading) {
			for (b = 0; b < len; b++) {
				ret = i2c_algo_dp_aux_get_byte(adapter, &buf[b]);
				if (ret < 0)
					break;
			}
		} else {
			for (b = 0; b < len; b++) {
				ret = i2c_algo_dp_aux_put_byte(adapter, buf[b]);
				if (ret < 0)
					break;
			}
		}
		if (ret < 0)
			break;
	}
	if (ret >= 0)
		ret = num;
	i2c_algo_dp_aux_stop(adapter, reading);
	DRM_DEBUG_KMS("dp_aux_xfer return %d\n", ret);
	return ret;
}
/* Advertise the adapter's capabilities to the I2C core. */
static u32
i2c_algo_dp_aux_functionality(struct i2c_adapter *adapter)
{
	u32 func = I2C_FUNC_I2C;

	func |= I2C_FUNC_SMBUS_EMUL;
	func |= I2C_FUNC_SMBUS_READ_BLOCK_DATA;
	func |= I2C_FUNC_SMBUS_BLOCK_PROC_CALL;
	func |= I2C_FUNC_10BIT_ADDR;
	return func;
}
/* I2C algorithm vtable installed on the AUX-channel adapter. */
static const struct i2c_algorithm i2c_dp_aux_algo = {
	.master_xfer	= i2c_algo_dp_aux_xfer,
	.functionality	= i2c_algo_dp_aux_functionality,
};
/* Quiesce the AUX bus: send a bare write address then an immediate stop. */
static void
i2c_dp_aux_reset_bus(struct i2c_adapter *adapter)
{
	(void) i2c_algo_dp_aux_address(adapter, 0, false);
	(void) i2c_algo_dp_aux_stop(adapter, false);
}
/* Attach the AUX algorithm to @adapter and reset the bus.  Always returns 0. */
static int
i2c_dp_aux_prepare_bus(struct i2c_adapter *adapter)
{
	adapter->algo = &i2c_dp_aux_algo;
	adapter->retries = 3;
	i2c_dp_aux_reset_bus(adapter);
	return 0;
}
/*
 * FIXME: This is the old dp aux helper, gma500 is the last driver that needs to
 * be ported over to the new helper code in drm_dp_helper.c like i915 or radeon.
 */
static int
i2c_dp_aux_add_bus(struct i2c_adapter *adapter)
{
	int error = i2c_dp_aux_prepare_bus(adapter);

	if (error)
		return error;

	/* Register the prepared AUX adapter with the I2C core. */
	return i2c_add_adapter(adapter);
}
/*
 * Poll COND for up to MS milliseconds, sleeping W ms between checks
 * (busy-wait when W is 0 or when running under the kernel debugger).
 * Evaluates to 0 on success or -ETIMEDOUT.
 */
#define _wait_for(COND, MS, W) ({ \
	unsigned long timeout__ = jiffies + msecs_to_jiffies(MS);	\
	int ret__ = 0;							\
	while (! (COND)) {						\
		if (time_after(jiffies, timeout__)) {			\
			ret__ = -ETIMEDOUT;				\
			break;						\
		}							\
		if (W && !in_dbg_master()) msleep(W);		\
	}								\
	ret__;								\
})
#define wait_for(COND, MS) _wait_for(COND, MS, 1)
#define DP_LINK_CHECK_TIMEOUT (10 * 1000)
#define DP_LINK_CONFIGURATION_SIZE 9
#define CDV_FAST_LINK_TRAIN 1
/* Per-port DisplayPort state hung off gma_encoder->dev_priv. */
struct cdv_intel_dp {
	uint32_t output_reg;		/* DP port control register offset */
	uint32_t DP;			/* cached value of the port register */
	uint8_t  link_configuration[DP_LINK_CONFIGURATION_SIZE];
	bool has_audio;
	int force_audio;
	uint32_t color_range;
	uint8_t link_bw;		/* negotiated DPCD link-rate code */
	uint8_t lane_count;		/* negotiated lane count */
	uint8_t dpcd[4];		/* first DPCD receiver-capability bytes */
	struct gma_encoder *encoder;
	struct i2c_adapter adapter;	/* I2C-over-AUX adapter */
	struct i2c_algo_dp_aux_data algo;
	uint8_t train_set[4];		/* per-lane voltage/pre-emphasis */
	uint8_t link_status[DP_LINK_STATUS_SIZE];
	/* eDP panel power/backlight sequencing delays, in ms */
	int panel_power_up_delay;
	int panel_power_down_delay;
	int panel_power_cycle_delay;
	int backlight_on_delay;
	int backlight_off_delay;
	struct drm_display_mode *panel_fixed_mode;  /* for eDP */
	bool panel_on;			/* tracks eDP panel power state */
};
/*
 * Register offsets for one DDI port's pre-emphasis/voltage-swing tuning
 * registers, used during DP link training.
 */
struct ddi_regoff {
	uint32_t	PreEmph1;
	uint32_t	PreEmph2;
	uint32_t	VSwing1;
	uint32_t	VSwing2;
	uint32_t	VSwing3;
	uint32_t	VSwing4;
	uint32_t	VSwing5;
};
/* Training register offsets, one entry per DDI port (port B, port C). */
static struct ddi_regoff ddi_DP_train_table[] = {
	{.PreEmph1 = 0x812c, .PreEmph2 = 0x8124, .VSwing1 = 0x8154,
	.VSwing2 = 0x8148, .VSwing3 = 0x814C, .VSwing4 = 0x8150,
	.VSwing5 = 0x8158,},
	{.PreEmph1 = 0x822c, .PreEmph2 = 0x8224, .VSwing1 = 0x8254,
	.VSwing2 = 0x8248, .VSwing3 = 0x824C, .VSwing4 = 0x8250,
	.VSwing5 = 0x8258,},
};
/*
 * Voltage-swing / pre-emphasis register value pairs, consumed in pairs
 * during link training.  NOTE(review): exact field meaning comes from the
 * vendor tuning tables — confirm against the CDV programming docs.
 */
static uint32_t dp_vswing_premph_table[] = {
	0x55338954,	0x4000,
	0x554d8954,	0x2000,
	0x55668954,	0,
	0x559ac0d4,	0x6000,
};
/**
 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
 * @encoder: GMA encoder struct
 *
 * If a CPU or PCH DP output is attached to an eDP panel, this function
 * will return true, and false otherwise.
 */
static bool is_edp(struct gma_encoder *encoder)
{
	return encoder->type == INTEL_OUTPUT_EDP;
}
static void cdv_intel_dp_start_link_train(struct gma_encoder *encoder);
static void cdv_intel_dp_complete_link_train(struct gma_encoder *encoder);
static void cdv_intel_dp_link_down(struct gma_encoder *encoder);
/*
 * Return the usable lane count: the sink-reported DPCD value clamped to
 * the legal set {1, 2, 4}; defaults to 4 for pre-1.1 sinks or bogus values.
 */
static int
cdv_intel_dp_max_lane_count(struct gma_encoder *encoder)
{
	struct cdv_intel_dp *intel_dp = encoder->dev_priv;

	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
		int lanes = intel_dp->dpcd[DP_MAX_LANE_COUNT] & 0x1f;

		if (lanes == 1 || lanes == 2 || lanes == 4)
			return lanes;
	}
	return 4;
}
/*
 * Return the sink's maximum link-rate code, falling back to 1.62 GHz
 * (DP_LINK_BW_1_62) for anything unrecognised.
 */
static int
cdv_intel_dp_max_link_bw(struct gma_encoder *encoder)
{
	struct cdv_intel_dp *intel_dp = encoder->dev_priv;

	switch (intel_dp->dpcd[DP_MAX_LINK_RATE]) {
	case DP_LINK_BW_2_7:
		return DP_LINK_BW_2_7;
	case DP_LINK_BW_1_62:
	default:
		return DP_LINK_BW_1_62;
	}
}
/* Translate a DPCD link-rate code into a link clock in kHz. */
static int
cdv_intel_dp_link_clock(uint8_t link_bw)
{
	return (link_bw == DP_LINK_BW_2_7) ? 270000 : 162000;
}
/*
 * Bandwidth required for @pixel_clock (kHz) at @bpp bits per pixel,
 * rounded up to whole bytes.
 */
static int
cdv_intel_dp_link_required(int pixel_clock, int bpp)
{
	int bits = pixel_clock * bpp;

	return (bits + 7) / 8;
}
/*
 * Maximum payload rate for a link: raw rate (clock * lanes) derated
 * by 5% (multiplied by 19/20) for protocol overhead.
 */
static int
cdv_intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	int raw = max_link_clock * max_lanes;

	return raw * 19 / 20;
}
/*
 * Force panel VDD on so AUX/DPCD access works before the panel itself is
 * powered; skipped when the panel is already on.  Sleeps for the panel's
 * power-up delay after asserting VDD.
 */
static void cdv_intel_edp_panel_vdd_on(struct gma_encoder *intel_encoder)
{
	struct drm_device *dev = intel_encoder->base.dev;
	struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv;
	u32 pp;
	if (intel_dp->panel_on) {
		DRM_DEBUG_KMS("Skip VDD on because of panel on\n");
		return;
	}
	DRM_DEBUG_KMS("\n");
	pp = REG_READ(PP_CONTROL);
	pp |= EDP_FORCE_VDD;
	REG_WRITE(PP_CONTROL, pp);
	REG_READ(PP_CONTROL);
	msleep(intel_dp->panel_power_up_delay);
}
/* Release the forced panel VDD asserted by cdv_intel_edp_panel_vdd_on(). */
static void cdv_intel_edp_panel_vdd_off(struct gma_encoder *intel_encoder)
{
	struct drm_device *dev = intel_encoder->base.dev;
	u32 pp;
	DRM_DEBUG_KMS("\n");
	pp = REG_READ(PP_CONTROL);
	pp &= ~EDP_FORCE_VDD;
	REG_WRITE(PP_CONTROL, pp);
	REG_READ(PP_CONTROL);
}
/* Returns true if the panel was already on when called */
static bool cdv_intel_edp_panel_on(struct gma_encoder *intel_encoder)
{
	struct drm_device *dev = intel_encoder->base.dev;
	struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv;
	u32 pp, idle_on_mask = PP_ON | PP_SEQUENCE_NONE;
	if (intel_dp->panel_on)
		return true;
	DRM_DEBUG_KMS("\n");
	/* Unlock the power-sequencer registers and request power on. */
	pp = REG_READ(PP_CONTROL);
	pp &= ~PANEL_UNLOCK_MASK;
	pp |= (PANEL_UNLOCK_REGS | POWER_TARGET_ON);
	REG_WRITE(PP_CONTROL, pp);
	REG_READ(PP_CONTROL);
	/* Wait up to 1s for the sequencer to report the panel fully on. */
	if (wait_for(((REG_READ(PP_STATUS) & idle_on_mask) == idle_on_mask), 1000)) {
		DRM_DEBUG_KMS("Error in Powering up eDP panel, status %x\n", REG_READ(PP_STATUS));
		intel_dp->panel_on = false;
	} else
		intel_dp->panel_on = true;
	msleep(intel_dp->panel_power_up_delay);
	return false;
}
/*
 * Power the eDP panel down: clear power target, forced VDD and backlight
 * enable in one write, then wait for the sequencer to report off and
 * honour the panel's power-cycle delay.
 */
static void cdv_intel_edp_panel_off (struct gma_encoder *intel_encoder)
{
	struct drm_device *dev = intel_encoder->base.dev;
	u32 pp, idle_off_mask = PP_ON ;
	struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv;
	DRM_DEBUG_KMS("\n");
	pp = REG_READ(PP_CONTROL);
	if ((pp & POWER_TARGET_ON) == 0)
		return;
	intel_dp->panel_on = false;
	pp &= ~PANEL_UNLOCK_MASK;
	/* ILK workaround: disable reset around power sequence */
	pp &= ~POWER_TARGET_ON;
	pp &= ~EDP_FORCE_VDD;
	pp &= ~EDP_BLC_ENABLE;
	REG_WRITE(PP_CONTROL, pp);
	REG_READ(PP_CONTROL);
	DRM_DEBUG_KMS("PP_STATUS %x\n", REG_READ(PP_STATUS));
	if (wait_for((REG_READ(PP_STATUS) & idle_off_mask) == 0, 1000)) {
		DRM_DEBUG_KMS("Error in turning off Panel\n");
	}
	msleep(intel_dp->panel_power_cycle_delay);
	DRM_DEBUG_KMS("Over\n");
}
/* Enable the eDP backlight (sequencer bit plus the gma backlight device). */
static void cdv_intel_edp_backlight_on (struct gma_encoder *intel_encoder)
{
	struct drm_device *dev = intel_encoder->base.dev;
	u32 pp;
	DRM_DEBUG_KMS("\n");
	/*
	 * If we enable the backlight right away following a panel power
	 * on, we may see slight flicker as the panel syncs with the eDP
	 * link.  So delay a bit to make sure the image is solid before
	 * allowing it to appear.
	 */
	msleep(300);
	pp = REG_READ(PP_CONTROL);
	pp |= EDP_BLC_ENABLE;
	REG_WRITE(PP_CONTROL, pp);
	gma_backlight_enable(dev);
}
/* Disable the eDP backlight, then honour the panel's backlight-off delay. */
static void cdv_intel_edp_backlight_off (struct gma_encoder *intel_encoder)
{
	struct drm_device *dev = intel_encoder->base.dev;
	struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv;
	u32 pp;
	DRM_DEBUG_KMS("\n");
	gma_backlight_disable(dev);
	msleep(10);
	pp = REG_READ(PP_CONTROL);
	pp &= ~EDP_BLC_ENABLE;
	REG_WRITE(PP_CONTROL, pp);
	msleep(intel_dp->backlight_off_delay);
}
/*
 * Validate a candidate mode against the panel's fixed resolution (eDP)
 * and the data rate the configured link (bandwidth x lanes) can carry.
 */
static enum drm_mode_status
cdv_intel_dp_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct gma_encoder *encoder = gma_attached_encoder(connector);
struct cdv_intel_dp *intel_dp = encoder->dev_priv;
int max_link_clock = cdv_intel_dp_link_clock(cdv_intel_dp_max_link_bw(encoder));
int max_lanes = cdv_intel_dp_max_lane_count(encoder);
struct drm_psb_private *dev_priv = to_drm_psb_private(connector->dev);
/* eDP: the fixed panel cannot display anything larger than native */
if (is_edp(encoder) && intel_dp->panel_fixed_mode) {
if (mode->hdisplay > intel_dp->panel_fixed_mode->hdisplay)
return MODE_PANEL;
if (mode->vdisplay > intel_dp->panel_fixed_mode->vdisplay)
return MODE_PANEL;
}
/* only refuse the mode on non eDP since we have seen some weird eDP panels
which are outside spec tolerances but somehow work by magic */
if (!is_edp(encoder) &&
(cdv_intel_dp_link_required(mode->clock, dev_priv->edp.bpp)
> cdv_intel_dp_max_data_rate(max_link_clock, max_lanes)))
return MODE_CLOCK_HIGH;
/* eDP is still checked, but against a fixed 24 bpp assumption */
if (is_edp(encoder)) {
if (cdv_intel_dp_link_required(mode->clock, 24)
> cdv_intel_dp_max_data_rate(max_link_clock, max_lanes))
return MODE_CLOCK_HIGH;
}
/* Reject pixel clocks below 10 MHz */
if (mode->clock < 10000)
return MODE_CLOCK_LOW;
return MODE_OK;
}
/*
 * Pack up to four bytes big-endian into one 32-bit AUX data word:
 * the first source byte occupies the most significant octet.
 */
static uint32_t
pack_aux(uint8_t *src, int src_bytes)
{
	uint32_t packed = 0;
	int count = src_bytes > 4 ? 4 : src_bytes;
	int i;

	for (i = 0; i < count; i++)
		packed |= (uint32_t)src[i] << (24 - i * 8);
	return packed;
}
/*
 * Unpack up to four bytes big-endian from one 32-bit AUX data word:
 * the most significant octet becomes the first destination byte.
 */
static void
unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
{
	int count = dst_bytes > 4 ? 4 : dst_bytes;
	int i;

	for (i = 0; i < count; i++)
		dst[i] = (uint8_t)(src >> (24 - i * 8));
}
/*
 * Perform one raw AUX channel transaction: load @send into the AUX data
 * registers, kick off the transfer, busy-wait for completion and copy any
 * reply into @recv.
 *
 * Returns the number of bytes received, or a negative errno:
 * -EBUSY if the channel was already busy or never signalled done,
 * -EIO on a receive error, -ETIMEDOUT if the sink did not answer
 * (normal when nothing is connected).
 */
static int
cdv_intel_dp_aux_ch(struct gma_encoder *encoder,
uint8_t *send, int send_bytes,
uint8_t *recv, int recv_size)
{
struct cdv_intel_dp *intel_dp = encoder->dev_priv;
uint32_t output_reg = intel_dp->output_reg;
struct drm_device *dev = encoder->base.dev;
/* AUX control and data registers sit at fixed offsets from the port */
uint32_t ch_ctl = output_reg + 0x10;
uint32_t ch_data = ch_ctl + 4;
int i;
int recv_bytes;
uint32_t status;
uint32_t aux_clock_divider;
int try, precharge;
/* The clock divider is based off the hrawclk,
* and would like to run at 2MHz. So, take the
* hrawclk value and divide by 2 and use that
* On CDV platform it uses 200MHz as hrawclk.
*
*/
aux_clock_divider = 200 / 2;
/* eDP needs a longer precharge time */
precharge = 4;
if (is_edp(encoder))
precharge = 10;
/* A transfer must not already be in flight */
if (REG_READ(ch_ctl) & DP_AUX_CH_CTL_SEND_BUSY) {
DRM_ERROR("dp_aux_ch not started status 0x%08x\n",
REG_READ(ch_ctl));
return -EBUSY;
}
/* Must try at least 3 times according to DP spec */
for (try = 0; try < 5; try++) {
/* Load the send data into the aux channel data registers */
for (i = 0; i < send_bytes; i += 4)
REG_WRITE(ch_data + i,
pack_aux(send + i, send_bytes - i));
/* Send the command and wait for it to complete */
REG_WRITE(ch_ctl,
DP_AUX_CH_CTL_SEND_BUSY |
DP_AUX_CH_CTL_TIME_OUT_400us |
(send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
(precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
(aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) |
DP_AUX_CH_CTL_DONE |
DP_AUX_CH_CTL_TIME_OUT_ERROR |
DP_AUX_CH_CTL_RECEIVE_ERROR);
/* Busy-wait for SEND_BUSY to clear */
for (;;) {
status = REG_READ(ch_ctl);
if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
break;
udelay(100);
}
/* Clear done status and any errors */
REG_WRITE(ch_ctl,
status |
DP_AUX_CH_CTL_DONE |
DP_AUX_CH_CTL_TIME_OUT_ERROR |
DP_AUX_CH_CTL_RECEIVE_ERROR);
if (status & DP_AUX_CH_CTL_DONE)
break;
}
if ((status & DP_AUX_CH_CTL_DONE) == 0) {
DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
return -EBUSY;
}
/* Check for timeout or receive error.
* Timeouts occur when the sink is not connected
*/
if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
return -EIO;
}
/* Timeouts occur when the device isn't connected, so they're
* "normal" -- don't fill the kernel log with these */
if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
return -ETIMEDOUT;
}
/* Unload any bytes sent back from the other side */
recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
if (recv_bytes > recv_size)
recv_bytes = recv_size;
for (i = 0; i < recv_bytes; i += 4)
unpack_aux(REG_READ(ch_data + i),
recv + i, recv_bytes - i);
return recv_bytes;
}
/* Write data to the aux channel in native mode */
/*
 * Build a native AUX write request (header + payload, max 16 data bytes)
 * and send it, retrying on DEFER replies until the sink ACKs or NACKs.
 * Returns the number of bytes written, -1 for oversize requests, or a
 * negative errno from the channel transfer.
 * NOTE(review): a DEFER-ing sink keeps this loop spinning indefinitely --
 * there is no retry cap here; confirm against the callers' expectations.
 */
static int
cdv_intel_dp_aux_native_write(struct gma_encoder *encoder,
uint16_t address, uint8_t *send, int send_bytes)
{
int ret;
uint8_t msg[20];
int msg_bytes;
uint8_t ack;
/* 4-byte header + up to 16 data bytes fit in msg[20] */
if (send_bytes > 16)
return -1;
msg[0] = DP_AUX_NATIVE_WRITE << 4;
msg[1] = address >> 8;
msg[2] = address & 0xff;
msg[3] = send_bytes - 1;
memcpy(&msg[4], send, send_bytes);
msg_bytes = send_bytes + 4;
for (;;) {
ret = cdv_intel_dp_aux_ch(encoder, msg, msg_bytes, &ack, 1);
if (ret < 0)
return ret;
/* Reply code is in the high nibble of the first reply byte */
ack >>= 4;
if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_ACK)
break;
else if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_DEFER)
udelay(100);
else
return -EIO;
}
return send_bytes;
}
/* Write a single byte to the aux channel in native mode. */
static int
cdv_intel_dp_aux_native_write_1(struct gma_encoder *encoder,
			    uint16_t address, uint8_t byte)
{
	uint8_t buf = byte;

	return cdv_intel_dp_aux_native_write(encoder, address, &buf, 1);
}
/* read bytes from a native aux channel */
/*
 * Issue a native AUX read of @recv_bytes from DPCD @address, retrying on
 * DEFER replies. On ACK, the reply's first byte is the status code and the
 * remainder is data. Returns the number of data bytes read, -EPROTO on an
 * empty reply, -EIO on NACK/invalid reply, or a channel errno.
 */
static int
cdv_intel_dp_aux_native_read(struct gma_encoder *encoder,
uint16_t address, uint8_t *recv, int recv_bytes)
{
uint8_t msg[4];
int msg_bytes;
uint8_t reply[20];
int reply_bytes;
uint8_t ack;
int ret;
/* 4-byte native-read request header */
msg[0] = DP_AUX_NATIVE_READ << 4;
msg[1] = address >> 8;
msg[2] = address & 0xff;
msg[3] = recv_bytes - 1;
msg_bytes = 4;
/* Reply carries one status byte in front of the data */
reply_bytes = recv_bytes + 1;
for (;;) {
ret = cdv_intel_dp_aux_ch(encoder, msg, msg_bytes,
reply, reply_bytes);
if (ret == 0)
return -EPROTO;
if (ret < 0)
return ret;
ack = reply[0] >> 4;
if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_ACK) {
/* Strip the status byte before handing data back */
memcpy(recv, reply + 1, ret - 1);
return ret - 1;
}
else if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_DEFER)
udelay(100);
else
return -EIO;
}
}
/*
 * I2C-over-AUX bit transfer callback for the i2c_algo_dp_aux adapter.
 * Translates one i2c operation (@mode: MODE_I2C_READ/WRITE/START/STOP)
 * into an AUX request, retries up to 5 times on DEFER, and maps the
 * native and i2c reply codes to errnos. Returns the number of data bytes
 * transferred on success, -EREMOTEIO on NACK/invalid reply.
 */
static int
cdv_intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
uint8_t write_byte, uint8_t *read_byte)
{
struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
struct cdv_intel_dp *intel_dp = container_of(adapter,
struct cdv_intel_dp,
adapter);
struct gma_encoder *encoder = intel_dp->encoder;
uint16_t address = algo_data->address;
uint8_t msg[5];
uint8_t reply[2];
unsigned retry;
int msg_bytes;
int reply_bytes;
int ret;
/* Set up the command byte */
if (mode & MODE_I2C_READ)
msg[0] = DP_AUX_I2C_READ << 4;
else
msg[0] = DP_AUX_I2C_WRITE << 4;
/* MOT (middle-of-transaction) stays set until a STOP is requested */
if (!(mode & MODE_I2C_STOP))
msg[0] |= DP_AUX_I2C_MOT << 4;
msg[1] = address >> 8;
msg[2] = address;
switch (mode) {
case MODE_I2C_WRITE:
msg[3] = 0;
msg[4] = write_byte;
msg_bytes = 5;
reply_bytes = 1;
break;
case MODE_I2C_READ:
msg[3] = 0;
msg_bytes = 4;
reply_bytes = 2;
break;
default:
/* Address-only transaction (start/stop) */
msg_bytes = 3;
reply_bytes = 1;
break;
}
for (retry = 0; retry < 5; retry++) {
ret = cdv_intel_dp_aux_ch(encoder,
msg, msg_bytes,
reply, reply_bytes);
if (ret < 0) {
DRM_DEBUG_KMS("aux_ch failed %d\n", ret);
return ret;
}
/* First check the native (AUX transaction) reply code */
switch ((reply[0] >> 4) & DP_AUX_NATIVE_REPLY_MASK) {
case DP_AUX_NATIVE_REPLY_ACK:
/* I2C-over-AUX Reply field is only valid
* when paired with AUX ACK.
*/
break;
case DP_AUX_NATIVE_REPLY_NACK:
DRM_DEBUG_KMS("aux_ch native nack\n");
return -EREMOTEIO;
case DP_AUX_NATIVE_REPLY_DEFER:
udelay(100);
continue;
default:
DRM_ERROR("aux_ch invalid native reply 0x%02x\n",
reply[0]);
return -EREMOTEIO;
}
/* Then the i2c-level reply code */
switch ((reply[0] >> 4) & DP_AUX_I2C_REPLY_MASK) {
case DP_AUX_I2C_REPLY_ACK:
if (mode == MODE_I2C_READ) {
*read_byte = reply[1];
}
return reply_bytes - 1;
case DP_AUX_I2C_REPLY_NACK:
DRM_DEBUG_KMS("aux_i2c nack\n");
return -EREMOTEIO;
case DP_AUX_I2C_REPLY_DEFER:
DRM_DEBUG_KMS("aux_i2c defer\n");
udelay(100);
break;
default:
DRM_ERROR("aux_i2c invalid reply 0x%02x\n", reply[0]);
return -EREMOTEIO;
}
}
DRM_ERROR("too many retries, giving up\n");
return -EREMOTEIO;
}
/*
 * Register an i2c adapter that tunnels DDC over the DP AUX channel,
 * so EDID can be read with the standard drm_get_edid() path. For eDP
 * the panel VDD rail is forced on around bus registration, since
 * i2c_dp_aux_add_bus() probes the bus. Returns 0 or a negative errno.
 */
static int
cdv_intel_dp_i2c_init(struct gma_connector *connector,
struct gma_encoder *encoder, const char *name)
{
struct cdv_intel_dp *intel_dp = encoder->dev_priv;
int ret;
DRM_DEBUG_KMS("i2c_init %s\n", name);
intel_dp->algo.running = false;
intel_dp->algo.address = 0;
intel_dp->algo.aux_ch = cdv_intel_dp_i2c_aux_ch;
memset(&intel_dp->adapter, '\0', sizeof (intel_dp->adapter));
intel_dp->adapter.owner = THIS_MODULE;
intel_dp->adapter.class = I2C_CLASS_DDC;
/* Copy the name, guaranteeing NUL termination ourselves */
strncpy (intel_dp->adapter.name, name, sizeof(intel_dp->adapter.name) - 1);
intel_dp->adapter.name[sizeof(intel_dp->adapter.name) - 1] = '\0';
intel_dp->adapter.algo_data = &intel_dp->algo;
intel_dp->adapter.dev.parent = connector->base.kdev;
if (is_edp(encoder))
cdv_intel_edp_panel_vdd_on(encoder);
ret = i2c_dp_aux_add_bus(&intel_dp->adapter);
if (is_edp(encoder))
cdv_intel_edp_panel_vdd_off(encoder);
return ret;
}
/*
 * Force the adjusted mode's timings to those of the panel's fixed mode,
 * then regenerate the derived CRTC timing fields.
 */
static void cdv_intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
				       struct drm_display_mode *adjusted_mode)
{
	adjusted_mode->clock       = fixed_mode->clock;

	/* Horizontal timings */
	adjusted_mode->hdisplay    = fixed_mode->hdisplay;
	adjusted_mode->hsync_start = fixed_mode->hsync_start;
	adjusted_mode->hsync_end   = fixed_mode->hsync_end;
	adjusted_mode->htotal      = fixed_mode->htotal;

	/* Vertical timings */
	adjusted_mode->vdisplay    = fixed_mode->vdisplay;
	adjusted_mode->vsync_start = fixed_mode->vsync_start;
	adjusted_mode->vsync_end   = fixed_mode->vsync_end;
	adjusted_mode->vtotal      = fixed_mode->vtotal;

	drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
}
/*
 * Pick the lowest link bandwidth / lane count combination that can carry
 * the requested mode, storing the choice in intel_dp and rewriting
 * adjusted_mode->clock to the link clock. For eDP the panel's fixed mode
 * is substituted first, and if no combination fits, the maximum link
 * configuration is forced rather than failing.
 */
static bool
cdv_intel_dp_mode_fixup(struct drm_encoder *encoder, const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct drm_psb_private *dev_priv = to_drm_psb_private(encoder->dev);
struct gma_encoder *intel_encoder = to_gma_encoder(encoder);
struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv;
int lane_count, clock;
int max_lane_count = cdv_intel_dp_max_lane_count(intel_encoder);
/* index into bws[]: 1 if the sink supports 2.7 GHz, else 0 */
int max_clock = cdv_intel_dp_max_link_bw(intel_encoder) == DP_LINK_BW_2_7 ? 1 : 0;
static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
int refclock = mode->clock;
int bpp = 24;
if (is_edp(intel_encoder) && intel_dp->panel_fixed_mode) {
cdv_intel_fixed_panel_mode(intel_dp->panel_fixed_mode, adjusted_mode);
refclock = intel_dp->panel_fixed_mode->clock;
bpp = dev_priv->edp.bpp;
}
/* Try 1, 2, 4 lanes; within each, prefer the higher link rate */
for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
for (clock = max_clock; clock >= 0; clock--) {
int link_avail = cdv_intel_dp_max_data_rate(cdv_intel_dp_link_clock(bws[clock]), lane_count);
if (cdv_intel_dp_link_required(refclock, bpp) <= link_avail) {
intel_dp->link_bw = bws[clock];
intel_dp->lane_count = lane_count;
adjusted_mode->clock = cdv_intel_dp_link_clock(intel_dp->link_bw);
DRM_DEBUG_KMS("Display port link bw %02x lane "
"count %d clock %d\n",
intel_dp->link_bw, intel_dp->lane_count,
adjusted_mode->clock);
return true;
}
}
}
if (is_edp(intel_encoder)) {
/* okay we failed just pick the highest */
intel_dp->lane_count = max_lane_count;
intel_dp->link_bw = bws[max_clock];
adjusted_mode->clock = cdv_intel_dp_link_clock(intel_dp->link_bw);
DRM_DEBUG_KMS("Force picking display port link bw %02x lane "
"count %d clock %d\n",
intel_dp->link_bw, intel_dp->lane_count,
adjusted_mode->clock);
return true;
}
return false;
}
/* M/N ratio values programmed into the pipe's data and link registers. */
struct cdv_intel_dp_m_n {
uint32_t tu;		/* transfer unit size (symbols) */
uint32_t gmch_m;	/* data M: payload bytes per pixel clock */
uint32_t gmch_n;	/* data N: link capacity (link clock * lanes) */
uint32_t link_m;	/* link M: pixel clock */
uint32_t link_n;	/* link N: link symbol clock */
};
/*
 * Rescale the ratio *num / *den so the denominator becomes the fixed
 * 24-bit value 0x800000 while (approximately) preserving the ratio:
 *   *num = *num * 0x800000 / *den;  *den = 0x800000;
 * This makes both values fit the hardware's 24-bit M/N register fields.
 */
static void
cdv_intel_reduce_ratio(uint32_t *num, uint32_t *den)
{
	uint64_t value;

	/* Widen to 64 bits before multiplying to avoid overflow. */
	value = (uint64_t)*num * 0x800000;
	/* do_div() divides value in place; the returned remainder is unused. */
	do_div(value, *den);
	*num = value;
	*den = 0x800000;
}
/*
 * Compute the data (GMCH) and link M/N ratios for the given pixel format,
 * lane count and clocks, reducing each to the fixed 24-bit denominator.
 */
static void
cdv_intel_dp_compute_m_n(int bpp,
int nlanes,
int pixel_clock,
int link_clock,
struct cdv_intel_dp_m_n *m_n)
{
/* Transfer unit size is fixed at 64 symbols */
m_n->tu = 64;
/* Data M/N: payload bytes per pixel clock (bits -> bytes, rounded up)
 * versus total link capacity */
m_n->gmch_m = (pixel_clock * bpp + 7) >> 3;
m_n->gmch_n = link_clock * nlanes;
cdv_intel_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);
/* Link M/N: pixel clock versus link symbol clock */
m_n->link_m = pixel_clock;
m_n->link_n = link_clock;
cdv_intel_reduce_ratio(&m_n->link_m, &m_n->link_n);
}
/*
 * Program the pipe's GMCH data and link M/N registers for the DP/eDP
 * encoder driving @crtc. The lane count (and, for eDP, bpp) are taken
 * from the attached encoder's private state.
 */
void
cdv_intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = crtc->dev;
struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct drm_mode_config *mode_config = &dev->mode_config;
struct drm_encoder *encoder;
struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
int lane_count = 4, bpp = 24;
struct cdv_intel_dp_m_n m_n;
int pipe = gma_crtc->pipe;
/*
* Find the lane count in the intel_encoder private
*/
list_for_each_entry(encoder, &mode_config->encoder_list, head) {
struct gma_encoder *intel_encoder;
struct cdv_intel_dp *intel_dp;
if (encoder->crtc != crtc)
continue;
intel_encoder = to_gma_encoder(encoder);
intel_dp = intel_encoder->dev_priv;
if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) {
lane_count = intel_dp->lane_count;
break;
} else if (is_edp(intel_encoder)) {
lane_count = intel_dp->lane_count;
bpp = dev_priv->edp.bpp;
break;
}
}
/*
* Compute the GMCH and Link ratios. The '3' here is
* the number of bytes_per_pixel post-LUT, which we always
* set up for 8-bits of R/G/B, or 3 bytes total.
*/
cdv_intel_dp_compute_m_n(bpp, lane_count,
mode->clock, adjusted_mode->clock, &m_n);
/* Write the computed ratios into the per-pipe registers */
{
REG_WRITE(PIPE_GMCH_DATA_M(pipe),
((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) |
m_n.gmch_m);
REG_WRITE(PIPE_GMCH_DATA_N(pipe), m_n.gmch_n);
REG_WRITE(PIPE_DP_LINK_M(pipe), m_n.link_m);
REG_WRITE(PIPE_DP_LINK_N(pipe), m_n.link_n);
}
}
/*
 * Build the DP port control word (sync polarity, lane count, audio,
 * enhanced framing, pipe select) in intel_dp->DP, write it with the port
 * enabled, and -- for eDP -- power the panel and configure the panel
 * fitter when the mode was scaled.
 */
static void
cdv_intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct gma_encoder *intel_encoder = to_gma_encoder(encoder);
struct drm_crtc *crtc = encoder->crtc;
struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv;
struct drm_device *dev = encoder->dev;
intel_dp->DP = DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
intel_dp->DP |= intel_dp->color_range;
/* Mirror the mode's sync polarities into the port register */
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
intel_dp->DP |= DP_SYNC_HS_HIGH;
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
intel_dp->DP |= DP_SYNC_VS_HIGH;
intel_dp->DP |= DP_LINK_TRAIN_OFF;
switch (intel_dp->lane_count) {
case 1:
intel_dp->DP |= DP_PORT_WIDTH_1;
break;
case 2:
intel_dp->DP |= DP_PORT_WIDTH_2;
break;
case 4:
intel_dp->DP |= DP_PORT_WIDTH_4;
break;
}
if (intel_dp->has_audio)
intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
/* Mirror bandwidth/lane choice into the DPCD link configuration */
memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
intel_dp->link_configuration[0] = intel_dp->link_bw;
intel_dp->link_configuration[1] = intel_dp->lane_count;
/*
* Check for DPCD version > 1.1 and enhanced framing support
*/
if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
(intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)) {
intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
intel_dp->DP |= DP_ENHANCED_FRAMING;
}
/* CPT DP's pipe select is decided in TRANS_DP_CTL */
if (gma_crtc->pipe == 1)
intel_dp->DP |= DP_PIPEB_SELECT;
REG_WRITE(intel_dp->output_reg, (intel_dp->DP | DP_PORT_EN));
DRM_DEBUG_KMS("DP expected reg is %x\n", intel_dp->DP);
if (is_edp(intel_encoder)) {
uint32_t pfit_control;
cdv_intel_edp_panel_on(intel_encoder);
/* Enable the panel fitter only when the mode was scaled */
if (mode->hdisplay != adjusted_mode->hdisplay ||
mode->vdisplay != adjusted_mode->vdisplay)
pfit_control = PFIT_ENABLE;
else
pfit_control = 0;
pfit_control |= gma_crtc->pipe << PFIT_PIPE_SHIFT;
REG_WRITE(PFIT_CONTROL, pfit_control);
}
}
/* If the sink supports it, try to set the power state appropriately */
/*
 * Write DP_SET_POWER over AUX: D3 when turning off, D0 when turning on.
 * Sinks with DPCD rev < 1.1 do not implement this register, so they are
 * skipped. Power-on is retried three times with 1ms gaps since the sink
 * needs up to 1ms to wake.
 */
static void cdv_intel_dp_sink_dpms(struct gma_encoder *encoder, int mode)
{
struct cdv_intel_dp *intel_dp = encoder->dev_priv;
int ret, i;
/* Should have a valid DPCD by this point */
if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
return;
if (mode != DRM_MODE_DPMS_ON) {
ret = cdv_intel_dp_aux_native_write_1(encoder, DP_SET_POWER,
DP_SET_POWER_D3);
if (ret != 1)
DRM_DEBUG_DRIVER("failed to write sink power state\n");
} else {
/*
* When turning on, we need to retry for 1ms to give the sink
* time to wake up.
*/
for (i = 0; i < 3; i++) {
ret = cdv_intel_dp_aux_native_write_1(encoder,
DP_SET_POWER,
DP_SET_POWER_D0);
if (ret == 1)
break;
udelay(1000);
}
}
}
/*
 * Pre-modeset hook: bring the link down cleanly. For eDP the backlight
 * and panel are powered off first, with VDD forced on so the sink DPMS
 * AUX write can still reach the panel.
 */
static void cdv_intel_dp_prepare(struct drm_encoder *encoder)
{
struct gma_encoder *intel_encoder = to_gma_encoder(encoder);
int edp = is_edp(intel_encoder);
if (edp) {
cdv_intel_edp_backlight_off(intel_encoder);
cdv_intel_edp_panel_off(intel_encoder);
cdv_intel_edp_panel_vdd_on(intel_encoder);
}
/* Wake up the sink first */
cdv_intel_dp_sink_dpms(intel_encoder, DRM_MODE_DPMS_ON);
cdv_intel_dp_link_down(intel_encoder);
if (edp)
cdv_intel_edp_panel_vdd_off(intel_encoder);
}
/*
 * Post-modeset hook: power the panel (eDP), run both link training
 * phases, then enable the backlight (eDP).
 */
static void cdv_intel_dp_commit(struct drm_encoder *encoder)
{
struct gma_encoder *intel_encoder = to_gma_encoder(encoder);
int edp = is_edp(intel_encoder);
if (edp)
cdv_intel_edp_panel_on(intel_encoder);
cdv_intel_dp_start_link_train(intel_encoder);
cdv_intel_dp_complete_link_train(intel_encoder);
if (edp)
cdv_intel_edp_backlight_on(intel_encoder);
}
/*
 * DPMS hook: on power-down, disable backlight/panel (eDP), put the sink
 * into the requested power state and drop the link; on power-up, power
 * the panel, wake the sink and retrain the link if the port was disabled.
 */
static void
cdv_intel_dp_dpms(struct drm_encoder *encoder, int mode)
{
struct gma_encoder *intel_encoder = to_gma_encoder(encoder);
struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv;
struct drm_device *dev = encoder->dev;
uint32_t dp_reg = REG_READ(intel_dp->output_reg);
int edp = is_edp(intel_encoder);
if (mode != DRM_MODE_DPMS_ON) {
if (edp) {
cdv_intel_edp_backlight_off(intel_encoder);
/* Keep VDD up so the sink DPMS write can go through */
cdv_intel_edp_panel_vdd_on(intel_encoder);
}
cdv_intel_dp_sink_dpms(intel_encoder, mode);
cdv_intel_dp_link_down(intel_encoder);
if (edp) {
cdv_intel_edp_panel_vdd_off(intel_encoder);
cdv_intel_edp_panel_off(intel_encoder);
}
} else {
if (edp)
cdv_intel_edp_panel_on(intel_encoder);
cdv_intel_dp_sink_dpms(intel_encoder, mode);
/* Only retrain if the port is not already enabled */
if (!(dp_reg & DP_PORT_EN)) {
cdv_intel_dp_start_link_train(intel_encoder);
cdv_intel_dp_complete_link_train(intel_encoder);
}
if (edp)
cdv_intel_edp_backlight_on(intel_encoder);
}
}
/*
* Native read with retry for link status and receiver capability reads for
* cases where the sink may still be asleep.
*/
/*
 * Native AUX read with retry, for link-status and capability reads where
 * the sink may still be waking up. Returns true once a full-length read
 * succeeds, false after three failed attempts.
 */
static bool
cdv_intel_dp_aux_native_read_retry(struct gma_encoder *encoder, uint16_t address,
				uint8_t *recv, int recv_bytes)
{
	int attempt;

	/*
	 * Sinks are supposed to come up within 1ms from an off state,
	 * and the spec asks for up to 3 retries.
	 */
	for (attempt = 3; attempt > 0; attempt--) {
		if (cdv_intel_dp_aux_native_read(encoder, address, recv,
						 recv_bytes) == recv_bytes)
			return true;
		udelay(1000);
	}
	return false;
}
/*
 * Fetch AUX CH registers 0x202 - 0x207 which contain
 * link status information
 */
/* Result is cached in intel_dp->link_status; returns false on AUX failure. */
static bool
cdv_intel_dp_get_link_status(struct gma_encoder *encoder)
{
struct cdv_intel_dp *intel_dp = encoder->dev_priv;
return cdv_intel_dp_aux_native_read_retry(encoder,
DP_LANE0_1_STATUS,
intel_dp->link_status,
DP_LINK_STATUS_SIZE);
}
/*
 * Return the cached link-status byte for DPCD address @r; the cache
 * starts at DP_LANE0_1_STATUS (0x202).
 */
static uint8_t
cdv_intel_dp_link_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
int r)
{
return link_status[r - DP_LANE0_1_STATUS];
}
/*
 * Extract the sink's requested voltage swing for @lane from the cached
 * ADJUST_REQUEST bytes, shifted into TRAINING_LANEx_SET position.
 */
static uint8_t
cdv_intel_get_adjust_request_voltage(uint8_t link_status[DP_LINK_STATUS_SIZE],
				 int lane)
{
	/* Two lanes share each ADJUST_REQUEST byte; odd lanes use the high nibble. */
	int reg = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
	int shift = (lane & 1) ? DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT
			       : DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT;
	uint8_t raw = cdv_intel_dp_link_status(link_status, reg);

	return (uint8_t)(((raw >> shift) & 3) << DP_TRAIN_VOLTAGE_SWING_SHIFT);
}
/*
 * Extract the sink's requested pre-emphasis level for @lane from the
 * cached ADJUST_REQUEST bytes, shifted into TRAINING_LANEx_SET position.
 */
static uint8_t
cdv_intel_get_adjust_request_pre_emphasis(uint8_t link_status[DP_LINK_STATUS_SIZE],
				      int lane)
{
	/* Two lanes share each ADJUST_REQUEST byte; odd lanes use the high nibble. */
	int reg = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
	int shift = (lane & 1) ? DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT
			       : DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT;
	uint8_t raw = cdv_intel_dp_link_status(link_status, reg);

	return (uint8_t)(((raw >> shift) & 3) << DP_TRAIN_PRE_EMPHASIS_SHIFT);
}
#define CDV_DP_VOLTAGE_MAX DP_TRAIN_VOLTAGE_SWING_LEVEL_3
/*
 * Compute the next training drive settings from the sink's adjust
 * requests: take the maximum requested swing/pre-emphasis across all
 * active lanes, clamp to the hardware maximum (setting the MAX_REACHED
 * flags), and apply the same value to every lane's train_set entry.
 */
static void
cdv_intel_get_adjust_train(struct gma_encoder *encoder)
{
struct cdv_intel_dp *intel_dp = encoder->dev_priv;
uint8_t v = 0;
uint8_t p = 0;
int lane;
/* Find the highest request across active lanes */
for (lane = 0; lane < intel_dp->lane_count; lane++) {
uint8_t this_v = cdv_intel_get_adjust_request_voltage(intel_dp->link_status, lane);
uint8_t this_p = cdv_intel_get_adjust_request_pre_emphasis(intel_dp->link_status, lane);
if (this_v > v)
v = this_v;
if (this_p > p)
p = this_p;
}
/* Clamp and flag when the maximum level has been reached */
if (v >= CDV_DP_VOLTAGE_MAX)
v = CDV_DP_VOLTAGE_MAX | DP_TRAIN_MAX_SWING_REACHED;
if (p == DP_TRAIN_PRE_EMPHASIS_MASK)
p |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
/* All four lane slots get the same combined setting */
for (lane = 0; lane < 4; lane++)
intel_dp->train_set[lane] = v | p;
}
/*
 * Return the 4-bit status nibble for @lane from the cached LANEx_y_STATUS
 * bytes. Each byte carries two lanes: low nibble even, high nibble odd.
 */
static uint8_t
cdv_intel_get_lane_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
			 int lane)
{
	int reg = DP_LANE0_1_STATUS + (lane >> 1);
	uint8_t raw = cdv_intel_dp_link_status(link_status, reg);

	return (raw >> ((lane & 1) * 4)) & 0xf;
}
/* Check that clock recovery has completed on every active lane. */
static bool
cdv_intel_clock_recovery_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count)
{
	int lane;

	for (lane = 0; lane < lane_count; lane++)
		if (!(cdv_intel_get_lane_status(link_status, lane) &
		      DP_LANE_CR_DONE))
			return false;
	return true;
}
/* Check to see if channel eq is done on all channels */
#define CHANNEL_EQ_BITS (DP_LANE_CR_DONE|\
DP_LANE_CHANNEL_EQ_DONE|\
DP_LANE_SYMBOL_LOCKED)
/*
 * Check channel equalization: inter-lane alignment must be reported, and
 * every active lane needs CR done, EQ done and symbol lock.
 */
static bool
cdv_intel_channel_eq_ok(struct gma_encoder *encoder)
{
	struct cdv_intel_dp *intel_dp = encoder->dev_priv;
	int lane;

	if (!(cdv_intel_dp_link_status(intel_dp->link_status,
				       DP_LANE_ALIGN_STATUS_UPDATED) &
	      DP_INTERLANE_ALIGN_DONE))
		return false;

	for (lane = 0; lane < intel_dp->lane_count; lane++) {
		uint8_t stat = cdv_intel_get_lane_status(intel_dp->link_status,
							 lane);
		if ((stat & CHANNEL_EQ_BITS) != CHANNEL_EQ_BITS)
			return false;
	}
	return true;
}
/*
 * Program the port register with @dp_reg_value (carrying the training
 * pattern bits) and tell the sink the same pattern via DPCD
 * TRAINING_PATTERN_SET. Returns false if the AUX write fails.
 */
static bool
cdv_intel_dp_set_link_train(struct gma_encoder *encoder,
uint32_t dp_reg_value,
uint8_t dp_train_pat)
{
struct drm_device *dev = encoder->base.dev;
int ret;
struct cdv_intel_dp *intel_dp = encoder->dev_priv;
REG_WRITE(intel_dp->output_reg, dp_reg_value);
/* Posting read to flush the register write */
REG_READ(intel_dp->output_reg);
ret = cdv_intel_dp_aux_native_write_1(encoder,
DP_TRAINING_PATTERN_SET,
dp_train_pat);
if (ret != 1) {
DRM_DEBUG_KMS("Failure in setting link pattern %x\n",
dp_train_pat);
return false;
}
return true;
}
/*
 * Push the per-lane drive settings (train_set) to the sink via DPCD
 * TRAINING_LANE0_SET onwards. Returns false if fewer bytes than lanes
 * were written. The @dp_train_pat argument is unused here beyond logging
 * context at the call sites.
 */
static bool
cdv_intel_dplink_set_level(struct gma_encoder *encoder,
uint8_t dp_train_pat)
{
int ret;
struct cdv_intel_dp *intel_dp = encoder->dev_priv;
ret = cdv_intel_dp_aux_native_write(encoder,
DP_TRAINING_LANE0_SET,
intel_dp->train_set,
intel_dp->lane_count);
if (ret != intel_dp->lane_count) {
DRM_DEBUG_KMS("Failure in setting level %d, lane_cnt= %d\n",
intel_dp->train_set[0], intel_dp->lane_count);
return false;
}
return true;
}
/*
 * Program the DDI PHY (via sideband writes) with the voltage swing and
 * pre-emphasis encoded in @signal_level, using the per-port register
 * offset table and the vswing/premph value table.
 * NOTE(review): when CDV_FAST_LINK_TRAIN is defined this returns before
 * doing anything, leaving the PHY at its previous settings -- confirm
 * that is the intended fast-train behaviour.
 */
static void
cdv_intel_dp_set_vswing_premph(struct gma_encoder *encoder, uint8_t signal_level)
{
struct drm_device *dev = encoder->base.dev;
struct cdv_intel_dp *intel_dp = encoder->dev_priv;
struct ddi_regoff *ddi_reg;
int vswing, premph, index;
/* Pick the register offsets for this DP port */
if (intel_dp->output_reg == DP_B)
ddi_reg = &ddi_DP_train_table[0];
else
ddi_reg = &ddi_DP_train_table[1];
vswing = (signal_level & DP_TRAIN_VOLTAGE_SWING_MASK);
premph = ((signal_level & DP_TRAIN_PRE_EMPHASIS_MASK)) >>
DP_TRAIN_PRE_EMPHASIS_SHIFT;
/* Combined level beyond the table range: leave the PHY untouched */
if (vswing + premph > 3)
return;
#ifdef CDV_FAST_LINK_TRAIN
return;
#endif
DRM_DEBUG_KMS("Test2\n");
//return ;
cdv_sb_reset(dev);
/* ;Swing voltage programming
;gfx_dpio_set_reg(0xc058, 0x0505313A) */
cdv_sb_write(dev, ddi_reg->VSwing5, 0x0505313A);
/* ;gfx_dpio_set_reg(0x8154, 0x43406055) */
cdv_sb_write(dev, ddi_reg->VSwing1, 0x43406055);
/* ;gfx_dpio_set_reg(0x8148, 0x55338954)
* The VSwing_PreEmph table is also considered based on the vswing/premp
*/
index = (vswing + premph) * 2;
if (premph == 1 && vswing == 1) {
/* NOTE(review): 0x055738954 has 9 hex digits (36 bits) and is
 * silently truncated to 32 bits; the comment above says
 * 0x55338954 -- verify the intended constant */
cdv_sb_write(dev, ddi_reg->VSwing2, 0x055738954);
} else
cdv_sb_write(dev, ddi_reg->VSwing2, dp_vswing_premph_table[index]);
/* ;gfx_dpio_set_reg(0x814c, 0x40802040) */
if ((vswing + premph) == DP_TRAIN_VOLTAGE_SWING_LEVEL_3)
cdv_sb_write(dev, ddi_reg->VSwing3, 0x70802040);
else
cdv_sb_write(dev, ddi_reg->VSwing3, 0x40802040);
/* ;gfx_dpio_set_reg(0x8150, 0x2b405555) */
/* cdv_sb_write(dev, ddi_reg->VSwing4, 0x2b405555); */
/* ;gfx_dpio_set_reg(0x8154, 0xc3406055) */
cdv_sb_write(dev, ddi_reg->VSwing1, 0xc3406055);
/* ;Pre emphasis programming
* ;gfx_dpio_set_reg(0xc02c, 0x1f030040)
*/
cdv_sb_write(dev, ddi_reg->PreEmph1, 0x1f030040);
/* ;gfx_dpio_set_reg(0x8124, 0x00004000) */
index = 2 * premph + 1;
cdv_sb_write(dev, ddi_reg->PreEmph2, dp_vswing_premph_table[index]);
return;
}
/* Enable corresponding port and start training pattern 1 */
/*
 * Link training phase 1 (clock recovery): enable the port with pattern 1,
 * write the link configuration to the sink, then iterate -- applying the
 * sink's requested drive levels -- until all lanes report CR_DONE, the
 * maximum swing is exhausted, or the same voltage has been tried 5 times.
 * The final port value (without training bits) is saved in intel_dp->DP.
 */
static void
cdv_intel_dp_start_link_train(struct gma_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
struct cdv_intel_dp *intel_dp = encoder->dev_priv;
int i;
uint8_t voltage;
bool clock_recovery = false;
int tries;
u32 reg;
uint32_t DP = intel_dp->DP;
DP |= DP_PORT_EN;
DP &= ~DP_LINK_TRAIN_MASK;
reg = DP;
reg |= DP_LINK_TRAIN_PAT_1;
/* Enable output, wait for it to become active */
REG_WRITE(intel_dp->output_reg, reg);
REG_READ(intel_dp->output_reg);
gma_wait_for_vblank(dev);
DRM_DEBUG_KMS("Link config\n");
/* Write the link configuration data */
cdv_intel_dp_aux_native_write(encoder, DP_LINK_BW_SET,
intel_dp->link_configuration,
2);
memset(intel_dp->train_set, 0, 4);
voltage = 0;
tries = 0;
clock_recovery = false;
DRM_DEBUG_KMS("Start train\n");
reg = DP | DP_LINK_TRAIN_PAT_1;
for (;;) {
/* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
DRM_DEBUG_KMS("DP Link Train Set %x, Link_config %x, %x\n",
intel_dp->train_set[0],
intel_dp->link_configuration[0],
intel_dp->link_configuration[1]);
if (!cdv_intel_dp_set_link_train(encoder, reg, DP_TRAINING_PATTERN_1)) {
DRM_DEBUG_KMS("Failure in aux-transfer setting pattern 1\n");
}
cdv_intel_dp_set_vswing_premph(encoder, intel_dp->train_set[0]);
/* Set training pattern 1 */
cdv_intel_dplink_set_level(encoder, DP_TRAINING_PATTERN_1);
udelay(200);
/* Bail out if the status read itself fails */
if (!cdv_intel_dp_get_link_status(encoder))
break;
DRM_DEBUG_KMS("DP Link status %x, %x, %x, %x, %x, %x\n",
intel_dp->link_status[0], intel_dp->link_status[1], intel_dp->link_status[2],
intel_dp->link_status[3], intel_dp->link_status[4], intel_dp->link_status[5]);
if (cdv_intel_clock_recovery_ok(intel_dp->link_status, intel_dp->lane_count)) {
DRM_DEBUG_KMS("PT1 train is done\n");
clock_recovery = true;
break;
}
/* Check to see if we've tried the max voltage */
for (i = 0; i < intel_dp->lane_count; i++)
if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
break;
if (i == intel_dp->lane_count)
break;
/* Check to see if we've tried the same voltage 5 times */
if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
++tries;
if (tries == 5)
break;
} else
tries = 0;
voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
/* Compute new intel_dp->train_set as requested by target */
cdv_intel_get_adjust_train(encoder);
}
if (!clock_recovery) {
DRM_DEBUG_KMS("failure in DP patter 1 training, train set %x\n", intel_dp->train_set[0]);
}
intel_dp->DP = DP;
}
/*
 * Link training phase 2 (channel equalization): switch to pattern 2 and
 * iterate, restarting clock recovery if it drops (up to 5 restarts, also
 * after 5 failed EQ attempts), until all lanes report EQ done. Finally
 * clear the training pattern on both the port and the sink.
 */
static void
cdv_intel_dp_complete_link_train(struct gma_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
struct cdv_intel_dp *intel_dp = encoder->dev_priv;
int tries, cr_tries;
u32 reg;
uint32_t DP = intel_dp->DP;
/* channel equalization */
tries = 0;
cr_tries = 0;
DRM_DEBUG_KMS("\n");
reg = DP | DP_LINK_TRAIN_PAT_2;
for (;;) {
DRM_DEBUG_KMS("DP Link Train Set %x, Link_config %x, %x\n",
intel_dp->train_set[0],
intel_dp->link_configuration[0],
intel_dp->link_configuration[1]);
/* channel eq pattern */
if (!cdv_intel_dp_set_link_train(encoder, reg,
DP_TRAINING_PATTERN_2)) {
DRM_DEBUG_KMS("Failure in aux-transfer setting pattern 2\n");
}
/* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
if (cr_tries > 5) {
DRM_ERROR("failed to train DP, aborting\n");
cdv_intel_dp_link_down(encoder);
break;
}
cdv_intel_dp_set_vswing_premph(encoder, intel_dp->train_set[0]);
cdv_intel_dplink_set_level(encoder, DP_TRAINING_PATTERN_2);
udelay(1000);
if (!cdv_intel_dp_get_link_status(encoder))
break;
DRM_DEBUG_KMS("DP Link status %x, %x, %x, %x, %x, %x\n",
intel_dp->link_status[0], intel_dp->link_status[1], intel_dp->link_status[2],
intel_dp->link_status[3], intel_dp->link_status[4], intel_dp->link_status[5]);
/* Make sure clock is still ok */
if (!cdv_intel_clock_recovery_ok(intel_dp->link_status, intel_dp->lane_count)) {
cdv_intel_dp_start_link_train(encoder);
cr_tries++;
continue;
}
if (cdv_intel_channel_eq_ok(encoder)) {
DRM_DEBUG_KMS("PT2 train is done\n");
break;
}
/* Try 5 times, then try clock recovery if that fails */
if (tries > 5) {
cdv_intel_dp_link_down(encoder);
cdv_intel_dp_start_link_train(encoder);
tries = 0;
cr_tries++;
continue;
}
/* Compute new intel_dp->train_set as requested by target */
cdv_intel_get_adjust_train(encoder);
++tries;
}
/* Training done (or aborted): take the port out of training mode */
reg = DP | DP_LINK_TRAIN_OFF;
REG_WRITE(intel_dp->output_reg, reg);
REG_READ(intel_dp->output_reg);
/* And tell the sink training is over */
cdv_intel_dp_aux_native_write_1(encoder,
DP_TRAINING_PATTERN_SET, DP_TRAINING_PATTERN_DISABLE);
}
/*
 * Take the link down: switch the port to the idle training pattern,
 * wait roughly a frame (17ms), then disable the port entirely.
 * No-op if the port is already disabled.
 */
static void
cdv_intel_dp_link_down(struct gma_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
struct cdv_intel_dp *intel_dp = encoder->dev_priv;
uint32_t DP = intel_dp->DP;
if ((REG_READ(intel_dp->output_reg) & DP_PORT_EN) == 0)
return;
DRM_DEBUG_KMS("\n");
{
DP &= ~DP_LINK_TRAIN_MASK;
REG_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
}
/* Posting read, then let the idle pattern run for ~one frame */
REG_READ(intel_dp->output_reg);
msleep(17);
REG_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
REG_READ(intel_dp->output_reg);
}
/*
 * Probe the sink by reading the first bytes of its DPCD (address 0x000)
 * over AUX; a full-length read with a non-zero DPCD revision means a
 * sink is present.
 */
static enum drm_connector_status cdv_dp_detect(struct gma_encoder *encoder)
{
struct cdv_intel_dp *intel_dp = encoder->dev_priv;
enum drm_connector_status status;
status = connector_status_disconnected;
if (cdv_intel_dp_aux_native_read(encoder, 0x000, intel_dp->dpcd,
sizeof (intel_dp->dpcd)) == sizeof (intel_dp->dpcd))
{
if (intel_dp->dpcd[DP_DPCD_REV] != 0)
status = connector_status_connected;
}
if (status == connector_status_connected)
DRM_DEBUG_KMS("DPCD: Rev=%x LN_Rate=%x LN_CNT=%x LN_DOWNSP=%x\n",
intel_dp->dpcd[0], intel_dp->dpcd[1],
intel_dp->dpcd[2], intel_dp->dpcd[3]);
return status;
}
/*
 * Detect a DP/eDP sink by reading its DPCD over the AUX channel
 * (see cdv_dp_detect()); for eDP the panel VDD rail is forced on
 * around the probe.
 *
 * \return connector_status_connected if a sink answered with a valid DPCD.
 * \return connector_status_disconnected otherwise.
 */
static enum drm_connector_status
cdv_intel_dp_detect(struct drm_connector *connector, bool force)
{
struct gma_encoder *encoder = gma_attached_encoder(connector);
struct cdv_intel_dp *intel_dp = encoder->dev_priv;
enum drm_connector_status status;
struct edid *edid = NULL;
int edp = is_edp(encoder);
intel_dp->has_audio = false;
/* eDP needs panel VDD up for the DPCD probe */
if (edp)
cdv_intel_edp_panel_vdd_on(encoder);
status = cdv_dp_detect(encoder);
if (status != connector_status_connected) {
if (edp)
cdv_intel_edp_panel_vdd_off(encoder);
return status;
}
/* Audio: honour a user override, otherwise ask the EDID */
if (intel_dp->force_audio) {
intel_dp->has_audio = intel_dp->force_audio > 0;
} else {
edid = drm_get_edid(connector, &intel_dp->adapter);
if (edid) {
intel_dp->has_audio = drm_detect_monitor_audio(edid);
kfree(edid);
}
}
if (edp)
cdv_intel_edp_panel_vdd_off(encoder);
return connector_status_connected;
}
/*
 * Populate the connector's mode list from EDID. For eDP, additionally
 * latch a fixed panel mode: preferred EDID mode first, then the VBT
 * LVDS mode as fallback; if only the cached fixed mode exists, return
 * a duplicate of it. Returns the number of modes added.
 */
static int cdv_intel_dp_get_modes(struct drm_connector *connector)
{
struct gma_encoder *intel_encoder = gma_attached_encoder(connector);
struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv;
struct edid *edid = NULL;
int ret = 0;
int edp = is_edp(intel_encoder);
edid = drm_get_edid(connector, &intel_dp->adapter);
if (edid) {
drm_connector_update_edid_property(connector, edid);
ret = drm_add_edid_modes(connector, edid);
kfree(edid);
}
if (is_edp(intel_encoder)) {
struct drm_device *dev = connector->dev;
struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
cdv_intel_edp_panel_vdd_off(intel_encoder);
if (ret) {
/* Cache the EDID's preferred mode as the fixed panel mode */
if (edp && !intel_dp->panel_fixed_mode) {
struct drm_display_mode *newmode;
list_for_each_entry(newmode, &connector->probed_modes,
head) {
if (newmode->type & DRM_MODE_TYPE_PREFERRED) {
intel_dp->panel_fixed_mode =
drm_mode_duplicate(dev, newmode);
break;
}
}
}
return ret;
}
/* No EDID modes: fall back to the VBT-provided panel mode */
if (!intel_dp->panel_fixed_mode && dev_priv->lfp_lvds_vbt_mode) {
intel_dp->panel_fixed_mode =
drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
if (intel_dp->panel_fixed_mode) {
intel_dp->panel_fixed_mode->type |=
DRM_MODE_TYPE_PREFERRED;
}
}
/* Expose the fixed mode when it is all we have */
if (intel_dp->panel_fixed_mode != NULL) {
struct drm_display_mode *mode;
mode = drm_mode_duplicate(dev, intel_dp->panel_fixed_mode);
drm_mode_probed_add(connector, mode);
return 1;
}
}
return ret;
}
static bool
cdv_intel_dp_detect_audio(struct drm_connector *connector)
{
struct gma_encoder *encoder = gma_attached_encoder(connector);
struct cdv_intel_dp *intel_dp = encoder->dev_priv;
struct edid *edid;
bool has_audio = false;
int edp = is_edp(encoder);
if (edp)
cdv_intel_edp_panel_vdd_on(encoder);
edid = drm_get_edid(connector, &intel_dp->adapter);
if (edid) {
has_audio = drm_detect_monitor_audio(edid);
kfree(edid);
}
if (edp)
cdv_intel_edp_panel_vdd_off(encoder);
return has_audio;
}
/*
 * Handle writes to the connector's custom properties (force_audio and
 * broadcast_rgb).  If a property change affects the output configuration,
 * the active mode is re-set on the CRTC so it takes effect immediately.
 * Returns 0 on success, -EINVAL for an unknown property.
 */
static int
cdv_intel_dp_set_property(struct drm_connector *connector,
			  struct drm_property *property,
			  uint64_t val)
{
	struct drm_psb_private *dev_priv = to_drm_psb_private(connector->dev);
	struct gma_encoder *encoder = gma_attached_encoder(connector);
	struct cdv_intel_dp *intel_dp = encoder->dev_priv;
	int ret;
	ret = drm_object_property_set_value(&connector->base, property, val);
	if (ret)
		return ret;
	if (property == dev_priv->force_audio_property) {
		int i = val;
		bool has_audio;
		if (i == intel_dp->force_audio)
			return 0;
		intel_dp->force_audio = i;
		/* 0 means "auto": probe the sink; otherwise force on/off. */
		if (i == 0)
			has_audio = cdv_intel_dp_detect_audio(connector);
		else
			has_audio = i > 0;
		if (has_audio == intel_dp->has_audio)
			return 0;
		intel_dp->has_audio = has_audio;
		goto done;
	}
	if (property == dev_priv->broadcast_rgb_property) {
		if (val == !!intel_dp->color_range)
			return 0;
		intel_dp->color_range = val ? DP_COLOR_RANGE_16_235 : 0;
		goto done;
	}
	return -EINVAL;
done:
	/* Apply the new configuration by re-setting the current mode. */
	if (encoder->base.crtc) {
		struct drm_crtc *crtc = encoder->base.crtc;
		drm_crtc_helper_set_mode(crtc, &crtc->mode,
					 crtc->x, crtc->y,
					 crtc->primary->fb);
	}
	return 0;
}
/*
 * Connector destroy callback: release the cached eDP fixed mode, tear
 * down the AUX i2c adapter and free the connector wrapper.
 */
static void
cdv_intel_dp_destroy(struct drm_connector *connector)
{
	struct gma_connector *gma_connector = to_gma_connector(connector);
	struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
	struct cdv_intel_dp *intel_dp = gma_encoder->dev_priv;
	if (is_edp(gma_encoder)) {
		/* cdv_intel_panel_destroy_backlight(connector->dev); */
		kfree(intel_dp->panel_fixed_mode);
		intel_dp->panel_fixed_mode = NULL;
	}
	i2c_del_adapter(&intel_dp->adapter);
	drm_connector_cleanup(connector);
	kfree(gma_connector);
}
/* Encoder helper callbacks for the CDV DP/eDP encoder. */
static const struct drm_encoder_helper_funcs cdv_intel_dp_helper_funcs = {
	.dpms = cdv_intel_dp_dpms,
	.mode_fixup = cdv_intel_dp_mode_fixup,
	.prepare = cdv_intel_dp_prepare,
	.mode_set = cdv_intel_dp_mode_set,
	.commit = cdv_intel_dp_commit,
};
/* Connector callbacks (detect/property handling/teardown). */
static const struct drm_connector_funcs cdv_intel_dp_connector_funcs = {
	.dpms = drm_helper_connector_dpms,
	.detect = cdv_intel_dp_detect,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.set_property = cdv_intel_dp_set_property,
	.destroy = cdv_intel_dp_destroy,
};
/* Connector probe helpers (mode enumeration and validation). */
static const struct drm_connector_helper_funcs cdv_intel_dp_connector_helper_funcs = {
	.get_modes = cdv_intel_dp_get_modes,
	.mode_valid = cdv_intel_dp_mode_valid,
	.best_encoder = gma_best_encoder,
};
/* Attach the driver-specific force_audio and broadcast_rgb properties. */
static void cdv_intel_dp_add_properties(struct drm_connector *connector)
{
	cdv_intel_attach_force_audio_property(connector);
	cdv_intel_attach_broadcast_rgb_property(connector);
}
/*
 * Scan the VBT child device table for an eDP panel on the DP-C port
 * (PORT_IDPC).  Returns true if one is described there.
 */
static bool cdv_intel_dpc_is_edp(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct child_device_config *child;
	int n;

	/* child_dev_num == 0 simply yields no iterations. */
	for (n = 0; n < dev_priv->child_dev_num; n++) {
		child = dev_priv->child_dev + n;
		if (child->dvo_port == PORT_IDPC &&
		    child->device_type == DEVICE_TYPE_eDP)
			return true;
	}
	return false;
}
/*
 * Cedarview display clock gating.
 *
 * Clock gating on these display units must be disabled to get correct
 * behaviour while enabling DP/eDP.  TODO - investigate if we can turn it
 * back to normality after enabling.
 */
static void cdv_disable_intel_clock_gating(struct drm_device *dev)
{
	u32 reg_value;
	reg_value = REG_READ(DSPCLK_GATE_D);
	reg_value |= (DPUNIT_PIPEB_GATE_DISABLE |
			DPUNIT_PIPEA_GATE_DISABLE |
			DPCUNIT_CLOCK_GATE_DISABLE |
			DPLSUNIT_CLOCK_GATE_DISABLE |
			DPOUNIT_CLOCK_GATE_DISABLE |
			DPIOUNIT_CLOCK_GATE_DISABLE);
	REG_WRITE(DSPCLK_GATE_D, reg_value);
	/* Fixed delay after the write; presumably lets the change settle. */
	udelay(500);
}
/*
 * Create and register the DP (or eDP) encoder/connector pair for one
 * output register (DP_B or DP_C).
 *
 * Allocates the encoder, connector and private state, wires up the DRM
 * callbacks and DDC bus, disables display clock gating, and — for eDP —
 * reads the panel power sequencing delays from the PP_* registers and
 * probes the DPCD to confirm a panel is really attached.  On DPCD read
 * failure the freshly created objects are torn down again.
 */
void
cdv_intel_dp_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev, int output_reg)
{
	struct gma_encoder *gma_encoder;
	struct gma_connector *gma_connector;
	struct drm_connector *connector;
	struct drm_encoder *encoder;
	struct cdv_intel_dp *intel_dp;
	const char *name = NULL;
	int type = DRM_MODE_CONNECTOR_DisplayPort;
	gma_encoder = kzalloc(sizeof(struct gma_encoder), GFP_KERNEL);
	if (!gma_encoder)
		return;
	gma_connector = kzalloc(sizeof(struct gma_connector), GFP_KERNEL);
	if (!gma_connector)
		goto err_connector;
	intel_dp = kzalloc(sizeof(struct cdv_intel_dp), GFP_KERNEL);
	if (!intel_dp)
		goto err_priv;
	/* The VBT decides whether port C carries eDP rather than DP. */
	if ((output_reg == DP_C) && cdv_intel_dpc_is_edp(dev))
		type = DRM_MODE_CONNECTOR_eDP;
	connector = &gma_connector->base;
	encoder = &gma_encoder->base;
	drm_connector_init(dev, connector, &cdv_intel_dp_connector_funcs, type);
	drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_TMDS);
	gma_connector_attach_encoder(gma_connector, gma_encoder);
	if (type == DRM_MODE_CONNECTOR_DisplayPort)
		gma_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
	else
		gma_encoder->type = INTEL_OUTPUT_EDP;
	gma_encoder->dev_priv=intel_dp;
	intel_dp->encoder = gma_encoder;
	intel_dp->output_reg = output_reg;
	drm_encoder_helper_add(encoder, &cdv_intel_dp_helper_funcs);
	drm_connector_helper_add(connector, &cdv_intel_dp_connector_helper_funcs);
	connector->polled = DRM_CONNECTOR_POLL_HPD;
	connector->interlace_allowed = false;
	connector->doublescan_allowed = false;
	/* Set up the DDC bus. */
	switch (output_reg) {
	case DP_B:
		name = "DPDDC-B";
		gma_encoder->ddi_select = (DP_MASK | DDI0_SELECT);
		break;
	case DP_C:
		name = "DPDDC-C";
		gma_encoder->ddi_select = (DP_MASK | DDI1_SELECT);
		break;
	}
	cdv_disable_intel_clock_gating(dev);
	cdv_intel_dp_i2c_init(gma_connector, gma_encoder, name);
	/* FIXME:fail check */
	cdv_intel_dp_add_properties(connector);
	if (is_edp(gma_encoder)) {
		int ret;
		struct edp_power_seq cur;
		u32 pp_on, pp_off, pp_div;
		u32 pwm_ctrl;
		/* Unlock the panel power sequencing registers. */
		pp_on = REG_READ(PP_CONTROL);
		pp_on &= ~PANEL_UNLOCK_MASK;
		pp_on |= PANEL_UNLOCK_REGS;
		REG_WRITE(PP_CONTROL, pp_on);
		pwm_ctrl = REG_READ(BLC_PWM_CTL2);
		pwm_ctrl |= PWM_PIPE_B;
		REG_WRITE(BLC_PWM_CTL2, pwm_ctrl);
		pp_on = REG_READ(PP_ON_DELAYS);
		pp_off = REG_READ(PP_OFF_DELAYS);
		pp_div = REG_READ(PP_DIVISOR);
		/* Pull timing values out of registers */
		cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
			PANEL_POWER_UP_DELAY_SHIFT;
		cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
			PANEL_LIGHT_ON_DELAY_SHIFT;
		cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
			PANEL_LIGHT_OFF_DELAY_SHIFT;
		cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
			PANEL_POWER_DOWN_DELAY_SHIFT;
		cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
			       PANEL_POWER_CYCLE_DELAY_SHIFT);
		DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
			      cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
		/* Register values are in 100 us units; convert to ms. */
		intel_dp->panel_power_up_delay = cur.t1_t3 / 10;
		intel_dp->backlight_on_delay = cur.t8 / 10;
		intel_dp->backlight_off_delay = cur.t9 / 10;
		intel_dp->panel_power_down_delay = cur.t10 / 10;
		intel_dp->panel_power_cycle_delay = (cur.t11_t12 - 1) * 100;
		DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
			      intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
			      intel_dp->panel_power_cycle_delay);
		DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
			      intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
		/* Probe the DPCD to confirm a panel is actually attached. */
		cdv_intel_edp_panel_vdd_on(gma_encoder);
		ret = cdv_intel_dp_aux_native_read(gma_encoder, DP_DPCD_REV,
					       intel_dp->dpcd,
					       sizeof(intel_dp->dpcd));
		cdv_intel_edp_panel_vdd_off(gma_encoder);
		if (ret <= 0) {
			/* if this fails, presume the device is a ghost */
			DRM_INFO("failed to retrieve link info, disabling eDP\n");
			drm_encoder_cleanup(encoder);
			cdv_intel_dp_destroy(connector);
			goto err_connector;
		} else {
			DRM_DEBUG_KMS("DPCD: Rev=%x LN_Rate=%x LN_CNT=%x LN_DOWNSP=%x\n",
				      intel_dp->dpcd[0], intel_dp->dpcd[1],
				      intel_dp->dpcd[2], intel_dp->dpcd[3]);
		}
		/* The CDV reference driver moves panel backlight setup into the displays that
		   have a backlight: this is a good idea and one we should probably adopt, however
		   we need to migrate all the drivers before we can do that */
		/*cdv_intel_panel_setup_backlight(dev); */
	}
	return;
err_priv:
	kfree(gma_connector);
err_connector:
	kfree(gma_encoder);
}
| linux-master | drivers/gpu/drm/gma500/cdv_intel_dp.c |
/*
* Copyright 2006 Dave Airlie <[email protected]>
* Copyright © 2006-2007 Intel Corporation
* Jesse Barnes <[email protected]>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors:
* Eric Anholt <[email protected]>
*/
#include <linux/delay.h>
#include <linux/i2c.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_modeset_helper_vtables.h>
#include "psb_drv.h"
#include "psb_intel_drv.h"
#include "psb_intel_reg.h"
#include "psb_intel_sdvo_regs.h"
/* Output-flag masks grouping the SDVO output types by connector class. */
#define SDVO_TMDS_MASK (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)
#define SDVO_RGB_MASK  (SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1)
#define SDVO_LVDS_MASK (SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_LVDS1)
#define SDVO_TV_MASK   (SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_SVID0)
#define SDVO_OUTPUT_MASK (SDVO_TMDS_MASK | SDVO_RGB_MASK | SDVO_LVDS_MASK |\
			SDVO_TV_MASK)
/* Classify a psb_intel_sdvo_connector by its output_flag. */
#define IS_TV(c)	(c->output_flag & SDVO_TV_MASK)
#define IS_TMDS(c)	(c->output_flag & SDVO_TMDS_MASK)
#define IS_LVDS(c)	(c->output_flag & SDVO_LVDS_MASK)
#define IS_TV_OR_LVDS(c) (c->output_flag & (SDVO_TV_MASK | SDVO_LVDS_MASK))
/* Human-readable names for the TV_FORMAT property; index order matches
 * the bit order reported by SDVO_CMD_GET_SUPPORTED_TV_FORMATS. */
static const char *tv_format_names[] = {
	"NTSC_M"   , "NTSC_J"  , "NTSC_443",
	"PAL_B"    , "PAL_D"   , "PAL_G"   ,
	"PAL_H"    , "PAL_I"   , "PAL_M"   ,
	"PAL_N"    , "PAL_NC"  , "PAL_60"  ,
	"SECAM_B"  , "SECAM_D" , "SECAM_G" ,
	"SECAM_K"  , "SECAM_K1", "SECAM_L" ,
	"SECAM_60"
};
/* Per-encoder state for one SDVO device (SDVOB or SDVOC). */
struct psb_intel_sdvo {
	struct gma_encoder base;
	/* i2c bus carrying SDVO commands, plus the device's slave address. */
	struct i2c_adapter *i2c;
	u8 slave_addr;
	/* Proxy DDC adapter routed through the SDVO control bus switch. */
	struct i2c_adapter ddc;
	/* Register for the SDVO device: SDVOB or SDVOC */
	int sdvo_reg;
	/* Active outputs controlled by this SDVO output */
	uint16_t controlled_output;
	/*
	 * Capabilities of the SDVO device returned by
	 * i830_sdvo_get_capabilities()
	 */
	struct psb_intel_sdvo_caps caps;
	/* Pixel clock limitations reported by the SDVO device, in kHz */
	int pixel_clock_min, pixel_clock_max;
	/*
	 * For multiple function SDVO device,
	 * this is for current attached outputs.
	 */
	uint16_t attached_output;
	/**
	 * This is used to select the color range of RBG outputs in HDMI mode.
	 * It is only valid when using TMDS encoding and 8 bit per color mode.
	 */
	uint32_t color_range;
	/**
	 * This is set if we're going to treat the device as TV-out.
	 *
	 * While we have these nice friendly flags for output types that ought
	 * to decide this for us, the S-Video output on our HDMI+S-Video card
	 * shows up as RGB1 (VGA).
	 */
	bool is_tv;
	/* This is for current tv format name */
	int tv_format_index;
	/**
	 * This is set if we treat the device as HDMI, instead of DVI.
	 */
	bool is_hdmi;
	bool has_hdmi_monitor;
	bool has_hdmi_audio;
	/**
	 * This is set if we detect output of sdvo device as LVDS and
	 * have a valid fixed mode to use with the panel.
	 */
	bool is_lvds;
	/**
	 * This is sdvo fixed panel mode pointer
	 */
	struct drm_display_mode *sdvo_lvds_fixed_mode;
	/* DDC bus used by this SDVO encoder */
	uint8_t ddc_bus;
	/* Clock rate multiplier currently programmed (1x/2x/4x). */
	u8 pixel_multiplier;
	/* Input timings for adjusted_mode */
	struct psb_intel_sdvo_dtd input_dtd;
	/* Saved SDVO output states */
	uint32_t saveSDVO; /* Can be SDVOB or SDVOC depending on sdvo_reg */
};
/* Per-connector state for an output driven by an SDVO encoder. */
struct psb_intel_sdvo_connector {
	struct gma_connector base;
	/* Mark the type of connector */
	uint16_t output_flag;
	/* User setting from the force_audio property. */
	int force_audio;
	/* This contains all current supported TV format */
	u8 tv_format_supported[ARRAY_SIZE(tv_format_names)];
	int format_supported_num;
	struct drm_property *tv_format;
	/* add the property for the SDVO-TV */
	struct drm_property *left;
	struct drm_property *right;
	struct drm_property *top;
	struct drm_property *bottom;
	struct drm_property *hpos;
	struct drm_property *vpos;
	struct drm_property *contrast;
	struct drm_property *saturation;
	struct drm_property *hue;
	struct drm_property *sharpness;
	struct drm_property *flicker_filter;
	struct drm_property *flicker_filter_adaptive;
	struct drm_property *flicker_filter_2d;
	struct drm_property *tv_chroma_filter;
	struct drm_property *tv_luma_filter;
	struct drm_property *dot_crawl;
	/* add the property for the SDVO-TV/LVDS */
	struct drm_property *brightness;
	/* Add variable to record current setting for the above property */
	u32 left_margin, right_margin, top_margin, bottom_margin;
	/* this is to get the range of margin.*/
	u32 max_hscan,  max_vscan;
	u32 max_hpos, cur_hpos;
	u32 max_vpos, cur_vpos;
	u32 cur_brightness, max_brightness;
	u32 cur_contrast,	max_contrast;
	u32 cur_saturation, max_saturation;
	u32 cur_hue,	max_hue;
	u32 cur_sharpness,	max_sharpness;
	u32 cur_flicker_filter,		max_flicker_filter;
	u32 cur_flicker_filter_adaptive,	max_flicker_filter_adaptive;
	u32 cur_flicker_filter_2d,		max_flicker_filter_2d;
	u32 cur_tv_chroma_filter,	max_tv_chroma_filter;
	u32 cur_tv_luma_filter,	max_tv_luma_filter;
	u32 cur_dot_crawl,	max_dot_crawl;
};
/* Recover the SDVO encoder state from a generic DRM encoder. */
static struct psb_intel_sdvo *to_psb_intel_sdvo(struct drm_encoder *encoder)
{
	return container_of(encoder, struct psb_intel_sdvo, base.base);
}
/* Recover the SDVO encoder state from the connector's attached encoder. */
static struct psb_intel_sdvo *intel_attached_sdvo(struct drm_connector *connector)
{
	return container_of(gma_attached_encoder(connector),
			    struct psb_intel_sdvo, base);
}
/* Recover the SDVO connector state from a generic DRM connector. */
static struct psb_intel_sdvo_connector *to_psb_intel_sdvo_connector(struct drm_connector *connector)
{
	return container_of(to_gma_connector(connector), struct psb_intel_sdvo_connector, base);
}
static bool
psb_intel_sdvo_output_setup(struct psb_intel_sdvo *psb_intel_sdvo, uint16_t flags);
static bool
psb_intel_sdvo_tv_create_property(struct psb_intel_sdvo *psb_intel_sdvo,
				  struct psb_intel_sdvo_connector *psb_intel_sdvo_connector,
				  int type);
static bool
psb_intel_sdvo_create_enhance_property(struct psb_intel_sdvo *psb_intel_sdvo,
				       struct psb_intel_sdvo_connector *psb_intel_sdvo_connector);
/*
 * Writes the SDVOB or SDVOC with the given value, but always writes both
 * SDVOB and SDVOC to work around apparent hardware issues (according to
 * comments in the BIOS).
 */
static void psb_intel_sdvo_write_sdvox(struct psb_intel_sdvo *psb_intel_sdvo, u32 val)
{
	struct drm_device *dev = psb_intel_sdvo->base.base.dev;
	u32 bval = val, cval = val;
	int i, j;
	/* On Moorestown the registers are mirrored behind an aux space, so
	 * perform the sequence once more with the aux variant (j == 1). */
	int need_aux = IS_MRST(dev) ? 1 : 0;
	for (j = 0; j <= need_aux; j++) {
		/* Preserve the current value of whichever register is NOT
		 * the one being targeted, so both can be rewritten below. */
		if (psb_intel_sdvo->sdvo_reg == SDVOB)
			cval = REG_READ_WITH_AUX(SDVOC, j);
		else
			bval = REG_READ_WITH_AUX(SDVOB, j);
		/*
		 * Write the registers twice for luck. Sometimes,
		 * writing them only once doesn't appear to 'stick'.
		 * The BIOS does this too. Yay, magic
		 */
		for (i = 0; i < 2; i++) {
			REG_WRITE_WITH_AUX(SDVOB, bval, j);
			REG_READ_WITH_AUX(SDVOB, j);
			REG_WRITE_WITH_AUX(SDVOC, cval, j);
			REG_READ_WITH_AUX(SDVOC, j);
		}
	}
}
/*
 * Read one byte from an SDVO register over i2c: a one-byte address
 * write followed by a one-byte read.  Returns true on success.
 */
static bool psb_intel_sdvo_read_byte(struct psb_intel_sdvo *psb_intel_sdvo, u8 addr, u8 *ch)
{
	struct i2c_msg msgs[2];
	int ret;

	/* Message 0: select the register to read. */
	msgs[0].addr = psb_intel_sdvo->slave_addr;
	msgs[0].flags = 0;
	msgs[0].len = 1;
	msgs[0].buf = &addr;
	/* Message 1: read the register value back. */
	msgs[1].addr = psb_intel_sdvo->slave_addr;
	msgs[1].flags = I2C_M_RD;
	msgs[1].len = 1;
	msgs[1].buf = ch;

	ret = i2c_transfer(psb_intel_sdvo->i2c, msgs, 2);
	if (ret == 2)
		return true;

	DRM_DEBUG_KMS("i2c transfer returned %d\n", ret);
	return false;
}
#define SDVO_CMD_NAME_ENTRY(cmd) {cmd, #cmd}
/** Mapping of command numbers to names, for debug output.  Lookups take
 * the first matching entry, so each opcode should appear exactly once. */
static const struct _sdvo_cmd_name {
	u8 cmd;
	const char *name;
} sdvo_cmd_names[] = {
    SDVO_CMD_NAME_ENTRY(SDVO_CMD_RESET),
    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DEVICE_CAPS),
    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FIRMWARE_REV),
    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TRAINED_INPUTS),
    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_OUTPUTS),
    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_OUTPUTS),
    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_IN_OUT_MAP),
    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_IN_OUT_MAP),
    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ATTACHED_DISPLAYS),
    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HOT_PLUG_SUPPORT),
    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_HOT_PLUG),
    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_HOT_PLUG),
    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INTERRUPT_EVENT_SOURCE),
    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_INPUT),
    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_OUTPUT),
    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART1),
    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART2),
    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART1),
    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART2),
    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART1),
    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART2),
    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART1),
    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART2),
    SDVO_CMD_NAME_ENTRY(SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING),
    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1),
    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2),
    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE),
    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE),
    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_CLOCK_RATE_MULTS),
    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_CLOCK_RATE_MULT),
    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CLOCK_RATE_MULT),
    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_TV_FORMATS),
    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_FORMAT),
    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_FORMAT),
    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_POWER_STATES),
    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_POWER_STATE),
    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ENCODER_POWER_STATE),
    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_DISPLAY_POWER_STATE),
    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTROL_BUS_SWITCH),
    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT),
    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SCALED_HDTV_RESOLUTION_SUPPORT),
    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS),
    /* Add the op code for SDVO enhancements */
    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_HPOS),
    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HPOS),
    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HPOS),
    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_VPOS),
    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_VPOS),
    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_VPOS),
    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_SATURATION),
    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SATURATION),
    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_SATURATION),
    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_HUE),
    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HUE),
    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HUE),
    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_CONTRAST),
    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_CONTRAST),
    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTRAST),
    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_BRIGHTNESS),
    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_BRIGHTNESS),
    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_BRIGHTNESS),
    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_OVERSCAN_H),
    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OVERSCAN_H),
    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OVERSCAN_H),
    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_OVERSCAN_V),
    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OVERSCAN_V),
    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OVERSCAN_V),
    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER),
    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER),
    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER),
    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER_ADAPTIVE),
    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER_ADAPTIVE),
    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER_ADAPTIVE),
    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER_2D),
    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER_2D),
    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER_2D),
    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_SHARPNESS),
    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SHARPNESS),
    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_SHARPNESS),
    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DOT_CRAWL),
    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_DOT_CRAWL),
    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_TV_CHROMA_FILTER),
    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_CHROMA_FILTER),
    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_CHROMA_FILTER),
    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_TV_LUMA_FILTER),
    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_LUMA_FILTER),
    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_LUMA_FILTER),
    /* HDMI op code */
    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPP_ENCODE),
    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ENCODE),
    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ENCODE),
    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_PIXEL_REPLI),
    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PIXEL_REPLI),
    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_COLORIMETRY_CAP),
    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_COLORIMETRY),
    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_COLORIMETRY),
    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_AUDIO_ENCRYPT_PREFER),
    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_AUDIO_STAT),
    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_AUDIO_STAT),
    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_INDEX),
    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_INDEX),
    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_INFO),
    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_AV_SPLIT),
    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_AV_SPLIT),
    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_TXRATE),
    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_TXRATE),
    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_DATA),
    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_DATA),
};
#define IS_SDVOB(reg)	(reg == SDVOB)
#define SDVO_NAME(svdo) (IS_SDVOB((svdo)->sdvo_reg) ? "SDVOB" : "SDVOC")
/*
 * Log an outgoing SDVO command: argument bytes in hex (padded to eight
 * columns), then the symbolic command name from sdvo_cmd_names, or the
 * raw opcode if it is unknown.
 */
static void psb_intel_sdvo_debug_write(struct psb_intel_sdvo *psb_intel_sdvo,
				       u8 cmd, const void *args, int args_len)
{
	struct drm_device *dev = psb_intel_sdvo->base.base.dev;
	int i, pos = 0;
	char buffer[73];
#define BUF_PRINT(args...) \
	pos += snprintf(buffer + pos, max_t(int, sizeof(buffer) - pos, 0), args)
	for (i = 0; i < args_len; i++) {
		BUF_PRINT("%02X ", ((u8 *)args)[i]);
	}
	/* Pad short argument lists so the name column lines up. */
	for (; i < 8; i++) {
		BUF_PRINT("   ");
	}
	for (i = 0; i < ARRAY_SIZE(sdvo_cmd_names); i++) {
		if (cmd == sdvo_cmd_names[i].cmd) {
			BUF_PRINT("(%s)", sdvo_cmd_names[i].name);
			break;
		}
	}
	if (i == ARRAY_SIZE(sdvo_cmd_names))
		BUF_PRINT("(%02X)", cmd);
	drm_WARN_ON(dev, pos >= sizeof(buffer) - 1);
#undef BUF_PRINT
	DRM_DEBUG_KMS("%s: W: %02X %s\n", SDVO_NAME(psb_intel_sdvo), cmd, buffer);
}
/* Printable names for the SDVO command status codes, indexed by status. */
static const char *cmd_status_names[] = {
	"Power on",
	"Success",
	"Not supported",
	"Invalid arg",
	"Pending",
	"Target not specified",
	"Scaling not supported"
};
/* Maximum number of argument bytes a single SDVO command may carry. */
#define MAX_ARG_LEN 32
/*
 * Send an SDVO command over i2c.  Each argument byte is written to its
 * own argument register (counting down from SDVO_I2C_ARG_0), then the
 * opcode is written, and finally the status register address is written
 * and read back.  Returns false if the i2c transfer fails.
 */
static bool psb_intel_sdvo_write_cmd(struct psb_intel_sdvo *psb_intel_sdvo, u8 cmd,
				     const void *args, int args_len)
{
	u8 buf[MAX_ARG_LEN*2 + 2], status;
	struct i2c_msg msgs[MAX_ARG_LEN + 3];
	int i, ret;
	if (args_len > MAX_ARG_LEN) {
		DRM_ERROR("Need to increase arg length\n");
		return false;
	}
	psb_intel_sdvo_debug_write(psb_intel_sdvo, cmd, args, args_len);
	/* One two-byte message (register, value) per argument byte. */
	for (i = 0; i < args_len; i++) {
		msgs[i].addr = psb_intel_sdvo->slave_addr;
		msgs[i].flags = 0;
		msgs[i].len = 2;
		msgs[i].buf = buf + 2 *i;
		buf[2*i + 0] = SDVO_I2C_ARG_0 - i;
		buf[2*i + 1] = ((u8*)args)[i];
	}
	/* Then the opcode itself. */
	msgs[i].addr = psb_intel_sdvo->slave_addr;
	msgs[i].flags = 0;
	msgs[i].len = 2;
	msgs[i].buf = buf + 2*i;
	buf[2*i + 0] = SDVO_I2C_OPCODE;
	buf[2*i + 1] = cmd;
	/* the following two are to read the response */
	status = SDVO_I2C_CMD_STATUS;
	msgs[i+1].addr = psb_intel_sdvo->slave_addr;
	msgs[i+1].flags = 0;
	msgs[i+1].len = 1;
	msgs[i+1].buf = &status;
	msgs[i+2].addr = psb_intel_sdvo->slave_addr;
	msgs[i+2].flags = I2C_M_RD;
	msgs[i+2].len = 1;
	msgs[i+2].buf = &status;
	ret = i2c_transfer(psb_intel_sdvo->i2c, msgs, i+3);
	if (ret < 0) {
		DRM_DEBUG_KMS("I2c transfer returned %d\n", ret);
		return false;
	}
	if (ret != i+3) {
		/* failure in I2C transfer */
		DRM_DEBUG_KMS("I2c transfer returned %d/%d\n", ret, i+3);
		return false;
	}
	return true;
}
/*
 * Poll the SDVO command status register until the previously issued
 * command completes, then read back response_len bytes of the response.
 * Returns false on i2c failure or a non-success command status.
 */
static bool psb_intel_sdvo_read_response(struct psb_intel_sdvo *psb_intel_sdvo,
					 void *response, int response_len)
{
	struct drm_device *dev = psb_intel_sdvo->base.base.dev;
	/* Initialized so the log_fail path never prints stack garbage if
	 * we bail out before anything is formatted into the buffer. */
	char buffer[73] = "";
	int i, pos = 0;
	u8 retry = 5;
	u8 status;
	/*
	 * The documentation states that all commands will be
	 * processed within 15µs, and that we need only poll
	 * the status byte a maximum of 3 times in order for the
	 * command to be complete.
	 *
	 * Check 5 times in case the hardware failed to read the docs.
	 */
	if (!psb_intel_sdvo_read_byte(psb_intel_sdvo,
				      SDVO_I2C_CMD_STATUS,
				      &status))
		goto log_fail;
	while ((status == SDVO_CMD_STATUS_PENDING ||
		status == SDVO_CMD_STATUS_TARGET_NOT_SPECIFIED) && retry--) {
		udelay(15);
		if (!psb_intel_sdvo_read_byte(psb_intel_sdvo,
					      SDVO_I2C_CMD_STATUS,
					      &status))
			goto log_fail;
	}
#define BUF_PRINT(args...) \
	pos += snprintf(buffer + pos, max_t(int, sizeof(buffer) - pos, 0), args)
	if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP)
		BUF_PRINT("(%s)", cmd_status_names[status]);
	else
		BUF_PRINT("(??? %d)", status);
	if (status != SDVO_CMD_STATUS_SUCCESS)
		goto log_fail;
	/* Read the command response */
	for (i = 0; i < response_len; i++) {
		if (!psb_intel_sdvo_read_byte(psb_intel_sdvo,
					      SDVO_I2C_RETURN_0 + i,
					      &((u8 *)response)[i]))
			goto log_fail;
		BUF_PRINT(" %02X", ((u8 *)response)[i]);
	}
	drm_WARN_ON(dev, pos >= sizeof(buffer) - 1);
#undef BUF_PRINT
	DRM_DEBUG_KMS("%s: R: %s\n", SDVO_NAME(psb_intel_sdvo), buffer);
	return true;
log_fail:
	DRM_DEBUG_KMS("%s: R: ... failed %s\n",
		      SDVO_NAME(psb_intel_sdvo), buffer);
	return false;
}
/*
 * Pick the clock rate multiplier for a mode:
 * >= 100 MHz -> 1x, 50..100 MHz -> 2x, below that -> 4x.
 */
static int psb_intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode)
{
	if (mode->clock >= 100000)
		return 1;
	if (mode->clock >= 50000)
		return 2;
	return 4;
}
/*
 * Route the SDVO device's DDC pins to the given bus.  Note: only the
 * command is sent; no status response is consumed here.
 */
static bool psb_intel_sdvo_set_control_bus_switch(struct psb_intel_sdvo *psb_intel_sdvo,
						  u8 ddc_bus)
{
	/* This must be the immediately preceding write before the i2c xfer */
	return psb_intel_sdvo_write_cmd(psb_intel_sdvo,
					SDVO_CMD_SET_CONTROL_BUS_SWITCH,
					&ddc_bus, 1);
}
/* Issue a command carrying data and consume its (empty) response. */
static bool psb_intel_sdvo_set_value(struct psb_intel_sdvo *psb_intel_sdvo, u8 cmd, const void *data, int len)
{
	return psb_intel_sdvo_write_cmd(psb_intel_sdvo, cmd, data, len) &&
	       psb_intel_sdvo_read_response(psb_intel_sdvo, NULL, 0);
}
/* Issue a query command and read back len bytes of response into value. */
static bool
psb_intel_sdvo_get_value(struct psb_intel_sdvo *psb_intel_sdvo, u8 cmd, void *value, int len)
{
	bool sent = psb_intel_sdvo_write_cmd(psb_intel_sdvo, cmd, NULL, 0);

	if (!sent)
		return false;

	return psb_intel_sdvo_read_response(psb_intel_sdvo, value, len);
}
/* Select input 0 as the target for subsequent input-related commands. */
static bool psb_intel_sdvo_set_target_input(struct psb_intel_sdvo *psb_intel_sdvo)
{
	struct psb_intel_sdvo_set_target_input_args targets = {0};
	return psb_intel_sdvo_set_value(psb_intel_sdvo,
					SDVO_CMD_SET_TARGET_INPUT,
					&targets, sizeof(targets));
}
/*
 * Return whether each input is trained.
 *
 * This function is making an assumption about the layout of the response,
 * which should be checked against the docs.
 */
static bool psb_intel_sdvo_get_trained_inputs(struct psb_intel_sdvo *psb_intel_sdvo, bool *input_1, bool *input_2)
{
	struct psb_intel_sdvo_get_trained_inputs_response response;
	BUILD_BUG_ON(sizeof(response) != 1);
	if (!psb_intel_sdvo_get_value(psb_intel_sdvo, SDVO_CMD_GET_TRAINED_INPUTS,
				      &response, sizeof(response)))
		return false;
	*input_1 = response.input0_trained;
	*input_2 = response.input1_trained;
	return true;
}
/* Enable the given set of outputs (bitmask of SDVO_OUTPUT_* flags). */
static bool psb_intel_sdvo_set_active_outputs(struct psb_intel_sdvo *psb_intel_sdvo,
					      u16 outputs)
{
	return psb_intel_sdvo_set_value(psb_intel_sdvo,
					SDVO_CMD_SET_ACTIVE_OUTPUTS,
					&outputs, sizeof(outputs));
}
/* Translate a DRM DPMS mode to an SDVO encoder power state and apply it. */
static bool psb_intel_sdvo_set_encoder_power_state(struct psb_intel_sdvo *psb_intel_sdvo,
						   int mode)
{
	u8 state = SDVO_ENCODER_STATE_ON;
	switch (mode) {
	case DRM_MODE_DPMS_ON:
		state = SDVO_ENCODER_STATE_ON;
		break;
	case DRM_MODE_DPMS_STANDBY:
		state = SDVO_ENCODER_STATE_STANDBY;
		break;
	case DRM_MODE_DPMS_SUSPEND:
		state = SDVO_ENCODER_STATE_SUSPEND;
		break;
	case DRM_MODE_DPMS_OFF:
		state = SDVO_ENCODER_STATE_OFF;
		break;
	}
	return psb_intel_sdvo_set_value(psb_intel_sdvo,
		SDVO_CMD_SET_ENCODER_POWER_STATE, &state, sizeof(state));
}
/*
 * Query the device's supported input pixel clock range and return it in
 * kHz via *clock_min/*clock_max.
 */
static bool psb_intel_sdvo_get_input_pixel_clock_range(struct psb_intel_sdvo *psb_intel_sdvo,
						       int *clock_min,
						       int *clock_max)
{
	struct psb_intel_sdvo_pixel_clock_range clocks;
	BUILD_BUG_ON(sizeof(clocks) != 4);
	if (!psb_intel_sdvo_get_value(psb_intel_sdvo,
				      SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE,
				      &clocks, sizeof(clocks)))
		return false;
	/* Convert the values from units of 10 kHz to kHz. */
	*clock_min = clocks.min * 10;
	*clock_max = clocks.max * 10;
	return true;
}
/* Select which outputs subsequent output-related commands apply to. */
static bool psb_intel_sdvo_set_target_output(struct psb_intel_sdvo *psb_intel_sdvo,
					     u16 outputs)
{
	return psb_intel_sdvo_set_value(psb_intel_sdvo,
					SDVO_CMD_SET_TARGET_OUTPUT,
					&outputs, sizeof(outputs));
}
/*
 * Write both halves of a DTD; relies on the PART2 opcode immediately
 * following the PART1 opcode (cmd + 1).
 */
static bool psb_intel_sdvo_set_timing(struct psb_intel_sdvo *psb_intel_sdvo, u8 cmd,
				      struct psb_intel_sdvo_dtd *dtd)
{
	return psb_intel_sdvo_set_value(psb_intel_sdvo, cmd, &dtd->part1, sizeof(dtd->part1)) &&
		psb_intel_sdvo_set_value(psb_intel_sdvo, cmd + 1, &dtd->part2, sizeof(dtd->part2));
}
/* Program the input (pipe-side) timings. */
static bool psb_intel_sdvo_set_input_timing(struct psb_intel_sdvo *psb_intel_sdvo,
					    struct psb_intel_sdvo_dtd *dtd)
{
	return psb_intel_sdvo_set_timing(psb_intel_sdvo,
					 SDVO_CMD_SET_INPUT_TIMINGS_PART1, dtd);
}
/* Program the output (display-side) timings. */
static bool psb_intel_sdvo_set_output_timing(struct psb_intel_sdvo *psb_intel_sdvo,
					     struct psb_intel_sdvo_dtd *dtd)
{
	return psb_intel_sdvo_set_timing(psb_intel_sdvo,
					 SDVO_CMD_SET_OUTPUT_TIMINGS_PART1, dtd);
}
/*
 * Ask the device to compute its preferred input timing for the given
 * clock (10 kHz units) and active size; requests scaling when an LVDS
 * panel's fixed mode differs from the requested size.
 */
static bool
psb_intel_sdvo_create_preferred_input_timing(struct psb_intel_sdvo *psb_intel_sdvo,
					     uint16_t clock,
					     uint16_t width,
					     uint16_t height)
{
	struct psb_intel_sdvo_preferred_input_timing_args args;
	memset(&args, 0, sizeof(args));
	args.clock = clock;
	args.width = width;
	args.height = height;
	args.interlace = 0;
	if (psb_intel_sdvo->is_lvds &&
	    (psb_intel_sdvo->sdvo_lvds_fixed_mode->hdisplay != width ||
	     psb_intel_sdvo->sdvo_lvds_fixed_mode->vdisplay != height))
		args.scaled = 1;
	return psb_intel_sdvo_set_value(psb_intel_sdvo,
					SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING,
					&args, sizeof(args));
}
/* Read back the preferred input timing computed by the device. */
static bool psb_intel_sdvo_get_preferred_input_timing(struct psb_intel_sdvo *psb_intel_sdvo,
						      struct psb_intel_sdvo_dtd *dtd)
{
	BUILD_BUG_ON(sizeof(dtd->part1) != 8);
	BUILD_BUG_ON(sizeof(dtd->part2) != 8);
	return psb_intel_sdvo_get_value(psb_intel_sdvo, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1,
					&dtd->part1, sizeof(dtd->part1)) &&
		psb_intel_sdvo_get_value(psb_intel_sdvo, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2,
					 &dtd->part2, sizeof(dtd->part2));
}
/* Program the clock rate multiplier (SDVO_CLOCK_RATE_MULT_* value). */
static bool psb_intel_sdvo_set_clock_rate_mult(struct psb_intel_sdvo *psb_intel_sdvo, u8 val)
{
	return psb_intel_sdvo_set_value(psb_intel_sdvo, SDVO_CMD_SET_CLOCK_RATE_MULT, &val, 1);
}
static void psb_intel_sdvo_get_dtd_from_mode(struct psb_intel_sdvo_dtd *dtd,
const struct drm_display_mode *mode)
{
uint16_t width, height;
uint16_t h_blank_len, h_sync_len, v_blank_len, v_sync_len;
uint16_t h_sync_offset, v_sync_offset;
width = mode->crtc_hdisplay;
height = mode->crtc_vdisplay;
/* do some mode translations */
h_blank_len = mode->crtc_hblank_end - mode->crtc_hblank_start;
h_sync_len = mode->crtc_hsync_end - mode->crtc_hsync_start;
v_blank_len = mode->crtc_vblank_end - mode->crtc_vblank_start;
v_sync_len = mode->crtc_vsync_end - mode->crtc_vsync_start;
h_sync_offset = mode->crtc_hsync_start - mode->crtc_hblank_start;
v_sync_offset = mode->crtc_vsync_start - mode->crtc_vblank_start;
dtd->part1.clock = mode->clock / 10;
dtd->part1.h_active = width & 0xff;
dtd->part1.h_blank = h_blank_len & 0xff;
dtd->part1.h_high = (((width >> 8) & 0xf) << 4) |
((h_blank_len >> 8) & 0xf);
dtd->part1.v_active = height & 0xff;
dtd->part1.v_blank = v_blank_len & 0xff;
dtd->part1.v_high = (((height >> 8) & 0xf) << 4) |
((v_blank_len >> 8) & 0xf);
dtd->part2.h_sync_off = h_sync_offset & 0xff;
dtd->part2.h_sync_width = h_sync_len & 0xff;
dtd->part2.v_sync_off_width = (v_sync_offset & 0xf) << 4 |
(v_sync_len & 0xf);
dtd->part2.sync_off_width_high = ((h_sync_offset & 0x300) >> 2) |
((h_sync_len & 0x300) >> 4) | ((v_sync_offset & 0x30) >> 2) |
((v_sync_len & 0x30) >> 4);
dtd->part2.dtd_flags = 0x18;
if (mode->flags & DRM_MODE_FLAG_PHSYNC)
dtd->part2.dtd_flags |= 0x2;
if (mode->flags & DRM_MODE_FLAG_PVSYNC)
dtd->part2.dtd_flags |= 0x4;
dtd->part2.sdvo_flags = 0;
dtd->part2.v_sync_off_high = v_sync_offset & 0xc0;
dtd->part2.reserved = 0;
}
/*
 * Unpack an SDVO DTD back into a DRM display mode; exact inverse of
 * psb_intel_sdvo_get_dtd_from_mode() (same nibble/overflow-bit layout).
 */
static void psb_intel_sdvo_get_mode_from_dtd(struct drm_display_mode * mode,
					 const struct psb_intel_sdvo_dtd *dtd)
{
	mode->hdisplay = dtd->part1.h_active;
	mode->hdisplay += ((dtd->part1.h_high >> 4) & 0x0f) << 8;
	mode->hsync_start = mode->hdisplay + dtd->part2.h_sync_off;
	/* Bits 9:8 of the hsync offset live in sync_off_width_high[7:6]. */
	mode->hsync_start += (dtd->part2.sync_off_width_high & 0xc0) << 2;
	mode->hsync_end = mode->hsync_start + dtd->part2.h_sync_width;
	mode->hsync_end += (dtd->part2.sync_off_width_high & 0x30) << 4;
	mode->htotal = mode->hdisplay + dtd->part1.h_blank;
	mode->htotal += (dtd->part1.h_high & 0xf) << 8;
	mode->vdisplay = dtd->part1.v_active;
	mode->vdisplay += ((dtd->part1.v_high >> 4) & 0x0f) << 8;
	mode->vsync_start = mode->vdisplay;
	mode->vsync_start += (dtd->part2.v_sync_off_width >> 4) & 0xf;
	mode->vsync_start += (dtd->part2.sync_off_width_high & 0x0c) << 2;
	/* v_sync_off_high carries offset bits 7:6 unshifted. */
	mode->vsync_start += dtd->part2.v_sync_off_high & 0xc0;
	mode->vsync_end = mode->vsync_start +
		(dtd->part2.v_sync_off_width & 0xf);
	mode->vsync_end += (dtd->part2.sync_off_width_high & 0x3) << 4;
	mode->vtotal = mode->vdisplay + dtd->part1.v_blank;
	mode->vtotal += (dtd->part1.v_high & 0xf) << 8;
	/* DTD clock is in 10 kHz units. */
	mode->clock = dtd->part1.clock * 10;
	mode->flags &= ~(DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC);
	if (dtd->part2.dtd_flags & 0x2)
		mode->flags |= DRM_MODE_FLAG_PHSYNC;
	if (dtd->part2.dtd_flags & 0x4)
		mode->flags |= DRM_MODE_FLAG_PVSYNC;
}
/* Query whether the SDVO device reports supported encode modes at all. */
static bool psb_intel_sdvo_check_supp_encode(struct psb_intel_sdvo *psb_intel_sdvo)
{
	struct psb_intel_sdvo_encode encode;

	/* The GET_SUPP_ENCODE reply is exactly two bytes on the wire. */
	BUILD_BUG_ON(sizeof(encode) != 2);

	return psb_intel_sdvo_get_value(psb_intel_sdvo, SDVO_CMD_GET_SUPP_ENCODE,
					&encode, sizeof(encode));
}
/* Select the encode mode (SDVO_ENCODE_DVI or SDVO_ENCODE_HDMI). */
static bool psb_intel_sdvo_set_encode(struct psb_intel_sdvo *psb_intel_sdvo,
				      uint8_t mode)
{
	uint8_t encode_mode = mode;

	return psb_intel_sdvo_set_value(psb_intel_sdvo, SDVO_CMD_SET_ENCODE,
					&encode_mode, 1);
}
/* Program the colorimetry/quantization range (e.g. SDVO_COLORIMETRY_RGB256). */
static bool psb_intel_sdvo_set_colorimetry(struct psb_intel_sdvo *psb_intel_sdvo,
					   uint8_t mode)
{
	uint8_t colorimetry = mode;

	return psb_intel_sdvo_set_value(psb_intel_sdvo, SDVO_CMD_SET_COLORIMETRY,
					&colorimetry, 1);
}
#if 0
/*
 * Debug helper: dump the HDMI infoframe buffers from the encoder.
 * NOTE(review): this #if 0 block has bit-rotted -- it references an
 * undeclared "encoder" variable and would not compile if enabled.
 */
static void psb_intel_sdvo_dump_hdmi_buf(struct psb_intel_sdvo *psb_intel_sdvo)
{
	int i, j;
	uint8_t set_buf_index[2];
	uint8_t av_split;
	uint8_t buf_size;
	uint8_t buf[48];
	uint8_t *pos;
	psb_intel_sdvo_get_value(encoder, SDVO_CMD_GET_HBUF_AV_SPLIT, &av_split, 1);
	for (i = 0; i <= av_split; i++) {
		set_buf_index[0] = i; set_buf_index[1] = 0;
		psb_intel_sdvo_write_cmd(encoder, SDVO_CMD_SET_HBUF_INDEX,
					 set_buf_index, 2);
		psb_intel_sdvo_write_cmd(encoder, SDVO_CMD_GET_HBUF_INFO, NULL, 0);
		psb_intel_sdvo_read_response(encoder, &buf_size, 1);
		pos = buf;
		/* Buffer contents are fetched eight bytes per command. */
		for (j = 0; j <= buf_size; j += 8) {
			psb_intel_sdvo_write_cmd(encoder, SDVO_CMD_GET_HBUF_DATA,
						 NULL, 0);
			psb_intel_sdvo_read_response(encoder, pos, 8);
			pos += 8;
		}
	}
}
#endif
/*
 * Stub: AVI InfoFrame transmission is not implemented for this driver,
 * so HDMI sinks are driven without infoframes.  Always returns false.
 */
static bool psb_intel_sdvo_set_avi_infoframe(struct psb_intel_sdvo *psb_intel_sdvo)
{
	/* Kernel log messages must be newline-terminated, otherwise this
	 * line gets merged with the next printk. */
	DRM_INFO("HDMI is not supported yet\n");
	return false;
}
/* Program the currently selected TV format (one bit in a 6-byte bitmask). */
static bool psb_intel_sdvo_set_tv_format(struct psb_intel_sdvo *psb_intel_sdvo)
{
	struct psb_intel_sdvo_tv_format format;
	uint32_t format_map = 1 << psb_intel_sdvo->tv_format_index;

	/* Copy the single-bit selection into the (larger) wire structure. */
	memset(&format, 0, sizeof(format));
	memcpy(&format, &format_map, min(sizeof(format), sizeof(format_map)));

	/* The SET_TV_FORMAT payload is exactly six bytes. */
	BUILD_BUG_ON(sizeof(format) != 6);
	return psb_intel_sdvo_set_value(psb_intel_sdvo, SDVO_CMD_SET_TV_FORMAT,
					&format, sizeof(format));
}
/*
 * Convert a display mode to a DTD and program it as the output timing
 * of the currently attached output.
 */
static bool
psb_intel_sdvo_set_output_timings_from_mode(struct psb_intel_sdvo *psb_intel_sdvo,
					const struct drm_display_mode *mode)
{
	struct psb_intel_sdvo_dtd output_dtd;

	/* Address the attached output before touching its timings. */
	if (!psb_intel_sdvo_set_target_output(psb_intel_sdvo,
					      psb_intel_sdvo->attached_output))
		return false;

	psb_intel_sdvo_get_dtd_from_mode(&output_dtd, mode);
	return psb_intel_sdvo_set_output_timing(psb_intel_sdvo, &output_dtd);
}
/*
 * Ask the encoder for its preferred input timing for @mode and write the
 * result back into @adjusted_mode (CRTC fields regenerated).
 */
static bool
psb_intel_sdvo_set_input_timings_for_mode(struct psb_intel_sdvo *psb_intel_sdvo,
					const struct drm_display_mode *mode,
					struct drm_display_mode *adjusted_mode)
{
	/* Reset the input timing to the screen. Assume always input 0. */
	if (!psb_intel_sdvo_set_target_input(psb_intel_sdvo))
		return false;

	/* Request and then read back the encoder's preferred input DTD. */
	if (!psb_intel_sdvo_create_preferred_input_timing(psb_intel_sdvo,
							  mode->clock / 10,
							  mode->hdisplay,
							  mode->vdisplay) ||
	    !psb_intel_sdvo_get_preferred_input_timing(psb_intel_sdvo,
						       &psb_intel_sdvo->input_dtd))
		return false;

	psb_intel_sdvo_get_mode_from_dtd(adjusted_mode, &psb_intel_sdvo->input_dtd);
	drm_mode_set_crtcinfo(adjusted_mode, 0);
	return true;
}
/*
 * Encoder mode_fixup hook: derive the adjusted (input) timing for the
 * requested mode.  For TV and LVDS the encoder scales, so the output
 * timing is programmed first and the chip's preferred input timing then
 * replaces adjusted_mode.  Also folds the SDVO pixel multiplier into
 * the adjusted clock so the CRTC PLL code accounts for it.
 */
static bool psb_intel_sdvo_mode_fixup(struct drm_encoder *encoder,
				  const struct drm_display_mode *mode,
				  struct drm_display_mode *adjusted_mode)
{
	struct psb_intel_sdvo *psb_intel_sdvo = to_psb_intel_sdvo(encoder);
	/* We need to construct preferred input timings based on our
	 * output timings. To do that, we have to set the output
	 * timings, even though this isn't really the right place in
	 * the sequence to do it. Oh well.
	 */
	if (psb_intel_sdvo->is_tv) {
		if (!psb_intel_sdvo_set_output_timings_from_mode(psb_intel_sdvo, mode))
			return false;
		/* Best-effort: failure leaves adjusted_mode unscaled. */
		(void) psb_intel_sdvo_set_input_timings_for_mode(psb_intel_sdvo,
								 mode,
								 adjusted_mode);
	} else if (psb_intel_sdvo->is_lvds) {
		/* LVDS always drives the panel's fixed mode on the output side. */
		if (!psb_intel_sdvo_set_output_timings_from_mode(psb_intel_sdvo,
								 psb_intel_sdvo->sdvo_lvds_fixed_mode))
			return false;
		(void) psb_intel_sdvo_set_input_timings_for_mode(psb_intel_sdvo,
								 mode,
								 adjusted_mode);
	}
	/* Make the CRTC code factor in the SDVO pixel multiplier. The
	 * SDVO device will factor out the multiplier during mode_set.
	 */
	psb_intel_sdvo->pixel_multiplier =
		psb_intel_sdvo_get_pixel_multiplier(adjusted_mode);
	adjusted_mode->clock *= psb_intel_sdvo->pixel_multiplier;
	return true;
}
/*
 * Encoder mode_set hook: program the SDVO device and the SDVOB/SDVOC
 * control register for the given (already fixed-up) mode.
 *
 * Sequence: set the input/output map, program output timing, select
 * DVI/HDMI encode, program input timing and the clock-rate multiplier,
 * then write the control register (preserving hardware-owned bits).
 * Moorestown (MRST) requires the aux variant of register reads.
 */
static void psb_intel_sdvo_mode_set(struct drm_encoder *encoder,
				struct drm_display_mode *mode,
				struct drm_display_mode *adjusted_mode)
{
	struct drm_device *dev = encoder->dev;
	struct drm_crtc *crtc = encoder->crtc;
	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
	struct psb_intel_sdvo *psb_intel_sdvo = to_psb_intel_sdvo(encoder);
	u32 sdvox;
	struct psb_intel_sdvo_in_out_map in_out;
	struct psb_intel_sdvo_dtd input_dtd;
	int rate;
	int need_aux = IS_MRST(dev) ? 1 : 0;
	if (!mode)
		return;
	/* First, set the input mapping for the first input to our controlled
	 * output. This is only correct if we're a single-input device, in
	 * which case the first input is the output from the appropriate SDVO
	 * channel on the motherboard. In a two-input device, the first input
	 * will be SDVOB and the second SDVOC.
	 */
	in_out.in0 = psb_intel_sdvo->attached_output;
	in_out.in1 = 0;
	psb_intel_sdvo_set_value(psb_intel_sdvo,
				 SDVO_CMD_SET_IN_OUT_MAP,
				 &in_out, sizeof(in_out));
	/* Set the output timings to the screen */
	if (!psb_intel_sdvo_set_target_output(psb_intel_sdvo,
					      psb_intel_sdvo->attached_output))
		return;
	/* We have tried to get input timing in mode_fixup, and filled into
	 * adjusted_mode.
	 */
	if (psb_intel_sdvo->is_tv || psb_intel_sdvo->is_lvds) {
		input_dtd = psb_intel_sdvo->input_dtd;
	} else {
		/* Set the output timing to the screen */
		if (!psb_intel_sdvo_set_target_output(psb_intel_sdvo,
						      psb_intel_sdvo->attached_output))
			return;
		psb_intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode);
		(void) psb_intel_sdvo_set_output_timing(psb_intel_sdvo, &input_dtd);
	}
	/* Set the input timing to the screen. Assume always input 0. */
	if (!psb_intel_sdvo_set_target_input(psb_intel_sdvo))
		return;
	if (psb_intel_sdvo->has_hdmi_monitor) {
		psb_intel_sdvo_set_encode(psb_intel_sdvo, SDVO_ENCODE_HDMI);
		psb_intel_sdvo_set_colorimetry(psb_intel_sdvo,
					       SDVO_COLORIMETRY_RGB256);
		psb_intel_sdvo_set_avi_infoframe(psb_intel_sdvo);
	} else
		psb_intel_sdvo_set_encode(psb_intel_sdvo, SDVO_ENCODE_DVI);
	if (psb_intel_sdvo->is_tv &&
	    !psb_intel_sdvo_set_tv_format(psb_intel_sdvo))
		return;
	(void) psb_intel_sdvo_set_input_timing(psb_intel_sdvo, &input_dtd);
	/* Translate the pixel multiplier to the hardware encoding. */
	switch (psb_intel_sdvo->pixel_multiplier) {
	default:
	case 1: rate = SDVO_CLOCK_RATE_MULT_1X; break;
	case 2: rate = SDVO_CLOCK_RATE_MULT_2X; break;
	case 4: rate = SDVO_CLOCK_RATE_MULT_4X; break;
	}
	if (!psb_intel_sdvo_set_clock_rate_mult(psb_intel_sdvo, rate))
		return;
	/* Set the SDVO control regs. */
	if (need_aux)
		sdvox = REG_READ_AUX(psb_intel_sdvo->sdvo_reg);
	else
		sdvox = REG_READ(psb_intel_sdvo->sdvo_reg);
	/* Keep only the bits the hardware/BIOS owns for this port. */
	switch (psb_intel_sdvo->sdvo_reg) {
	case SDVOB:
		sdvox &= SDVOB_PRESERVE_MASK;
		break;
	case SDVOC:
		sdvox &= SDVOC_PRESERVE_MASK;
		break;
	}
	sdvox |= (9 << 19) | SDVO_BORDER_ENABLE;
	if (gma_crtc->pipe == 1)
		sdvox |= SDVO_PIPE_B_SELECT;
	if (psb_intel_sdvo->has_hdmi_audio)
		sdvox |= SDVO_AUDIO_ENABLE;
	/* FIXME: Check if this is needed for PSB
	sdvox |= (pixel_multiplier - 1) << SDVO_PORT_MULTIPLY_SHIFT;
	*/
	if (input_dtd.part2.sdvo_flags & SDVO_NEED_TO_STALL)
		sdvox |= SDVO_STALL_SELECT;
	psb_intel_sdvo_write_sdvox(psb_intel_sdvo, sdvox);
}
/*
 * DPMS (power management) hook for the SDVO encoder.
 *
 * Off: deactivate the outputs, then clear SDVO_ENABLE in the control
 * register.  On: set SDVO_ENABLE, wait two vblanks for the encoder to
 * train its inputs, then re-activate the attached output.
 * Moorestown (MRST) requires the aux variant of register access.
 *
 * Changes vs. original: dropped the redundant trailing "return;" in a
 * void function and repaired the garbled sync-warning comment.
 */
static void psb_intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
{
	struct drm_device *dev = encoder->dev;
	struct psb_intel_sdvo *psb_intel_sdvo = to_psb_intel_sdvo(encoder);
	u32 temp;
	int i;
	int need_aux = IS_MRST(dev) ? 1 : 0;
	switch (mode) {
	case DRM_MODE_DPMS_ON:
		DRM_DEBUG("DPMS_ON");
		break;
	case DRM_MODE_DPMS_OFF:
		DRM_DEBUG("DPMS_OFF");
		break;
	default:
		DRM_DEBUG("DPMS: %d", mode);
	}
	if (mode != DRM_MODE_DPMS_ON) {
		psb_intel_sdvo_set_active_outputs(psb_intel_sdvo, 0);
		if (0)	/* encoder power states intentionally unused */
			psb_intel_sdvo_set_encoder_power_state(psb_intel_sdvo, mode);
		if (mode == DRM_MODE_DPMS_OFF) {
			if (need_aux)
				temp = REG_READ_AUX(psb_intel_sdvo->sdvo_reg);
			else
				temp = REG_READ(psb_intel_sdvo->sdvo_reg);
			if ((temp & SDVO_ENABLE) != 0) {
				psb_intel_sdvo_write_sdvox(psb_intel_sdvo, temp & ~SDVO_ENABLE);
			}
		}
	} else {
		bool input1, input2;
		u8 status;
		if (need_aux)
			temp = REG_READ_AUX(psb_intel_sdvo->sdvo_reg);
		else
			temp = REG_READ(psb_intel_sdvo->sdvo_reg);
		if ((temp & SDVO_ENABLE) == 0)
			psb_intel_sdvo_write_sdvox(psb_intel_sdvo, temp | SDVO_ENABLE);
		for (i = 0; i < 2; i++)
			gma_wait_for_vblank(dev);
		status = psb_intel_sdvo_get_trained_inputs(psb_intel_sdvo, &input1, &input2);
		/* Warn if the device reported failure to sync.
		 * A lot of SDVO devices fail to notify of sync, but it's
		 * a given: if the status is a success, we succeeded.
		 */
		if (status == SDVO_CMD_STATUS_SUCCESS && !input1) {
			DRM_DEBUG_KMS("First %s output reported failure to "
					"sync\n", SDVO_NAME(psb_intel_sdvo));
		}
		if (0)	/* encoder power states intentionally unused */
			psb_intel_sdvo_set_encoder_power_state(psb_intel_sdvo, mode);
		psb_intel_sdvo_set_active_outputs(psb_intel_sdvo, psb_intel_sdvo->attached_output);
	}
}
/*
 * Connector mode_valid hook: reject doublescan, modes outside the
 * encoder's pixel-clock range, and (for LVDS) anything larger than the
 * panel's fixed mode.
 */
static enum drm_mode_status psb_intel_sdvo_mode_valid(struct drm_connector *connector,
				 struct drm_display_mode *mode)
{
	struct psb_intel_sdvo *psb_intel_sdvo = intel_attached_sdvo(connector);

	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
		return MODE_NO_DBLESCAN;

	if (mode->clock < psb_intel_sdvo->pixel_clock_min)
		return MODE_CLOCK_LOW;
	if (mode->clock > psb_intel_sdvo->pixel_clock_max)
		return MODE_CLOCK_HIGH;

	/* An LVDS panel cannot display more than its fixed mode. */
	if (psb_intel_sdvo->is_lvds &&
	    (mode->hdisplay > psb_intel_sdvo->sdvo_lvds_fixed_mode->hdisplay ||
	     mode->vdisplay > psb_intel_sdvo->sdvo_lvds_fixed_mode->vdisplay))
		return MODE_PANEL;

	return MODE_OK;
}
/*
 * Read the device capability block (8 bytes on the wire) into @caps and
 * log it.  Returns false if the GET_DEVICE_CAPS command fails.
 */
static bool psb_intel_sdvo_get_capabilities(struct psb_intel_sdvo *psb_intel_sdvo, struct psb_intel_sdvo_caps *caps)
{
	/* The wire format of the capability block is exactly 8 bytes. */
	BUILD_BUG_ON(sizeof(*caps) != 8);
	if (!psb_intel_sdvo_get_value(psb_intel_sdvo,
				      SDVO_CMD_GET_DEVICE_CAPS,
				      caps, sizeof(*caps)))
		return false;
	DRM_DEBUG_KMS("SDVO capabilities:\n"
		      "  vendor_id: %d\n"
		      "  device_id: %d\n"
		      "  device_rev_id: %d\n"
		      "  sdvo_version_major: %d\n"
		      "  sdvo_version_minor: %d\n"
		      "  sdvo_inputs_mask: %d\n"
		      "  smooth_scaling: %d\n"
		      "  sharp_scaling: %d\n"
		      "  up_scaling: %d\n"
		      "  down_scaling: %d\n"
		      "  stall_support: %d\n"
		      "  output_flags: %d\n",
		      caps->vendor_id,
		      caps->device_id,
		      caps->device_rev_id,
		      caps->sdvo_version_major,
		      caps->sdvo_version_minor,
		      caps->sdvo_inputs_mask,
		      caps->smooth_scaling,
		      caps->sharp_scaling,
		      caps->up_scaling,
		      caps->down_scaling,
		      caps->stall_support,
		      caps->output_flags);
	return true;
}
/*
 * Report whether the encoder drives more than one output type; only the
 * low four output_flags bits (TMDS0/RGB0/CVBS0/SVID0) are considered.
 *
 * Fix: the original returned "caps & -caps", which isolates the lowest
 * set bit and is therefore true whenever *any* output is present.
 * "caps & (caps - 1)" is non-zero only when at least two bits are set,
 * which is what the comment intends.
 */
static bool
psb_intel_sdvo_multifunc_encoder(struct psb_intel_sdvo *psb_intel_sdvo)
{
	/* Is there more than one type of output? */
	int caps = psb_intel_sdvo->caps.output_flags & 0xf;

	return caps & (caps - 1);
}
/* Fetch the sink's EDID over the SDVO encoder's own DDC channel. */
static struct edid *
psb_intel_sdvo_get_edid(struct drm_connector *connector)
{
	struct psb_intel_sdvo *psb_intel_sdvo = intel_attached_sdvo(connector);

	return drm_get_edid(connector, &psb_intel_sdvo->ddc);
}
/* Mac mini hack -- use the same DDC as the analog connector */
static struct edid *
psb_intel_sdvo_get_analog_edid(struct drm_connector *connector)
{
	struct drm_psb_private *priv = to_drm_psb_private(connector->dev);
	struct i2c_adapter *adapter = &priv->gmbus[priv->crt_ddc_pin].adapter;

	return drm_get_edid(connector, adapter);
}
/*
 * Detect a digital (TMDS/HDMI) sink via EDID.  Tries the encoder's DDC
 * first; on multifunction encoders it then probes the other DDC buses,
 * and finally falls back to the analog (CRT) DDC.  A digital EDID means
 * connected (and records HDMI monitor/audio capability); an analog EDID
 * means disconnected; no EDID at all leaves the status unknown.
 */
static enum drm_connector_status
psb_intel_sdvo_hdmi_sink_detect(struct drm_connector *connector)
{
	struct psb_intel_sdvo *psb_intel_sdvo = intel_attached_sdvo(connector);
	enum drm_connector_status status;
	struct edid *edid;
	edid = psb_intel_sdvo_get_edid(connector);
	if (edid == NULL && psb_intel_sdvo_multifunc_encoder(psb_intel_sdvo)) {
		u8 ddc, saved_ddc = psb_intel_sdvo->ddc_bus;
		/*
		 * Don't use the 1 as the argument of DDC bus switch to get
		 * the EDID. It is used for SDVO SPD ROM.
		 */
		for (ddc = psb_intel_sdvo->ddc_bus >> 1; ddc > 1; ddc >>= 1) {
			psb_intel_sdvo->ddc_bus = ddc;
			edid = psb_intel_sdvo_get_edid(connector);
			if (edid)
				break;
		}
		/*
		 * If we found the EDID on the other bus,
		 * assume that is the correct DDC bus.
		 */
		if (edid == NULL)
			psb_intel_sdvo->ddc_bus = saved_ddc;
	}
	/*
	 * When there is no edid and no monitor is connected with VGA
	 * port, try to use the CRT ddc to read the EDID for DVI-connector.
	 */
	if (edid == NULL)
		edid = psb_intel_sdvo_get_analog_edid(connector);
	status = connector_status_unknown;
	if (edid != NULL) {
		/* DDC bus is shared, match EDID to connector type */
		if (edid->input & DRM_EDID_INPUT_DIGITAL) {
			status = connector_status_connected;
			if (psb_intel_sdvo->is_hdmi) {
				psb_intel_sdvo->has_hdmi_monitor = drm_detect_hdmi_monitor(edid);
				psb_intel_sdvo->has_hdmi_audio = drm_detect_monitor_audio(edid);
			}
		} else
			status = connector_status_disconnected;
		kfree(edid);
	}
	/* The user's force_audio property overrides what the EDID said. */
	if (status == connector_status_connected) {
		struct psb_intel_sdvo_connector *psb_intel_sdvo_connector = to_psb_intel_sdvo_connector(connector);
		if (psb_intel_sdvo_connector->force_audio)
			psb_intel_sdvo->has_hdmi_audio = psb_intel_sdvo_connector->force_audio > 0;
	}
	return status;
}
/*
 * Connector detect hook: query the encoder for attached displays and
 * classify the result.  TMDS connectors are verified through EDID; for
 * others an analog EDID confirms the connection.  On success, the TV /
 * LVDS flags are refreshed from the response mask.
 */
static enum drm_connector_status
psb_intel_sdvo_detect(struct drm_connector *connector, bool force)
{
	uint16_t response;
	struct psb_intel_sdvo *psb_intel_sdvo = intel_attached_sdvo(connector);
	struct psb_intel_sdvo_connector *psb_intel_sdvo_connector = to_psb_intel_sdvo_connector(connector);
	enum drm_connector_status ret;
	if (!psb_intel_sdvo_write_cmd(psb_intel_sdvo,
				      SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0))
		return connector_status_unknown;
	/* add 30ms delay when the output type might be TV */
	if (psb_intel_sdvo->caps.output_flags &
	    (SDVO_OUTPUT_SVID0 | SDVO_OUTPUT_CVBS0))
		mdelay(30);
	if (!psb_intel_sdvo_read_response(psb_intel_sdvo, &response, 2))
		return connector_status_unknown;
	DRM_DEBUG_KMS("SDVO response %d %d [%x]\n",
		      response & 0xff, response >> 8,
		      psb_intel_sdvo_connector->output_flag);
	if (response == 0)
		return connector_status_disconnected;
	psb_intel_sdvo->attached_output = response;
	/* HDMI state is re-derived below by the sink detect, if any. */
	psb_intel_sdvo->has_hdmi_monitor = false;
	psb_intel_sdvo->has_hdmi_audio = false;
	if ((psb_intel_sdvo_connector->output_flag & response) == 0)
		ret = connector_status_disconnected;
	else if (IS_TMDS(psb_intel_sdvo_connector))
		ret = psb_intel_sdvo_hdmi_sink_detect(connector);
	else {
		struct edid *edid;
		/* if we have an edid check it matches the connection */
		edid = psb_intel_sdvo_get_edid(connector);
		if (edid == NULL)
			edid = psb_intel_sdvo_get_analog_edid(connector);
		if (edid != NULL) {
			/* A digital EDID on a non-TMDS connector is a
			 * mismatch from the shared DDC bus. */
			if (edid->input & DRM_EDID_INPUT_DIGITAL)
				ret = connector_status_disconnected;
			else
				ret = connector_status_connected;
			kfree(edid);
		} else
			ret = connector_status_connected;
	}
	/* May update encoder flag for like clock for SDVO TV, etc.*/
	if (ret == connector_status_connected) {
		psb_intel_sdvo->is_tv = false;
		psb_intel_sdvo->is_lvds = false;
		psb_intel_sdvo->base.needs_tv_clock = false;
		if (response & SDVO_TV_MASK) {
			psb_intel_sdvo->is_tv = true;
			psb_intel_sdvo->base.needs_tv_clock = true;
		}
		if (response & SDVO_LVDS_MASK)
			psb_intel_sdvo->is_lvds = psb_intel_sdvo->sdvo_lvds_fixed_mode != NULL;
	}
	return ret;
}
/*
 * Probe display modes over DDC.  Falls back to the analog (CRT) DDC for
 * the Mac mini's shared DVI-I link, and only accepts an EDID whose
 * digital/analog type matches the connector.
 */
static void psb_intel_sdvo_get_ddc_modes(struct drm_connector *connector)
{
	struct psb_intel_sdvo_connector *sdvo_connector =
		to_psb_intel_sdvo_connector(connector);
	bool connector_is_digital = !!IS_TMDS(sdvo_connector);
	struct edid *edid;

	/* set the bus switch and get the modes */
	edid = psb_intel_sdvo_get_edid(connector);

	/*
	 * Mac mini hack. On this device, the DVI-I connector shares one DDC
	 * link between analog and digital outputs. So, if the regular SDVO
	 * DDC fails, check to see if the analog output is disconnected, in
	 * which case we'll look there for the digital DDC data.
	 */
	if (edid == NULL)
		edid = psb_intel_sdvo_get_analog_edid(connector);
	if (edid == NULL)
		return;

	/* The DDC bus is shared: only use an EDID of the matching type. */
	if (connector_is_digital == !!(edid->input & DRM_EDID_INPUT_DIGITAL)) {
		drm_connector_update_edid_property(connector, edid);
		drm_add_edid_modes(connector, edid);
	}
	kfree(edid);
}
/*
 * Set of SDVO TV modes the encoder can be asked about.
 * Note! This table is in reply order: bit N of the
 * GET_SDTV_RESOLUTION_SUPPORT response corresponds to entry N
 * (see the loop in psb_intel_sdvo_get_tv_modes()).
 * XXX: all 60Hz refresh?
 */
static const struct drm_display_mode sdvo_tv_modes[] = {
	{ DRM_MODE("320x200", DRM_MODE_TYPE_DRIVER, 5815, 320, 321, 384,
		   416, 0, 200, 201, 232, 233, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	{ DRM_MODE("320x240", DRM_MODE_TYPE_DRIVER, 6814, 320, 321, 384,
		   416, 0, 240, 241, 272, 273, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	{ DRM_MODE("400x300", DRM_MODE_TYPE_DRIVER, 9910, 400, 401, 464,
		   496, 0, 300, 301, 332, 333, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	{ DRM_MODE("640x350", DRM_MODE_TYPE_DRIVER, 16913, 640, 641, 704,
		   736, 0, 350, 351, 382, 383, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	{ DRM_MODE("640x400", DRM_MODE_TYPE_DRIVER, 19121, 640, 641, 704,
		   736, 0, 400, 401, 432, 433, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 22654, 640, 641, 704,
		   736, 0, 480, 481, 512, 513, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	{ DRM_MODE("704x480", DRM_MODE_TYPE_DRIVER, 24624, 704, 705, 768,
		   800, 0, 480, 481, 512, 513, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	{ DRM_MODE("704x576", DRM_MODE_TYPE_DRIVER, 29232, 704, 705, 768,
		   800, 0, 576, 577, 608, 609, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	{ DRM_MODE("720x350", DRM_MODE_TYPE_DRIVER, 18751, 720, 721, 784,
		   816, 0, 350, 351, 382, 383, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	{ DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 21199, 720, 721, 784,
		   816, 0, 400, 401, 432, 433, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 25116, 720, 721, 784,
		   816, 0, 480, 481, 512, 513, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	{ DRM_MODE("720x540", DRM_MODE_TYPE_DRIVER, 28054, 720, 721, 784,
		   816, 0, 540, 541, 572, 573, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 29816, 720, 721, 784,
		   816, 0, 576, 577, 608, 609, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	{ DRM_MODE("768x576", DRM_MODE_TYPE_DRIVER, 31570, 768, 769, 832,
		   864, 0, 576, 577, 608, 609, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 34030, 800, 801, 864,
		   896, 0, 600, 601, 632, 633, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	{ DRM_MODE("832x624", DRM_MODE_TYPE_DRIVER, 36581, 832, 833, 896,
		   928, 0, 624, 625, 656, 657, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	{ DRM_MODE("920x766", DRM_MODE_TYPE_DRIVER, 48707, 920, 921, 984,
		   1016, 0, 766, 767, 798, 799, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 53827, 1024, 1025, 1088,
		   1120, 0, 768, 769, 800, 801, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 87265, 1280, 1281, 1344,
		   1376, 0, 1024, 1025, 1056, 1057, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
};
/*
 * Populate TV modes: ask the encoder which of the sdvo_tv_modes entries
 * it supports for the current TV format (a 3-byte bitmask reply, bit N
 * mapping to table entry N) and add the supported ones.
 */
static void psb_intel_sdvo_get_tv_modes(struct drm_connector *connector)
{
	struct psb_intel_sdvo *psb_intel_sdvo = intel_attached_sdvo(connector);
	struct psb_intel_sdvo_sdtv_resolution_request tv_res;
	uint32_t reply = 0, format_map = 0;
	int i;
	/* Read the list of supported input resolutions for the selected TV
	 * format.
	 */
	format_map = 1 << psb_intel_sdvo->tv_format_index;
	memcpy(&tv_res, &format_map,
	       min(sizeof(format_map), sizeof(struct psb_intel_sdvo_sdtv_resolution_request)));
	if (!psb_intel_sdvo_set_target_output(psb_intel_sdvo, psb_intel_sdvo->attached_output))
		return;
	/* Request and reply are both 3 bytes on the wire. */
	BUILD_BUG_ON(sizeof(tv_res) != 3);
	if (!psb_intel_sdvo_write_cmd(psb_intel_sdvo,
				      SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT,
				      &tv_res, sizeof(tv_res)))
		return;
	if (!psb_intel_sdvo_read_response(psb_intel_sdvo, &reply, 3))
		return;
	for (i = 0; i < ARRAY_SIZE(sdvo_tv_modes); i++)
		if (reply & (1 << i)) {
			struct drm_display_mode *nmode;
			nmode = drm_mode_duplicate(connector->dev,
						   &sdvo_tv_modes[i]);
			if (nmode)
				drm_mode_probed_add(connector, nmode);
		}
}
/*
 * Populate LVDS modes: prefer modes read over DDC, otherwise fall back
 * to the VBT mode.  The first preferred mode found is duplicated and
 * cached as the panel's fixed mode.
 *
 * Fix: drm_mode_duplicate() can fail under memory pressure; the
 * original passed the possibly-NULL result straight to
 * drm_mode_set_crtcinfo(), a NULL dereference.
 */
static void psb_intel_sdvo_get_lvds_modes(struct drm_connector *connector)
{
	struct psb_intel_sdvo *psb_intel_sdvo = intel_attached_sdvo(connector);
	struct drm_psb_private *dev_priv = to_drm_psb_private(connector->dev);
	struct drm_display_mode *newmode;
	/*
	 * Attempt to get the mode list from DDC.
	 * Assume that the preferred modes are
	 * arranged in priority order.
	 */
	psb_intel_ddc_get_modes(connector, psb_intel_sdvo->i2c);
	if (list_empty(&connector->probed_modes) == false)
		goto end;
	/* Fetch modes from VBT */
	if (dev_priv->sdvo_lvds_vbt_mode != NULL) {
		newmode = drm_mode_duplicate(connector->dev,
					     dev_priv->sdvo_lvds_vbt_mode);
		if (newmode != NULL) {
			/* Guarantee the mode is preferred */
			newmode->type = (DRM_MODE_TYPE_PREFERRED |
					 DRM_MODE_TYPE_DRIVER);
			drm_mode_probed_add(connector, newmode);
		}
	}
end:
	list_for_each_entry(newmode, &connector->probed_modes, head) {
		if (newmode->type & DRM_MODE_TYPE_PREFERRED) {
			psb_intel_sdvo->sdvo_lvds_fixed_mode =
				drm_mode_duplicate(connector->dev, newmode);
			/* Only mark LVDS usable if the duplicate succeeded. */
			if (psb_intel_sdvo->sdvo_lvds_fixed_mode != NULL) {
				drm_mode_set_crtcinfo(psb_intel_sdvo->sdvo_lvds_fixed_mode,
						      0);
				psb_intel_sdvo->is_lvds = true;
			}
			break;
		}
	}
}
/*
 * Connector get_modes hook: dispatch on connector class (TV, LVDS, or
 * DDC-probed) and report whether any mode was found.
 */
static int psb_intel_sdvo_get_modes(struct drm_connector *connector)
{
	struct psb_intel_sdvo_connector *sdvo_connector =
		to_psb_intel_sdvo_connector(connector);

	if (IS_TV(sdvo_connector))
		psb_intel_sdvo_get_tv_modes(connector);
	else if (IS_LVDS(sdvo_connector))
		psb_intel_sdvo_get_lvds_modes(connector);
	else
		psb_intel_sdvo_get_ddc_modes(connector);

	return list_empty(&connector->probed_modes) ? 0 : 1;
}
/* Connector destroy hook: tear down the DRM connector and free its wrapper. */
static void psb_intel_sdvo_destroy(struct drm_connector *connector)
{
	struct gma_connector *gma_connector = to_gma_connector(connector);

	drm_connector_cleanup(connector);
	kfree(gma_connector);
}
/*
 * Re-read the EDID to decide whether the attached HDMI sink supports
 * audio.  Returns false for non-HDMI encoders or analog/missing EDIDs.
 *
 * Fix: drm_get_edid() allocates the EDID; the original returned without
 * freeing it, leaking memory on every call.
 */
static bool psb_intel_sdvo_detect_hdmi_audio(struct drm_connector *connector)
{
	struct psb_intel_sdvo *psb_intel_sdvo = intel_attached_sdvo(connector);
	struct edid *edid;
	bool has_audio = false;
	if (!psb_intel_sdvo->is_hdmi)
		return false;
	edid = psb_intel_sdvo_get_edid(connector);
	if (edid != NULL && edid->input & DRM_EDID_INPUT_DIGITAL)
		has_audio = drm_detect_monitor_audio(edid);
	kfree(edid);
	return has_audio;
}
/*
 * Connector set_property hook.  Handles force-audio and broadcast-RGB,
 * the TV format selection, the TV/LVDS margin (overscan) properties,
 * and a family of simple range properties via the CHECK_PROPERTY macro.
 * Properties that change output state trigger a full CRTC mode set.
 */
static int
psb_intel_sdvo_set_property(struct drm_connector *connector,
			struct drm_property *property,
			uint64_t val)
{
	struct psb_intel_sdvo *psb_intel_sdvo = intel_attached_sdvo(connector);
	struct psb_intel_sdvo_connector *psb_intel_sdvo_connector = to_psb_intel_sdvo_connector(connector);
	struct drm_psb_private *dev_priv = to_drm_psb_private(connector->dev);
	uint16_t temp_value;
	uint8_t cmd;
	int ret;
	ret = drm_object_property_set_value(&connector->base, property, val);
	if (ret)
		return ret;
	if (property == dev_priv->force_audio_property) {
		int i = val;
		bool has_audio;
		if (i == psb_intel_sdvo_connector->force_audio)
			return 0;
		psb_intel_sdvo_connector->force_audio = i;
		/* 0 = auto: re-probe the sink; otherwise force on/off. */
		if (i == 0)
			has_audio = psb_intel_sdvo_detect_hdmi_audio(connector);
		else
			has_audio = i > 0;
		if (has_audio == psb_intel_sdvo->has_hdmi_audio)
			return 0;
		psb_intel_sdvo->has_hdmi_audio = has_audio;
		goto done;
	}
	if (property == dev_priv->broadcast_rgb_property) {
		if (val == !!psb_intel_sdvo->color_range)
			return 0;
		psb_intel_sdvo->color_range = val ? SDVO_COLOR_RANGE_16_235 : 0;
		goto done;
	}
/* Generic handler for a scalar property backed by one SDVO command;
 * relies on temp_value being set by the enclosing TV/LVDS branch. */
#define CHECK_PROPERTY(name, NAME) \
	if (psb_intel_sdvo_connector->name == property) { \
		if (psb_intel_sdvo_connector->cur_##name == temp_value) return 0; \
		if (psb_intel_sdvo_connector->max_##name < temp_value) return -EINVAL; \
		cmd = SDVO_CMD_SET_##NAME; \
		psb_intel_sdvo_connector->cur_##name = temp_value; \
		goto set_value; \
	}
	if (property == psb_intel_sdvo_connector->tv_format) {
		if (val >= ARRAY_SIZE(tv_format_names))
			return -EINVAL;
		if (psb_intel_sdvo->tv_format_index ==
		    psb_intel_sdvo_connector->tv_format_supported[val])
			return 0;
		psb_intel_sdvo->tv_format_index = psb_intel_sdvo_connector->tv_format_supported[val];
		goto done;
	} else if (IS_TV_OR_LVDS(psb_intel_sdvo_connector)) {
		temp_value = val;
		/* Left/right (and top/bottom) margins are kept symmetric:
		 * setting one side mirrors it to the other, then the
		 * overscan command takes the remaining span. */
		if (psb_intel_sdvo_connector->left == property) {
			drm_object_property_set_value(&connector->base,
							    psb_intel_sdvo_connector->right, val);
			if (psb_intel_sdvo_connector->left_margin == temp_value)
				return 0;
			psb_intel_sdvo_connector->left_margin = temp_value;
			psb_intel_sdvo_connector->right_margin = temp_value;
			temp_value = psb_intel_sdvo_connector->max_hscan -
				psb_intel_sdvo_connector->left_margin;
			cmd = SDVO_CMD_SET_OVERSCAN_H;
			goto set_value;
		} else if (psb_intel_sdvo_connector->right == property) {
			drm_object_property_set_value(&connector->base,
							    psb_intel_sdvo_connector->left, val);
			if (psb_intel_sdvo_connector->right_margin == temp_value)
				return 0;
			psb_intel_sdvo_connector->left_margin = temp_value;
			psb_intel_sdvo_connector->right_margin = temp_value;
			temp_value = psb_intel_sdvo_connector->max_hscan -
				psb_intel_sdvo_connector->left_margin;
			cmd = SDVO_CMD_SET_OVERSCAN_H;
			goto set_value;
		} else if (psb_intel_sdvo_connector->top == property) {
			drm_object_property_set_value(&connector->base,
							    psb_intel_sdvo_connector->bottom, val);
			if (psb_intel_sdvo_connector->top_margin == temp_value)
				return 0;
			psb_intel_sdvo_connector->top_margin = temp_value;
			psb_intel_sdvo_connector->bottom_margin = temp_value;
			temp_value = psb_intel_sdvo_connector->max_vscan -
				psb_intel_sdvo_connector->top_margin;
			cmd = SDVO_CMD_SET_OVERSCAN_V;
			goto set_value;
		} else if (psb_intel_sdvo_connector->bottom == property) {
			drm_object_property_set_value(&connector->base,
							    psb_intel_sdvo_connector->top, val);
			if (psb_intel_sdvo_connector->bottom_margin == temp_value)
				return 0;
			psb_intel_sdvo_connector->top_margin = temp_value;
			psb_intel_sdvo_connector->bottom_margin = temp_value;
			temp_value = psb_intel_sdvo_connector->max_vscan -
				psb_intel_sdvo_connector->top_margin;
			cmd = SDVO_CMD_SET_OVERSCAN_V;
			goto set_value;
		}
		CHECK_PROPERTY(hpos, HPOS)
		CHECK_PROPERTY(vpos, VPOS)
		CHECK_PROPERTY(saturation, SATURATION)
		CHECK_PROPERTY(contrast, CONTRAST)
		CHECK_PROPERTY(hue, HUE)
		CHECK_PROPERTY(brightness, BRIGHTNESS)
		CHECK_PROPERTY(sharpness, SHARPNESS)
		CHECK_PROPERTY(flicker_filter, FLICKER_FILTER)
		CHECK_PROPERTY(flicker_filter_2d, FLICKER_FILTER_2D)
		CHECK_PROPERTY(flicker_filter_adaptive, FLICKER_FILTER_ADAPTIVE)
		CHECK_PROPERTY(tv_chroma_filter, TV_CHROMA_FILTER)
		CHECK_PROPERTY(tv_luma_filter, TV_LUMA_FILTER)
		CHECK_PROPERTY(dot_crawl, DOT_CRAWL)
	}
	return -EINVAL; /* unknown property */
set_value:
	if (!psb_intel_sdvo_set_value(psb_intel_sdvo, cmd, &temp_value, 2))
		return -EIO;
done:
	/* Re-run the mode set so the property change takes effect. */
	if (psb_intel_sdvo->base.base.crtc) {
		struct drm_crtc *crtc = psb_intel_sdvo->base.base.crtc;
		drm_crtc_helper_set_mode(crtc, &crtc->mode, crtc->x,
					 crtc->y, crtc->primary->fb);
	}
	return 0;
#undef CHECK_PROPERTY
}
/*
 * Save the SDVO control register for restore after suspend.
 * Note: the "dev" local is consumed implicitly by the REG_READ() macro.
 */
static void psb_intel_sdvo_save(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
	struct psb_intel_sdvo *sdvo = to_psb_intel_sdvo(&gma_encoder->base);
	sdvo->saveSDVO = REG_READ(sdvo->sdvo_reg);
}
/*
 * Restore the saved SDVO control register and, if the connector was
 * connected, force a full mode set to re-train the link.
 * Note: the "dev" local is consumed implicitly by the REG_WRITE() macro.
 */
static void psb_intel_sdvo_restore(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct drm_encoder *encoder = &gma_attached_encoder(connector)->base;
	struct psb_intel_sdvo *sdvo = to_psb_intel_sdvo(encoder);
	struct drm_crtc *crtc = encoder->crtc;
	REG_WRITE(sdvo->sdvo_reg, sdvo->saveSDVO);
	/* Force a full mode set on the crtc. We're supposed to have the
	   mode_config lock already. */
	if (connector->status == connector_status_connected)
		drm_crtc_helper_set_mode(crtc, &crtc->mode, crtc->x, crtc->y,
					 NULL);
}
/* Encoder helper vtable: DPMS, mode fixup/set, and generic prepare/commit. */
static const struct drm_encoder_helper_funcs psb_intel_sdvo_helper_funcs = {
	.dpms = psb_intel_sdvo_dpms,
	.mode_fixup = psb_intel_sdvo_mode_fixup,
	.prepare = gma_encoder_prepare,
	.mode_set = psb_intel_sdvo_mode_set,
	.commit = gma_encoder_commit,
};
/* Connector vtable: detection, probing, property handling, teardown. */
static const struct drm_connector_funcs psb_intel_sdvo_connector_funcs = {
	.dpms = drm_helper_connector_dpms,
	.detect = psb_intel_sdvo_detect,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.set_property = psb_intel_sdvo_set_property,
	.destroy = psb_intel_sdvo_destroy,
};
/* Connector helper vtable: mode enumeration/validation and encoder lookup. */
static const struct drm_connector_helper_funcs psb_intel_sdvo_connector_helper_funcs = {
	.get_modes = psb_intel_sdvo_get_modes,
	.mode_valid = psb_intel_sdvo_mode_valid,
	.best_encoder = gma_best_encoder,
};
/* Encoder destroy hook: free the cached LVDS mode, DDC adapter, encoder. */
static void psb_intel_sdvo_enc_destroy(struct drm_encoder *encoder)
{
	struct psb_intel_sdvo *psb_intel_sdvo = to_psb_intel_sdvo(encoder);

	if (psb_intel_sdvo->sdvo_lvds_fixed_mode)
		drm_mode_destroy(encoder->dev,
				 psb_intel_sdvo->sdvo_lvds_fixed_mode);

	i2c_del_adapter(&psb_intel_sdvo->ddc);
	gma_encoder_destroy(encoder);
}
/* Encoder vtable: only teardown is needed; helpers cover the rest. */
static const struct drm_encoder_funcs psb_intel_sdvo_enc_funcs = {
	.destroy = psb_intel_sdvo_enc_destroy,
};
/*
 * Pick a DDC bus when the BIOS/VBT did not provide a mapping.
 * Currently hard-wired to bus 2 (see FIXME); the disabled code below is
 * the priority-based guess inherited from i915.
 */
static void
psb_intel_sdvo_guess_ddc_bus(struct psb_intel_sdvo *sdvo)
{
	/* FIXME: At the moment, ddc_bus = 2 is the only thing that works.
	 * We need to figure out if this is true for all available poulsbo
	 * hardware, or if we need to fiddle with the guessing code above.
	 * The problem might go away if we can parse sdvo mappings from bios */
	sdvo->ddc_bus = 2;
#if 0
	uint16_t mask = 0;
	unsigned int num_bits;
	/* Make a mask of outputs less than or equal to our own priority in the
	 * list.  Each case deliberately falls through to accumulate all
	 * lower-priority output bits.
	 */
	switch (sdvo->controlled_output) {
	case SDVO_OUTPUT_LVDS1:
		mask |= SDVO_OUTPUT_LVDS1;
		/* fall through */
	case SDVO_OUTPUT_LVDS0:
		mask |= SDVO_OUTPUT_LVDS0;
		/* fall through */
	case SDVO_OUTPUT_TMDS1:
		mask |= SDVO_OUTPUT_TMDS1;
		/* fall through */
	case SDVO_OUTPUT_TMDS0:
		mask |= SDVO_OUTPUT_TMDS0;
		/* fall through */
	case SDVO_OUTPUT_RGB1:
		mask |= SDVO_OUTPUT_RGB1;
		/* fall through */
	case SDVO_OUTPUT_RGB0:
		mask |= SDVO_OUTPUT_RGB0;
		break;
	}
	/* Count bits to find what number we are in the priority list. */
	mask &= sdvo->caps.output_flags;
	num_bits = hweight16(mask);
	/* If more than 3 outputs, default to DDC bus 3 for now. */
	if (num_bits > 3)
		num_bits = 3;
	/* Corresponds to SDVO_CONTROL_BUS_DDCx */
	sdvo->ddc_bus = 1 << num_bits;
#endif
}
/*
* Choose the appropriate DDC bus for control bus switch command for this
* SDVO output based on the controlled output.
*
* DDC bus number assignment is in a priority order of RGB outputs, then TMDS
* outputs, then LVDS outputs.
*/
static void
psb_intel_sdvo_select_ddc_bus(struct drm_psb_private *dev_priv,
			  struct psb_intel_sdvo *sdvo, u32 reg)
{
	struct sdvo_device_mapping *mapping;

	/* SDVOB and SDVOC each have their own BIOS-provided mapping slot. */
	mapping = IS_SDVOB(reg) ? &dev_priv->sdvo_mappings[0]
				: &dev_priv->sdvo_mappings[1];

	if (mapping->initialized)
		/* The high nibble of ddc_pin selects the DDC bus bit. */
		sdvo->ddc_bus = 1 << ((mapping->ddc_pin & 0xf0) >> 4);
	else
		psb_intel_sdvo_guess_ddc_bus(sdvo);
}
/*
 * Choose the GMBUS pin and speed used to talk to the SDVO device,
 * preferring the BIOS/VBT mapping when one was parsed.
 */
static void
psb_intel_sdvo_select_i2c_bus(struct drm_psb_private *dev_priv,
			  struct psb_intel_sdvo *sdvo, u32 reg)
{
	struct sdvo_device_mapping *mapping;
	/* Defaults: DPB pin at 1 MHz, overridden by a VBT mapping below. */
	u8 pin = GMBUS_PORT_DPB;
	u8 speed = GMBUS_RATE_1MHZ >> 8;

	mapping = IS_SDVOB(reg) ? &dev_priv->sdvo_mappings[0]
				: &dev_priv->sdvo_mappings[1];
	if (mapping->initialized) {
		pin = mapping->i2c_pin;
		speed = mapping->i2c_speed;
	}

	if (pin < GMBUS_NUM_PORTS) {
		sdvo->i2c = &dev_priv->gmbus[pin].adapter;
		gma_intel_gmbus_set_speed(sdvo->i2c, speed);
		gma_intel_gmbus_force_bit(sdvo->i2c, true);
	} else {
		sdvo->i2c = &dev_priv->gmbus[GMBUS_PORT_DPB].adapter;
	}
}
/*
 * An SDVO DVI connector is treated as HDMI when the encoder reports
 * HDMI encode support; @device is currently not consulted.
 */
static bool
psb_intel_sdvo_is_hdmi_connector(struct psb_intel_sdvo *psb_intel_sdvo, int device)
{
	bool has_hdmi_encode = psb_intel_sdvo_check_supp_encode(psb_intel_sdvo);

	return has_hdmi_encode;
}
static u8
psb_intel_sdvo_get_slave_addr(struct drm_device *dev, int sdvo_reg)
{
struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct sdvo_device_mapping *my_mapping, *other_mapping;
if (IS_SDVOB(sdvo_reg)) {
my_mapping = &dev_priv->sdvo_mappings[0];
other_mapping = &dev_priv->sdvo_mappings[1];
} else {
my_mapping = &dev_priv->sdvo_mappings[1];
other_mapping = &dev_priv->sdvo_mappings[0];
}
/* If the BIOS described our SDVO device, take advantage of it. */
if (my_mapping->slave_addr)
return my_mapping->slave_addr;
/* If the BIOS only described a different SDVO device, use the
* address that it isn't using.
*/
if (other_mapping->slave_addr) {
if (other_mapping->slave_addr == 0x70)
return 0x72;
else
return 0x70;
}
/* No SDVO device info is found for another DVO port,
* so use mapping assumption we had before BIOS parsing.
*/
if (IS_SDVOB(sdvo_reg))
return 0x70;
else
return 0x72;
}
/*
 * Common connector setup shared by all SDVO output flavours: register the
 * DRM connector, install the helper and save/restore callbacks, and tie
 * the connector to its encoder.
 */
static void
psb_intel_sdvo_connector_init(struct psb_intel_sdvo_connector *connector,
			      struct psb_intel_sdvo *encoder)
{
	drm_connector_init(encoder->base.base.dev,
			   &connector->base.base,
			   &psb_intel_sdvo_connector_funcs,
			   connector->base.base.connector_type);
	drm_connector_helper_add(&connector->base.base,
				 &psb_intel_sdvo_connector_helper_funcs);
	/* Neither interlaced nor doublescan modes are supported. */
	connector->base.base.interlace_allowed = 0;
	connector->base.base.doublescan_allowed = 0;
	connector->base.base.display_info.subpixel_order = SubPixelHorizontalRGB;
	/* Used by the suspend/resume paths to save and restore encoder state. */
	connector->base.save = psb_intel_sdvo_save;
	connector->base.restore = psb_intel_sdvo_restore;
	gma_connector_attach_encoder(&connector->base, &encoder->base);
}
/*
 * Intentionally empty: the i915 driver attaches audio/colour-range
 * properties here, but this driver does not support HDMI audio yet, so
 * the original body is kept commented out as a reference.
 */
static void
psb_intel_sdvo_add_hdmi_properties(struct psb_intel_sdvo_connector *connector)
{
	/* FIXME: We don't support HDMI at the moment
	struct drm_device *dev = connector->base.base.dev;
	intel_attach_force_audio_property(&connector->base.base);
	intel_attach_broadcast_rgb_property(&connector->base.base);
	*/
}
/*
 * Create and register a DVI connector for TMDS output @device (0 or 1).
 * The connector is promoted to HDMI when the encoder supports HDMI
 * encoding.  Returns false on allocation failure.
 */
static bool
psb_intel_sdvo_dvi_init(struct psb_intel_sdvo *psb_intel_sdvo, int device)
{
	struct drm_encoder *encoder = &psb_intel_sdvo->base.base;
	struct psb_intel_sdvo_connector *sdvo_connector;
	struct gma_connector *intel_connector;
	struct drm_connector *connector;

	sdvo_connector = kzalloc(sizeof(*sdvo_connector), GFP_KERNEL);
	if (!sdvo_connector)
		return false;

	switch (device) {
	case 0:
		psb_intel_sdvo->controlled_output |= SDVO_OUTPUT_TMDS0;
		sdvo_connector->output_flag = SDVO_OUTPUT_TMDS0;
		break;
	case 1:
		psb_intel_sdvo->controlled_output |= SDVO_OUTPUT_TMDS1;
		sdvo_connector->output_flag = SDVO_OUTPUT_TMDS1;
		break;
	}

	intel_connector = &sdvo_connector->base;
	connector = &intel_connector->base;
	/* connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT; */
	encoder->encoder_type = DRM_MODE_ENCODER_TMDS;
	connector->connector_type = DRM_MODE_CONNECTOR_DVID;

	if (psb_intel_sdvo_is_hdmi_connector(psb_intel_sdvo, device)) {
		connector->connector_type = DRM_MODE_CONNECTOR_HDMIA;
		psb_intel_sdvo->is_hdmi = true;
	}

	psb_intel_sdvo->base.clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
					  (1 << INTEL_ANALOG_CLONE_BIT);

	psb_intel_sdvo_connector_init(sdvo_connector, psb_intel_sdvo);
	if (psb_intel_sdvo->is_hdmi)
		psb_intel_sdvo_add_hdmi_properties(sdvo_connector);

	return true;
}
/*
 * Create and register a TV-out connector (S-Video/composite) for SDVO
 * output @type, including its TV format and picture enhancement
 * properties.  Returns false on allocation or property failure.
 */
static bool
psb_intel_sdvo_tv_init(struct psb_intel_sdvo *psb_intel_sdvo, int type)
{
	struct drm_encoder *encoder = &psb_intel_sdvo->base.base;
	struct psb_intel_sdvo_connector *sdvo_connector;
	struct gma_connector *intel_connector;
	struct drm_connector *connector;

	sdvo_connector = kzalloc(sizeof(*sdvo_connector), GFP_KERNEL);
	if (!sdvo_connector)
		return false;

	intel_connector = &sdvo_connector->base;
	connector = &intel_connector->base;
	encoder->encoder_type = DRM_MODE_ENCODER_TVDAC;
	connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO;

	psb_intel_sdvo->controlled_output |= type;
	sdvo_connector->output_flag = type;

	psb_intel_sdvo->is_tv = true;
	psb_intel_sdvo->base.needs_tv_clock = true;
	psb_intel_sdvo->base.clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT;

	psb_intel_sdvo_connector_init(sdvo_connector, psb_intel_sdvo);

	/* Short-circuit keeps the original ordering of the two setups. */
	if (!psb_intel_sdvo_tv_create_property(psb_intel_sdvo, sdvo_connector, type) ||
	    !psb_intel_sdvo_create_enhance_property(psb_intel_sdvo, sdvo_connector)) {
		psb_intel_sdvo_destroy(connector);
		return false;
	}

	return true;
}
/*
 * Create and register a VGA connector for analog RGB output @device
 * (0 or 1).  Returns false on allocation failure.
 */
static bool
psb_intel_sdvo_analog_init(struct psb_intel_sdvo *psb_intel_sdvo, int device)
{
	struct drm_encoder *encoder = &psb_intel_sdvo->base.base;
	struct psb_intel_sdvo_connector *sdvo_connector;
	struct gma_connector *intel_connector;
	struct drm_connector *connector;

	sdvo_connector = kzalloc(sizeof(*sdvo_connector), GFP_KERNEL);
	if (!sdvo_connector)
		return false;

	intel_connector = &sdvo_connector->base;
	connector = &intel_connector->base;
	connector->polled = DRM_CONNECTOR_POLL_CONNECT;
	encoder->encoder_type = DRM_MODE_ENCODER_DAC;
	connector->connector_type = DRM_MODE_CONNECTOR_VGA;

	switch (device) {
	case 0:
		psb_intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB0;
		sdvo_connector->output_flag = SDVO_OUTPUT_RGB0;
		break;
	case 1:
		psb_intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB1;
		sdvo_connector->output_flag = SDVO_OUTPUT_RGB1;
		break;
	}

	psb_intel_sdvo->base.clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
					  (1 << INTEL_ANALOG_CLONE_BIT);

	psb_intel_sdvo_connector_init(sdvo_connector, psb_intel_sdvo);
	return true;
}
/*
 * Create and register an LVDS connector for output @device (0 or 1),
 * publishing the panel's picture enhancement properties.  Returns false
 * on allocation or property failure.
 */
static bool
psb_intel_sdvo_lvds_init(struct psb_intel_sdvo *psb_intel_sdvo, int device)
{
	struct drm_encoder *encoder = &psb_intel_sdvo->base.base;
	struct psb_intel_sdvo_connector *sdvo_connector;
	struct gma_connector *intel_connector;
	struct drm_connector *connector;

	sdvo_connector = kzalloc(sizeof(*sdvo_connector), GFP_KERNEL);
	if (!sdvo_connector)
		return false;

	intel_connector = &sdvo_connector->base;
	connector = &intel_connector->base;
	encoder->encoder_type = DRM_MODE_ENCODER_LVDS;
	connector->connector_type = DRM_MODE_CONNECTOR_LVDS;

	switch (device) {
	case 0:
		psb_intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS0;
		sdvo_connector->output_flag = SDVO_OUTPUT_LVDS0;
		break;
	case 1:
		psb_intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS1;
		sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1;
		break;
	}

	psb_intel_sdvo->base.clone_mask = (1 << INTEL_ANALOG_CLONE_BIT) |
					  (1 << INTEL_SDVO_LVDS_CLONE_BIT);

	psb_intel_sdvo_connector_init(sdvo_connector, psb_intel_sdvo);

	if (!psb_intel_sdvo_create_enhance_property(psb_intel_sdvo, sdvo_connector)) {
		psb_intel_sdvo_destroy(connector);
		return false;
	}

	return true;
}
/*
 * Instantiate a connector for every output the encoder advertises in
 * @flags.  Returns false if any connector failed to initialize, or if no
 * known output type was reported at all.
 */
static bool
psb_intel_sdvo_output_setup(struct psb_intel_sdvo *psb_intel_sdvo, uint16_t flags)
{
	/* Reset per-output state before probing. */
	psb_intel_sdvo->is_tv = false;
	psb_intel_sdvo->base.needs_tv_clock = false;
	psb_intel_sdvo->is_lvds = false;
	/* SDVO requires XXX1 function may not exist unless it has XXX0 function.*/
	if (flags & SDVO_OUTPUT_TMDS0)
		if (!psb_intel_sdvo_dvi_init(psb_intel_sdvo, 0))
			return false;
	/* Only create the second TMDS connector when both TMDS bits are set. */
	if ((flags & SDVO_TMDS_MASK) == SDVO_TMDS_MASK)
		if (!psb_intel_sdvo_dvi_init(psb_intel_sdvo, 1))
			return false;
	/* TV has no XXX1 function block */
	if (flags & SDVO_OUTPUT_SVID0)
		if (!psb_intel_sdvo_tv_init(psb_intel_sdvo, SDVO_OUTPUT_SVID0))
			return false;
	if (flags & SDVO_OUTPUT_CVBS0)
		if (!psb_intel_sdvo_tv_init(psb_intel_sdvo, SDVO_OUTPUT_CVBS0))
			return false;
	if (flags & SDVO_OUTPUT_RGB0)
		if (!psb_intel_sdvo_analog_init(psb_intel_sdvo, 0))
			return false;
	if ((flags & SDVO_RGB_MASK) == SDVO_RGB_MASK)
		if (!psb_intel_sdvo_analog_init(psb_intel_sdvo, 1))
			return false;
	if (flags & SDVO_OUTPUT_LVDS0)
		if (!psb_intel_sdvo_lvds_init(psb_intel_sdvo, 0))
			return false;
	if ((flags & SDVO_LVDS_MASK) == SDVO_LVDS_MASK)
		if (!psb_intel_sdvo_lvds_init(psb_intel_sdvo, 1))
			return false;
	if ((flags & SDVO_OUTPUT_MASK) == 0) {
		unsigned char bytes[2];
		/* Nothing recognised: log the raw capability flags for debug. */
		psb_intel_sdvo->controlled_output = 0;
		memcpy(bytes, &psb_intel_sdvo->caps.output_flags, 2);
		DRM_DEBUG_KMS("%s: Unknown SDVO output type (0x%02x%02x)\n",
			      SDVO_NAME(psb_intel_sdvo),
			      bytes[0], bytes[1]);
		return false;
	}
	/* The encoder may be routed to either display pipe. */
	psb_intel_sdvo->base.crtc_mask = (1 << 0) | (1 << 1);
	return true;
}
/*
 * Query the encoder's supported TV formats for output @type and publish
 * them as an enum "mode" property on the connector.  Returns false on
 * any query or property-creation failure.
 */
static bool psb_intel_sdvo_tv_create_property(struct psb_intel_sdvo *psb_intel_sdvo,
					      struct psb_intel_sdvo_connector *psb_intel_sdvo_connector,
					      int type)
{
	struct drm_device *dev = psb_intel_sdvo->base.base.dev;
	struct psb_intel_sdvo_tv_format format;
	uint32_t format_map, i;
	if (!psb_intel_sdvo_set_target_output(psb_intel_sdvo, type))
		return false;
	BUILD_BUG_ON(sizeof(format) != 6);
	if (!psb_intel_sdvo_get_value(psb_intel_sdvo,
				      SDVO_CMD_GET_SUPPORTED_TV_FORMATS,
				      &format, sizeof(format)))
		return false;
	/* Fold the first bytes of the reply into a bitmask of formats. */
	memcpy(&format_map, &format, min(sizeof(format_map), sizeof(format)));
	if (format_map == 0)
		return false;
	/* Collect the indices of all supported format names. */
	psb_intel_sdvo_connector->format_supported_num = 0;
	for (i = 0 ; i < ARRAY_SIZE(tv_format_names); i++)
		if (format_map & (1 << i))
			psb_intel_sdvo_connector->tv_format_supported[psb_intel_sdvo_connector->format_supported_num++] = i;
	psb_intel_sdvo_connector->tv_format =
			drm_property_create(dev, DRM_MODE_PROP_ENUM,
					    "mode", psb_intel_sdvo_connector->format_supported_num);
	if (!psb_intel_sdvo_connector->tv_format)
		return false;
	for (i = 0; i < psb_intel_sdvo_connector->format_supported_num; i++)
		drm_property_add_enum(
				psb_intel_sdvo_connector->tv_format,
				i, tv_format_names[psb_intel_sdvo_connector->tv_format_supported[i]]);
	/* Default to the first supported format. */
	psb_intel_sdvo->tv_format_index = psb_intel_sdvo_connector->tv_format_supported[0];
	drm_object_attach_property(&psb_intel_sdvo_connector->base.base.base,
				   psb_intel_sdvo_connector->tv_format, 0);
	return true;
}
/*
 * Helper for the enhance-property constructors below: when the encoder
 * reports enhancement <name>, fetch its maximum and current values and
 * publish a 0..max range property of the same name on the connector.
 * Expands to a "return false" from the enclosing function on any query
 * or property-creation failure.  Relies on the locals dev, connector,
 * enhancements, data_value and response being in scope at the call site.
 */
#define ENHANCEMENT(name, NAME) do { \
	if (enhancements.name) { \
		if (!psb_intel_sdvo_get_value(psb_intel_sdvo, SDVO_CMD_GET_MAX_##NAME, &data_value, 4) || \
		    !psb_intel_sdvo_get_value(psb_intel_sdvo, SDVO_CMD_GET_##NAME, &response, 2)) \
			return false; \
		psb_intel_sdvo_connector->max_##name = data_value[0]; \
		psb_intel_sdvo_connector->cur_##name = response; \
		psb_intel_sdvo_connector->name = \
			drm_property_create_range(dev, 0, #name, 0, data_value[0]); \
		if (!psb_intel_sdvo_connector->name) return false; \
		drm_object_attach_property(&connector->base, \
					   psb_intel_sdvo_connector->name, \
					   psb_intel_sdvo_connector->cur_##name); \
		DRM_DEBUG_KMS(#name ": max %d, default %d, current %d\n", \
			      data_value[0], data_value[1], response); \
	} \
} while(0)
/*
 * Publish the TV picture-enhancement controls reported in @enhancements
 * as DRM properties: overscan margins are handled by hand, the scalar
 * controls go through the ENHANCEMENT() macro, and dot-crawl is a 0/1
 * range.  Returns false on any query or property-creation failure.
 */
static bool
psb_intel_sdvo_create_enhance_property_tv(struct psb_intel_sdvo *psb_intel_sdvo,
					  struct psb_intel_sdvo_connector *psb_intel_sdvo_connector,
					  struct psb_intel_sdvo_enhancements_reply enhancements)
{
	struct drm_device *dev = psb_intel_sdvo->base.base.dev;
	struct drm_connector *connector = &psb_intel_sdvo_connector->base.base;
	uint16_t response, data_value[2];
	/* when horizontal overscan is supported, Add the left/right property */
	if (enhancements.overscan_h) {
		if (!psb_intel_sdvo_get_value(psb_intel_sdvo,
					      SDVO_CMD_GET_MAX_OVERSCAN_H,
					      &data_value, 4))
			return false;
		if (!psb_intel_sdvo_get_value(psb_intel_sdvo,
					      SDVO_CMD_GET_OVERSCAN_H,
					      &response, 2))
			return false;
		/* Left and right margins share the one overscan value. */
		psb_intel_sdvo_connector->max_hscan = data_value[0];
		psb_intel_sdvo_connector->left_margin = data_value[0] - response;
		psb_intel_sdvo_connector->right_margin = psb_intel_sdvo_connector->left_margin;
		psb_intel_sdvo_connector->left =
			drm_property_create_range(dev, 0, "left_margin", 0, data_value[0]);
		if (!psb_intel_sdvo_connector->left)
			return false;
		drm_object_attach_property(&connector->base,
					   psb_intel_sdvo_connector->left,
					   psb_intel_sdvo_connector->left_margin);
		psb_intel_sdvo_connector->right =
			drm_property_create_range(dev, 0, "right_margin", 0, data_value[0]);
		if (!psb_intel_sdvo_connector->right)
			return false;
		drm_object_attach_property(&connector->base,
					   psb_intel_sdvo_connector->right,
					   psb_intel_sdvo_connector->right_margin);
		DRM_DEBUG_KMS("h_overscan: max %d, "
			      "default %d, current %d\n",
			      data_value[0], data_value[1], response);
	}
	/* Likewise for vertical overscan: top/bottom margin properties. */
	if (enhancements.overscan_v) {
		if (!psb_intel_sdvo_get_value(psb_intel_sdvo,
					      SDVO_CMD_GET_MAX_OVERSCAN_V,
					      &data_value, 4))
			return false;
		if (!psb_intel_sdvo_get_value(psb_intel_sdvo,
					      SDVO_CMD_GET_OVERSCAN_V,
					      &response, 2))
			return false;
		psb_intel_sdvo_connector->max_vscan = data_value[0];
		psb_intel_sdvo_connector->top_margin = data_value[0] - response;
		psb_intel_sdvo_connector->bottom_margin = psb_intel_sdvo_connector->top_margin;
		psb_intel_sdvo_connector->top =
			drm_property_create_range(dev, 0, "top_margin", 0, data_value[0]);
		if (!psb_intel_sdvo_connector->top)
			return false;
		drm_object_attach_property(&connector->base,
					   psb_intel_sdvo_connector->top,
					   psb_intel_sdvo_connector->top_margin);
		psb_intel_sdvo_connector->bottom =
			drm_property_create_range(dev, 0, "bottom_margin", 0, data_value[0]);
		if (!psb_intel_sdvo_connector->bottom)
			return false;
		drm_object_attach_property(&connector->base,
					   psb_intel_sdvo_connector->bottom,
					   psb_intel_sdvo_connector->bottom_margin);
		DRM_DEBUG_KMS("v_overscan: max %d, "
			      "default %d, current %d\n",
			      data_value[0], data_value[1], response);
	}
	/* Scalar 0..max controls, each published only when reported. */
	ENHANCEMENT(hpos, HPOS);
	ENHANCEMENT(vpos, VPOS);
	ENHANCEMENT(saturation, SATURATION);
	ENHANCEMENT(contrast, CONTRAST);
	ENHANCEMENT(hue, HUE);
	ENHANCEMENT(sharpness, SHARPNESS);
	ENHANCEMENT(brightness, BRIGHTNESS);
	ENHANCEMENT(flicker_filter, FLICKER_FILTER);
	ENHANCEMENT(flicker_filter_adaptive, FLICKER_FILTER_ADAPTIVE);
	ENHANCEMENT(flicker_filter_2d, FLICKER_FILTER_2D);
	ENHANCEMENT(tv_chroma_filter, TV_CHROMA_FILTER);
	ENHANCEMENT(tv_luma_filter, TV_LUMA_FILTER);
	/* Dot crawl is a simple on/off control. */
	if (enhancements.dot_crawl) {
		if (!psb_intel_sdvo_get_value(psb_intel_sdvo, SDVO_CMD_GET_DOT_CRAWL, &response, 2))
			return false;
		psb_intel_sdvo_connector->max_dot_crawl = 1;
		psb_intel_sdvo_connector->cur_dot_crawl = response & 0x1;
		psb_intel_sdvo_connector->dot_crawl =
			drm_property_create_range(dev, 0, "dot_crawl", 0, 1);
		if (!psb_intel_sdvo_connector->dot_crawl)
			return false;
		drm_object_attach_property(&connector->base,
					   psb_intel_sdvo_connector->dot_crawl,
					   psb_intel_sdvo_connector->cur_dot_crawl);
		DRM_DEBUG_KMS("dot crawl: current %d\n", response);
	}
	return true;
}
/*
 * LVDS panels expose only the backlight brightness enhancement as a DRM
 * property; all other enhancements are TV-specific.  The dev, connector,
 * response and data_value locals are consumed by the ENHANCEMENT() macro.
 */
static bool
psb_intel_sdvo_create_enhance_property_lvds(struct psb_intel_sdvo *psb_intel_sdvo,
					    struct psb_intel_sdvo_connector *psb_intel_sdvo_connector,
					    struct psb_intel_sdvo_enhancements_reply enhancements)
{
	struct drm_device *dev = psb_intel_sdvo->base.base.dev;
	struct drm_connector *connector = &psb_intel_sdvo_connector->base.base;
	uint16_t response, data_value[2];
	ENHANCEMENT(brightness, BRIGHTNESS);
	return true;
}
#undef ENHANCEMENT
/*
 * Query the encoder's supported picture enhancements and publish them as
 * DRM properties on TV and LVDS connectors.  Returns false only when a
 * supported enhancement could not be published; "none supported" is
 * considered success.
 */
static bool psb_intel_sdvo_create_enhance_property(struct psb_intel_sdvo *psb_intel_sdvo,
						   struct psb_intel_sdvo_connector *psb_intel_sdvo_connector)
{
	union {
		struct psb_intel_sdvo_enhancements_reply reply;
		uint16_t response;
	} enhancements;

	/* The reply must be exactly the two bytes the union overlays. */
	BUILD_BUG_ON(sizeof(enhancements) != 2);

	enhancements.response = 0;
	psb_intel_sdvo_get_value(psb_intel_sdvo,
				 SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS,
				 &enhancements, sizeof(enhancements));
	if (!enhancements.response) {
		DRM_DEBUG_KMS("No enhancement is supported\n");
		return true;
	}

	if (IS_TV(psb_intel_sdvo_connector))
		return psb_intel_sdvo_create_enhance_property_tv(psb_intel_sdvo,
								 psb_intel_sdvo_connector,
								 enhancements.reply);
	if (IS_LVDS(psb_intel_sdvo_connector))
		return psb_intel_sdvo_create_enhance_property_lvds(psb_intel_sdvo,
								   psb_intel_sdvo_connector,
								   enhancements.reply);
	return true;
}
/*
 * I2C transfer for the DDC proxy adapter: point the encoder's control bus
 * at the DDC bus selected at init time, then forward the messages to the
 * underlying GMBUS adapter.  Returns -EIO if the bus switch fails.
 */
static int psb_intel_sdvo_ddc_proxy_xfer(struct i2c_adapter *adapter,
					 struct i2c_msg *msgs,
					 int num)
{
	struct psb_intel_sdvo *sdvo = adapter->algo_data;
	if (!psb_intel_sdvo_set_control_bus_switch(sdvo, sdvo->ddc_bus))
		return -EIO;
	return sdvo->i2c->algo->master_xfer(sdvo->i2c, msgs, num);
}
/* The proxy adapter reports exactly the functionality of the real bus. */
static u32 psb_intel_sdvo_ddc_proxy_func(struct i2c_adapter *adapter)
{
	struct psb_intel_sdvo *sdvo = adapter->algo_data;
	return sdvo->i2c->algo->functionality(sdvo->i2c);
}
/* i2c algorithm that tunnels DDC traffic through the SDVO encoder. */
static const struct i2c_algorithm psb_intel_sdvo_ddc_proxy = {
	.master_xfer	= psb_intel_sdvo_ddc_proxy_xfer,
	.functionality	= psb_intel_sdvo_ddc_proxy_func
};
/*
 * Register the "SDVO DDC proxy" i2c adapter through which DDC (EDID)
 * traffic is routed.  Returns true when the adapter was registered.
 */
static bool
psb_intel_sdvo_init_ddc_proxy(struct psb_intel_sdvo *sdvo,
			      struct drm_device *dev)
{
	sdvo->ddc.owner = THIS_MODULE;
	sdvo->ddc.class = I2C_CLASS_DDC;
	snprintf(sdvo->ddc.name, I2C_NAME_SIZE, "SDVO DDC proxy");
	sdvo->ddc.dev.parent = dev->dev;
	/* The proxy callbacks recover the sdvo from algo_data. */
	sdvo->ddc.algo_data = sdvo;
	sdvo->ddc.algo = &psb_intel_sdvo_ddc_proxy;
	return i2c_add_adapter(&sdvo->ddc) == 0;
}
/*
 * Probe and initialize the SDVO encoder on @sdvo_reg: select the i2c bus,
 * register the DDC proxy, verify the device answers, create connectors
 * for each advertised output and select the DDC bus.  Returns true on
 * success; on failure all allocated resources are released.
 */
bool psb_intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
{
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct gma_encoder *gma_encoder;
	struct psb_intel_sdvo *psb_intel_sdvo;
	int i;
	psb_intel_sdvo = kzalloc(sizeof(struct psb_intel_sdvo), GFP_KERNEL);
	if (!psb_intel_sdvo)
		return false;
	psb_intel_sdvo->sdvo_reg = sdvo_reg;
	/* BIOS gives an 8-bit address; store the 7-bit i2c form. */
	psb_intel_sdvo->slave_addr = psb_intel_sdvo_get_slave_addr(dev, sdvo_reg) >> 1;
	psb_intel_sdvo_select_i2c_bus(dev_priv, psb_intel_sdvo, sdvo_reg);
	if (!psb_intel_sdvo_init_ddc_proxy(psb_intel_sdvo, dev)) {
		kfree(psb_intel_sdvo);
		return false;
	}
	/* encoder type will be decided later */
	gma_encoder = &psb_intel_sdvo->base;
	gma_encoder->type = INTEL_OUTPUT_SDVO;
	drm_encoder_init(dev, &gma_encoder->base, &psb_intel_sdvo_enc_funcs,
			 0, NULL);
	/* Read the regs to test if we can talk to the device */
	for (i = 0; i < 0x40; i++) {
		u8 byte;
		if (!psb_intel_sdvo_read_byte(psb_intel_sdvo, i, &byte)) {
			DRM_DEBUG_KMS("No SDVO device found on SDVO%c\n",
				      IS_SDVOB(sdvo_reg) ? 'B' : 'C');
			goto err;
		}
	}
	if (IS_SDVOB(sdvo_reg))
		dev_priv->hotplug_supported_mask |= SDVOB_HOTPLUG_INT_STATUS;
	else
		dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS;
	drm_encoder_helper_add(&gma_encoder->base, &psb_intel_sdvo_helper_funcs);
	/* In default case sdvo lvds is false */
	if (!psb_intel_sdvo_get_capabilities(psb_intel_sdvo, &psb_intel_sdvo->caps))
		goto err;
	if (psb_intel_sdvo_output_setup(psb_intel_sdvo,
					psb_intel_sdvo->caps.output_flags) != true) {
		DRM_DEBUG_KMS("SDVO output failed to setup on SDVO%c\n",
			      IS_SDVOB(sdvo_reg) ? 'B' : 'C');
		goto err;
	}
	psb_intel_sdvo_select_ddc_bus(dev_priv, psb_intel_sdvo, sdvo_reg);
	/* Set the input timing to the screen. Assume always input 0. */
	if (!psb_intel_sdvo_set_target_input(psb_intel_sdvo))
		goto err;
	if (!psb_intel_sdvo_get_input_pixel_clock_range(psb_intel_sdvo,
							&psb_intel_sdvo->pixel_clock_min,
							&psb_intel_sdvo->pixel_clock_max))
		goto err;
	DRM_DEBUG_KMS("%s device VID/DID: %02X:%02X.%02X, "
		      "clock range %dMHz - %dMHz, "
		      "input 1: %c, input 2: %c, "
		      "output 1: %c, output 2: %c\n",
		      SDVO_NAME(psb_intel_sdvo),
		      psb_intel_sdvo->caps.vendor_id, psb_intel_sdvo->caps.device_id,
		      psb_intel_sdvo->caps.device_rev_id,
		      psb_intel_sdvo->pixel_clock_min / 1000,
		      psb_intel_sdvo->pixel_clock_max / 1000,
		      (psb_intel_sdvo->caps.sdvo_inputs_mask & 0x1) ? 'Y' : 'N',
		      (psb_intel_sdvo->caps.sdvo_inputs_mask & 0x2) ? 'Y' : 'N',
		      /* check currently supported outputs */
		      psb_intel_sdvo->caps.output_flags &
		      (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_RGB0) ? 'Y' : 'N',
		      psb_intel_sdvo->caps.output_flags &
		      (SDVO_OUTPUT_TMDS1 | SDVO_OUTPUT_RGB1) ? 'Y' : 'N');
	return true;
err:
	/* Unwind in reverse order of acquisition. */
	drm_encoder_cleanup(&gma_encoder->base);
	i2c_del_adapter(&psb_intel_sdvo->ddc);
	kfree(psb_intel_sdvo);
	return false;
}
| linux-master | drivers/gpu/drm/gma500/psb_intel_sdvo.c |
// SPDX-License-Identifier: GPL-2.0-only
/**************************************************************************
* Copyright (c) 2007, Intel Corporation.
* All Rights Reserved.
*
* Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
* develop this driver.
*
**************************************************************************/
#include <drm/drm_drv.h>
#include <drm/drm_vblank.h>
#include "power.h"
#include "psb_drv.h"
#include "psb_intel_reg.h"
#include "psb_irq.h"
#include "psb_reg.h"
/*
* inline functions
*/
/* Map a pipe index to its PIPExSTAT register offset. */
static inline u32 gma_pipestat(int pipe)
{
	switch (pipe) {
	case 0:
		return PIPEASTAT;
	case 1:
		return PIPEBSTAT;
	case 2:
		return PIPECSTAT;
	default:
		BUG();
	}
}
/* Map a pipe index to its PIPExCONF register offset. */
static inline u32 gma_pipeconf(int pipe)
{
	switch (pipe) {
	case 0:
		return PIPEACONF;
	case 1:
		return PIPEBCONF;
	case 2:
		return PIPECCONF;
	default:
		BUG();
	}
}
/*
 * Enable the pipe status interrupt bits in @mask for @pipe.  The enabled
 * set is mirrored in dev_priv->pipestat[] so it is known even while the
 * display island is powered down (when the register write is skipped).
 */
void gma_enable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask)
{
	if ((dev_priv->pipestat[pipe] & mask) != mask) {
		u32 reg = gma_pipestat(pipe);
		dev_priv->pipestat[pipe] |= mask;
		/* Enable the interrupt, clear any pending status */
		if (gma_power_begin(&dev_priv->dev, false)) {
			u32 writeVal = PSB_RVDC32(reg);
			/* mask >> 16 lands on the matching status bit,
			 * whose write-back acks any stale event. */
			writeVal |= (mask | (mask >> 16));
			PSB_WVDC32(writeVal, reg);
			(void) PSB_RVDC32(reg);	/* posting read */
			gma_power_end(&dev_priv->dev);
		}
	}
}
/*
 * Disable the pipe status interrupt bits in @mask for @pipe.  As with
 * enable, the soft mirror in dev_priv->pipestat[] is updated even when
 * the hardware write is skipped because the display island is off.
 */
void gma_disable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask)
{
	if ((dev_priv->pipestat[pipe] & mask) != 0) {
		u32 reg = gma_pipestat(pipe);
		dev_priv->pipestat[pipe] &= ~mask;
		if (gma_power_begin(&dev_priv->dev, false)) {
			u32 writeVal = PSB_RVDC32(reg);
			writeVal &= ~mask;
			PSB_WVDC32(writeVal, reg);
			(void) PSB_RVDC32(reg);	/* posting read */
			gma_power_end(&dev_priv->dev);
		}
	}
}
/*
 * Display controller interrupt handler for pipe event.
 */
static void gma_pipe_event_handler(struct drm_device *dev, int pipe)
{
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	uint32_t pipe_stat_val = 0;
	uint32_t pipe_stat_reg = gma_pipestat(pipe);
	uint32_t pipe_enable = dev_priv->pipestat[pipe];
	/* Enable bits shifted down to their matching status positions
	 * (see the mask >> 16 pairing in gma_enable_pipestat()). */
	uint32_t pipe_status = dev_priv->pipestat[pipe] >> 16;
	uint32_t pipe_clear;
	uint32_t i = 0;
	spin_lock(&dev_priv->irqmask_lock);
	pipe_stat_val = PSB_RVDC32(pipe_stat_reg);
	/* Keep only status bits whose enable bit is set in our mirror. */
	pipe_stat_val &= pipe_enable | pipe_status;
	pipe_stat_val &= pipe_stat_val >> 16;
	spin_unlock(&dev_priv->irqmask_lock);
	/* Clear the 2nd level interrupt status bits
	 * Sometimes the bits are very sticky so we repeat until they unstick */
	for (i = 0; i < 0xffff; i++) {
		PSB_WVDC32(PSB_RVDC32(pipe_stat_reg), pipe_stat_reg);
		pipe_clear = PSB_RVDC32(pipe_stat_reg) & pipe_status;
		if (pipe_clear == 0)
			break;
	}
	if (pipe_clear)
		dev_err(dev->dev,
			"%s, can't clear status bits for pipe %d, its value = 0x%x.\n",
			__func__, pipe, PSB_RVDC32(pipe_stat_reg));
	if (pipe_stat_val & PIPE_VBLANK_STATUS) {
		struct drm_crtc *crtc = drm_crtc_from_index(dev, pipe);
		struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
		unsigned long flags;
		/* Deliver the vblank and complete any pending page flip. */
		drm_handle_vblank(dev, pipe);
		spin_lock_irqsave(&dev->event_lock, flags);
		if (gma_crtc->page_flip_event) {
			drm_crtc_send_vblank_event(crtc,
						   gma_crtc->page_flip_event);
			gma_crtc->page_flip_event = NULL;
			/* Balances the vblank reference the flip took. */
			drm_crtc_vblank_put(crtc);
		}
		spin_unlock_irqrestore(&dev->event_lock, flags);
	}
}
/*
 * Display controller interrupt dispatch: fan the masked VDC status bits
 * out to the OpRegion ASLE handler and the per-pipe event handlers.
 */
static void gma_vdc_interrupt(struct drm_device *dev, uint32_t vdc_stat)
{
	static const uint32_t vsync_flag[] = {
		_PSB_VSYNC_PIPEA_FLAG,
		_PSB_VSYNC_PIPEB_FLAG,
	};
	int pipe;

	if (vdc_stat & _PSB_IRQ_ASLE)
		psb_intel_opregion_asle_intr(dev);

	for (pipe = 0; pipe < ARRAY_SIZE(vsync_flag); pipe++) {
		if (vdc_stat & vsync_flag[pipe])
			gma_pipe_event_handler(dev, pipe);
	}
}
/*
 * SGX interrupt handler
 */
static void gma_sgx_interrupt(struct drm_device *dev, u32 stat_1, u32 stat_2)
{
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	u32 val, addr;
	if (stat_1 & _PSB_CE_TWOD_COMPLETE)
		val = PSB_RSGX32(PSB_CR_2D_BLIT_STATUS); /* NOTE(review): value unused — presumably a read-to-ack; confirm */
	if (stat_2 & _PSB_CE2_BIF_REQUESTER_FAULT) {
		val = PSB_RSGX32(PSB_CR_BIF_INT_STAT);
		addr = PSB_RSGX32(PSB_CR_BIF_FAULT);
		if (val) {
			/* Decode and log which requestor caused the MMU fault. */
			if (val & _PSB_CBI_STAT_PF_N_RW)
				DRM_ERROR("SGX MMU page fault:");
			else
				DRM_ERROR("SGX MMU read / write protection fault:");
			if (val & _PSB_CBI_STAT_FAULT_CACHE)
				DRM_ERROR("\tCache requestor");
			if (val & _PSB_CBI_STAT_FAULT_TA)
				DRM_ERROR("\tTA requestor");
			if (val & _PSB_CBI_STAT_FAULT_VDM)
				DRM_ERROR("\tVDM requestor");
			if (val & _PSB_CBI_STAT_FAULT_2D)
				DRM_ERROR("\t2D requestor");
			if (val & _PSB_CBI_STAT_FAULT_PBE)
				DRM_ERROR("\tPBE requestor");
			if (val & _PSB_CBI_STAT_FAULT_TSP)
				DRM_ERROR("\tTSP requestor");
			if (val & _PSB_CBI_STAT_FAULT_ISP)
				DRM_ERROR("\tISP requestor");
			if (val & _PSB_CBI_STAT_FAULT_USSEPDS)
				DRM_ERROR("\tUSSEPDS requestor");
			if (val & _PSB_CBI_STAT_FAULT_HOST)
				DRM_ERROR("\tHost requestor");
			DRM_ERROR("\tMMU failing address is 0x%08x.\n",
				  (unsigned int)addr);
		}
	}
	/* Clear bits */
	PSB_WSGX32(stat_1, PSB_CR_EVENT_HOST_CLEAR);
	PSB_WSGX32(stat_2, PSB_CR_EVENT_HOST_CLEAR2);
	PSB_RSGX32(PSB_CR_EVENT_HOST_CLEAR2); /* posting read */
}
/*
 * Top-level shared interrupt handler: demultiplexes the display (VDC),
 * SGX and hotplug sources from the interrupt identity register and
 * acknowledges the bits it handled.
 */
static irqreturn_t gma_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	uint32_t vdc_stat, dsp_int = 0, sgx_int = 0, hotplug_int = 0;
	u32 sgx_stat_1, sgx_stat_2;
	int handled = 0;
	spin_lock(&dev_priv->irqmask_lock);
	vdc_stat = PSB_RVDC32(PSB_INT_IDENTITY_R);
	if (vdc_stat & (_PSB_PIPE_EVENT_FLAG|_PSB_IRQ_ASLE))
		dsp_int = 1;
	if (vdc_stat & _PSB_IRQ_SGX_FLAG)
		sgx_int = 1;
	if (vdc_stat & _PSB_IRQ_DISP_HOTSYNC)
		hotplug_int = 1;
	/* Only act on sources we actually have enabled. */
	vdc_stat &= dev_priv->vdc_irq_mask;
	spin_unlock(&dev_priv->irqmask_lock);
	if (dsp_int) {
		gma_vdc_interrupt(dev, vdc_stat);
		handled = 1;
	}
	if (sgx_int) {
		sgx_stat_1 = PSB_RSGX32(PSB_CR_EVENT_STATUS);
		sgx_stat_2 = PSB_RSGX32(PSB_CR_EVENT_STATUS2);
		gma_sgx_interrupt(dev, sgx_stat_1, sgx_stat_2);
		handled = 1;
	}
	/* Note: this bit has other meanings on some devices, so we will
	   need to address that later if it ever matters */
	if (hotplug_int && dev_priv->ops->hotplug) {
		handled = dev_priv->ops->hotplug(dev);
		/* Write the status back to ack the hotplug event. */
		REG_WRITE(PORT_HOTPLUG_STAT, REG_READ(PORT_HOTPLUG_STAT));
	}
	/* Acknowledge the enabled bits we saw in the identity register. */
	PSB_WVDC32(vdc_stat, PSB_INT_IDENTITY_R);
	(void) PSB_RVDC32(PSB_INT_IDENTITY_R);
	rmb();
	if (!handled)
		return IRQ_NONE;
	return IRQ_HANDLED;
}
/*
 * Quiesce all interrupt sources and build the initial vdc_irq_mask
 * before the interrupt handler is installed.
 */
void gma_irq_preinstall(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	unsigned long irqflags;
	spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
	/* Mask and disable everything at the hardware level first. */
	PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
	PSB_WVDC32(0x00000000, PSB_INT_MASK_R);
	PSB_WVDC32(0x00000000, PSB_INT_ENABLE_R);
	PSB_WSGX32(0x00000000, PSB_CR_EVENT_HOST_ENABLE);
	PSB_RSGX32(PSB_CR_EVENT_HOST_ENABLE); /* posting read */
	if (dev->vblank[0].enabled)
		dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEA_FLAG;
	if (dev->vblank[1].enabled)
		dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEB_FLAG;
	/* Revisit this area - want per device masks ? */
	if (dev_priv->ops->hotplug)
		dev_priv->vdc_irq_mask |= _PSB_IRQ_DISP_HOTSYNC;
	dev_priv->vdc_irq_mask |= _PSB_IRQ_ASLE | _PSB_IRQ_SGX_FLAG;
	/* This register is safe even if display island is off */
	PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
	spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
}
/*
 * Enable interrupt delivery once the handler is installed: SGX events,
 * the previously built vdc_irq_mask, per-pipe vblank state and hotplug.
 */
void gma_irq_postinstall(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	unsigned long irqflags;
	unsigned int i;
	spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
	/* Enable 2D and MMU fault interrupts */
	PSB_WSGX32(_PSB_CE2_BIF_REQUESTER_FAULT, PSB_CR_EVENT_HOST_ENABLE2);
	PSB_WSGX32(_PSB_CE_TWOD_COMPLETE, PSB_CR_EVENT_HOST_ENABLE);
	PSB_RSGX32(PSB_CR_EVENT_HOST_ENABLE); /* Post */
	/* This register is safe even if display island is off */
	PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
	PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
	/* Sync per-pipe vblank interrupt state with the DRM core's view. */
	for (i = 0; i < dev->num_crtcs; ++i) {
		if (dev->vblank[i].enabled)
			gma_enable_pipestat(dev_priv, i, PIPE_VBLANK_INTERRUPT_ENABLE);
		else
			gma_disable_pipestat(dev_priv, i, PIPE_VBLANK_INTERRUPT_ENABLE);
	}
	if (dev_priv->ops->hotplug_enable)
		dev_priv->ops->hotplug_enable(dev, true);
	spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
}
/*
 * Set up and enable interrupt handling for the device: optionally switch
 * to MSI, quiesce the hardware, request the (shared) interrupt line and
 * enable delivery.  Returns 0 on success or a negative errno.
 *
 * Fix over the previous version: the error paths (-ENOTCONN and a failed
 * request_irq()) used to return with MSI still enabled, leaking the
 * vector; they now disable MSI before returning.
 */
int gma_irq_install(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct pci_dev *pdev = to_pci_dev(dev->dev);
	int ret;

	if (dev_priv->use_msi && pci_enable_msi(pdev)) {
		dev_warn(dev->dev, "Enabling MSI failed!\n");
		dev_priv->use_msi = false;
	}

	/* Checked after MSI setup because enabling MSI changes pdev->irq. */
	if (pdev->irq == IRQ_NOTCONNECTED) {
		ret = -ENOTCONN;
		goto err_disable_msi;
	}

	gma_irq_preinstall(dev);

	/* PCI devices require shared interrupts. */
	ret = request_irq(pdev->irq, gma_irq_handler, IRQF_SHARED,
			  dev->driver->name, dev);
	if (ret)
		goto err_disable_msi;

	gma_irq_postinstall(dev);

	return 0;

err_disable_msi:
	if (dev_priv->use_msi) {
		pci_disable_msi(pdev);
		dev_priv->use_msi = false;
	}
	return ret;
}
/*
 * Tear down interrupt handling: disable hotplug and per-pipe vblank
 * interrupts, mask the display sources, ack anything pending, then
 * release the interrupt line and MSI vector.
 */
void gma_irq_uninstall(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct pci_dev *pdev = to_pci_dev(dev->dev);
	unsigned long irqflags;
	unsigned int i;
	spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
	if (dev_priv->ops->hotplug_enable)
		dev_priv->ops->hotplug_enable(dev, false);
	PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
	for (i = 0; i < dev->num_crtcs; ++i) {
		if (dev->vblank[i].enabled)
			gma_disable_pipestat(dev_priv, i, PIPE_VBLANK_INTERRUPT_ENABLE);
	}
	/* Keep only the non-display sources enabled. */
	dev_priv->vdc_irq_mask &= _PSB_IRQ_SGX_FLAG |
				  _PSB_IRQ_MSVDX_FLAG |
				  _LNC_IRQ_TOPAZ_FLAG;
	/* These two registers are safe even if display island is off */
	PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
	PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
	wmb();
	/* This register is safe even if display island is off */
	PSB_WVDC32(PSB_RVDC32(PSB_INT_IDENTITY_R), PSB_INT_IDENTITY_R);
	spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
	free_irq(pdev->irq, dev);
	if (dev_priv->use_msi)
		pci_disable_msi(pdev);
}
/*
 * DRM vblank hook: enable vblank interrupt delivery for @crtc.
 * Returns -EINVAL when the pipe is not currently enabled.
 */
int gma_crtc_enable_vblank(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	unsigned int pipe = crtc->index;
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	unsigned long irqflags;
	uint32_t reg_val = 0;
	uint32_t pipeconf_reg = gma_pipeconf(pipe);
	/* reg_val stays 0 (pipe reads as disabled) if power-up fails. */
	if (gma_power_begin(dev, false)) {
		reg_val = REG_READ(pipeconf_reg);
		gma_power_end(dev);
	}
	if (!(reg_val & PIPEACONF_ENABLE))
		return -EINVAL;
	spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
	if (pipe == 0)
		dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEA_FLAG;
	else if (pipe == 1)
		dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEB_FLAG;
	PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
	PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
	gma_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_ENABLE);
	spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
	return 0;
}
/*
 * DRM vblank hook: disable vblank interrupt delivery for @crtc,
 * mirroring gma_crtc_enable_vblank().
 */
void gma_crtc_disable_vblank(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	unsigned int pipe = crtc->index;
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	unsigned long irqflags;
	spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
	if (pipe == 0)
		dev_priv->vdc_irq_mask &= ~_PSB_VSYNC_PIPEA_FLAG;
	else if (pipe == 1)
		dev_priv->vdc_irq_mask &= ~_PSB_VSYNC_PIPEB_FLAG;
	PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
	PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
	gma_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_ENABLE);
	spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
}
/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index.  Returns the hardware frame counter for the
 * pipe, or 0 when the pipe is disabled or cannot be powered up.
 */
u32 gma_crtc_get_vblank_counter(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	unsigned int pipe = crtc->index;
	uint32_t high_frame = PIPEAFRAMEHIGH;
	uint32_t low_frame = PIPEAFRAMEPIXEL;
	uint32_t pipeconf_reg = PIPEACONF;
	uint32_t reg_val = 0;
	uint32_t high1 = 0, high2 = 0, low = 0, count = 0;
	/* Re-point the register offsets for pipes B and C. */
	switch (pipe) {
	case 0:
		break;
	case 1:
		high_frame = PIPEBFRAMEHIGH;
		low_frame = PIPEBFRAMEPIXEL;
		pipeconf_reg = PIPEBCONF;
		break;
	case 2:
		high_frame = PIPECFRAMEHIGH;
		low_frame = PIPECFRAMEPIXEL;
		pipeconf_reg = PIPECCONF;
		break;
	default:
		dev_err(dev->dev, "%s, invalid pipe.\n", __func__);
		return 0;
	}
	if (!gma_power_begin(dev, false))
		return 0;
	reg_val = REG_READ(pipeconf_reg);
	if (!(reg_val & PIPEACONF_ENABLE)) {
		dev_err(dev->dev, "trying to get vblank count for disabled pipe %u\n",
			pipe);
		goto err_gma_power_end;
	}
	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = ((REG_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
			 PIPE_FRAME_HIGH_SHIFT);
		low =  ((REG_READ(low_frame) & PIPE_FRAME_LOW_MASK) >>
			PIPE_FRAME_LOW_SHIFT);
		high2 = ((REG_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
			 PIPE_FRAME_HIGH_SHIFT);
	} while (high1 != high2);
	count = (high1 << 8) | low;
err_gma_power_end:
	gma_power_end(dev);
	return count;
}
| linux-master | drivers/gpu/drm/gma500/psb_irq.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright © 2006-2007 Intel Corporation
*
* Authors:
* Eric Anholt <[email protected]>
* Dave Airlie <[email protected]>
* Jesse Barnes <[email protected]>
*/
#include <linux/i2c.h>
#include <linux/pm_runtime.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_simple_kms_helper.h>
#include "intel_bios.h"
#include "power.h"
#include "psb_drv.h"
#include "psb_intel_drv.h"
#include "psb_intel_reg.h"
/*
* LVDS I2C backlight control macros
*/
#define BRIGHTNESS_MAX_LEVEL 100
#define BRIGHTNESS_MASK 0xFF
#define BLC_I2C_TYPE 0x01
#define BLC_PWM_TYPT 0x02
#define BLC_POLARITY_NORMAL 0
#define BLC_POLARITY_INVERSE 1
#define PSB_BLC_MAX_PWM_REG_FREQ (0xFFFE)
#define PSB_BLC_MIN_PWM_REG_FREQ (0x2)
#define PSB_BLC_PWM_PRECISION_FACTOR (10)
#define PSB_BACKLIGHT_PWM_CTL_SHIFT (16)
#define PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR (0xFFFE)
struct psb_intel_lvds_priv {
	/*
	 * Saved LVDS output states, captured in psb_intel_lvds_save()
	 * and written back by psb_intel_lvds_restore().
	 */
	uint32_t savePP_ON;		/* snapshot of LVDSPP_ON */
	uint32_t savePP_OFF;		/* snapshot of LVDSPP_OFF */
	uint32_t saveLVDS;		/* snapshot of LVDS port control */
	uint32_t savePP_CONTROL;	/* snapshot of PP_CONTROL */
	uint32_t savePP_CYCLE;		/* snapshot of PP_CYCLE */
	uint32_t savePFIT_CONTROL;	/* snapshot of PFIT_CONTROL */
	uint32_t savePFIT_PGM_RATIOS;	/* snapshot of PFIT_PGM_RATIOS */
	uint32_t saveBLC_PWM_CTL;	/* snapshot of BLC_PWM_CTL */

	struct gma_i2c_chan *i2c_bus;	/* backlight control bus (GPIOB) */
};
/*
 * Returns the maximum level of the backlight duty cycle field, derived
 * from the modulation-frequency field of BLC_PWM_CTL (or its saved copy
 * if the device is powered down), doubled to a 16-bit setting range.
 * A result of 0 indicates an unprogrammed register and is logged.
 */
static u32 psb_intel_lvds_get_max_backlight(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	u32 ret;

	if (gma_power_begin(dev, false)) {
		ret = REG_READ(BLC_PWM_CTL);
		gma_power_end(dev);
	} else		/* Powered off, use the saved value */
		ret = dev_priv->regs.saveBLC_PWM_CTL;

	/* Top 15bits hold the frequency mask */
	ret = (ret & BACKLIGHT_MODULATION_FREQ_MASK) >>
					BACKLIGHT_MODULATION_FREQ_SHIFT;

	ret *= 2;	/* Return a 16bit range as needed for setting */
	if (ret == 0)
		dev_err(dev->dev, "BL bug: Reg %08x save %08X\n",
			REG_READ(BLC_PWM_CTL), dev_priv->regs.saveBLC_PWM_CTL);

	return ret;
}
/*
* Set LVDS backlight level by I2C command
*
* FIXME: at some point we need to both track this for PM and also
* disable runtime pm on MRST if the brightness is nil (ie blanked)
*/
static int psb_lvds_i2c_set_brightness(struct drm_device *dev,
unsigned int level)
{
struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct gma_i2c_chan *lvds_i2c_bus = dev_priv->lvds_i2c_bus;
u8 out_buf[2];
unsigned int blc_i2c_brightness;
struct i2c_msg msgs[] = {
{
.addr = lvds_i2c_bus->slave_addr,
.flags = 0,
.len = 2,
.buf = out_buf,
}
};
blc_i2c_brightness = BRIGHTNESS_MASK & ((unsigned int)level *
BRIGHTNESS_MASK /
BRIGHTNESS_MAX_LEVEL);
if (dev_priv->lvds_bl->pol == BLC_POLARITY_INVERSE)
blc_i2c_brightness = BRIGHTNESS_MASK - blc_i2c_brightness;
out_buf[0] = dev_priv->lvds_bl->brightnesscmd;
out_buf[1] = (u8)blc_i2c_brightness;
if (i2c_transfer(&lvds_i2c_bus->base, msgs, 1) == 1) {
dev_dbg(dev->dev, "I2C set brightness.(command, value) (%d, %d)\n",
dev_priv->lvds_bl->brightnesscmd,
blc_i2c_brightness);
return 0;
}
dev_err(dev->dev, "I2C transfer error\n");
return -1;
}
/*
 * Set LVDS backlight level via the PWM duty-cycle field of BLC_PWM_CTL.
 * @level is a percentage (0..BRIGHTNESS_MAX_LEVEL), scaled against the
 * maximum duty cycle; inverted when the VBT declares inverse polarity.
 * Always returns 0.
 */
static int psb_lvds_pwm_set_brightness(struct drm_device *dev, int level)
{
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);

	u32 max_pwm_blc;
	u32 blc_pwm_duty_cycle;

	max_pwm_blc = psb_intel_lvds_get_max_backlight(dev);

	/*BLC_PWM_CTL Should be initiated while backlight device init*/
	BUG_ON(max_pwm_blc == 0);

	blc_pwm_duty_cycle = level * max_pwm_blc / BRIGHTNESS_MAX_LEVEL;

	if (dev_priv->lvds_bl->pol == BLC_POLARITY_INVERSE)
		blc_pwm_duty_cycle = max_pwm_blc - blc_pwm_duty_cycle;

	/* Keep bit 0 of the duty-cycle field clear (polarity bit). */
	blc_pwm_duty_cycle &= PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR;
	REG_WRITE(BLC_PWM_CTL,
		  (max_pwm_blc << PSB_BACKLIGHT_PWM_CTL_SHIFT) |
		  (blc_pwm_duty_cycle));

	dev_info(dev->dev, "Backlight lvds set brightness %08x\n",
		 (max_pwm_blc << PSB_BACKLIGHT_PWM_CTL_SHIFT) |
		 (blc_pwm_duty_cycle));

	return 0;
}
/*
 * Set the LVDS backlight level, dispatching to the I2C or PWM backend
 * according to the backlight type reported by the VBT.
 */
void psb_intel_lvds_set_brightness(struct drm_device *dev, int level)
{
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);

	dev_dbg(dev->dev, "backlight level is %d\n", level);

	if (!dev_priv->lvds_bl) {
		dev_err(dev->dev, "NO LVDS backlight info\n");
		return;
	}

	if (dev_priv->lvds_bl->type != BLC_I2C_TYPE)
		psb_lvds_pwm_set_brightness(dev, level);
	else
		psb_lvds_i2c_set_brightness(dev, level);
}
/*
 * Sets the backlight level.
 *
 * level: backlight level, from 0 to psb_intel_lvds_get_max_backlight().
 *
 * Programs the duty-cycle field of BLC_PWM_CTL when the device is
 * awake; in both cases the composed value is mirrored into
 * regs.saveBLC_PWM_CTL so it survives a power transition.
 */
static void psb_intel_lvds_set_backlight(struct drm_device *dev, int level)
{
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	u32 blc_pwm_ctl;

	if (gma_power_begin(dev, false)) {
		blc_pwm_ctl = REG_READ(BLC_PWM_CTL);
		blc_pwm_ctl &= ~BACKLIGHT_DUTY_CYCLE_MASK;
		REG_WRITE(BLC_PWM_CTL,
				(blc_pwm_ctl |
				(level << BACKLIGHT_DUTY_CYCLE_SHIFT)));
		dev_priv->regs.saveBLC_PWM_CTL = (blc_pwm_ctl |
					(level << BACKLIGHT_DUTY_CYCLE_SHIFT));
		gma_power_end(dev);
	} else {
		/* Powered down: only the saved copy can be updated. */
		blc_pwm_ctl = dev_priv->regs.saveBLC_PWM_CTL &
				~BACKLIGHT_DUTY_CYCLE_MASK;
		dev_priv->regs.saveBLC_PWM_CTL = (blc_pwm_ctl |
					(level << BACKLIGHT_DUTY_CYCLE_SHIFT));
	}
}
/*
 * Sets the power state for the panel.
 *
 * On power-up the panel is enabled first and the backlight restored
 * after PP_STATUS reports the panel on; on power-down the backlight is
 * cut before the panel. Both directions busy-wait on PP_STATUS, so the
 * ordering of these statements matters.
 */
static void psb_intel_lvds_set_power(struct drm_device *dev, bool on)
{
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
	u32 pp_status;

	if (!gma_power_begin(dev, true)) {
		dev_err(dev->dev, "set power, chip off!\n");
		return;
	}

	if (on) {
		REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) |
			  POWER_TARGET_ON);
		do {
			pp_status = REG_READ(PP_STATUS);
		} while ((pp_status & PP_ON) == 0);	/* wait for panel on */

		psb_intel_lvds_set_backlight(dev,
					     mode_dev->backlight_duty_cycle);
	} else {
		psb_intel_lvds_set_backlight(dev, 0);

		REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) &
			  ~POWER_TARGET_ON);
		do {
			pp_status = REG_READ(PP_STATUS);
		} while (pp_status & PP_ON);		/* wait for panel off */
	}

	gma_power_end(dev);
}
/*
 * Encoder DPMS hook: DRM_MODE_DPMS_ON powers the panel up, every other
 * state powers it down.
 */
static void psb_intel_lvds_encoder_dpms(struct drm_encoder *encoder, int mode)
{
	struct drm_device *dev = encoder->dev;

	psb_intel_lvds_set_power(dev, mode == DRM_MODE_DPMS_ON);

	/* XXX: We never power down the LVDS pairs. */
}
/*
 * Connector save hook: snapshot the panel-power, LVDS port, panel-fitter
 * and backlight registers into the per-encoder private state so they can
 * be replayed by psb_intel_lvds_restore().
 */
static void psb_intel_lvds_save(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
	struct psb_intel_lvds_priv *lvds_priv =
		(struct psb_intel_lvds_priv *)gma_encoder->dev_priv;

	lvds_priv->savePP_ON = REG_READ(LVDSPP_ON);
	lvds_priv->savePP_OFF = REG_READ(LVDSPP_OFF);
	lvds_priv->saveLVDS = REG_READ(LVDS);
	lvds_priv->savePP_CONTROL = REG_READ(PP_CONTROL);
	lvds_priv->savePP_CYCLE = REG_READ(PP_CYCLE);
	/*lvds_priv->savePP_DIVISOR = REG_READ(PP_DIVISOR);*/
	lvds_priv->saveBLC_PWM_CTL = REG_READ(BLC_PWM_CTL);
	lvds_priv->savePFIT_CONTROL = REG_READ(PFIT_CONTROL);
	lvds_priv->savePFIT_PGM_RATIOS = REG_READ(PFIT_PGM_RATIOS);

	/*TODO: move backlight_duty_cycle to psb_intel_lvds_priv*/
	/*
	 * NOTE(review): the duty cycle comes from the globally saved
	 * regs.saveBLC_PWM_CTL, not the BLC_PWM_CTL value snapshotted
	 * just above — presumably intentional, but worth confirming.
	 */
	dev_priv->backlight_duty_cycle = (dev_priv->regs.saveBLC_PWM_CTL &
						BACKLIGHT_DUTY_CYCLE_MASK);

	/*
	 * If the light is off at server startup,
	 * just make it full brightness
	 */
	if (dev_priv->backlight_duty_cycle == 0)
		dev_priv->backlight_duty_cycle =
		psb_intel_lvds_get_max_backlight(dev);

	dev_dbg(dev->dev, "(0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x)\n",
		lvds_priv->savePP_ON,
		lvds_priv->savePP_OFF,
		lvds_priv->saveLVDS,
		lvds_priv->savePP_CONTROL,
		lvds_priv->savePP_CYCLE,
		lvds_priv->saveBLC_PWM_CTL);
}
/*
 * Connector restore hook: write back the registers captured by
 * psb_intel_lvds_save() and re-run the panel power sequence to match
 * the saved PP_CONTROL state.
 */
static void psb_intel_lvds_restore(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	u32 pp_status;
	struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
	struct psb_intel_lvds_priv *lvds_priv =
		(struct psb_intel_lvds_priv *)gma_encoder->dev_priv;

	dev_dbg(dev->dev, "(0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x)\n",
		lvds_priv->savePP_ON,
		lvds_priv->savePP_OFF,
		lvds_priv->saveLVDS,
		lvds_priv->savePP_CONTROL,
		lvds_priv->savePP_CYCLE,
		lvds_priv->saveBLC_PWM_CTL);

	REG_WRITE(BLC_PWM_CTL, lvds_priv->saveBLC_PWM_CTL);
	REG_WRITE(PFIT_CONTROL, lvds_priv->savePFIT_CONTROL);
	REG_WRITE(PFIT_PGM_RATIOS, lvds_priv->savePFIT_PGM_RATIOS);
	REG_WRITE(LVDSPP_ON, lvds_priv->savePP_ON);
	REG_WRITE(LVDSPP_OFF, lvds_priv->savePP_OFF);
	/*REG_WRITE(PP_DIVISOR, lvds_priv->savePP_DIVISOR);*/
	REG_WRITE(PP_CYCLE, lvds_priv->savePP_CYCLE);
	REG_WRITE(PP_CONTROL, lvds_priv->savePP_CONTROL);
	REG_WRITE(LVDS, lvds_priv->saveLVDS);

	/* Busy-wait until the panel reaches the saved power state. */
	if (lvds_priv->savePP_CONTROL & POWER_TARGET_ON) {
		REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) |
			POWER_TARGET_ON);
		do {
			pp_status = REG_READ(PP_STATUS);
		} while ((pp_status & PP_ON) == 0);
	} else {
		REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) &
			~POWER_TARGET_ON);
		do {
			pp_status = REG_READ(PP_STATUS);
		} while (pp_status & PP_ON);
	}
}
/*
 * Validate a candidate mode against the fixed panel timings: reject
 * doublescan and interlaced modes outright, and anything larger than
 * the panel's native resolution.
 */
enum drm_mode_status psb_intel_lvds_mode_valid(struct drm_connector *connector,
				 struct drm_display_mode *mode)
{
	struct drm_psb_private *dev_priv = to_drm_psb_private(connector->dev);
	struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
	struct drm_display_mode *panel_mode =
				dev_priv->mode_dev.panel_fixed_mode;

	/* The secondary MIPI output carries its own fixed panel mode. */
	if (gma_encoder->type == INTEL_OUTPUT_MIPI2)
		panel_mode = dev_priv->mode_dev.panel_fixed_mode2;

	/* just in case */
	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
		return MODE_NO_DBLESCAN;

	/* just in case */
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		return MODE_NO_INTERLACE;

	if (panel_mode &&
	    (mode->hdisplay > panel_mode->hdisplay ||
	     mode->vdisplay > panel_mode->vdisplay))
		return MODE_PANEL;

	return MODE_OK;
}
/*
 * Validate the CRTC/encoder pairing for LVDS and force the adjusted
 * mode to the fixed panel timings (the panel fitter then scales the
 * original mode's H/VDisplay onto the panel). Returns false when this
 * configuration cannot drive LVDS.
 */
bool psb_intel_lvds_mode_fixup(struct drm_encoder *encoder,
				   const struct drm_display_mode *mode,
				   struct drm_display_mode *adjusted_mode)
{
	struct drm_device *dev = encoder->dev;
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
	struct gma_crtc *gma_crtc = to_gma_crtc(encoder->crtc);
	struct drm_encoder *tmp_encoder;
	struct drm_display_mode *panel_fixed_mode = mode_dev->panel_fixed_mode;
	struct gma_encoder *gma_encoder = to_gma_encoder(encoder);

	if (gma_encoder->type == INTEL_OUTPUT_MIPI2)
		panel_fixed_mode = mode_dev->panel_fixed_mode2;

	/* PSB requires the LVDS is on pipe B, MRST has only one pipe anyway */
	if (!IS_MRST(dev) && gma_crtc->pipe == 0) {
		pr_err("Can't support LVDS on pipe A\n");
		return false;
	}
	if (IS_MRST(dev) && gma_crtc->pipe != 0) {
		pr_err("Must use PIPE A\n");
		return false;
	}
	/* Should never happen!! */
	list_for_each_entry(tmp_encoder, &dev->mode_config.encoder_list,
			    head) {
		if (tmp_encoder != encoder
		    && tmp_encoder->crtc == encoder->crtc) {
			pr_err("Can't enable LVDS and another encoder on the same pipe\n");
			return false;
		}
	}

	/*
	 * If we have timings from the BIOS for the panel, put them in
	 * to the adjusted mode. The CRTC will be set up for this mode,
	 * with the panel scaling set up to source from the H/VDisplay
	 * of the original mode.
	 */
	if (panel_fixed_mode != NULL) {
		adjusted_mode->hdisplay = panel_fixed_mode->hdisplay;
		adjusted_mode->hsync_start = panel_fixed_mode->hsync_start;
		adjusted_mode->hsync_end = panel_fixed_mode->hsync_end;
		adjusted_mode->htotal = panel_fixed_mode->htotal;
		adjusted_mode->vdisplay = panel_fixed_mode->vdisplay;
		adjusted_mode->vsync_start = panel_fixed_mode->vsync_start;
		adjusted_mode->vsync_end = panel_fixed_mode->vsync_end;
		adjusted_mode->vtotal = panel_fixed_mode->vtotal;
		adjusted_mode->clock = panel_fixed_mode->clock;
		drm_mode_set_crtcinfo(adjusted_mode,
				      CRTC_INTERLACE_HALVE_V);
	}

	/*
	 * XXX: It would be nice to support lower refresh rates on the
	 * panels to reduce power consumption, and perhaps match the
	 * user's requested refresh rate.
	 */

	return true;
}
/*
 * Encoder prepare hook: snapshot BLC_PWM_CTL and the current backlight
 * duty cycle so commit can restore brightness, then blank the panel
 * ahead of the mode set.
 */
static void psb_intel_lvds_prepare(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;

	if (!gma_power_begin(dev, true))
		return;

	mode_dev->saveBLC_PWM_CTL = REG_READ(BLC_PWM_CTL);
	mode_dev->backlight_duty_cycle = (mode_dev->saveBLC_PWM_CTL &
					  BACKLIGHT_DUTY_CYCLE_MASK);

	psb_intel_lvds_set_power(dev, false);

	gma_power_end(dev);
}
static void psb_intel_lvds_commit(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
if (mode_dev->backlight_duty_cycle == 0)
mode_dev->backlight_duty_cycle =
psb_intel_lvds_get_max_backlight(dev);
psb_intel_lvds_set_power(dev, true);
}
static void psb_intel_lvds_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = encoder->dev;
struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
u32 pfit_control;
/*
* The LVDS pin pair will already have been turned on in the
* psb_intel_crtc_mode_set since it has a large impact on the DPLL
* settings.
*/
/*
* Enable automatic panel scaling so that non-native modes fill the
* screen. Should be enabled before the pipe is enabled, according to
* register description and PRM.
*/
if (mode->hdisplay != adjusted_mode->hdisplay ||
mode->vdisplay != adjusted_mode->vdisplay)
pfit_control = (PFIT_ENABLE | VERT_AUTO_SCALE |
HORIZ_AUTO_SCALE | VERT_INTERP_BILINEAR |
HORIZ_INTERP_BILINEAR);
else
pfit_control = 0;
if (dev_priv->lvds_dither)
pfit_control |= PANEL_8TO6_DITHER_ENABLE;
REG_WRITE(PFIT_CONTROL, pfit_control);
}
/*
 * Return the list of DDC modes if available, or the BIOS fixed mode otherwise.
 * DDC probing is only attempted on non-MRST hardware.
 */
static int psb_intel_lvds_get_modes(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
	int ret = 0;

	if (!IS_MRST(dev))
		ret = psb_intel_ddc_get_modes(connector, connector->ddc);

	if (ret)
		return ret;

	/* No EDID modes: fall back to the single fixed panel mode. */
	if (mode_dev->panel_fixed_mode != NULL) {
		struct drm_display_mode *mode =
		    drm_mode_duplicate(dev, mode_dev->panel_fixed_mode);
		drm_mode_probed_add(connector, mode);
		return 1;
	}

	return 0;
}
/*
 * Tear down an LVDS connector: destroy its DDC bus, unhook it from DRM
 * and release the wrapper allocation.
 */
void psb_intel_lvds_destroy(struct drm_connector *connector)
{
	struct gma_connector *gma_connector = to_gma_connector(connector);

	gma_i2c_destroy(to_gma_i2c_chan(connector->ddc));
	drm_connector_cleanup(connector);
	kfree(gma_connector);
}
/*
 * Connector property handler: "scaling mode" re-runs the last mode set
 * with the new scaling, "backlight" programs the brightness, "DPMS" is
 * forwarded to the encoder dpms helper. Returns 0 on success, -1 on
 * error.
 */
int psb_intel_lvds_set_property(struct drm_connector *connector,
				       struct drm_property *property,
				       uint64_t value)
{
	struct drm_encoder *encoder = connector->encoder;

	if (!encoder)
		return -1;

	if (!strcmp(property->name, "scaling mode")) {
		struct gma_crtc *crtc = to_gma_crtc(encoder->crtc);
		uint64_t curval;

		if (!crtc)
			goto set_prop_error;

		/* Only the three known scaling modes are accepted. */
		switch (value) {
		case DRM_MODE_SCALE_FULLSCREEN:
			break;
		case DRM_MODE_SCALE_NO_SCALE:
			break;
		case DRM_MODE_SCALE_ASPECT:
			break;
		default:
			goto set_prop_error;
		}

		if (drm_object_property_get_value(&connector->base,
						     property,
						     &curval))
			goto set_prop_error;

		/* Unchanged value: nothing to do. */
		if (curval == value)
			goto set_prop_done;

		if (drm_object_property_set_value(&connector->base,
							property,
							value))
			goto set_prop_error;

		/* Replay the saved mode so the new scaling takes effect. */
		if (crtc->saved_mode.hdisplay != 0 &&
		    crtc->saved_mode.vdisplay != 0) {
			if (!drm_crtc_helper_set_mode(encoder->crtc,
						      &crtc->saved_mode,
						      encoder->crtc->x,
						      encoder->crtc->y,
						      encoder->crtc->primary->fb))
				goto set_prop_error;
		}
	} else if (!strcmp(property->name, "backlight")) {
		if (drm_object_property_set_value(&connector->base,
							property,
							value))
			goto set_prop_error;
		else
			gma_backlight_set(encoder->dev, value);
	} else if (!strcmp(property->name, "DPMS")) {
		const struct drm_encoder_helper_funcs *hfuncs
						= encoder->helper_private;
		hfuncs->dpms(encoder, value);
	}

set_prop_done:
	return 0;
set_prop_error:
	return -1;
}
/* Encoder helper vtable for the LVDS output. */
static const struct drm_encoder_helper_funcs psb_intel_lvds_helper_funcs = {
	.dpms = psb_intel_lvds_encoder_dpms,
	.mode_fixup = psb_intel_lvds_mode_fixup,
	.prepare = psb_intel_lvds_prepare,
	.mode_set = psb_intel_lvds_mode_set,
	.commit = psb_intel_lvds_commit,
};

/* Connector helper vtable (mode probing and encoder selection). */
const struct drm_connector_helper_funcs
				psb_intel_lvds_connector_helper_funcs = {
	.get_modes = psb_intel_lvds_get_modes,
	.mode_valid = psb_intel_lvds_mode_valid,
	.best_encoder = gma_best_encoder,
};

/* Connector vtable; also used by the Oaktrail LVDS code. */
const struct drm_connector_funcs psb_intel_lvds_connector_funcs = {
	.dpms = drm_helper_connector_dpms,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.set_property = psb_intel_lvds_set_property,
	.destroy = psb_intel_lvds_destroy,
};
/**
 * psb_intel_lvds_init - setup LVDS connectors on this device
 * @dev: drm device
 * @mode_dev: mode device
 *
 * Create the connector, register the LVDS DDC bus, and try to figure out what
 * modes we can display on the LVDS panel (if present). Any failure unwinds
 * the partially-created objects through the goto ladder at the end.
 */
void psb_intel_lvds_init(struct drm_device *dev,
			 struct psb_intel_mode_device *mode_dev)
{
	struct gma_encoder *gma_encoder;
	struct gma_connector *gma_connector;
	struct psb_intel_lvds_priv *lvds_priv;
	struct drm_connector *connector;
	struct drm_encoder *encoder;
	struct drm_display_mode *scan;	/* *modes, *bios_mode; */
	struct drm_crtc *crtc;
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct gma_i2c_chan *ddc_bus;
	u32 lvds;
	int pipe;
	int ret;

	gma_encoder = kzalloc(sizeof(struct gma_encoder), GFP_KERNEL);
	if (!gma_encoder) {
		dev_err(dev->dev, "gma_encoder allocation error\n");
		return;
	}
	encoder = &gma_encoder->base;

	gma_connector = kzalloc(sizeof(struct gma_connector), GFP_KERNEL);
	if (!gma_connector) {
		dev_err(dev->dev, "gma_connector allocation error\n");
		goto err_free_encoder;
	}

	lvds_priv = kzalloc(sizeof(struct psb_intel_lvds_priv), GFP_KERNEL);
	if (!lvds_priv) {
		dev_err(dev->dev, "LVDS private allocation error\n");
		goto err_free_connector;
	}

	gma_encoder->dev_priv = lvds_priv;

	connector = &gma_connector->base;
	gma_connector->save = psb_intel_lvds_save;
	gma_connector->restore = psb_intel_lvds_restore;

	/* Set up the DDC bus. */
	ddc_bus = gma_i2c_create(dev, GPIOC, "LVDSDDC_C");
	if (!ddc_bus) {
		dev_printk(KERN_ERR, dev->dev,
			   "DDC bus registration " "failed.\n");
		goto err_free_lvds_priv;
	}

	ret = drm_connector_init_with_ddc(dev, connector,
					  &psb_intel_lvds_connector_funcs,
					  DRM_MODE_CONNECTOR_LVDS,
					  &ddc_bus->base);
	if (ret)
		goto err_ddc_destroy;

	ret = drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_LVDS);
	if (ret)
		goto err_connector_cleanup;

	gma_connector_attach_encoder(gma_connector, gma_encoder);
	gma_encoder->type = INTEL_OUTPUT_LVDS;

	drm_encoder_helper_add(encoder, &psb_intel_lvds_helper_funcs);
	drm_connector_helper_add(connector,
				 &psb_intel_lvds_connector_helper_funcs);
	connector->display_info.subpixel_order = SubPixelHorizontalRGB;
	connector->interlace_allowed = false;
	connector->doublescan_allowed = false;

	/*Attach connector properties*/
	drm_object_attach_property(&connector->base,
				   dev->mode_config.scaling_mode_property,
				   DRM_MODE_SCALE_FULLSCREEN);
	drm_object_attach_property(&connector->base,
				   dev_priv->backlight_property,
				   BRIGHTNESS_MAX_LEVEL);

	/*
	 * Set up I2C bus (GPIOB, used for backlight control)
	 * FIXME: distroy i2c_bus when exit
	 */
	lvds_priv->i2c_bus = gma_i2c_create(dev, GPIOB, "LVDSBLC_B");
	if (!lvds_priv->i2c_bus) {
		dev_printk(KERN_ERR,
			dev->dev, "I2C bus registration failed.\n");
		goto err_encoder_cleanup;
	}
	lvds_priv->i2c_bus->slave_addr = 0x2C;
	dev_priv->lvds_i2c_bus = lvds_priv->i2c_bus;

	/*
	 * LVDS discovery:
	 * 1) check for EDID on DDC
	 * 2) check for VBT data
	 * 3) check to see if LVDS is already on
	 *    if none of the above, no panel
	 * 4) make sure lid is open
	 *    if closed, act like it's not there for now
	 */

	/*
	 * Attempt to get the fixed panel mode from DDC. Assume that the
	 * preferred mode is the right one.
	 */
	mutex_lock(&dev->mode_config.mutex);
	psb_intel_ddc_get_modes(connector, &ddc_bus->base);
	list_for_each_entry(scan, &connector->probed_modes, head) {
		if (scan->type & DRM_MODE_TYPE_PREFERRED) {
			mode_dev->panel_fixed_mode =
			    drm_mode_duplicate(dev, scan);
			DRM_DEBUG_KMS("Using mode from DDC\n");
			goto out;	/* FIXME: check for quirks */
		}
	}

	/* Failed to get EDID, what about VBT? do we need this? */
	if (dev_priv->lfp_lvds_vbt_mode) {
		mode_dev->panel_fixed_mode =
		    drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);

		if (mode_dev->panel_fixed_mode) {
			mode_dev->panel_fixed_mode->type |=
			    DRM_MODE_TYPE_PREFERRED;
			DRM_DEBUG_KMS("Using mode from VBT\n");
			goto out;
		}
	}

	/*
	 * If we didn't get EDID, try checking if the panel is already turned
	 * on. If so, assume that whatever is currently programmed is the
	 * correct mode.
	 */
	lvds = REG_READ(LVDS);
	pipe = (lvds & LVDS_PIPEB_SELECT) ? 1 : 0;
	crtc = psb_intel_get_crtc_from_pipe(dev, pipe);

	if (crtc && (lvds & LVDS_PORT_EN)) {
		mode_dev->panel_fixed_mode =
		    psb_intel_crtc_mode_get(dev, crtc);
		if (mode_dev->panel_fixed_mode) {
			mode_dev->panel_fixed_mode->type |=
			    DRM_MODE_TYPE_PREFERRED;
			DRM_DEBUG_KMS("Using pre-programmed mode\n");
			goto out;	/* FIXME: check for quirks */
		}
	}

	/* If we still don't have a mode after all that, give up. */
	if (!mode_dev->panel_fixed_mode) {
		dev_err(dev->dev, "Found no modes on the lvds, ignoring the LVDS\n");
		goto err_unlock;
	}

	/*
	 * Blacklist machines with BIOSes that list an LVDS panel without
	 * actually having one.
	 */
out:
	mutex_unlock(&dev->mode_config.mutex);
	return;

err_unlock:
	mutex_unlock(&dev->mode_config.mutex);
	gma_i2c_destroy(lvds_priv->i2c_bus);
err_encoder_cleanup:
	drm_encoder_cleanup(encoder);
err_connector_cleanup:
	drm_connector_cleanup(connector);
err_ddc_destroy:
	gma_i2c_destroy(ddc_bus);
err_free_lvds_priv:
	kfree(lvds_priv);
err_free_connector:
	kfree(gma_connector);
err_free_encoder:
	kfree(gma_encoder);
}
| linux-master | drivers/gpu/drm/gma500/psb_intel_lvds.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* GMA500 Backlight Interface
*
* Copyright (c) 2009-2011, Intel Corporation.
*
* Authors: Eric Knopp
*/
#include <linux/backlight.h>
#include <acpi/video.h>
#include "psb_drv.h"
#include "psb_intel_reg.h"
#include "psb_intel_drv.h"
#include "intel_bios.h"
#include "power.h"
/* Mark the backlight enabled and push the cached level to the chip. */
void gma_backlight_enable(struct drm_device *dev)
{
	struct drm_psb_private *priv = to_drm_psb_private(dev);

	priv->backlight_enabled = true;
	priv->ops->backlight_set(dev, priv->backlight_level);
}
/* Mark the backlight disabled and drive the hardware level to zero. */
void gma_backlight_disable(struct drm_device *dev)
{
	struct drm_psb_private *priv = to_drm_psb_private(dev);

	priv->backlight_enabled = false;
	priv->ops->backlight_set(dev, 0);
}
/* Cache the requested level; touch the hardware only while enabled. */
void gma_backlight_set(struct drm_device *dev, int v)
{
	struct drm_psb_private *priv = to_drm_psb_private(dev);

	priv->backlight_level = v;
	if (priv->backlight_enabled)
		priv->ops->backlight_set(dev, v);
}
/*
 * Backlight-class getter: prefer a live hardware readout when the chip
 * provides one, otherwise report the cached software level.
 */
static int gma_backlight_get_brightness(struct backlight_device *bd)
{
	struct drm_device *dev = bl_get_data(bd);
	struct drm_psb_private *priv = to_drm_psb_private(dev);

	return priv->ops->backlight_get ? priv->ops->backlight_get(dev)
					: priv->backlight_level;
}
/* Backlight-class setter; clamps to the valid 1-100% range. */
static int gma_backlight_update_status(struct backlight_device *bd)
{
	struct drm_device *dev = bl_get_data(bd);
	int level = backlight_get_brightness(bd);

	/* Percentage 1-100% being valid */
	gma_backlight_set(dev, level < 1 ? 1 : level);

	return 0;
}
/* backlight-class operations (unused when the class is compiled out). */
static const struct backlight_ops gma_backlight_ops __maybe_unused = {
	.get_brightness = gma_backlight_get_brightness,
	.update_status  = gma_backlight_update_status,
};
/*
 * Initialise the chip-specific backlight implementation and, when ACPI
 * says native control should be used, register a backlight class
 * device. Returns 0 on success or a negative errno.
 */
int gma_backlight_init(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct backlight_properties props __maybe_unused = {};
	int ret;

	/* Start fully on until a consumer programs a level. */
	dev_priv->backlight_enabled = true;
	dev_priv->backlight_level = 100;

	ret = dev_priv->ops->backlight_init(dev);
	if (ret)
		return ret;

	if (!acpi_video_backlight_use_native()) {
		drm_info(dev, "Skipping %s backlight registration\n",
			 dev_priv->ops->backlight_name);
		return 0;
	}

#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
	props.brightness = dev_priv->backlight_level;
	props.max_brightness = PSB_MAX_BRIGHTNESS;
	props.type = BACKLIGHT_RAW;

	dev_priv->backlight_device =
		backlight_device_register(dev_priv->ops->backlight_name,
					  dev->dev, dev,
					  &gma_backlight_ops, &props);
	if (IS_ERR(dev_priv->backlight_device))
		return PTR_ERR(dev_priv->backlight_device);
#endif

	return 0;
}
/* Unregister the backlight class device, if one was created. */
void gma_backlight_exit(struct drm_device *dev)
{
#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
	struct drm_psb_private *priv = to_drm_psb_private(dev);
	struct backlight_device *bd = priv->backlight_device;

	if (bd)
		backlight_device_unregister(bd);
#endif
}
| linux-master | drivers/gpu/drm/gma500/backlight.c |
/*
* Copyright (c) 2002-2010, Intel Corporation.
* Copyright (c) 2014 ATRON electronic GmbH
* Author: Jan Safrata <[email protected]>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*
*/
#include <linux/delay.h>
#include <linux/i2c-algo-bit.h>
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/types.h>
#include "psb_drv.h"
#include "psb_intel_reg.h"
/*
* LPC GPIO based I2C bus for LVDS of Atom E6xx
*/
/*-----------------------------------------------------------------------------
* LPC Register Offsets. Used for LVDS GPIO Bit Bashing. Registers are part
* Atom E6xx [D31:F0]
----------------------------------------------------------------------------*/
#define RGEN 0x20
#define RGIO 0x24
#define RGLVL 0x28
#define RGTPE 0x2C
#define RGTNE 0x30
#define RGGPE 0x34
#define RGSMI 0x38
#define RGTS 0x3C
/* The LVDS GPIO clock lines are GPIOSUS[3]
* The LVDS GPIO data lines are GPIOSUS[4]
*/
#define GPIO_CLOCK 0x08
#define GPIO_DATA 0x10
#define LPC_READ_REG(chan, r) inl((chan)->reg + (r))
#define LPC_WRITE_REG(chan, r, val) outl((val), (chan)->reg + (r))
/*
 * Sample the I2C clock line (SCL) level via the LPC GPIO block.
 *
 * NOTE(review): setting the GPIO_CLOCK bit in RGIO before sampling
 * appears to release the line (input direction), and the discarded
 * first RGLVL read looks like a settle/flush read — confirm against
 * the Atom E6xx LPC documentation.
 */
static int get_clock(void *data)
{
	struct gma_i2c_chan *chan = data;
	u32 val;

	val = LPC_READ_REG(chan, RGIO);
	val |= GPIO_CLOCK;
	LPC_WRITE_REG(chan, RGIO, val);
	LPC_READ_REG(chan, RGLVL);
	val = (LPC_READ_REG(chan, RGLVL) & GPIO_CLOCK) ? 1 : 0;

	return val;
}
/*
 * Sample the I2C data line (SDA) level via the LPC GPIO block.
 * Same release-then-sample pattern as get_clock() above.
 */
static int get_data(void *data)
{
	struct gma_i2c_chan *chan = data;
	u32 val;

	val = LPC_READ_REG(chan, RGIO);
	val |= GPIO_DATA;
	LPC_WRITE_REG(chan, RGIO, val);
	LPC_READ_REG(chan, RGLVL);
	val = (LPC_READ_REG(chan, RGLVL) & GPIO_DATA) ? 1 : 0;

	return val;
}
/*
 * Drive the I2C clock line (SCL): "high" sets the direction bit in
 * RGIO (releasing the line), "low" clears both the direction bit and
 * the level bit in RGLVL to actively pull it down.
 */
static void set_clock(void *data, int state_high)
{
	struct gma_i2c_chan *chan = data;
	u32 val;

	if (state_high) {
		val = LPC_READ_REG(chan, RGIO);
		val |= GPIO_CLOCK;
		LPC_WRITE_REG(chan, RGIO, val);
	} else {
		val = LPC_READ_REG(chan, RGIO);
		val &= ~GPIO_CLOCK;
		LPC_WRITE_REG(chan, RGIO, val);
		val = LPC_READ_REG(chan, RGLVL);
		val &= ~GPIO_CLOCK;
		LPC_WRITE_REG(chan, RGLVL, val);
	}
}
/*
 * Drive the I2C data line (SDA); mirrors set_clock() with the
 * GPIO_DATA bit.
 */
static void set_data(void *data, int state_high)
{
	struct gma_i2c_chan *chan = data;
	u32 val;

	if (state_high) {
		val = LPC_READ_REG(chan, RGIO);
		val |= GPIO_DATA;
		LPC_WRITE_REG(chan, RGIO, val);
	} else {
		val = LPC_READ_REG(chan, RGIO);
		val &= ~GPIO_DATA;
		LPC_WRITE_REG(chan, RGIO, val);
		val = LPC_READ_REG(chan, RGLVL);
		val &= ~GPIO_DATA;
		LPC_WRITE_REG(chan, RGLVL, val);
	}
}
/*
 * oaktrail_lvds_i2c_init - create the bit-banged LPC GPIO I2C bus used
 * for Oaktrail LVDS control.
 * @dev: drm device
 *
 * Returns the new channel, or an ERR_PTR() on allocation or bus
 * registration failure.
 */
struct gma_i2c_chan *oaktrail_lvds_i2c_init(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct gma_i2c_chan *chan;
	int ret;

	chan = kzalloc(sizeof(struct gma_i2c_chan), GFP_KERNEL);
	if (!chan)
		return ERR_PTR(-ENOMEM);

	chan->drm_dev = dev;
	chan->reg = dev_priv->lpc_gpio_base;
	/*
	 * snprintf guarantees NUL termination (strncpy does not) and
	 * matches how gma_i2c_create() builds its adapter name.
	 */
	snprintf(chan->base.name, I2C_NAME_SIZE, "gma500 LPC");
	chan->base.owner = THIS_MODULE;
	chan->base.algo_data = &chan->algo;
	chan->base.dev.parent = dev->dev;
	chan->algo.setsda = set_data;
	chan->algo.setscl = set_clock;
	chan->algo.getsda = get_data;
	chan->algo.getscl = get_clock;
	chan->algo.udelay = 100;
	chan->algo.timeout = usecs_to_jiffies(2200);
	chan->algo.data = chan;

	i2c_set_adapdata(&chan->base, chan);

	/* Release both lines and let them settle before registering. */
	set_data(chan, 1);
	set_clock(chan, 1);
	udelay(50);

	ret = i2c_bit_add_bus(&chan->base);
	if (ret < 0) {
		kfree(chan);
		return ERR_PTR(ret);
	}

	return chan;
}
| linux-master | drivers/gpu/drm/gma500/oaktrail_lvds_i2c.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright © 2006-2007 Intel Corporation
*
* Authors:
* Eric Anholt <[email protected]>
*/
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/i2c-algo-bit.h>
#include <linux/i2c.h>
#include "psb_drv.h"
#include "psb_intel_reg.h"
/*
* Intel GPIO access functions
*/
#define I2C_RISEFALL_TIME 20
/* Sample the SCL input bit of the channel's GPIO register. */
static int get_clock(void *data)
{
	struct gma_i2c_chan *chan = data;
	struct drm_device *dev = chan->drm_dev;

	return (REG_READ(chan->reg) & GPIO_CLOCK_VAL_IN) ? 1 : 0;
}
/* Sample the SDA input bit of the channel's GPIO register. */
static int get_data(void *data)
{
	struct gma_i2c_chan *chan = data;
	struct drm_device *dev = chan->drm_dev;

	return (REG_READ(chan->reg) & GPIO_DATA_VAL_IN) ? 1 : 0;
}
/*
 * Drive SCL: "high" floats the line (direction = in), "low" actively
 * pulls it down. The pull-up disable bits are read back and preserved
 * across the write.
 */
static void set_clock(void *data, int state_high)
{
	struct gma_i2c_chan *chan = data;
	struct drm_device *dev = chan->drm_dev;
	u32 reserved = 0, clock_bits;

	/* On most chips, these bits must be preserved in software. */
	reserved =
	    REG_READ(chan->reg) & (GPIO_DATA_PULLUP_DISABLE |
				   GPIO_CLOCK_PULLUP_DISABLE);

	if (state_high)
		clock_bits = GPIO_CLOCK_DIR_IN | GPIO_CLOCK_DIR_MASK;
	else
		clock_bits = GPIO_CLOCK_DIR_OUT | GPIO_CLOCK_DIR_MASK |
		    GPIO_CLOCK_VAL_MASK;
	REG_WRITE(chan->reg, reserved | clock_bits);
	udelay(I2C_RISEFALL_TIME);	/* wait for the line to change state */
}
/*
 * Drive SDA; mirrors set_clock() with the data-line direction and
 * value bits.
 */
static void set_data(void *data, int state_high)
{
	struct gma_i2c_chan *chan = data;
	struct drm_device *dev = chan->drm_dev;
	u32 reserved = 0, data_bits;

	/* On most chips, these bits must be preserved in software. */
	reserved =
	    REG_READ(chan->reg) & (GPIO_DATA_PULLUP_DISABLE |
				   GPIO_CLOCK_PULLUP_DISABLE);

	if (state_high)
		data_bits = GPIO_DATA_DIR_IN | GPIO_DATA_DIR_MASK;
	else
		data_bits =
		    GPIO_DATA_DIR_OUT | GPIO_DATA_DIR_MASK |
		    GPIO_DATA_VAL_MASK;

	REG_WRITE(chan->reg, reserved | data_bits);
	udelay(I2C_RISEFALL_TIME);	/* wait for the line to change state */
}
/**
 * gma_i2c_create - instantiate an Intel i2c bus using the specified GPIO reg
 * @dev: DRM device
 * @reg: GPIO reg to use
 * @name: name for this bus
 *
 * Creates and registers a new i2c bus with the Linux i2c layer, for use
 * in output probing and control (e.g. DDC or SDVO control functions).
 *
 * Possible values for @reg include:
 * %GPIOA
 * %GPIOB
 * %GPIOC
 * %GPIOD
 * %GPIOE
 * %GPIOF
 * %GPIOG
 * %GPIOH
 * see PRM for details on how these different busses are used.
 *
 * Returns the new channel, or NULL on allocation or bus registration
 * failure.
 */
struct gma_i2c_chan *gma_i2c_create(struct drm_device *dev, const u32 reg,
				    const char *name)
{
	struct gma_i2c_chan *chan;

	chan = kzalloc(sizeof(struct gma_i2c_chan), GFP_KERNEL);
	if (!chan)
		goto out_free;

	chan->drm_dev = dev;
	chan->reg = reg;
	snprintf(chan->base.name, I2C_NAME_SIZE, "intel drm %s", name);
	chan->base.owner = THIS_MODULE;
	chan->base.algo_data = &chan->algo;
	chan->base.dev.parent = dev->dev;
	chan->algo.setsda = set_data;
	chan->algo.setscl = set_clock;
	chan->algo.getsda = get_data;
	chan->algo.getscl = get_clock;
	chan->algo.udelay = 20;
	chan->algo.timeout = usecs_to_jiffies(2200);
	chan->algo.data = chan;

	i2c_set_adapdata(&chan->base, chan);

	if (i2c_bit_add_bus(&chan->base))
		goto out_free;

	/* JJJ:  raise SCL and SDA? */
	set_data(chan, 1);
	set_clock(chan, 1);
	udelay(20);

	return chan;

out_free:
	kfree(chan);
	return NULL;
}
/**
 * gma_i2c_destroy - unregister and free i2c bus resources
 * @chan: channel to free (may be NULL, in which case nothing happens)
 *
 * Unregister the adapter from the i2c layer, then free the structure.
 */
void gma_i2c_destroy(struct gma_i2c_chan *chan)
{
	if (chan) {
		i2c_del_adapter(&chan->base);
		kfree(chan);
	}
}
| linux-master | drivers/gpu/drm/gma500/intel_i2c.c |
/*
* Copyright © 2010 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors:
* Li Peng <[email protected]>
*/
#include <linux/delay.h>
#include <drm/drm.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_simple_kms_helper.h>
#include "psb_drv.h"
#include "psb_intel_drv.h"
#include "psb_intel_reg.h"
#define HDMI_READ(reg) readl(hdmi_dev->regs + (reg))
#define HDMI_WRITE(reg, val) writel(val, hdmi_dev->regs + (reg))
#define HDMI_HCR 0x1000
#define HCR_ENABLE_HDCP (1 << 5)
#define HCR_ENABLE_AUDIO (1 << 2)
#define HCR_ENABLE_PIXEL (1 << 1)
#define HCR_ENABLE_TMDS (1 << 0)
#define HDMI_HICR 0x1004
#define HDMI_HSR 0x1008
#define HDMI_HISR 0x100C
#define HDMI_DETECT_HDP (1 << 0)
#define HDMI_VIDEO_REG 0x3000
#define HDMI_UNIT_EN (1 << 7)
#define HDMI_MODE_OUTPUT (1 << 0)
#define HDMI_HBLANK_A 0x3100
#define HDMI_AUDIO_CTRL 0x4000
#define HDMI_ENABLE_AUDIO (1 << 0)
#define PCH_HTOTAL_B 0x3100
#define PCH_HBLANK_B 0x3104
#define PCH_HSYNC_B 0x3108
#define PCH_VTOTAL_B 0x310C
#define PCH_VBLANK_B 0x3110
#define PCH_VSYNC_B 0x3114
#define PCH_PIPEBSRC 0x311C
#define PCH_PIPEB_DSL 0x3800
#define PCH_PIPEB_SLC 0x3804
#define PCH_PIPEBCONF 0x3808
#define PCH_PIPEBSTAT 0x3824
#define CDVO_DFT 0x5000
#define CDVO_SLEWRATE 0x5004
#define CDVO_STRENGTH 0x5008
#define CDVO_RCOMP 0x500C
#define DPLL_CTRL 0x6000
#define DPLL_PDIV_SHIFT 16
#define DPLL_PDIV_MASK (0xf << 16)
#define DPLL_PWRDN (1 << 4)
#define DPLL_RESET (1 << 3)
#define DPLL_FASTEN (1 << 2)
#define DPLL_ENSTAT (1 << 1)
#define DPLL_DITHEN (1 << 0)
#define DPLL_DIV_CTRL 0x6004
#define DPLL_CLKF_MASK 0xffffffc0
#define DPLL_CLKR_MASK (0x3f)
#define DPLL_CLK_ENABLE 0x6008
#define DPLL_EN_DISP (1 << 31)
#define DPLL_SEL_HDMI (1 << 8)
#define DPLL_EN_HDMI (1 << 1)
#define DPLL_EN_VGA (1 << 0)
#define DPLL_ADJUST 0x600C
#define DPLL_STATUS 0x6010
#define DPLL_UPDATE 0x6014
#define DPLL_DFT 0x6020
/* Simple inclusive [min, max] range used for DPLL parameter limits. */
struct intel_range {
	int min, max;
};

/* Legal ranges for the HDMI DPLL: VCO frequency and the NP/NR/NF dividers. */
struct oaktrail_hdmi_limit {
	struct intel_range vco, np, nr, nf;
};

/*
 * One concrete DPLL setting.  Note that oaktrail_hdmi_find_dpll() stores
 * these fields pre-adjusted for the register layout (nr minus one, nf
 * left-shifted), not as raw divider values.
 */
struct oaktrail_hdmi_clock {
	int np;		/* post divider */
	int nr;		/* reference divider (stored as nr - 1) */
	int nf;		/* feedback divider (stored as nf << 14) */
	int dot;	/* resulting dot clock */
};

/* DPLL parameter limits (frequencies in kHz * 10 as used by find_dpll). */
#define VCO_MIN 320000
#define VCO_MAX 1650000
#define NP_MIN 1
#define NP_MAX 15
#define NR_MIN 1
#define NR_MAX 64
#define NF_MIN 2
#define NF_MAX 4095

static const struct oaktrail_hdmi_limit oaktrail_hdmi_limit = {
	.vco = { .min = VCO_MIN, .max = VCO_MAX },
	.np = { .min = NP_MIN, .max = NP_MAX },
	.nr = { .min = NR_MIN, .max = NR_MAX },
	.nf = { .min = NF_MIN, .max = NF_MAX },
};
/*
 * Enable HDMI audio output.
 *
 * Writes 0x67 to the HDMI control register (sets the TMDS/pixel/audio/HDCP
 * enable bits plus an undocumented bit 6), pokes register 0x51a8
 * (undocumented -- presumably audio related), then sets the enable bit in
 * HDMI_AUDIO_CTRL.  Each write is followed by a read-back to post it.
 */
static void oaktrail_hdmi_audio_enable(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;

	HDMI_WRITE(HDMI_HCR, 0x67);
	HDMI_READ(HDMI_HCR);

	HDMI_WRITE(0x51a8, 0x10);
	HDMI_READ(0x51a8);

	HDMI_WRITE(HDMI_AUDIO_CTRL, 0x1);
	HDMI_READ(HDMI_AUDIO_CTRL);
}
/*
 * Disable HDMI audio output.
 *
 * Reverses oaktrail_hdmi_audio_enable(): clears the undocumented 0x51a8
 * register and the HDMI_AUDIO_CTRL enable bit, then writes 0x47 to the
 * HDMI control register (like 0x67 but with the HDCP enable bit cleared).
 */
static void oaktrail_hdmi_audio_disable(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;

	HDMI_WRITE(0x51a8, 0x0);
	HDMI_READ(0x51a8);

	HDMI_WRITE(HDMI_AUDIO_CTRL, 0x0);
	HDMI_READ(HDMI_AUDIO_CTRL);

	HDMI_WRITE(HDMI_HCR, 0x47);
	HDMI_READ(HDMI_HCR);
}
/*
 * htotal_calculate() - compute the HTOTAL register value for a mode.
 * @mode: display mode whose CRTC timings have been populated
 *
 * Scales the horizontal total by 200000/clock (clock is in kHz), then
 * packs hdisplay-1 in the low half and the scaled htotal in the high half,
 * matching the hardware's HTOTAL register layout.
 *
 * Returns the packed register value.
 */
static unsigned int htotal_calculate(struct drm_display_mode *mode)
{
	u32 new_crtc_htotal;

	/*
	 * 1024 x 768 new_crtc_htotal = 0x1024;
	 * 1280 x 1024 new_crtc_htotal = 0x0c34;
	 */
	new_crtc_htotal = (mode->crtc_htotal - 1) * 200 * 1000 / mode->clock;

	/* "%04x" zero-pads the value; the original "%4x" space-padded it. */
	DRM_DEBUG_KMS("new crtc htotal 0x%04x\n", new_crtc_htotal);
	return (mode->crtc_hdisplay - 1) | (new_crtc_htotal << 16);
}
/*
 * oaktrail_hdmi_find_dpll() - pick DPLL dividers for a target dot clock.
 * @crtc: CRTC being programmed (unused here)
 * @target: target dot clock in kHz
 * @refclk: reference clock in kHz
 * @best_clock: filled with the chosen np/nr/nf values
 *
 * Narrows the post-divider (np) and reference-divider (nr) ranges so the
 * VCO frequency stays inside [vco.min, vco.max], then derives the feedback
 * divider (nf) with round-to-nearest.  Note the factor of 10 applied to
 * @target throughout, matching the units of the VCO limit constants.
 *
 * The results are stored pre-adjusted for the register layout used by
 * oaktrail_crtc_hdmi_mode_set(): nr is stored minus one and nf is stored
 * shifted left by 14 (the caller later writes (nf << 6) | nr and
 * (nf >> 14) - 1 to the divider registers).
 */
static void oaktrail_hdmi_find_dpll(struct drm_crtc *crtc, int target,
				int refclk, struct oaktrail_hdmi_clock *best_clock)
{
	int np_min, np_max, nr_min, nr_max;
	int np, nr, nf;

	/* Clamp np so target * 10 * np lies within the VCO range. */
	np_min = DIV_ROUND_UP(oaktrail_hdmi_limit.vco.min, target * 10);
	np_max = oaktrail_hdmi_limit.vco.max / (target * 10);
	if (np_min < oaktrail_hdmi_limit.np.min)
		np_min = oaktrail_hdmi_limit.np.min;
	if (np_max > oaktrail_hdmi_limit.np.max)
		np_max = oaktrail_hdmi_limit.np.max;

	/* Derive the reference-divider range from the np range. */
	nr_min = DIV_ROUND_UP((refclk * 1000), (target * 10 * np_max));
	nr_max = DIV_ROUND_UP((refclk * 1000), (target * 10 * np_min));
	if (nr_min < oaktrail_hdmi_limit.nr.min)
		nr_min = oaktrail_hdmi_limit.nr.min;
	if (nr_max > oaktrail_hdmi_limit.nr.max)
		nr_max = oaktrail_hdmi_limit.nr.max;

	np = DIV_ROUND_UP((refclk * 1000), (target * 10 * nr_max));
	nr = DIV_ROUND_UP((refclk * 1000), (target * 10 * np));
	nf = DIV_ROUND_CLOSEST((target * 10 * np * nr), refclk);
	DRM_DEBUG_KMS("np, nr, nf %d %d %d\n", np, nr, nf);

	/*
	 * 1024 x 768 np = 1; nr = 0x26; nf = 0x0fd8000;
	 * 1280 x 1024 np = 1; nr = 0x17; nf = 0x1034000;
	 */
	best_clock->np = np;
	best_clock->nr = nr - 1;	/* register holds nr - 1 */
	best_clock->nf = (nf << 14);	/* pre-shifted for DPLL_DIV_CTRL */
}
/*
 * scu_busy_loop() - poll the SCU IPC busy flag until it clears.
 * @scu_base: ioremapped base of the SCU IPC register block
 *
 * Bit 0 of the status register at offset 0x04 is the busy flag.  Polls
 * with a 1us delay per iteration and gives up (with a debug message)
 * after roughly a thousand retries rather than spinning forever.
 */
static void scu_busy_loop(void __iomem *scu_base)
{
	u32 retries = 0;

	while (readl(scu_base + 0x04) & 1) {
		udelay(1); /* SCU processing time is a few microseconds */

		/* Bail out if the SCU never deasserts busy. */
		if (++retries > 1000) {
			DRM_DEBUG_KMS("SCU IPC timed out");
			return;
		}
	}
}
/*
 * You don't want to know, you really really don't want to know....
 *
 * This is magic. However it's safe magic because of the way the platform
 * works and it is necessary magic.
 *
 * Mechanically: the SCU IPC mailbox at physical 0xff11c000 is mapped and
 * two commands are issued.  Each command writes an address (0xff11d118)
 * to offset 0x0c, a data word to offset 0x80 and the command word 0x42005
 * to offset 0x0, then waits for the SCU busy flag to clear.  The two data
 * words differ only in bit 5 (0x7fffffdf vs 0x7fffffff), which presumably
 * is the HDMI controller reset line -- asserted, then de-asserted.
 */
static void oaktrail_hdmi_reset(struct drm_device *dev)
{
	void __iomem *base;
	unsigned long scu_ipc_mmio = 0xff11c000UL;
	int scu_len = 1024;

	base = ioremap((resource_size_t)scu_ipc_mmio, scu_len);
	if (base == NULL) {
		DRM_ERROR("failed to map scu mmio\n");
		return;
	}

	/* scu ipc: assert hdmi controller reset */
	writel(0xff11d118, base + 0x0c);
	writel(0x7fffffdf, base + 0x80);
	writel(0x42005, base + 0x0);
	scu_busy_loop(base);

	/* scu ipc: de-assert hdmi controller reset */
	writel(0xff11d118, base + 0x0c);
	writel(0x7fffffff, base + 0x80);
	writel(0x42005, base + 0x0);
	scu_busy_loop(base);

	iounmap(base);
}
/*
 * oaktrail_crtc_hdmi_mode_set() - program pipe B and the HDMI unit for a mode.
 * @crtc: CRTC being configured (HDMI is hard-wired to pipe B here)
 * @mode: requested mode
 * @adjusted_mode: mode after fixup, used for the actual timings
 * @x: framebuffer x origin
 * @y: framebuffer y origin
 * @old_fb: previous framebuffer (passed through to mode_set_base)
 *
 * Sequence: disable the DPLL, reset the HDMI controller via the SCU,
 * reprogram the DPLL for the new dot clock, then write the timing
 * registers (both the display pipe's and the PCH/LNW mirror set), flush
 * the plane and finally enable pipe and plane.  The order of the register
 * writes is hardware-mandated; do not reorder.
 *
 * Returns 0 (also when the device could not be powered up).
 */
int oaktrail_crtc_hdmi_mode_set(struct drm_crtc *crtc,
			    struct drm_display_mode *mode,
			    struct drm_display_mode *adjusted_mode,
			    int x, int y,
			    struct drm_framebuffer *old_fb)
{
	struct drm_device *dev = crtc->dev;
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
	int pipe = 1;	/* HDMI always uses pipe B on this hardware */
	int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
	int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
	int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
	int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
	int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
	int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
	int dspsize_reg = (pipe == 0) ? DSPASIZE : DSPBSIZE;
	int dsppos_reg = (pipe == 0) ? DSPAPOS : DSPBPOS;
	int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
	int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
	int refclk;
	struct oaktrail_hdmi_clock clock;
	u32 dspcntr, pipeconf, dpll, temp;
	int dspcntr_reg = DSPBCNTR;

	if (!gma_power_begin(dev, true))
		return 0;

	/* Disable the VGA plane that we never use */
	REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);

	/* Disable dpll if necessary */
	dpll = REG_READ(DPLL_CTRL);
	if ((dpll & DPLL_PWRDN) == 0) {
		REG_WRITE(DPLL_CTRL, dpll | (DPLL_PWRDN | DPLL_RESET));
		REG_WRITE(DPLL_DIV_CTRL, 0x00000000);
		REG_WRITE(DPLL_STATUS, 0x1);
	}
	udelay(150);

	/* Reset controller */
	oaktrail_hdmi_reset(dev);

	/* program and enable dpll */
	refclk = 25000;
	oaktrail_hdmi_find_dpll(crtc, adjusted_mode->clock, refclk, &clock);

	/* Set the DPLL (nr/nf come pre-adjusted from find_dpll) */
	dpll = REG_READ(DPLL_CTRL);
	dpll &= ~DPLL_PDIV_MASK;
	dpll &= ~(DPLL_PWRDN | DPLL_RESET);
	REG_WRITE(DPLL_CTRL, 0x00000008);
	REG_WRITE(DPLL_DIV_CTRL, ((clock.nf << 6) | clock.nr));
	REG_WRITE(DPLL_ADJUST, ((clock.nf >> 14) - 1));
	REG_WRITE(DPLL_CTRL, (dpll | (clock.np << DPLL_PDIV_SHIFT) | DPLL_ENSTAT | DPLL_DITHEN));
	REG_WRITE(DPLL_UPDATE, 0x80000000);
	REG_WRITE(DPLL_CLK_ENABLE, 0x80050102);
	udelay(150);

	/* configure HDMI (undocumented setup values) */
	HDMI_WRITE(0x1004, 0x1fd);
	HDMI_WRITE(0x2000, 0x1);
	HDMI_WRITE(0x2008, 0x0);
	HDMI_WRITE(0x3130, 0x8);
	HDMI_WRITE(0x101c, 0x1800810);

	/* Display pipe timing registers */
	temp = htotal_calculate(adjusted_mode);
	REG_WRITE(htot_reg, temp);
	REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) | ((adjusted_mode->crtc_hblank_end - 1) << 16));
	REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) | ((adjusted_mode->crtc_hsync_end - 1) << 16));
	REG_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) | ((adjusted_mode->crtc_vtotal - 1) << 16));
	REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) | ((adjusted_mode->crtc_vblank_end - 1) << 16));
	REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) | ((adjusted_mode->crtc_vsync_end - 1) << 16));
	REG_WRITE(pipesrc_reg, ((mode->crtc_hdisplay - 1) << 16) | (mode->crtc_vdisplay - 1));

	/* Mirror the timings into the PCH/LNW pipe B register set */
	REG_WRITE(PCH_HTOTAL_B, (adjusted_mode->crtc_hdisplay - 1) | ((adjusted_mode->crtc_htotal - 1) << 16));
	REG_WRITE(PCH_HBLANK_B, (adjusted_mode->crtc_hblank_start - 1) | ((adjusted_mode->crtc_hblank_end - 1) << 16));
	REG_WRITE(PCH_HSYNC_B, (adjusted_mode->crtc_hsync_start - 1) | ((adjusted_mode->crtc_hsync_end - 1) << 16));
	REG_WRITE(PCH_VTOTAL_B, (adjusted_mode->crtc_vdisplay - 1) | ((adjusted_mode->crtc_vtotal - 1) << 16));
	REG_WRITE(PCH_VBLANK_B, (adjusted_mode->crtc_vblank_start - 1) | ((adjusted_mode->crtc_vblank_end - 1) << 16));
	REG_WRITE(PCH_VSYNC_B, (adjusted_mode->crtc_vsync_start - 1) | ((adjusted_mode->crtc_vsync_end - 1) << 16));
	REG_WRITE(PCH_PIPEBSRC, ((mode->crtc_hdisplay - 1) << 16) | (mode->crtc_vdisplay - 1));

	/* HDMI unit horizontal blank */
	temp = adjusted_mode->crtc_hblank_end - adjusted_mode->crtc_hblank_start;
	HDMI_WRITE(HDMI_HBLANK_A, ((adjusted_mode->crtc_hdisplay - 1) << 16) | temp);

	REG_WRITE(dspsize_reg, ((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1));
	REG_WRITE(dsppos_reg, 0);

	/* Flush the plane changes */
	{
		const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
		crtc_funcs->mode_set_base(crtc, x, y, old_fb);
	}

	/* Set up the display plane register */
	dspcntr = REG_READ(dspcntr_reg);
	dspcntr |= DISPPLANE_GAMMA_ENABLE;
	dspcntr |= DISPPLANE_SEL_PIPE_B;
	dspcntr |= DISPLAY_PLANE_ENABLE;

	/* setup pipeconf */
	pipeconf = REG_READ(pipeconf_reg);
	pipeconf |= PIPEACONF_ENABLE;

	REG_WRITE(pipeconf_reg, pipeconf);
	REG_READ(pipeconf_reg);

	REG_WRITE(PCH_PIPEBCONF, pipeconf);
	REG_READ(PCH_PIPEBCONF);
	gma_wait_for_vblank(dev);

	REG_WRITE(dspcntr_reg, dspcntr);
	gma_wait_for_vblank(dev);

	gma_power_end(dev);

	return 0;
}
/*
 * oaktrail_crtc_hdmi_dpms() - CRTC-level power management for the HDMI pipe.
 * @crtc: CRTC to power up or down (HDMI runs on pipe B)
 * @mode: DRM_MODE_DPMS_* state
 *
 * OFF tears things down in plane -> pipe -> PCH pipe -> DPLL order;
 * the other states power up in the reverse order.  STANDBY and SUSPEND
 * are treated the same as ON.  In both directions the watermark/chicken
 * registers at the bottom are (re)written unconditionally.
 */
void oaktrail_crtc_hdmi_dpms(struct drm_crtc *crtc, int mode)
{
	struct drm_device *dev = crtc->dev;
	u32 temp;

	DRM_DEBUG_KMS("%s %d\n", __func__, mode);

	switch (mode) {
	case DRM_MODE_DPMS_OFF:
		REG_WRITE(VGACNTRL, 0x80000000);

		/* Disable plane */
		temp = REG_READ(DSPBCNTR);
		if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
			REG_WRITE(DSPBCNTR, temp & ~DISPLAY_PLANE_ENABLE);
			REG_READ(DSPBCNTR);
			/* Flush the plane changes */
			REG_WRITE(DSPBSURF, REG_READ(DSPBSURF));
			REG_READ(DSPBSURF);
		}

		/* Disable pipe B */
		temp = REG_READ(PIPEBCONF);
		if ((temp & PIPEACONF_ENABLE) != 0) {
			REG_WRITE(PIPEBCONF, temp & ~PIPEACONF_ENABLE);
			REG_READ(PIPEBCONF);
		}

		/* Disable LNW Pipes, etc */
		temp = REG_READ(PCH_PIPEBCONF);
		if ((temp & PIPEACONF_ENABLE) != 0) {
			REG_WRITE(PCH_PIPEBCONF, temp & ~PIPEACONF_ENABLE);
			REG_READ(PCH_PIPEBCONF);
		}

		/* wait for pipe off */
		udelay(150);

		/* Disable dpll */
		temp = REG_READ(DPLL_CTRL);
		if ((temp & DPLL_PWRDN) == 0) {
			REG_WRITE(DPLL_CTRL, temp | (DPLL_PWRDN | DPLL_RESET));
			REG_WRITE(DPLL_STATUS, 0x1);
		}

		/* wait for dpll off */
		udelay(150);

		break;
	case DRM_MODE_DPMS_ON:
	case DRM_MODE_DPMS_STANDBY:
	case DRM_MODE_DPMS_SUSPEND:
		/* Enable dpll */
		temp = REG_READ(DPLL_CTRL);
		if ((temp & DPLL_PWRDN) != 0) {
			REG_WRITE(DPLL_CTRL, temp & ~(DPLL_PWRDN | DPLL_RESET));
			temp = REG_READ(DPLL_CLK_ENABLE);
			REG_WRITE(DPLL_CLK_ENABLE, temp | DPLL_EN_DISP | DPLL_SEL_HDMI | DPLL_EN_HDMI);
			REG_READ(DPLL_CLK_ENABLE);
		}
		/* wait for dpll warm up */
		udelay(150);

		/* Enable pipe B */
		temp = REG_READ(PIPEBCONF);
		if ((temp & PIPEACONF_ENABLE) == 0) {
			REG_WRITE(PIPEBCONF, temp | PIPEACONF_ENABLE);
			REG_READ(PIPEBCONF);
		}

		/* Enable LNW Pipe B */
		temp = REG_READ(PCH_PIPEBCONF);
		if ((temp & PIPEACONF_ENABLE) == 0) {
			REG_WRITE(PCH_PIPEBCONF, temp | PIPEACONF_ENABLE);
			REG_READ(PCH_PIPEBCONF);
		}

		gma_wait_for_vblank(dev);

		/* Enable plane */
		temp = REG_READ(DSPBCNTR);
		if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
			REG_WRITE(DSPBCNTR, temp | DISPLAY_PLANE_ENABLE);
			/* Flush the plane changes */
			REG_WRITE(DSPBSURF, REG_READ(DSPBSURF));
			REG_READ(DSPBSURF);
		}

		gma_crtc_load_lut(crtc);
	}

	/* Display/FIFO watermarks -- DSPARB */
	REG_WRITE(DSPARB, 0x00003fbf);

	/* FW1 */
	REG_WRITE(0x70034, 0x3f880a0a);

	/* FW2 */
	REG_WRITE(0x70038, 0x0b060808);

	/* FW4 */
	REG_WRITE(0x70050, 0x08030404);

	/* FW5 */
	REG_WRITE(0x70054, 0x04040404);

	/* LNC Chicken Bits - Squawk! */
	REG_WRITE(0x70400, 0x4000);

	return;
}
/*
 * oaktrail_hdmi_dpms() - encoder-level DPMS: gate the HDMI video output.
 * @encoder: HDMI encoder
 * @mode: DRM_MODE_DPMS_* state; anything but ON turns the output off
 *
 * Writes the HDMI video register: 0x99 (unit enable plus other,
 * undocumented, bits) for ON, 0 otherwise.
 *
 * NOTE(review): dpms_mode is function-static, so the "no change" shortcut
 * is shared across all encoders/devices -- fine while there is only one
 * HDMI output, but worth confirming if that ever changes.
 */
static void oaktrail_hdmi_dpms(struct drm_encoder *encoder, int mode)
{
	static int dpms_mode = -1;

	struct drm_device *dev = encoder->dev;
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
	u32 temp;

	if (dpms_mode == mode)
		return;

	if (mode != DRM_MODE_DPMS_ON)
		temp = 0x0;
	else
		temp = 0x99;

	dpms_mode = mode;
	HDMI_WRITE(HDMI_VIDEO_REG, temp);
}
static enum drm_mode_status oaktrail_hdmi_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
if (mode->clock > 165000)
return MODE_CLOCK_HIGH;
if (mode->clock < 20000)
return MODE_CLOCK_LOW;
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
return MODE_NO_DBLESCAN;
return MODE_OK;
}
/*
 * oaktrail_hdmi_detect() - hot-plug detection via the HDMI status register.
 * @connector: connector to probe
 * @force: unused; detection is a cheap register read either way
 *
 * Reads HDMI_HSR and reports "connected" when the hot-plug-detect bit
 * (HDMI_DETECT_HDP) is set.
 */
static enum drm_connector_status
oaktrail_hdmi_detect(struct drm_connector *connector, bool force)
{
	enum drm_connector_status status;
	struct drm_device *dev = connector->dev;
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
	u32 temp;

	temp = HDMI_READ(HDMI_HSR);
	DRM_DEBUG_KMS("HDMI_HSR %x\n", temp);

	if ((temp & HDMI_DETECT_HDP) != 0)
		status = connector_status_connected;
	else
		status = connector_status_disconnected;

	return status;
}
/*
 * Hard-coded 128-byte EDID block (descriptor text identifies a
 * "DELL 2709W" monitor), used as a stand-in because reading real EDID
 * over DDC is not implemented -- see oaktrail_hdmi_get_modes().
 */
static const unsigned char raw_edid[] = {
	0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x10, 0xac, 0x2f, 0xa0,
	0x53, 0x55, 0x33, 0x30, 0x16, 0x13, 0x01, 0x03, 0x0e, 0x3a, 0x24, 0x78,
	0xea, 0xe9, 0xf5, 0xac, 0x51, 0x30, 0xb4, 0x25, 0x11, 0x50, 0x54, 0xa5,
	0x4b, 0x00, 0x81, 0x80, 0xa9, 0x40, 0x71, 0x4f, 0xb3, 0x00, 0x01, 0x01,
	0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x28, 0x3c, 0x80, 0xa0, 0x70, 0xb0,
	0x23, 0x40, 0x30, 0x20, 0x36, 0x00, 0x46, 0x6c, 0x21, 0x00, 0x00, 0x1a,
	0x00, 0x00, 0x00, 0xff, 0x00, 0x47, 0x4e, 0x37, 0x32, 0x31, 0x39, 0x35,
	0x52, 0x30, 0x33, 0x55, 0x53, 0x0a, 0x00, 0x00, 0x00, 0xfc, 0x00, 0x44,
	0x45, 0x4c, 0x4c, 0x20, 0x32, 0x37, 0x30, 0x39, 0x57, 0x0a, 0x20, 0x20,
	0x00, 0x00, 0x00, 0xfd, 0x00, 0x38, 0x4c, 0x1e, 0x53, 0x11, 0x00, 0x0a,
	0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x00, 0x8d
};
/*
 * oaktrail_hdmi_get_modes() - report display modes for the HDMI connector.
 * @connector: connector being probed
 *
 * Real EDID reading over DDC is not implemented, so the hard-coded
 * raw_edid block is always used; bus 3 is only probed to warn when no
 * DDC adapter exists at all.
 *
 * Fix vs. the original: i2c_get_adapter() takes a reference on the
 * adapter which was never released, leaking a refcount on every probe.
 * The reference is now dropped with i2c_put_adapter().
 *
 * Returns the number of modes added.
 */
static int oaktrail_hdmi_get_modes(struct drm_connector *connector)
{
	struct i2c_adapter *i2c_adap;
	struct edid *edid;
	int ret = 0;

	/*
	 * FIXME: We need to figure this lot out. In theory we can
	 * read the EDID somehow but I've yet to find working reference
	 * code.
	 */
	i2c_adap = i2c_get_adapter(3);
	if (i2c_adap == NULL) {
		DRM_ERROR("No ddc adapter available!\n");
	} else {
		/* FIXME ? edid = drm_get_edid(connector, i2c_adap); */
		/* Drop the reference taken by i2c_get_adapter(). */
		i2c_put_adapter(i2c_adap);
	}

	edid = (struct edid *)raw_edid;
	if (edid) {
		drm_connector_update_edid_property(connector, edid);
		ret = drm_add_edid_modes(connector, edid);
	}
	return ret;
}
/*
 * Encoder mode_set hook.  The pipe timings are programmed at CRTC level
 * (oaktrail_crtc_hdmi_mode_set); here we only (re)enable HDMI audio.
 */
static void oaktrail_hdmi_mode_set(struct drm_encoder *encoder,
				struct drm_display_mode *mode,
				struct drm_display_mode *adjusted_mode)
{
	oaktrail_hdmi_audio_enable(encoder->dev);
}
/*
 * Connector destroy hook: intentionally empty.
 * NOTE(review): unlike other gma500 outputs this never calls
 * drm_connector_cleanup() or frees the gma_connector -- verify whether
 * that cleanup happens elsewhere or is simply missing.
 */
static void oaktrail_hdmi_destroy(struct drm_connector *connector)
{
	return;
}
/* Encoder helper vtable: mostly generic gma hooks plus HDMI dpms/mode_set. */
static const struct drm_encoder_helper_funcs oaktrail_hdmi_helper_funcs = {
	.dpms = oaktrail_hdmi_dpms,
	.prepare = gma_encoder_prepare,
	.mode_set = oaktrail_hdmi_mode_set,
	.commit = gma_encoder_commit,
};

/* Connector helper vtable: probing/mode filtering for the HDMI output. */
static const struct drm_connector_helper_funcs
					oaktrail_hdmi_connector_helper_funcs = {
	.get_modes = oaktrail_hdmi_get_modes,
	.mode_valid = oaktrail_hdmi_mode_valid,
	.best_encoder = gma_best_encoder,
};

/* Connector vtable: standard helpers plus HPD detect via HDMI_HSR. */
static const struct drm_connector_funcs oaktrail_hdmi_connector_funcs = {
	.dpms = drm_helper_connector_dpms,
	.detect = oaktrail_hdmi_detect,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.destroy = oaktrail_hdmi_destroy,
};
/*
 * oaktrail_hdmi_init() - register the HDMI connector/encoder pair.
 * @dev: drm device
 * @mode_dev: gma mode device (unused here, kept for init-table symmetry)
 *
 * Allocates the connector and encoder, initialises them with the DRM
 * core and wires up the helper vtables.  On any failure everything
 * allocated so far is released and the function returns silently (the
 * HDMI output is then simply not available).
 *
 * Fix vs. the original: the return values of drm_connector_init() and
 * drm_simple_encoder_init() were ignored, leaving partially-initialised
 * objects registered on failure; they are now checked and unwound, in
 * line with cdv_intel_crt_init().
 */
void oaktrail_hdmi_init(struct drm_device *dev,
					struct psb_intel_mode_device *mode_dev)
{
	struct gma_encoder *gma_encoder;
	struct gma_connector *gma_connector;
	struct drm_connector *connector;
	struct drm_encoder *encoder;
	int ret;

	gma_encoder = kzalloc(sizeof(struct gma_encoder), GFP_KERNEL);
	if (!gma_encoder)
		return;

	gma_connector = kzalloc(sizeof(struct gma_connector), GFP_KERNEL);
	if (!gma_connector)
		goto failed_connector;

	connector = &gma_connector->base;
	encoder = &gma_encoder->base;
	ret = drm_connector_init(dev, connector,
				 &oaktrail_hdmi_connector_funcs,
				 DRM_MODE_CONNECTOR_DVID);
	if (ret)
		goto failed_connector_init;

	ret = drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_TMDS);
	if (ret)
		goto failed_encoder_init;

	gma_connector_attach_encoder(gma_connector, gma_encoder);

	gma_encoder->type = INTEL_OUTPUT_HDMI;
	drm_encoder_helper_add(encoder, &oaktrail_hdmi_helper_funcs);
	drm_connector_helper_add(connector, &oaktrail_hdmi_connector_helper_funcs);

	connector->display_info.subpixel_order = SubPixelHorizontalRGB;
	connector->interlace_allowed = false;
	connector->doublescan_allowed = false;
	dev_info(dev->dev, "HDMI initialised.\n");

	return;

failed_encoder_init:
	drm_connector_cleanup(connector);
failed_connector_init:
	kfree(gma_connector);
failed_connector:
	kfree(gma_encoder);
}
/*
 * oaktrail_hdmi_setup() - find and bring up the HDMI controller PCI function.
 * @dev: drm device
 *
 * Looks up the Intel 0x080d PCI device, enables it, maps its BAR 0
 * registers, initialises the i2c controller and stashes the state in
 * dev_priv->hdmi_priv.  On success the pci_dev reference taken by
 * pci_get_device() is kept and later dropped by oaktrail_hdmi_teardown().
 *
 * Fix vs. the original: every error path leaked that pci_dev reference;
 * each now drops it with pci_dev_put().
 */
void oaktrail_hdmi_setup(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct pci_dev *pdev;
	struct oaktrail_hdmi_dev *hdmi_dev;
	int ret;

	pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x080d, NULL);
	if (!pdev)
		return;

	hdmi_dev = kzalloc(sizeof(struct oaktrail_hdmi_dev), GFP_KERNEL);
	if (!hdmi_dev) {
		dev_err(dev->dev, "failed to allocate memory\n");
		goto err_put_dev;
	}

	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(dev->dev, "failed to enable hdmi controller\n");
		goto err_free;
	}

	hdmi_dev->mmio = pci_resource_start(pdev, 0);
	hdmi_dev->mmio_len = pci_resource_len(pdev, 0);
	hdmi_dev->regs = ioremap(hdmi_dev->mmio, hdmi_dev->mmio_len);
	if (!hdmi_dev->regs) {
		dev_err(dev->dev, "failed to map hdmi mmio\n");
		goto err_free;
	}

	hdmi_dev->dev = pdev;
	pci_set_drvdata(pdev, hdmi_dev);

	/* Initialize i2c controller */
	ret = oaktrail_hdmi_i2c_init(hdmi_dev->dev);
	if (ret)
		dev_err(dev->dev, "HDMI I2C initialization failed\n");

	dev_priv->hdmi_priv = hdmi_dev;
	oaktrail_hdmi_audio_disable(dev);

	dev_info(dev->dev, "HDMI hardware present.\n");

	return;

err_free:
	kfree(hdmi_dev);
err_put_dev:
	pci_dev_put(pdev);
}
/*
 * oaktrail_hdmi_teardown() - undo oaktrail_hdmi_setup().
 * @dev: drm device
 *
 * Unregisters the i2c controller, unmaps the register BAR, frees the
 * state and drops the pci_dev reference taken by pci_get_device() in
 * setup.  Safe to call when setup never succeeded (hdmi_priv is NULL).
 */
void oaktrail_hdmi_teardown(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
	struct pci_dev *pdev;

	if (hdmi_dev) {
		pdev = hdmi_dev->dev;
		pci_set_drvdata(pdev, NULL);
		oaktrail_hdmi_i2c_exit(pdev);
		iounmap(hdmi_dev->regs);
		kfree(hdmi_dev);
		pci_dev_put(pdev);
	}
}
/*
 * oaktrail_hdmi_save() - snapshot HDMI-related register state for suspend.
 * @dev: drm device
 *
 * Saves the DPLL setup, pipe B timings (both the display pipe and the
 * PCH/LNW mirror set), plane B, cursor B and the pipe B palette into
 * hdmi_dev / dev_priv->regs, to be written back by
 * oaktrail_hdmi_restore() on resume.
 */
void oaktrail_hdmi_save(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
	struct psb_state *regs = &dev_priv->regs.psb;
	struct psb_pipe *pipeb = &dev_priv->regs.pipe[1];
	int i;

	/* dpll */
	hdmi_dev->saveDPLL_CTRL = PSB_RVDC32(DPLL_CTRL);
	hdmi_dev->saveDPLL_DIV_CTRL = PSB_RVDC32(DPLL_DIV_CTRL);
	hdmi_dev->saveDPLL_ADJUST = PSB_RVDC32(DPLL_ADJUST);
	hdmi_dev->saveDPLL_UPDATE = PSB_RVDC32(DPLL_UPDATE);
	hdmi_dev->saveDPLL_CLK_ENABLE = PSB_RVDC32(DPLL_CLK_ENABLE);

	/* pipe B */
	pipeb->conf = PSB_RVDC32(PIPEBCONF);
	pipeb->src = PSB_RVDC32(PIPEBSRC);
	pipeb->htotal = PSB_RVDC32(HTOTAL_B);
	pipeb->hblank = PSB_RVDC32(HBLANK_B);
	pipeb->hsync = PSB_RVDC32(HSYNC_B);
	pipeb->vtotal = PSB_RVDC32(VTOTAL_B);
	pipeb->vblank = PSB_RVDC32(VBLANK_B);
	pipeb->vsync = PSB_RVDC32(VSYNC_B);

	/* PCH/LNW mirror of the pipe B registers */
	hdmi_dev->savePCH_PIPEBCONF = PSB_RVDC32(PCH_PIPEBCONF);
	hdmi_dev->savePCH_PIPEBSRC = PSB_RVDC32(PCH_PIPEBSRC);
	hdmi_dev->savePCH_HTOTAL_B = PSB_RVDC32(PCH_HTOTAL_B);
	hdmi_dev->savePCH_HBLANK_B = PSB_RVDC32(PCH_HBLANK_B);
	hdmi_dev->savePCH_HSYNC_B = PSB_RVDC32(PCH_HSYNC_B);
	hdmi_dev->savePCH_VTOTAL_B = PSB_RVDC32(PCH_VTOTAL_B);
	hdmi_dev->savePCH_VBLANK_B = PSB_RVDC32(PCH_VBLANK_B);
	hdmi_dev->savePCH_VSYNC_B = PSB_RVDC32(PCH_VSYNC_B);

	/* plane */
	pipeb->cntr = PSB_RVDC32(DSPBCNTR);
	pipeb->stride = PSB_RVDC32(DSPBSTRIDE);
	pipeb->addr = PSB_RVDC32(DSPBBASE);
	pipeb->surf = PSB_RVDC32(DSPBSURF);
	pipeb->linoff = PSB_RVDC32(DSPBLINOFF);
	pipeb->tileoff = PSB_RVDC32(DSPBTILEOFF);

	/* cursor B */
	regs->saveDSPBCURSOR_CTRL = PSB_RVDC32(CURBCNTR);
	regs->saveDSPBCURSOR_BASE = PSB_RVDC32(CURBBASE);
	regs->saveDSPBCURSOR_POS = PSB_RVDC32(CURBPOS);

	/* save palette */
	for (i = 0; i < 256; i++)
		pipeb->palette[i] = PSB_RVDC32(PALETTE_B + (i << 2));
}
/*
 * oaktrail_hdmi_restore() - write back the state captured by
 * oaktrail_hdmi_save() on resume.
 * @dev: drm device
 *
 * Restore order matters: the DPLL first (followed by a settle delay),
 * then the pipe timings, the pipe configuration registers, and finally
 * the plane, cursor and palette.
 */
void oaktrail_hdmi_restore(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
	struct psb_state *regs = &dev_priv->regs.psb;
	struct psb_pipe *pipeb = &dev_priv->regs.pipe[1];
	int i;

	/* dpll */
	PSB_WVDC32(hdmi_dev->saveDPLL_CTRL, DPLL_CTRL);
	PSB_WVDC32(hdmi_dev->saveDPLL_DIV_CTRL, DPLL_DIV_CTRL);
	PSB_WVDC32(hdmi_dev->saveDPLL_ADJUST, DPLL_ADJUST);
	PSB_WVDC32(hdmi_dev->saveDPLL_UPDATE, DPLL_UPDATE);
	PSB_WVDC32(hdmi_dev->saveDPLL_CLK_ENABLE, DPLL_CLK_ENABLE);
	udelay(150);	/* let the DPLL settle before touching the pipe */

	/* pipe */
	PSB_WVDC32(pipeb->src, PIPEBSRC);
	PSB_WVDC32(pipeb->htotal, HTOTAL_B);
	PSB_WVDC32(pipeb->hblank, HBLANK_B);
	PSB_WVDC32(pipeb->hsync, HSYNC_B);
	PSB_WVDC32(pipeb->vtotal, VTOTAL_B);
	PSB_WVDC32(pipeb->vblank, VBLANK_B);
	PSB_WVDC32(pipeb->vsync, VSYNC_B);

	/* PCH/LNW mirror of the pipe B registers */
	PSB_WVDC32(hdmi_dev->savePCH_PIPEBSRC, PCH_PIPEBSRC);
	PSB_WVDC32(hdmi_dev->savePCH_HTOTAL_B, PCH_HTOTAL_B);
	PSB_WVDC32(hdmi_dev->savePCH_HBLANK_B, PCH_HBLANK_B);
	PSB_WVDC32(hdmi_dev->savePCH_HSYNC_B, PCH_HSYNC_B);
	PSB_WVDC32(hdmi_dev->savePCH_VTOTAL_B, PCH_VTOTAL_B);
	PSB_WVDC32(hdmi_dev->savePCH_VBLANK_B, PCH_VBLANK_B);
	PSB_WVDC32(hdmi_dev->savePCH_VSYNC_B, PCH_VSYNC_B);

	/* pipe configuration last, after the timings are in place */
	PSB_WVDC32(pipeb->conf, PIPEBCONF);
	PSB_WVDC32(hdmi_dev->savePCH_PIPEBCONF, PCH_PIPEBCONF);

	/* plane */
	PSB_WVDC32(pipeb->linoff, DSPBLINOFF);
	PSB_WVDC32(pipeb->stride, DSPBSTRIDE);
	PSB_WVDC32(pipeb->tileoff, DSPBTILEOFF);
	PSB_WVDC32(pipeb->cntr, DSPBCNTR);
	PSB_WVDC32(pipeb->surf, DSPBSURF);

	/* cursor B */
	PSB_WVDC32(regs->saveDSPBCURSOR_CTRL, CURBCNTR);
	PSB_WVDC32(regs->saveDSPBCURSOR_POS, CURBPOS);
	PSB_WVDC32(regs->saveDSPBCURSOR_BASE, CURBBASE);

	/* restore palette */
	for (i = 0; i < 256; i++)
		PSB_WVDC32(pipeb->palette[i], PALETTE_B + (i << 2));
}
| linux-master | drivers/gpu/drm/gma500/oaktrail_hdmi.c |
/*
* Copyright © 2006-2007 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors:
* Eric Anholt <[email protected]>
*/
#include <linux/delay.h>
#include <linux/i2c.h>
#include <linux/pm_runtime.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_simple_kms_helper.h>
#include "cdv_device.h"
#include "intel_bios.h"
#include "power.h"
#include "psb_drv.h"
#include "psb_intel_drv.h"
#include "psb_intel_reg.h"
/*
 * cdv_intel_crt_dpms() - DPMS for the CRT DAC via the ADPA register.
 * @encoder: CRT encoder
 * @mode: DRM_MODE_DPMS_* state
 *
 * STANDBY disables hsync only, SUSPEND disables vsync only, OFF disables
 * both syncs and the DAC; ON enables everything.
 */
static void cdv_intel_crt_dpms(struct drm_encoder *encoder, int mode)
{
	struct drm_device *dev = encoder->dev;
	u32 adpa;

	/* Start from the current value with DAC and sync controls cleared. */
	adpa = REG_READ(ADPA);
	adpa &= ~(ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE |
		  ADPA_DAC_ENABLE);

	switch (mode) {
	case DRM_MODE_DPMS_ON:
		adpa |= ADPA_DAC_ENABLE;
		break;
	case DRM_MODE_DPMS_STANDBY:
		adpa |= ADPA_DAC_ENABLE | ADPA_HSYNC_CNTL_DISABLE;
		break;
	case DRM_MODE_DPMS_SUSPEND:
		adpa |= ADPA_DAC_ENABLE | ADPA_VSYNC_CNTL_DISABLE;
		break;
	case DRM_MODE_DPMS_OFF:
		adpa |= ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE;
		break;
	}

	REG_WRITE(ADPA, adpa);
}
static enum drm_mode_status cdv_intel_crt_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
return MODE_NO_DBLESCAN;
/* The lowest clock for CDV is 20000KHz */
if (mode->clock < 20000)
return MODE_CLOCK_LOW;
/* The max clock for CDV is 355 instead of 400 */
if (mode->clock > 355000)
return MODE_CLOCK_HIGH;
return MODE_OK;
}
/*
 * cdv_intel_crt_mode_set() - program the ADPA register for a new mode.
 * @encoder: CRT encoder
 * @mode: requested mode (unused; only adjusted_mode flags matter)
 * @adjusted_mode: fixed-up mode providing the sync polarities
 *
 * Clears the SDVO mode multiplier in the pipe's DPLL_MD register, then
 * writes ADPA with the sync polarities and the pipe select bit.
 */
static void cdv_intel_crt_mode_set(struct drm_encoder *encoder,
			       struct drm_display_mode *mode,
			       struct drm_display_mode *adjusted_mode)
{

	struct drm_device *dev = encoder->dev;
	struct drm_crtc *crtc = encoder->crtc;
	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
	int dpll_md_reg;
	u32 adpa, dpll_md;
	u32 adpa_reg;

	if (gma_crtc->pipe == 0)
		dpll_md_reg = DPLL_A_MD;
	else
		dpll_md_reg = DPLL_B_MD;

	adpa_reg = ADPA;

	/*
	 * Disable separate mode multiplier used when cloning SDVO to CRT
	 * XXX this needs to be adjusted when we really are cloning
	 */
	{
		dpll_md = REG_READ(dpll_md_reg);
		REG_WRITE(dpll_md_reg,
			   dpll_md & ~DPLL_MD_UDI_MULTIPLIER_MASK);
	}

	adpa = 0;
	if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
		adpa |= ADPA_HSYNC_ACTIVE_HIGH;
	if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
		adpa |= ADPA_VSYNC_ACTIVE_HIGH;

	if (gma_crtc->pipe == 0)
		adpa |= ADPA_PIPE_A_SELECT;
	else
		adpa |= ADPA_PIPE_B_SELECT;

	REG_WRITE(adpa_reg, adpa);
}
/*
 * cdv_intel_crt_detect_hotplug() - force a CRT load-detect cycle.
 * @connector: connector being probed
 * @force: unused
 *
 * Uses CRT_HOTPLUG_EN and CRT_HOTPLUG_STAT: sets FORCE_DETECT (with a
 * 64-sample activation period and 50% voltage compare level), waits for
 * the hardware to clear the bit, and then reads the monitor-present
 * status.  The original register value is restored afterwards.
 *
 * Returns true if a CRT is connected, false otherwise.
 */
static bool cdv_intel_crt_detect_hotplug(struct drm_connector *connector,
					 bool force)
{
	struct drm_device *dev = connector->dev;
	u32 hotplug_en;
	int i, tries = 0, ret = false;
	u32 orig;

	/*
	 * On a CDV, the detect sequence needs to be done twice
	 * to get a reliable result.
	 */
	tries = 2;

	orig = hotplug_en = REG_READ(PORT_HOTPLUG_EN);
	hotplug_en &= ~(CRT_HOTPLUG_DETECT_MASK);
	hotplug_en |= CRT_HOTPLUG_FORCE_DETECT;

	hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
	hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;

	for (i = 0; i < tries ; i++) {
		unsigned long timeout;
		/* turn on the FORCE_DETECT */
		REG_WRITE(PORT_HOTPLUG_EN, hotplug_en);
		timeout = jiffies + msecs_to_jiffies(1000);
		/* wait for FORCE_DETECT to go off */
		do {
			if (!(REG_READ(PORT_HOTPLUG_EN) &
					CRT_HOTPLUG_FORCE_DETECT))
				break;
			msleep(1);
		} while (time_after(timeout, jiffies));
	}

	if ((REG_READ(PORT_HOTPLUG_STAT) & CRT_HOTPLUG_MONITOR_MASK) !=
	    CRT_HOTPLUG_MONITOR_NONE)
		ret = true;

	/* clear the interrupt we just generated, if any */
	REG_WRITE(PORT_HOTPLUG_STAT, CRT_HOTPLUG_INT_STATUS);

	/* and put the bits back */
	REG_WRITE(PORT_HOTPLUG_EN, orig);

	return ret;
}
/*
 * Connector detect hook: translate the hotplug probe result into a
 * drm_connector_status.
 */
static enum drm_connector_status cdv_intel_crt_detect(
	struct drm_connector *connector, bool force)
{
	return cdv_intel_crt_detect_hotplug(connector, force) ?
		connector_status_connected : connector_status_disconnected;
}
/*
 * Connector destroy hook: tear down the DDC bus created in
 * cdv_intel_crt_init(), then release the connector itself.
 */
static void cdv_intel_crt_destroy(struct drm_connector *connector)
{
	struct gma_connector *gma_connector = to_gma_connector(connector);
	struct gma_i2c_chan *ddc_bus = to_gma_i2c_chan(connector->ddc);

	gma_i2c_destroy(ddc_bus);
	drm_connector_cleanup(connector);
	kfree(gma_connector);
}
/* Probe modes over the connector's DDC bus; returns the number found. */
static int cdv_intel_crt_get_modes(struct drm_connector *connector)
{
	return psb_intel_ddc_get_modes(connector, connector->ddc);
}
/* No connector properties are supported; accept and ignore them all. */
static int cdv_intel_crt_set_property(struct drm_connector *connector,
				  struct drm_property *property,
				  uint64_t value)
{
	return 0;
}
/*
 * Routines for controlling stuff on the analog port
 */

/* Encoder helper vtable: generic gma prepare/commit plus CRT dpms/mode_set. */
static const struct drm_encoder_helper_funcs cdv_intel_crt_helper_funcs = {
	.dpms = cdv_intel_crt_dpms,
	.prepare = gma_encoder_prepare,
	.commit = gma_encoder_commit,
	.mode_set = cdv_intel_crt_mode_set,
};

/* Connector vtable: detection via forced load-detect, standard helpers. */
static const struct drm_connector_funcs cdv_intel_crt_connector_funcs = {
	.dpms = drm_helper_connector_dpms,
	.detect = cdv_intel_crt_detect,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.destroy = cdv_intel_crt_destroy,
	.set_property = cdv_intel_crt_set_property,
};

/* Connector helper vtable: DDC probing and mode filtering. */
static const struct drm_connector_helper_funcs
				cdv_intel_crt_connector_helper_funcs = {
	.mode_valid = cdv_intel_crt_mode_valid,
	.get_modes = cdv_intel_crt_get_modes,
	.best_encoder = gma_best_encoder,
};
/*
 * cdv_intel_crt_init() - register the VGA (CRT) connector and encoder.
 * @dev: drm device
 * @mode_dev: gma mode device (unused here, kept for init-table symmetry)
 *
 * Allocates the connector/encoder pair, creates the GPIOA DDC bus and
 * wires up the helper vtables.  On any failure everything allocated so
 * far is released and the function returns silently (the CRT output is
 * then simply not available).
 *
 * Idiom fix vs. the original: dev_printk(KERN_ERR, ...) replaced with
 * the standard dev_err() helper.
 */
void cdv_intel_crt_init(struct drm_device *dev,
			struct psb_intel_mode_device *mode_dev)
{
	struct gma_connector *gma_connector;
	struct gma_encoder *gma_encoder;
	struct gma_i2c_chan *ddc_bus;
	struct drm_connector *connector;
	struct drm_encoder *encoder;
	int ret;

	gma_encoder = kzalloc(sizeof(struct gma_encoder), GFP_KERNEL);
	if (!gma_encoder)
		return;

	gma_connector = kzalloc(sizeof(struct gma_connector), GFP_KERNEL);
	if (!gma_connector)
		goto err_free_encoder;

	/* Set up the DDC bus. */
	ddc_bus = gma_i2c_create(dev, GPIOA, "CRTDDC_A");
	if (!ddc_bus) {
		dev_err(dev->dev, "DDC bus registration failed.\n");
		goto err_free_connector;
	}

	connector = &gma_connector->base;
	connector->polled = DRM_CONNECTOR_POLL_HPD;
	ret = drm_connector_init_with_ddc(dev, connector,
					  &cdv_intel_crt_connector_funcs,
					  DRM_MODE_CONNECTOR_VGA,
					  &ddc_bus->base);
	if (ret)
		goto err_ddc_destroy;

	encoder = &gma_encoder->base;
	ret = drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_DAC);
	if (ret)
		goto err_connector_cleanup;

	gma_connector_attach_encoder(gma_connector, gma_encoder);

	gma_encoder->type = INTEL_OUTPUT_ANALOG;
	connector->interlace_allowed = 0;
	connector->doublescan_allowed = 0;

	drm_encoder_helper_add(encoder, &cdv_intel_crt_helper_funcs);
	drm_connector_helper_add(connector,
				 &cdv_intel_crt_connector_helper_funcs);

	return;

err_connector_cleanup:
	drm_connector_cleanup(&gma_connector->base);
err_ddc_destroy:
	gma_i2c_destroy(ddc_bus);
err_free_connector:
	kfree(gma_connector);
err_free_encoder:
	kfree(gma_encoder);
}
| linux-master | drivers/gpu/drm/gma500/cdv_intel_crt.c |
// SPDX-License-Identifier: GPL-2.0-only
/**************************************************************************
* Copyright (c) 2007-2011, Intel Corporation.
* All Rights Reserved.
*
**************************************************************************/
#include <linux/fb.h>
#include <linux/pfn_t.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_framebuffer.h>
#include "gem.h"
#include "psb_drv.h"
/*
* VM area struct
*/
/*
 * Fault handler for the fbdev mmap. On the first fault it inserts
 * mappings for every page of the framebuffer into the VMA, starting
 * from the beginning of the mapping rather than only the faulting page.
 */
static vm_fault_t psb_fbdev_vm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct fb_info *info = vma->vm_private_data;
	/* Rewind from the faulting address to the start of the mapping */
	unsigned long address = vmf->address - (vmf->pgoff << PAGE_SHIFT);
	/* First physical page of the framebuffer (stolen memory) */
	unsigned long pfn = info->fix.smem_start >> PAGE_SHIFT;
	vm_fault_t err = VM_FAULT_SIGBUS;
	unsigned long page_num = vma_pages(vma);
	unsigned long i;
	/* Framebuffer memory is mapped uncached */
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	/* Map the whole VMA in one go; stop on the first insertion error */
	for (i = 0; i < page_num; ++i) {
		err = vmf_insert_mixed(vma, address, __pfn_to_pfn_t(pfn, PFN_DEV));
		if (unlikely(err & VM_FAULT_ERROR))
			break;
		address += PAGE_SIZE;
		++pfn;
	}
	return err;
}
static const struct vm_operations_struct psb_fbdev_vm_ops = {
	.fault = psb_fbdev_vm_fault,
};
/*
* struct fb_ops
*/
#define CMAP_TOHW(_val, _width) ((((_val) << (_width)) + 0x7FFF - (_val)) >> 16)
/*
 * Store a palette entry. Only the 16-entry pseudo palette used by the
 * console is kept; entries beyond 255 are rejected.
 */
static int psb_fbdev_fb_setcolreg(unsigned int regno,
				  unsigned int red, unsigned int green,
				  unsigned int blue, unsigned int transp,
				  struct fb_info *info)
{
	struct drm_fb_helper *fb_helper = info->par;
	struct drm_framebuffer *fb = fb_helper->fb;
	uint32_t value;

	if (!fb)
		return -ENOMEM;

	if (regno > 255)
		return 1;

	/* Scale each 16-bit component down to the channel width */
	red = CMAP_TOHW(red, info->var.red.length);
	green = CMAP_TOHW(green, info->var.green.length);
	blue = CMAP_TOHW(blue, info->var.blue.length);
	transp = CMAP_TOHW(transp, info->var.transp.length);

	value = (red << info->var.red.offset) |
		(green << info->var.green.offset) |
		(blue << info->var.blue.offset) |
		(transp << info->var.transp.offset);

	/* Only 16/24/32 bpp framebuffers carry a pseudo palette */
	if (regno < 16) {
		switch (fb->format->cpp[0] * 8) {
		case 16:
		case 24:
		case 32:
			((uint32_t *)info->pseudo_palette)[regno] = value;
			break;
		}
	}

	return 0;
}
/*
 * Map the framebuffer into user space. Only offset-zero mappings are
 * supported; pages are inserted lazily by psb_fbdev_vm_fault().
 *
 * The original code carried a second check of vm_pgoff against
 * ~0UL >> PAGE_SHIFT, which was unreachable once vm_pgoff was known
 * to be zero; it has been removed.
 */
static int psb_fbdev_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
	if (vma->vm_pgoff != 0)
		return -EINVAL;
	/*
	 * If this is a GEM object then info->screen_base is the virtual
	 * kernel remapping of the object. FIXME: Review if this is
	 * suitable for our mmap work
	 */
	vma->vm_ops = &psb_fbdev_vm_ops;
	vma->vm_private_data = info;
	vm_flags_set(vma, VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP);
	return 0;
}
/*
 * Tear down the fbdev emulation when the last user of the fb_info is
 * gone: finalize the helper, destroy the private framebuffer, drop the
 * GEM backing object and free the helper itself. The order matters:
 * the helper must be finalized before the framebuffer it references is
 * destroyed.
 */
static void psb_fbdev_fb_destroy(struct fb_info *info)
{
	struct drm_fb_helper *fb_helper = info->par;
	struct drm_framebuffer *fb = fb_helper->fb;
	struct drm_gem_object *obj = fb->obj[0];
	drm_fb_helper_fini(fb_helper);
	drm_framebuffer_unregister_private(fb);
	/* Detach the GEM object before cleanup; we drop our own reference below */
	fb->obj[0] = NULL;
	drm_framebuffer_cleanup(fb);
	kfree(fb);
	drm_gem_object_put(obj);
	drm_client_release(&fb_helper->client);
	drm_fb_helper_unprepare(fb_helper);
	kfree(fb_helper);
}
static const struct fb_ops psb_fbdev_fb_ops = {
	.owner = THIS_MODULE,
	__FB_DEFAULT_IOMEM_OPS_RDWR,
	DRM_FB_HELPER_DEFAULT_OPS,
	.fb_setcolreg = psb_fbdev_fb_setcolreg,
	__FB_DEFAULT_IOMEM_OPS_DRAW,
	.fb_mmap = psb_fbdev_fb_mmap,
	.fb_destroy = psb_fbdev_fb_destroy,
};
/*
* struct drm_fb_helper_funcs
*/
/*
 * Allocate and set up the fbdev framebuffer: pick a format whose
 * framebuffer fits into stolen memory, allocate a stolen-page-backed
 * GEM object, wrap it in a DRM framebuffer and fill in the fb_info.
 *
 * Returns 0 on success or a negative errno on failure.
 */
static int psb_fbdev_fb_probe(struct drm_fb_helper *fb_helper,
			      struct drm_fb_helper_surface_size *sizes)
{
	struct drm_device *dev = fb_helper->dev;
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct pci_dev *pdev = to_pci_dev(dev->dev);
	struct fb_info *info;
	struct drm_framebuffer *fb;
	struct drm_mode_fb_cmd2 mode_cmd = { };
	int size;
	int ret;
	struct psb_gem_object *backing;
	struct drm_gem_object *obj;
	u32 bpp, depth;
	/* No 24-bit packed mode */
	if (sizes->surface_bpp == 24) {
		sizes->surface_bpp = 32;
		sizes->surface_depth = 24;
	}
	bpp = sizes->surface_bpp;
	depth = sizes->surface_depth;
	/*
	 * If the mode does not fit in 32 bit then switch to 16 bit to get
	 * a console on full resolution. The X mode setting server will
	 * allocate its own 32-bit GEM framebuffer.
	 */
	size = ALIGN(sizes->surface_width * DIV_ROUND_UP(bpp, 8), 64) *
		sizes->surface_height;
	size = ALIGN(size, PAGE_SIZE);
	if (size > dev_priv->vram_stolen_size) {
		sizes->surface_bpp = 16;
		sizes->surface_depth = 16;
	}
	/* Re-read in case the fallback above changed the format */
	bpp = sizes->surface_bpp;
	depth = sizes->surface_depth;
	mode_cmd.width = sizes->surface_width;
	mode_cmd.height = sizes->surface_height;
	/* Pitch is kept 64-byte aligned */
	mode_cmd.pitches[0] = ALIGN(mode_cmd.width * DIV_ROUND_UP(bpp, 8), 64);
	mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);
	size = mode_cmd.pitches[0] * mode_cmd.height;
	size = ALIGN(size, PAGE_SIZE);
	/* Allocate the framebuffer in the GTT with stolen page backing */
	backing = psb_gem_create(dev, size, "fb", true, PAGE_SIZE);
	if (IS_ERR(backing))
		return PTR_ERR(backing);
	obj = &backing->base;
	fb = psb_framebuffer_create(dev, &mode_cmd, obj);
	if (IS_ERR(fb)) {
		ret = PTR_ERR(fb);
		goto err_drm_gem_object_put;
	}
	fb_helper->fb = fb;
	info = drm_fb_helper_alloc_info(fb_helper);
	if (IS_ERR(info)) {
		ret = PTR_ERR(info);
		goto err_drm_framebuffer_unregister_private;
	}
	info->fbops = &psb_fbdev_fb_ops;
	/* Accessed stolen memory directly */
	info->screen_base = dev_priv->vram_addr + backing->offset;
	info->screen_size = size;
	drm_fb_helper_fill_info(info, fb_helper, sizes);
	info->fix.smem_start = dev_priv->stolen_base + backing->offset;
	info->fix.smem_len = size;
	info->fix.ywrapstep = 0;
	info->fix.ypanstep = 0;
	info->fix.mmio_start = pci_resource_start(pdev, 0);
	info->fix.mmio_len = pci_resource_len(pdev, 0);
	/* Clear the framebuffer so the console starts on black */
	fb_memset_io(info->screen_base, 0, info->screen_size);
	/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
	dev_dbg(dev->dev, "allocated %dx%d fb\n", fb->width, fb->height);
	return 0;
err_drm_framebuffer_unregister_private:
	drm_framebuffer_unregister_private(fb);
	fb->obj[0] = NULL;
	drm_framebuffer_cleanup(fb);
	kfree(fb);
err_drm_gem_object_put:
	drm_gem_object_put(obj);
	return ret;
}
static const struct drm_fb_helper_funcs psb_fbdev_fb_helper_funcs = {
	.fb_probe = psb_fbdev_fb_probe,
};
/*
* struct drm_client_funcs and setup code
*/
/*
 * Client unregister hook. If the fbdev was fully set up, unregistering
 * the fb_info triggers psb_fbdev_fb_destroy() which performs the full
 * teardown; otherwise release the half-initialized helper here.
 */
static void psb_fbdev_client_unregister(struct drm_client_dev *client)
{
	struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);

	if (!fb_helper->info) {
		drm_fb_helper_unprepare(fb_helper);
		drm_client_release(&fb_helper->client);
		kfree(fb_helper);
		return;
	}

	drm_fb_helper_unregister_info(fb_helper);
}
/* Restore the fbdev configuration when the last DRM client closes. */
static int psb_fbdev_client_restore(struct drm_client_dev *client)
{
	struct drm_device *dev = client->dev;

	drm_fb_helper_lastclose(dev);

	return 0;
}
/*
 * Perform the initial fbdev setup on the first hotplug event, or
 * forward later events to the already-initialized helper.
 *
 * Returns 0 on success or a negative errno on failure.
 */
static int psb_fbdev_client_hotplug(struct drm_client_dev *client)
{
	struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
	struct drm_device *dev = client->dev;
	int ret;
	/* Already initialized: just propagate the event */
	if (dev->fb_helper)
		return drm_fb_helper_hotplug_event(dev->fb_helper);
	ret = drm_fb_helper_init(dev, fb_helper);
	if (ret)
		goto err_drm_err;
	if (!drm_drv_uses_atomic_modeset(dev))
		drm_helper_disable_unused_functions(dev);
	ret = drm_fb_helper_initial_config(fb_helper);
	if (ret)
		goto err_drm_fb_helper_fini;
	return 0;
err_drm_fb_helper_fini:
	drm_fb_helper_fini(fb_helper);
err_drm_err:
	drm_err(dev, "Failed to setup gma500 fbdev emulation (ret=%d)\n", ret);
	return ret;
}
static const struct drm_client_funcs psb_fbdev_client_funcs = {
	.owner = THIS_MODULE,
	.unregister = psb_fbdev_client_unregister,
	.restore = psb_fbdev_client_restore,
	.hotplug = psb_fbdev_client_hotplug,
};
/*
 * Create and register the fbdev emulation client for this device.
 * Failures are logged but otherwise non-fatal: the driver works
 * without fbdev emulation.
 */
void psb_fbdev_setup(struct drm_psb_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->dev;
	struct drm_fb_helper *fb_helper;
	int ret;

	fb_helper = kzalloc(sizeof(*fb_helper), GFP_KERNEL);
	if (!fb_helper)
		return;

	drm_fb_helper_prepare(dev, fb_helper, 32, &psb_fbdev_fb_helper_funcs);

	ret = drm_client_init(dev, &fb_helper->client, "fbdev-gma500", &psb_fbdev_client_funcs);
	if (ret) {
		drm_err(dev, "Failed to register client: %d\n", ret);
		drm_fb_helper_unprepare(fb_helper);
		kfree(fb_helper);
		return;
	}

	drm_client_register(&fb_helper->client);
}
| linux-master | drivers/gpu/drm/gma500/fbdev.c |
/*
* Copyright 2011 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
*/
#include <linux/acpi.h>
#include "psb_drv.h"
#include "psb_irq.h"
#include "psb_intel_reg.h"
#define PCI_ASLE 0xe4
#define PCI_ASLS 0xfc
#define OPREGION_HEADER_OFFSET 0
#define OPREGION_ACPI_OFFSET 0x100
#define ACPI_CLID 0x01ac /* current lid state indicator */
#define ACPI_CDCK 0x01b0 /* current docking state indicator */
#define OPREGION_SWSCI_OFFSET 0x200
#define OPREGION_ASLE_OFFSET 0x300
#define OPREGION_VBT_OFFSET 0x400
#define OPREGION_SIGNATURE "IntelGraphicsMem"
#define MBOX_ACPI (1<<0)
#define MBOX_SWSCI (1<<1)
#define MBOX_ASLE (1<<2)
/*
 * OpRegion header at OPREGION_HEADER_OFFSET. Layouts below mirror the
 * firmware ABI and must not be changed.
 */
struct opregion_header {
	u8 signature[16];	/* must equal OPREGION_SIGNATURE */
	u32 size;
	u32 opregion_ver;
	u8 bios_ver[32];
	u8 vbios_ver[16];
	u8 driver_ver[16];
	u32 mboxes;		/* bitmask of supported mailboxes (MBOX_*) */
	u8 reserved[164];
} __packed;
/* OpRegion mailbox #1: public ACPI methods */
struct opregion_acpi {
	u32 drdy; /* driver readiness */
	u32 csts; /* notification status */
	u32 cevt; /* current event */
	u8 rsvd1[20];
	u32 didl[8]; /* supported display devices ID list */
	u32 cpdl[8]; /* currently presented display list */
	u32 cadl[8]; /* currently active display list */
	u32 nadl[8]; /* next active devices list */
	u32 aslp; /* ASL sleep time-out */
	u32 tidx; /* toggle table index */
	u32 chpd; /* current hotplug enable indicator */
	u32 clid; /* current lid state*/
	u32 cdck; /* current docking state */
	u32 sxsw; /* Sx state resume */
	u32 evts; /* ASL supported events */
	u32 cnot; /* current OS notification */
	u32 nrdy; /* driver status */
	u8 rsvd2[60];
} __packed;
/* OpRegion mailbox #2: SWSCI (unused by this driver) */
struct opregion_swsci {
	/*FIXME: add it later*/
} __packed;
/* OpRegion mailbox #3: ASLE (backlight/panel-fitting requests) */
struct opregion_asle {
	u32 ardy; /* driver readiness */
	u32 aslc; /* ASLE interrupt command */
	u32 tche; /* technology enabled indicator */
	u32 alsi; /* current ALS illuminance reading */
	u32 bclp; /* backlight brightness to set */
	u32 pfit; /* panel fitting state */
	u32 cblv; /* current brightness level */
	u16 bclm[20]; /* backlight level duty cycle mapping table */
	u32 cpfm; /* current panel fitting mode */
	u32 epfm; /* enabled panel fitting modes */
	u8 plut[74]; /* panel LUT and identifier */
	u32 pfmb; /* PWM freq and min brightness */
	u8 rsvd[102];
} __packed;
/* ASLE irq request bits */
#define ASLE_SET_ALS_ILLUM (1 << 0)
#define ASLE_SET_BACKLIGHT (1 << 1)
#define ASLE_SET_PFIT (1 << 2)
#define ASLE_SET_PWM_FREQ (1 << 3)
#define ASLE_REQ_MSK 0xf
/*
 * Response bits of ASLE irq request. This set (and the BCLP/PFIT bits
 * below) was previously defined twice with identical values; the
 * redundant copy has been dropped.
 */
#define ASLE_ALS_ILLUM_FAILED (1<<10)
#define ASLE_BACKLIGHT_FAILED (1<<12)
#define ASLE_PFIT_FAILED (1<<14)
#define ASLE_PWM_FREQ_FAILED (1<<16)
/* ASLE backlight brightness to set */
#define ASLE_BCLP_VALID (1<<31)
#define ASLE_BCLP_MSK (~(1<<31))
/* ASLE panel fitting request */
#define ASLE_PFIT_VALID (1<<31)
#define ASLE_PFIT_CENTER (1<<0)
#define ASLE_PFIT_STRETCH_TEXT (1<<1)
#define ASLE_PFIT_STRETCH_GFX (1<<2)
/* PWM frequency and minimum brightness */
#define ASLE_PFMB_BRIGHTNESS_MASK (0xff)
#define ASLE_PFMB_BRIGHTNESS_VALID (1<<8)
#define ASLE_PFMB_PWM_MASK (0x7ffffe00)
#define ASLE_PFMB_PWM_VALID (1<<31)
#define ASLE_CBLV_VALID (1<<31)
static struct psb_intel_opregion *system_opregion;
/*
 * Service an ASLE "set backlight" request from the firmware.
 * Returns 0 on success or ASLE_BACKLIGHT_FAILED for an invalid request.
 */
static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
{
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct opregion_asle *asle = dev_priv->opregion.asle;
	u32 level = bclp & ASLE_BCLP_MSK;

	DRM_DEBUG_DRIVER("asle set backlight %x\n", bclp);

	/* The request must carry the valid bit and a level in 0..255 */
	if (!(bclp & ASLE_BCLP_VALID) || level > 255)
		return ASLE_BACKLIGHT_FAILED;

	/* Scale 0..255 into the driver's brightness range */
	gma_backlight_set(dev, level * PSB_MAX_BRIGHTNESS / 255);

	/* Report the resulting level back to the firmware as a percentage */
	asle->cblv = (level * 0x64) / 0xff | ASLE_CBLV_VALID;

	return 0;
}
/*
 * Deferred handler for ASLE interrupts: read the request bits the
 * firmware wrote into aslc and service them. Only backlight requests
 * are implemented; the resulting status is written back through aslc.
 */
static void psb_intel_opregion_asle_work(struct work_struct *work)
{
	struct psb_intel_opregion *opregion =
		container_of(work, struct psb_intel_opregion, asle_work);
	struct drm_psb_private *dev_priv =
		container_of(opregion, struct drm_psb_private, opregion);
	struct opregion_asle *asle = opregion->asle;
	u32 asle_stat = 0;
	u32 asle_req;
	if (!asle)
		return;
	asle_req = asle->aslc & ASLE_REQ_MSK;
	if (!asle_req) {
		DRM_DEBUG_DRIVER("non asle set request??\n");
		return;
	}
	if (asle_req & ASLE_SET_BACKLIGHT)
		asle_stat |= asle_set_backlight(&dev_priv->dev, asle->bclp);
	/* Report the outcome back to the firmware */
	asle->aslc = asle_stat;
}
/*
 * ASLE interrupt entry point: defer the actual handling to the work
 * item so it runs outside the interrupt path.
 */
void psb_intel_opregion_asle_intr(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	if (dev_priv->opregion.asle)
		schedule_work(&dev_priv->opregion.asle_work);
}
#define ASLE_ALS_EN (1<<0)
#define ASLE_BLC_EN (1<<1)
#define ASLE_PFIT_EN (1<<2)
#define ASLE_PFMB_EN (1<<3)
/*
 * Enable ASLE handling: turn on the legacy backlight event on both
 * pipes and tell the firmware which technologies the driver handles.
 */
void psb_intel_opregion_enable_asle(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct opregion_asle *asle = dev_priv->opregion.asle;

	if (!asle || !system_opregion)
		return;

	/*
	 * Don't do this on Medfield or other non PC like devices, they
	 * use the bit for something different altogether.
	 */
	gma_enable_pipestat(dev_priv, 0, PIPE_LEGACY_BLC_EVENT_ENABLE);
	gma_enable_pipestat(dev_priv, 1, PIPE_LEGACY_BLC_EVENT_ENABLE);

	asle->tche = ASLE_ALS_EN | ASLE_BLC_EN | ASLE_PFIT_EN | ASLE_PFMB_EN;
	asle->ardy = 1;
}
#define ACPI_EV_DISPLAY_SWITCH (1<<0)
#define ACPI_EV_LID (1<<1)
#define ACPI_EV_DOCK (1<<2)
/*
 * ACPI video event notifier callback. Acknowledges the event to the
 * firmware by clearing the notification status; val and data are
 * unused.
 */
static int psb_intel_opregion_video_event(struct notifier_block *nb,
					  unsigned long val, void *data)
{
	/* The only video events relevant to opregion are 0x80. These indicate
	   either a docking event, lid switch or display switch request. In
	   Linux, these are handled by the dock, button and video drivers.
	   We might want to fix the video driver to be opregion-aware in
	   future, but right now we just indicate to the firmware that the
	   request has been handled */
	struct opregion_acpi *acpi;
	if (!system_opregion)
		return NOTIFY_DONE;
	acpi = system_opregion->acpi;
	/* Clear the notification status to acknowledge the event */
	acpi->csts = 0;
	return NOTIFY_OK;
}
static struct notifier_block psb_intel_opregion_notifier = {
	.notifier_call = psb_intel_opregion_video_event,
};
/*
 * Tell the firmware the driver is ready for ACPI video extension
 * notifications and register for them. No-op when no OpRegion was
 * detected by psb_intel_opregion_setup().
 */
void psb_intel_opregion_init(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct psb_intel_opregion *opregion = &dev_priv->opregion;
	if (!opregion->header)
		return;
	if (opregion->acpi) {
		/* Notify BIOS we are ready to handle ACPI video ext notifs.
		 * Right now, all the events are handled by the ACPI video
		 * module. We don't actually need to do anything with them. */
		opregion->acpi->csts = 0;
		opregion->acpi->drdy = 1;
		system_opregion = opregion;
		register_acpi_notifier(&psb_intel_opregion_notifier);
	}
}
/*
 * Counterpart to psb_intel_opregion_init(): tell the firmware the
 * driver is no longer ready, unregister the notifier, flush any
 * pending ASLE work and unmap the OpRegion.
 */
void psb_intel_opregion_fini(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct psb_intel_opregion *opregion = &dev_priv->opregion;
	if (!opregion->header)
		return;
	if (opregion->acpi) {
		opregion->acpi->drdy = 0;
		system_opregion = NULL;
		unregister_acpi_notifier(&psb_intel_opregion_notifier);
	}
	/* Make sure no ASLE work touches the mapping after we unmap it */
	cancel_work_sync(&opregion->asle_work);
	/* just clear all opregion memory pointers now */
	iounmap(opregion->header);
	opregion->header = NULL;
	opregion->acpi = NULL;
	opregion->swsci = NULL;
	opregion->asle = NULL;
	opregion->vbt = NULL;
}
/*
 * Locate and map the ACPI OpRegion advertised through the ASLS
 * config-space register, validate its signature and record which
 * mailbox sub-regions the firmware supports.
 *
 * Returns 0 on success, -ENOTSUPP when no OpRegion is advertised,
 * -ENOMEM if the region cannot be mapped or -EINVAL on a bad
 * signature.
 */
int psb_intel_opregion_setup(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct pci_dev *pdev = to_pci_dev(dev->dev);
	struct psb_intel_opregion *opregion = &dev_priv->opregion;
	u32 opregion_phy, mboxes;
	void __iomem *base;
	int err = 0;
	pci_read_config_dword(pdev, PCI_ASLS, &opregion_phy);
	if (opregion_phy == 0) {
		DRM_DEBUG_DRIVER("ACPI Opregion not supported\n");
		return -ENOTSUPP;
	}
	INIT_WORK(&opregion->asle_work, psb_intel_opregion_asle_work);
	DRM_DEBUG("OpRegion detected at 0x%8x\n", opregion_phy);
	base = acpi_os_ioremap(opregion_phy, 8*1024);
	if (!base)
		return -ENOMEM;
	if (memcmp(base, OPREGION_SIGNATURE, 16)) {
		DRM_DEBUG_DRIVER("opregion signature mismatch\n");
		err = -EINVAL;
		goto err_out;
	}
	opregion->header = base;
	opregion->vbt = base + OPREGION_VBT_OFFSET;
	opregion->lid_state = base + ACPI_CLID;
	/* Record only the mailboxes the firmware claims to support */
	mboxes = opregion->header->mboxes;
	if (mboxes & MBOX_ACPI) {
		DRM_DEBUG_DRIVER("Public ACPI methods supported\n");
		opregion->acpi = base + OPREGION_ACPI_OFFSET;
	}
	if (mboxes & MBOX_ASLE) {
		DRM_DEBUG_DRIVER("ASLE supported\n");
		opregion->asle = base + OPREGION_ASLE_OFFSET;
	}
	return 0;
err_out:
	iounmap(base);
	return err;
}
| linux-master | drivers/gpu/drm/gma500/opregion.c |
// SPDX-License-Identifier: GPL-2.0-only
/**************************************************************************
* Copyright (c) 2011, Intel Corporation.
* All Rights Reserved.
*
**************************************************************************/
#include "psb_drv.h"
#include "gma_device.h"
/**
 * gma_get_core_freq - read the graphics core clock frequency
 * @dev: our DRM device
 *
 * Query the clock fuses through the host bridge (device 0:0.0)
 * config-space message port and translate the encoded value into a
 * frequency in MHz, stored in dev_priv->core_freq (0 when unknown).
 */
void gma_get_core_freq(struct drm_device *dev)
{
	uint32_t clock;
	struct pci_dev *pdev = to_pci_dev(dev->dev);
	struct pci_dev *pci_root =
		pci_get_domain_bus_and_slot(pci_domain_nr(pdev->bus),
					    0, 0);
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);

	/*
	 * pci_get_domain_bus_and_slot() may return NULL; the original
	 * code dereferenced it unconditionally (sibling code in
	 * mid_bios.c performs the same check).
	 */
	if (!pci_root) {
		WARN_ON(1);
		dev_priv->core_freq = 0;
		return;
	}

	/*pci_write_config_dword(pci_root, 0xD4, 0x00C32004);*/
	/*pci_write_config_dword(pci_root, 0xD0, 0xE0033000);*/
	pci_write_config_dword(pci_root, 0xD0, 0xD0050300);
	pci_read_config_dword(pci_root, 0xD4, &clock);
	pci_dev_put(pci_root);

	switch (clock & 0x07) {
	case 0:
		dev_priv->core_freq = 100;
		break;
	case 1:
		dev_priv->core_freq = 133;
		break;
	case 2:
		dev_priv->core_freq = 150;
		break;
	case 3:
		dev_priv->core_freq = 178;
		break;
	case 4:
		dev_priv->core_freq = 200;
		break;
	case 5:
	case 6:
	case 7:
		dev_priv->core_freq = 266;
		break;
	default:
		dev_priv->core_freq = 0;
	}
}
| linux-master | drivers/gpu/drm/gma500/gma_device.c |
// SPDX-License-Identifier: GPL-2.0-only
/**************************************************************************
* Copyright (c) 2011, Intel Corporation.
* All Rights Reserved.
*
**************************************************************************/
/* TODO
* - Split functions by vbt type
* - Make them all take drm_device
* - Check ioremap failures
*/
#include <drm/drm.h>
#include "mid_bios.h"
#include "psb_drv.h"
/*
 * Read the SoC fuse settings through the host bridge config-space
 * message port to determine whether the internal panel is LVDS or
 * MIPI and what the core clock (SKU) frequency is. Results are
 * cached in dev_priv.
 */
static void mid_get_fuse_settings(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct pci_dev *pdev = to_pci_dev(dev->dev);
	struct pci_dev *pci_root =
		pci_get_domain_bus_and_slot(pci_domain_nr(pdev->bus),
					    0, 0);
	uint32_t fuse_value = 0;
	uint32_t fuse_value_tmp = 0;
#define FB_REG06 0xD0810600
#define FB_MIPI_DISABLE (1 << 11)
#define FB_REG09 0xD0810900
#define FB_SKU_MASK 0x7000
#define FB_SKU_SHIFT 12
#define FB_SKU_100 0
#define FB_SKU_100L 1
#define FB_SKU_83 2
	if (pci_root == NULL) {
		WARN_ON(1);
		return;
	}
	/* Select fuse register 6 via the message port, then read the data */
	pci_write_config_dword(pci_root, 0xD0, FB_REG06);
	pci_read_config_dword(pci_root, 0xD4, &fuse_value);
	/* FB_MIPI_DISABLE doesn't mean LVDS on with Medfield */
	if (IS_MRST(dev))
		dev_priv->iLVDS_enable = fuse_value & FB_MIPI_DISABLE;
	DRM_INFO("internal display is %s\n",
		 dev_priv->iLVDS_enable ? "LVDS display" : "MIPI display");
	/* Prevent runtime suspend at start*/
	if (dev_priv->iLVDS_enable) {
		dev_priv->is_lvds_on = true;
		dev_priv->is_mipi_on = false;
	} else {
		dev_priv->is_mipi_on = true;
		dev_priv->is_lvds_on = false;
	}
	dev_priv->video_device_fuse = fuse_value;
	/* Select fuse register 9 for the SKU (core frequency) bits */
	pci_write_config_dword(pci_root, 0xD0, FB_REG09);
	pci_read_config_dword(pci_root, 0xD4, &fuse_value);
	dev_dbg(dev->dev, "SKU values is 0x%x.\n", fuse_value);
	fuse_value_tmp = (fuse_value & FB_SKU_MASK) >> FB_SKU_SHIFT;
	dev_priv->fuse_reg_value = fuse_value;
	switch (fuse_value_tmp) {
	case FB_SKU_100:
		dev_priv->core_freq = 200;
		break;
	case FB_SKU_100L:
		dev_priv->core_freq = 100;
		break;
	case FB_SKU_83:
		dev_priv->core_freq = 166;
		break;
	default:
		dev_warn(dev->dev, "Invalid SKU values, SKU value = 0x%08x\n",
			 fuse_value_tmp);
		dev_priv->core_freq = 0;
	}
	dev_dbg(dev->dev, "LNC core clk is %dMHz.\n", dev_priv->core_freq);
	pci_dev_put(pci_root);
}
/*
* Get the revison ID, B0:D2:F0;0x08
*/
/*
 * Read the PCI revision ID of the graphics function (B0:D2:F0, offset
 * 0x08) and cache it in dev_priv->platform_rev_id.
 */
static void mid_get_pci_revID(struct drm_psb_private *dev_priv)
{
	struct pci_dev *pdev = to_pci_dev(dev_priv->dev.dev);
	struct pci_dev *pci_gfx_root;
	uint32_t platform_rev_id = 0;

	pci_gfx_root = pci_get_domain_bus_and_slot(pci_domain_nr(pdev->bus),
						   0, PCI_DEVFN(2, 0));
	if (!pci_gfx_root) {
		WARN_ON(1);
		return;
	}

	pci_read_config_dword(pci_gfx_root, 0x08, &platform_rev_id);
	pci_dev_put(pci_gfx_root);

	dev_priv->platform_rev_id = (uint8_t) platform_rev_id;
	dev_dbg(dev_priv->dev.dev, "platform_rev_id is %x\n",
		dev_priv->platform_rev_id);
}
/* Common header of all GCT/VBT revisions: "$GCT" signature + revision */
struct mid_vbt_header {
	u32 signature;
	u8 revision;
} __packed;
/* The same for r0 and r1 */
struct vbt_r0 {
	struct mid_vbt_header vbt_header;
	u8 size;	/* total size including this header */
	u8 checksum;
} __packed;
/* Revision 0x10 header: followed by a table of panel_count GCT entries */
struct vbt_r10 {
	struct mid_vbt_header vbt_header;
	u8 checksum;
	u16 size;
	u8 panel_count;
	u8 primary_panel_idx;
	u8 secondary_panel_idx;
	u8 __reserved[5];
} __packed;
/* Copy the r0/r1 VBT header from physical memory. Returns 0 or -1. */
static int read_vbt_r0(u32 addr, struct vbt_r0 *vbt)
{
	void __iomem *vbt_virtual = ioremap(addr, sizeof(*vbt));

	if (!vbt_virtual)
		return -1;

	memcpy_fromio(vbt, vbt_virtual, sizeof(*vbt));
	iounmap(vbt_virtual);

	return 0;
}
/* Copy the r10 VBT header from physical memory. Returns 0 or -1. */
static int read_vbt_r10(u32 addr, struct vbt_r10 *vbt)
{
	void __iomem *vbt_virtual = ioremap(addr, sizeof(*vbt));

	if (!vbt_virtual)
		return -1;

	memcpy_fromio(vbt, vbt_virtual, sizeof(*vbt));
	iounmap(vbt_virtual);

	return 0;
}
/*
 * Parse a revision 0 GCT: map the table that follows the VBT header
 * and cache the boot panel's configuration in dev_priv->gct_data.
 * Returns 0 on success, -1 on failure.
 */
static int mid_get_vbt_data_r0(struct drm_psb_private *dev_priv, u32 addr)
{
	struct vbt_r0 vbt;
	void __iomem *gct_virtual;
	struct gct_r0 gct;
	u8 bpi;
	if (read_vbt_r0(addr, &vbt))
		return -1;
	/* NOTE(review): assumes vbt.size >= sizeof(vbt); firmware-provided - verify */
	gct_virtual = ioremap(addr + sizeof(vbt), vbt.size - sizeof(vbt));
	if (!gct_virtual)
		return -1;
	memcpy_fromio(&gct, gct_virtual, sizeof(gct));
	iounmap(gct_virtual);
	/* Cache the boot panel's settings */
	bpi = gct.PD.BootPanelIndex;
	dev_priv->gct_data.bpi = bpi;
	dev_priv->gct_data.pt = gct.PD.PanelType;
	dev_priv->gct_data.DTD = gct.panel[bpi].DTD;
	dev_priv->gct_data.Panel_Port_Control =
		gct.panel[bpi].Panel_Port_Control;
	dev_priv->gct_data.Panel_MIPI_Display_Descriptor =
		gct.panel[bpi].Panel_MIPI_Display_Descriptor;
	return 0;
}
/*
 * Parse a revision 1 GCT. Identical to mid_get_vbt_data_r0() except
 * for the gct_r1 table layout. Returns 0 on success, -1 on failure.
 */
static int mid_get_vbt_data_r1(struct drm_psb_private *dev_priv, u32 addr)
{
	struct vbt_r0 vbt;
	void __iomem *gct_virtual;
	struct gct_r1 gct;
	u8 bpi;
	if (read_vbt_r0(addr, &vbt))
		return -1;
	/* NOTE(review): assumes vbt.size >= sizeof(vbt); firmware-provided - verify */
	gct_virtual = ioremap(addr + sizeof(vbt), vbt.size - sizeof(vbt));
	if (!gct_virtual)
		return -1;
	memcpy_fromio(&gct, gct_virtual, sizeof(gct));
	iounmap(gct_virtual);
	/* Cache the boot panel's settings */
	bpi = gct.PD.BootPanelIndex;
	dev_priv->gct_data.bpi = bpi;
	dev_priv->gct_data.pt = gct.PD.PanelType;
	dev_priv->gct_data.DTD = gct.panel[bpi].DTD;
	dev_priv->gct_data.Panel_Port_Control =
		gct.panel[bpi].Panel_Port_Control;
	dev_priv->gct_data.Panel_MIPI_Display_Descriptor =
		gct.panel[bpi].Panel_MIPI_Display_Descriptor;
	return 0;
}
/*
 * Parse a revision 0x10 GCT: copy the panel table that follows the
 * VBT header and translate the primary panel's timing descriptor into
 * dev_priv->gct_data. Returns 0 on success, a negative value on
 * failure.
 */
static int mid_get_vbt_data_r10(struct drm_psb_private *dev_priv, u32 addr)
{
	struct vbt_r10 vbt;
	void __iomem *gct_virtual;
	struct gct_r10 *gct;
	struct oaktrail_timing_info *dp_ti = &dev_priv->gct_data.DTD;
	struct gct_r10_timing_info *ti;
	int ret = -1;
	if (read_vbt_r10(addr, &vbt))
		return -1;
	/* The primary panel index must address one of the table entries */
	if (!vbt.panel_count || vbt.primary_panel_idx >= vbt.panel_count)
		return -1;
	gct = kmalloc_array(vbt.panel_count, sizeof(*gct), GFP_KERNEL);
	if (!gct)
		return -ENOMEM;
	gct_virtual = ioremap(addr + sizeof(vbt),
			      sizeof(*gct) * vbt.panel_count);
	if (!gct_virtual)
		goto out;
	/*
	 * Copy the whole panel table. The original code copied only
	 * sizeof(*gct) (the first entry), leaving
	 * gct[vbt.primary_panel_idx] uninitialized for any index > 0.
	 */
	memcpy_fromio(gct, gct_virtual, sizeof(*gct) * vbt.panel_count);
	iounmap(gct_virtual);
	dev_priv->gct_data.bpi = vbt.primary_panel_idx;
	dev_priv->gct_data.Panel_MIPI_Display_Descriptor =
		gct[vbt.primary_panel_idx].Panel_MIPI_Display_Descriptor;
	/* Expand the packed timing descriptor into the DTD fields */
	ti = &gct[vbt.primary_panel_idx].DTD;
	dp_ti->pixel_clock = ti->pixel_clock;
	dp_ti->hactive_hi = ti->hactive_hi;
	dp_ti->hactive_lo = ti->hactive_lo;
	dp_ti->hblank_hi = ti->hblank_hi;
	dp_ti->hblank_lo = ti->hblank_lo;
	dp_ti->hsync_offset_hi = ti->hsync_offset_hi;
	dp_ti->hsync_offset_lo = ti->hsync_offset_lo;
	dp_ti->hsync_pulse_width_hi = ti->hsync_pulse_width_hi;
	dp_ti->hsync_pulse_width_lo = ti->hsync_pulse_width_lo;
	dp_ti->vactive_hi = ti->vactive_hi;
	dp_ti->vactive_lo = ti->vactive_lo;
	dp_ti->vblank_hi = ti->vblank_hi;
	dp_ti->vblank_lo = ti->vblank_lo;
	dp_ti->vsync_offset_hi = ti->vsync_offset_hi;
	dp_ti->vsync_offset_lo = ti->vsync_offset_lo;
	dp_ti->vsync_pulse_width_hi = ti->vsync_pulse_width_hi;
	dp_ti->vsync_pulse_width_lo = ti->vsync_pulse_width_lo;
	ret = 0;
out:
	kfree(gct);
	return ret;
}
/*
 * Read the GCT/VBT platform configuration: fetch its physical address
 * from the graphics function's config space (offset 0xFC), validate
 * the "$GCT" header and dispatch to the revision-specific parser.
 * Sets dev_priv->has_gct on success.
 */
static void mid_get_vbt_data(struct drm_psb_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->dev;
	struct pci_dev *pdev = to_pci_dev(dev->dev);
	u32 addr;
	u8 __iomem *vbt_virtual;
	struct mid_vbt_header vbt_header;
	struct pci_dev *pci_gfx_root =
		pci_get_domain_bus_and_slot(pci_domain_nr(pdev->bus),
					    0, PCI_DEVFN(2, 0));
	int ret = -1;
	/*
	 * pci_get_domain_bus_and_slot() may return NULL; the original
	 * code dereferenced it unconditionally (mid_get_pci_revID()
	 * performs the same check).
	 */
	if (!pci_gfx_root) {
		WARN_ON(1);
		goto out;
	}
	/* Get the address of the platform config vbt */
	pci_read_config_dword(pci_gfx_root, 0xFC, &addr);
	pci_dev_put(pci_gfx_root);
	dev_dbg(dev->dev, "drm platform config address is %x\n", addr);
	if (!addr)
		goto out;
	/* get the virtual address of the vbt */
	vbt_virtual = ioremap(addr, sizeof(vbt_header));
	if (!vbt_virtual)
		goto out;
	memcpy_fromio(&vbt_header, vbt_virtual, sizeof(vbt_header));
	iounmap(vbt_virtual);
	if (memcmp(&vbt_header.signature, "$GCT", 4))
		goto out;
	dev_dbg(dev->dev, "GCT revision is %02x\n", vbt_header.revision);
	switch (vbt_header.revision) {
	case 0x00:
		ret = mid_get_vbt_data_r0(dev_priv, addr);
		break;
	case 0x01:
		ret = mid_get_vbt_data_r1(dev_priv, addr);
		break;
	case 0x10:
		ret = mid_get_vbt_data_r10(dev_priv, addr);
		break;
	default:
		dev_err(dev->dev, "Unknown revision of GCT!\n");
	}
out:
	if (ret)
		dev_err(dev->dev, "Unable to read GCT!");
	else
		dev_priv->has_gct = true;
}
/*
 * One-time chip setup for MID devices: read fuses, the GCT/VBT
 * platform config and the PCI revision ID. Always returns 0.
 */
int mid_chip_setup(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	mid_get_fuse_settings(dev);
	mid_get_vbt_data(dev_priv);
	mid_get_pci_revID(dev_priv);
	return 0;
}
| linux-master | drivers/gpu/drm/gma500/mid_bios.c |
// SPDX-License-Identifier: GPL-2.0-only
/**************************************************************************
* Copyright (c) 2011, Intel Corporation.
* All Rights Reserved.
*
**************************************************************************/
#include <drm/drm.h>
#include <drm/drm_crtc_helper.h>
#include "gma_device.h"
#include "intel_bios.h"
#include "psb_device.h"
#include "psb_drv.h"
#include "psb_intel_reg.h"
#include "psb_reg.h"
/*
 * Register the Poulsbo outputs: the internal LVDS panel and the SDVO-B
 * encoder. Always returns 0.
 */
static int psb_output_init(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	psb_intel_lvds_init(dev, &dev_priv->mode_dev);
	psb_intel_sdvo_init(dev, SDVOB);
	return 0;
}
/*
* Poulsbo Backlight Interfaces
*/
#define BLC_PWM_PRECISION_FACTOR 100 /* 10000000 */
#define BLC_PWM_FREQ_CALC_CONSTANT 32
#define MHz 1000000
#define PSB_BLC_PWM_PRECISION_FACTOR 10
#define PSB_BLC_MAX_PWM_REG_FREQ 0xFFFE
#define PSB_BLC_MIN_PWM_REG_FREQ 0x2
#define PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR (0xFFFE)
#define PSB_BACKLIGHT_PWM_CTL_SHIFT (16)
/*
 * Program the backlight PWM frequency from the VBT-provided maximum
 * frequency and the core clock, then set full brightness and start
 * the lid timer.
 *
 * Returns 0 on success, -ENOENT when no LVDS backlight data is
 * available or -ERANGE when the computed PWM value is out of range.
 */
static int psb_backlight_setup(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	unsigned long core_clock;
	/* u32 bl_max_freq; */
	/* unsigned long value; */
	u16 bl_max_freq;
	uint32_t value;
	uint32_t blc_pwm_precision_factor;
	/* get bl_max_freq and pol from dev_priv*/
	if (!dev_priv->lvds_bl) {
		dev_err(dev->dev, "Has no valid LVDS backlight info\n");
		return -ENOENT;
	}
	bl_max_freq = dev_priv->lvds_bl->freq;
	blc_pwm_precision_factor = PSB_BLC_PWM_PRECISION_FACTOR;
	core_clock = dev_priv->core_freq;
	/* PWM modulation value = core clock / (constant * max frequency) */
	value = (core_clock * MHz) / BLC_PWM_FREQ_CALC_CONSTANT;
	value *= blc_pwm_precision_factor;
	value /= bl_max_freq;
	value /= blc_pwm_precision_factor;
	if (value > (unsigned long long)PSB_BLC_MAX_PWM_REG_FREQ ||
	    value < (unsigned long long)PSB_BLC_MIN_PWM_REG_FREQ)
		return -ERANGE;
	else {
		/* Clear the polarity bit; same value for period and duty cycle */
		value &= PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR;
		REG_WRITE(BLC_PWM_CTL,
			  (value << PSB_BACKLIGHT_PWM_CTL_SHIFT) | (value));
	}
	psb_intel_lvds_set_brightness(dev, PSB_MAX_BRIGHTNESS);
	/* This must occur after the backlight is properly initialised */
	psb_lid_timer_init(dev_priv);
	return 0;
}
/*
* Provide the Poulsbo specific chip logic and low level methods
* for power management
*/
/*
 * Initialize SGX power management: adjust the clock gating control
 * register at driver load.
 */
static void psb_init_pm(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	u32 gating = PSB_RSGX32(PSB_CR_CLKGATECTL);
	gating &= ~3;	/* Disable 2D clock gating */
	gating |= 1;
	PSB_WSGX32(gating, PSB_CR_CLKGATECTL);
	/* NOTE(review): read-back presumably flushes the posted write - confirm */
	PSB_RSGX32(PSB_CR_CLKGATECTL);
}
/**
* psb_save_display_registers - save registers lost on suspend
* @dev: our DRM device
*
* Save the state we need in order to be able to restore the interface
* upon resume from suspend
*/
/**
 * psb_save_display_registers - save registers lost on suspend
 * @dev: our DRM device
 *
 * Save the state we need in order to be able to restore the interface
 * upon resume from suspend. Saves the arbitration/watermark registers
 * plus the per-CRTC and per-connector state. Always returns 0.
 */
static int psb_save_display_registers(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct gma_connector *gma_connector;
	struct drm_crtc *crtc;
	struct drm_connector_list_iter conn_iter;
	struct drm_connector *connector;
	struct psb_state *regs = &dev_priv->regs.psb;
	/* Display arbitration control + watermarks */
	regs->saveDSPARB = PSB_RVDC32(DSPARB);
	regs->saveDSPFW1 = PSB_RVDC32(DSPFW1);
	regs->saveDSPFW2 = PSB_RVDC32(DSPFW2);
	regs->saveDSPFW3 = PSB_RVDC32(DSPFW3);
	regs->saveDSPFW4 = PSB_RVDC32(DSPFW4);
	regs->saveDSPFW5 = PSB_RVDC32(DSPFW5);
	regs->saveDSPFW6 = PSB_RVDC32(DSPFW6);
	regs->saveCHICKENBIT = PSB_RVDC32(DSPCHICKENBIT);
	/* Save crtc and output state */
	drm_modeset_lock_all(dev);
	/* Only CRTCs that are actually in use need their state saved */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		if (drm_helper_crtc_in_use(crtc))
			dev_priv->ops->save_crtc(crtc);
	}
	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		gma_connector = to_gma_connector(connector);
		if (gma_connector->save)
			gma_connector->save(connector);
	}
	drm_connector_list_iter_end(&conn_iter);
	drm_modeset_unlock_all(dev);
	return 0;
}
/**
* psb_restore_display_registers - restore lost register state
* @dev: our DRM device
*
* Restore register state that was lost during suspend and resume.
*/
/**
 * psb_restore_display_registers - restore lost register state
 * @dev: our DRM device
 *
 * Restore register state that was lost during suspend and resume:
 * the arbitration/watermark registers plus the per-CRTC and
 * per-connector state saved by psb_save_display_registers().
 * Always returns 0.
 */
static int psb_restore_display_registers(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct gma_connector *gma_connector;
	struct drm_crtc *crtc;
	struct drm_connector_list_iter conn_iter;
	struct drm_connector *connector;
	struct psb_state *regs = &dev_priv->regs.psb;
	/* Display arbitration + watermarks */
	PSB_WVDC32(regs->saveDSPARB, DSPARB);
	PSB_WVDC32(regs->saveDSPFW1, DSPFW1);
	PSB_WVDC32(regs->saveDSPFW2, DSPFW2);
	PSB_WVDC32(regs->saveDSPFW3, DSPFW3);
	PSB_WVDC32(regs->saveDSPFW4, DSPFW4);
	PSB_WVDC32(regs->saveDSPFW5, DSPFW5);
	PSB_WVDC32(regs->saveDSPFW6, DSPFW6);
	PSB_WVDC32(regs->saveCHICKENBIT, DSPCHICKENBIT);
	/*make sure VGA plane is off. it initializes to on after reset!*/
	PSB_WVDC32(0x80000000, VGACNTRL);
	drm_modeset_lock_all(dev);
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
		if (drm_helper_crtc_in_use(crtc))
			dev_priv->ops->restore_crtc(crtc);
	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		gma_connector = to_gma_connector(connector);
		if (gma_connector->restore)
			gma_connector->restore(connector);
	}
	drm_connector_list_iter_end(&conn_iter);
	drm_modeset_unlock_all(dev);
	return 0;
}
/* Poulsbo needs no chip-specific power-down sequence; stub for ops. */
static int psb_power_down(struct drm_device *dev)
{
	return 0;
}
/* Poulsbo needs no chip-specific power-up sequence; stub for ops. */
static int psb_power_up(struct drm_device *dev)
{
	return 0;
}
/* Poulsbo */
/*
 * Register offset maps for the two Poulsbo display pipes.
 * Index 0 describes pipe/plane A, index 1 pipe/plane B; generic display
 * code indexes this table by pipe number instead of hard-coding register
 * names.  Note that .base and .addr intentionally alias the same
 * register (DSPxBASE) on this chip.
 */
static const struct psb_offset psb_regmap[2] = {
	{
		.fp0 = FPA0,
		.fp1 = FPA1,
		.cntr = DSPACNTR,
		.conf = PIPEACONF,
		.src = PIPEASRC,
		.dpll = DPLL_A,
		.htotal = HTOTAL_A,
		.hblank = HBLANK_A,
		.hsync = HSYNC_A,
		.vtotal = VTOTAL_A,
		.vblank = VBLANK_A,
		.vsync = VSYNC_A,
		.stride = DSPASTRIDE,
		.size = DSPASIZE,
		.pos = DSPAPOS,
		.base = DSPABASE,
		.surf = DSPASURF,
		.addr = DSPABASE,
		.status = PIPEASTAT,
		.linoff = DSPALINOFF,
		.tileoff = DSPATILEOFF,
		.palette = PALETTE_A,
	},
	{
		.fp0 = FPB0,
		.fp1 = FPB1,
		.cntr = DSPBCNTR,
		.conf = PIPEBCONF,
		.src = PIPEBSRC,
		.dpll = DPLL_B,
		.htotal = HTOTAL_B,
		.hblank = HBLANK_B,
		.hsync = HSYNC_B,
		.vtotal = VTOTAL_B,
		.vblank = VBLANK_B,
		.vsync = VSYNC_B,
		.stride = DSPBSTRIDE,
		.size = DSPBSIZE,
		.pos = DSPBPOS,
		.base = DSPBBASE,
		.surf = DSPBSURF,
		.addr = DSPBBASE,
		.status = PIPEBSTAT,
		.linoff = DSPBLINOFF,
		.tileoff = DSPBTILEOFF,
		.palette = PALETTE_B,
	}
};
/*
 * psb_chip_setup() - one-time Poulsbo chip initialisation.
 *
 * Installs the pipe register map, then brings up the core clock query,
 * the GMBUS/I2C buses, the ACPI opregion and the video BIOS tables, in
 * that order (later steps use I2C).  Always returns 0.
 */
static int psb_chip_setup(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);

	dev_priv->regmap = psb_regmap;

	gma_get_core_freq(dev);
	gma_intel_setup_gmbus(dev);
	psb_intel_opregion_init(dev);
	psb_intel_init_bios(dev);
	return 0;
}
/*
 * psb_chip_teardown() - undo psb_chip_setup().
 *
 * Stops the LVDS lid polling timer and removes the GMBUS adapters.
 * NOTE(review): the lid timer is not started in psb_chip_setup() — it is
 * presumably initialised during LVDS/backlight setup; confirm the
 * takedown here is always safe if that init never ran.
 */
static void psb_chip_teardown(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);

	psb_lid_timer_takedown(dev_priv);
	gma_intel_teardown_gmbus(dev);
}
/*
 * Chip operations vector for the original Poulsbo (PSB) hardware.
 * The mask fields are bitmaps of which pipes carry HDMI/LVDS/SDVO
 * outputs; the function pointers plug Poulsbo-specific behaviour into
 * the shared gma500 core.
 */
const struct psb_ops psb_chip_ops = {
	.name = "Poulsbo",
	.pipes = 2,
	.crtcs = 2,
	.hdmi_mask = (1 << 0),
	.lvds_mask = (1 << 1),
	.sdvo_mask = (1 << 0),
	.cursor_needs_phys = 1,		/* cursor images must be physically contiguous */
	.sgx_offset = PSB_SGX_OFFSET,
	.chip_setup = psb_chip_setup,
	.chip_teardown = psb_chip_teardown,

	.crtc_helper = &psb_intel_helper_funcs,
	.clock_funcs = &psb_clock_funcs,

	.output_init = psb_output_init,

	.backlight_init = psb_backlight_setup,
	.backlight_set = psb_intel_lvds_set_brightness,
	.backlight_name = "psb-bl",

	.init_pm = psb_init_pm,
	.save_regs = psb_save_display_registers,
	.restore_regs = psb_restore_display_registers,
	.save_crtc = gma_crtc_save,
	.restore_crtc = gma_crtc_restore,
	.power_down = psb_power_down,
	.power_up = psb_power_up,
};
| linux-master | drivers/gpu/drm/gma500/psb_device.c |
/*
* Copyright (c) 2006 Dave Airlie <[email protected]>
* Copyright © 2006-2008,2010 Intel Corporation
* Jesse Barnes <[email protected]>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors:
* Eric Anholt <[email protected]>
* Chris Wilson <[email protected]>
*/
#include <linux/delay.h>
#include <linux/i2c-algo-bit.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include "psb_drv.h"
#include "psb_intel_drv.h"
#include "psb_intel_reg.h"
/*
 * _wait_for() - poll until COND becomes true or MS milliseconds elapse.
 * Evaluates to 0 on success or -ETIMEDOUT on timeout.  Sleeps W ms
 * between polls unless W is 0 or we are inside the kernel debugger
 * (where sleeping is not allowed).
 */
#define _wait_for(COND, MS, W) ({ \
	unsigned long timeout__ = jiffies + msecs_to_jiffies(MS);	\
	int ret__ = 0;							\
	while (! (COND)) {						\
		if (time_after(jiffies, timeout__)) {			\
			ret__ = -ETIMEDOUT;				\
			break;						\
		}							\
		if (W && !(in_dbg_master()))				\
			msleep(W);					\
	}								\
	ret__;								\
})

/* Common case: 1 ms sleep between polls */
#define wait_for(COND, MS) _wait_for(COND, MS, 1)

/*
 * MMIO accessors for the GMBUS register block.  These expect a local
 * variable named 'dev_priv' to be in scope at the expansion site.
 */
#define GMBUS_REG_READ(reg) ioread32(dev_priv->gmbus_reg + (reg))
#define GMBUS_REG_WRITE(reg, val) iowrite32((val), dev_priv->gmbus_reg + (reg))

/* Intel GPIO access functions */

/* Bit-banging clock half-period / rise-fall delay in microseconds */
#define I2C_RISEFALL_TIME 20
/* Map a generic i2c_adapter back to its containing intel_gmbus */
static inline struct intel_gmbus *
to_intel_gmbus(struct i2c_adapter *i2c)
{
	return container_of(i2c, struct intel_gmbus, adapter);
}
/*
 * State for one GPIO-based (bit-banged) fallback I2C bus, used when the
 * hardware GMBUS engine cannot drive a given pin pair.
 */
struct intel_gpio {
	struct i2c_adapter adapter;	/* the registered I2C adapter */
	struct i2c_algo_bit_data algo;	/* bit-banging callbacks + timing */
	struct drm_psb_private *dev_priv;
	u32 reg;			/* GPIO control register offset */
};
/*
 * gma_intel_i2c_reset() - disable the GMBUS engine.
 *
 * Writing 0 to GMBUS0 clears the pin-pair selection, idling the
 * hardware I2C controller.
 */
void
gma_intel_i2c_reset(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);

	GMBUS_REG_WRITE(GMBUS0, 0);
}
/*
 * intel_i2c_quirk_set() - Pineview bit-banging clock-gating quirk.
 *
 * Intentionally a no-op on this driver: the DPCUNIT clock-gate disable
 * below is only required on Pineview, which this code path never runs
 * on.  The original workaround is kept (commented out) for reference.
 */
static void intel_i2c_quirk_set(struct drm_psb_private *dev_priv, bool enable)
{
	/* When using bit bashing for I2C, this bit needs to be set to 1 */
	/* FIXME: We are never Pineview, right?

	u32 val;

	if (!IS_PINEVIEW(dev_priv->dev))
		return;

	val = REG_READ(DSPCLK_GATE_D);
	if (enable)
		val |= DPCUNIT_CLOCK_GATE_DISABLE;
	else
		val &= ~DPCUNIT_CLOCK_GATE_DISABLE;

	REG_WRITE(DSPCLK_GATE_D, val);

	return;
	*/
}
/*
 * get_reserved() - read the software-preserved bits of a GPIO register.
 *
 * On most chips the pull-up disable bits must be carried over unchanged
 * by software, so every GPIO write ORs in this value.
 */
static u32 get_reserved(struct intel_gpio *gpio)
{
	struct drm_psb_private *dev_priv = gpio->dev_priv;
	const u32 keep_mask = GPIO_DATA_PULLUP_DISABLE |
			      GPIO_CLOCK_PULLUP_DISABLE;

	/* On most chips, these bits must be preserved in software. */
	return GMBUS_REG_READ(gpio->reg) & keep_mask;
}
/*
 * get_clock() - i2c-algo-bit getscl callback.
 *
 * Two writes: first mask the clock direction bit (tri-state the line so
 * the wire level can be sampled), then release to the preserved value,
 * before reading back the sampled SCL level.
 */
static int get_clock(void *data)
{
	struct intel_gpio *gpio = data;
	struct drm_psb_private *dev_priv = gpio->dev_priv;
	u32 reserved = get_reserved(gpio);

	GMBUS_REG_WRITE(gpio->reg, reserved | GPIO_CLOCK_DIR_MASK);
	GMBUS_REG_WRITE(gpio->reg, reserved);
	return (GMBUS_REG_READ(gpio->reg) & GPIO_CLOCK_VAL_IN) != 0;
}
/*
 * get_data() - i2c-algo-bit getsda callback.
 *
 * Mirror of get_clock() for the data line: tri-state SDA, then sample
 * the wire level.
 */
static int get_data(void *data)
{
	struct intel_gpio *gpio = data;
	struct drm_psb_private *dev_priv = gpio->dev_priv;
	u32 reserved = get_reserved(gpio);

	GMBUS_REG_WRITE(gpio->reg, reserved | GPIO_DATA_DIR_MASK);
	GMBUS_REG_WRITE(gpio->reg, reserved);
	return (GMBUS_REG_READ(gpio->reg) & GPIO_DATA_VAL_IN) != 0;
}
/*
 * set_clock() - i2c-algo-bit setscl callback.
 *
 * "High" is implemented as input mode (open-drain: the pull-up raises
 * the line); "low" drives the pin as an output at 0.  The read at the
 * end is a posting read to flush the MMIO write.
 */
static void set_clock(void *data, int state_high)
{
	struct intel_gpio *gpio = data;
	struct drm_psb_private *dev_priv = gpio->dev_priv;
	u32 reserved = get_reserved(gpio);
	u32 clock_bits;

	if (state_high)
		clock_bits = GPIO_CLOCK_DIR_IN | GPIO_CLOCK_DIR_MASK;
	else
		clock_bits = GPIO_CLOCK_DIR_OUT | GPIO_CLOCK_DIR_MASK |
			GPIO_CLOCK_VAL_MASK;

	GMBUS_REG_WRITE(gpio->reg, reserved | clock_bits);
	GMBUS_REG_READ(gpio->reg); /* Posting */
}
/*
 * set_data() - i2c-algo-bit setsda callback.
 *
 * Same open-drain scheme as set_clock(), applied to the data line;
 * ends with a posting read.
 */
static void set_data(void *data, int state_high)
{
	struct intel_gpio *gpio = data;
	struct drm_psb_private *dev_priv = gpio->dev_priv;
	u32 reserved = get_reserved(gpio);
	u32 data_bits;

	if (state_high)
		data_bits = GPIO_DATA_DIR_IN | GPIO_DATA_DIR_MASK;
	else
		data_bits = GPIO_DATA_DIR_OUT | GPIO_DATA_DIR_MASK |
			GPIO_DATA_VAL_MASK;

	GMBUS_REG_WRITE(gpio->reg, reserved | data_bits);
	GMBUS_REG_READ(gpio->reg);
}
/*
 * intel_gpio_create() - create a bit-banged I2C adapter for a GMBUS pin.
 *
 * Translates the GMBUS pin number into its GPIO control register, then
 * allocates and registers an i2c-algo-bit adapter driving that pin pair.
 * Returns the embedded i2c_adapter, or NULL if the pin has no GPIO
 * register or on allocation/registration failure.  The caller owns the
 * returned adapter and must i2c_del_adapter() + kfree() it.
 */
static struct i2c_adapter *
intel_gpio_create(struct drm_psb_private *dev_priv, u32 pin)
{
	/* pin number -> GPIO control register; 0 marks an unusable pin */
	static const int map_pin_to_reg[] = {
		0,
		GPIOB,
		GPIOA,
		GPIOC,
		GPIOD,
		GPIOE,
		0,
		GPIOF,
	};
	struct intel_gpio *bitbang;

	if (pin >= ARRAY_SIZE(map_pin_to_reg) || !map_pin_to_reg[pin])
		return NULL;

	bitbang = kzalloc(sizeof(*bitbang), GFP_KERNEL);
	if (!bitbang)
		return NULL;

	bitbang->reg = map_pin_to_reg[pin];
	bitbang->dev_priv = dev_priv;

	snprintf(bitbang->adapter.name, sizeof(bitbang->adapter.name),
		 "gma500 GPIO%c", "?BACDE?F"[pin]);
	bitbang->adapter.owner = THIS_MODULE;
	bitbang->adapter.algo_data = &bitbang->algo;
	bitbang->adapter.dev.parent = dev_priv->dev.dev;

	bitbang->algo.setsda = set_data;
	bitbang->algo.setscl = set_clock;
	bitbang->algo.getsda = get_data;
	bitbang->algo.getscl = get_clock;
	bitbang->algo.udelay = I2C_RISEFALL_TIME;
	bitbang->algo.timeout = usecs_to_jiffies(2200);
	bitbang->algo.data = bitbang;

	if (i2c_bit_add_bus(&bitbang->adapter)) {
		kfree(bitbang);
		return NULL;
	}

	return &bitbang->adapter;
}
/*
 * intel_i2c_quirk_xfer() - perform a transfer on a bit-banged adapter.
 *
 * Disables the GMBUS engine first (the two cannot drive the pins at the
 * same time), applies the (currently no-op) Pineview quirk, releases
 * both lines high before and after the transfer, and delegates the
 * actual transfer to the i2c-algo-bit master_xfer.
 * Returns the number of messages transferred or a negative errno.
 */
static int
intel_i2c_quirk_xfer(struct drm_psb_private *dev_priv,
		     struct i2c_adapter *adapter,
		     struct i2c_msg *msgs,
		     int num)
{
	struct intel_gpio *gpio = container_of(adapter,
					       struct intel_gpio,
					       adapter);
	int ret;

	gma_intel_i2c_reset(&dev_priv->dev);

	intel_i2c_quirk_set(dev_priv, true);
	/* Idle the bus (both lines released high) before starting */
	set_data(gpio, 1);
	set_clock(gpio, 1);
	udelay(I2C_RISEFALL_TIME);

	ret = adapter->algo->master_xfer(adapter, msgs, num);

	/* Leave the bus idle again afterwards */
	set_data(gpio, 1);
	set_clock(gpio, 1);
	intel_i2c_quirk_set(dev_priv, false);

	return ret;
}
/*
 * gmbus_xfer() - i2c_algorithm master_xfer hook using the GMBUS engine.
 *
 * Drives each message through the hardware I2C controller, packing and
 * unpacking up to 4 bytes per access of the GMBUS3 data register.  On a
 * slave NAK (GMBUS_SATOER) the controller is reset via SW_CLR_INT; on a
 * hardware timeout we permanently fall back to GPIO bit banging for
 * this pin pair.  Returns the number of messages processed (note: also
 * on the NAK path) or a negative errno.
 */
static int
gmbus_xfer(struct i2c_adapter *adapter,
	   struct i2c_msg *msgs,
	   int num)
{
	struct intel_gmbus *bus = container_of(adapter,
					       struct intel_gmbus,
					       adapter);
	struct drm_psb_private *dev_priv = adapter->algo_data;
	int i, reg_offset;

	/* Already fallen back to bit banging for this bus? */
	if (bus->force_bit)
		return intel_i2c_quirk_xfer(dev_priv,
					    bus->force_bit, msgs, num);

	reg_offset = 0;

	/* Select pin pair and clock rate (both encoded in reg0) */
	GMBUS_REG_WRITE(GMBUS0 + reg_offset, bus->reg0);

	for (i = 0; i < num; i++) {
		u16 len = msgs[i].len;
		u8 *buf = msgs[i].buf;

		if (msgs[i].flags & I2C_M_RD) {
			/* Start a read cycle; STOP only on the last message */
			GMBUS_REG_WRITE(GMBUS1 + reg_offset,
					GMBUS_CYCLE_WAIT |
					(i + 1 == num ? GMBUS_CYCLE_STOP : 0) |
					(len << GMBUS_BYTE_COUNT_SHIFT) |
					(msgs[i].addr << GMBUS_SLAVE_ADDR_SHIFT) |
					GMBUS_SLAVE_READ | GMBUS_SW_RDY);
			GMBUS_REG_READ(GMBUS2+reg_offset); /* posting read */
			do {
				u32 val, loop = 0;

				if (wait_for(GMBUS_REG_READ(GMBUS2 + reg_offset) &
					     (GMBUS_SATOER | GMBUS_HW_RDY), 50))
					goto timeout;
				if (GMBUS_REG_READ(GMBUS2 + reg_offset) & GMBUS_SATOER)
					goto clear_err;

				/* Unpack up to 4 bytes from the data register */
				val = GMBUS_REG_READ(GMBUS3 + reg_offset);
				do {
					*buf++ = val & 0xff;
					val >>= 8;
				} while (--len && ++loop < 4);
			} while (len);
		} else {
			u32 val, loop;

			/* Pack the first up-to-4 bytes before starting */
			val = loop = 0;
			do {
				val |= *buf++ << (8 * loop);
			} while (--len && ++loop < 4);

			GMBUS_REG_WRITE(GMBUS3 + reg_offset, val);
			GMBUS_REG_WRITE(GMBUS1 + reg_offset,
					(i + 1 == num ? GMBUS_CYCLE_STOP : GMBUS_CYCLE_WAIT) |
					(msgs[i].len << GMBUS_BYTE_COUNT_SHIFT) |
					(msgs[i].addr << GMBUS_SLAVE_ADDR_SHIFT) |
					GMBUS_SLAVE_WRITE | GMBUS_SW_RDY);
			GMBUS_REG_READ(GMBUS2+reg_offset); /* posting read */

			/* Feed remaining bytes 4 at a time as HW drains them */
			while (len) {
				if (wait_for(GMBUS_REG_READ(GMBUS2 + reg_offset) &
					     (GMBUS_SATOER | GMBUS_HW_RDY), 50))
					goto timeout;
				if (GMBUS_REG_READ(GMBUS2 + reg_offset) &
				    GMBUS_SATOER)
					goto clear_err;

				val = loop = 0;
				do {
					val |= *buf++ << (8 * loop);
				} while (--len && ++loop < 4);

				GMBUS_REG_WRITE(GMBUS3 + reg_offset, val);
				GMBUS_REG_READ(GMBUS2+reg_offset);
			}
		}

		/* Between messages, wait for the engine's WAIT phase */
		if (i + 1 < num && wait_for(GMBUS_REG_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_WAIT_PHASE), 50))
			goto timeout;
		if (GMBUS_REG_READ(GMBUS2 + reg_offset) & GMBUS_SATOER)
			goto clear_err;
	}

	goto done;

clear_err:
	/* Toggle the Software Clear Interrupt bit. This has the effect
	 * of resetting the GMBUS controller and so clearing the
	 * BUS_ERROR raised by the slave's NAK.
	 */
	GMBUS_REG_WRITE(GMBUS1 + reg_offset, GMBUS_SW_CLR_INT);
	GMBUS_REG_WRITE(GMBUS1 + reg_offset, 0);

done:
	/* Mark the GMBUS interface as disabled. We will re-enable it at the
	 * start of the next xfer, till then let it sleep.
	 */
	GMBUS_REG_WRITE(GMBUS0 + reg_offset, 0);
	return i;

timeout:
	DRM_INFO("GMBUS timed out, falling back to bit banging on pin %d [%s]\n",
		 bus->reg0 & 0xff, bus->adapter.name);
	GMBUS_REG_WRITE(GMBUS0 + reg_offset, 0);

	/* Hardware may not support GMBUS over these pins? Try GPIO bitbanging instead. */
	bus->force_bit = intel_gpio_create(dev_priv, bus->reg0 & 0xff);
	if (!bus->force_bit)
		return -ENOMEM;

	return intel_i2c_quirk_xfer(dev_priv, bus->force_bit, msgs, num);
}
/*
 * gmbus_func() - i2c_algorithm functionality hook.
 *
 * Reports the feature set of this adapter.  The answer is the same
 * whether the hardware GMBUS engine or the GPIO bit-banging fallback is
 * in use, so it is a constant.
 *
 * Fix: the original called bus->force_bit->algo->functionality() and
 * discarded the result; i2c-algo-bit's functionality callback is pure
 * (returns a constant, no side effects), so the call was dead code and
 * has been removed.
 */
static u32 gmbus_func(struct i2c_adapter *adapter)
{
	return (I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
		/* I2C_FUNC_10BIT_ADDR | */
		I2C_FUNC_SMBUS_READ_BLOCK_DATA |
		I2C_FUNC_SMBUS_BLOCK_PROC_CALL);
}
/* i2c_algorithm vector exported by every GMBUS adapter */
static const struct i2c_algorithm gmbus_algorithm = {
	.master_xfer	= gmbus_xfer,
	.functionality	= gmbus_func
};
/**
 * gma_intel_setup_gmbus() - instantiate all Intel i2c GMBuses
 * @dev: DRM device
 *
 * Allocates and registers one I2C adapter per GMBUS port.  Each bus is
 * immediately forced to GPIO bit banging (see the XXX below).
 *
 * Return: 0 on success or a negative errno; on failure all adapters
 * registered so far are removed again.
 */
int gma_intel_setup_gmbus(struct drm_device *dev)
{
	/* Port names indexed by GMBUS pin-pair number */
	static const char *names[GMBUS_NUM_PORTS] = {
		"disabled",
		"ssc",
		"vga",
		"panel",
		"dpc",
		"dpb",
		"reserved",
		"dpd",
	};
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	int ret, i;

	dev_priv->gmbus = kcalloc(GMBUS_NUM_PORTS, sizeof(struct intel_gmbus),
				  GFP_KERNEL);
	if (dev_priv->gmbus == NULL)
		return -ENOMEM;

	/* Moorestown keeps the GMBUS registers in the aux BAR */
	if (IS_MRST(dev))
		dev_priv->gmbus_reg = dev_priv->aux_reg;
	else
		dev_priv->gmbus_reg = dev_priv->vdc_reg;

	for (i = 0; i < GMBUS_NUM_PORTS; i++) {
		struct intel_gmbus *bus = &dev_priv->gmbus[i];

		bus->adapter.owner = THIS_MODULE;
		bus->adapter.class = I2C_CLASS_DDC;
		snprintf(bus->adapter.name,
			 sizeof(bus->adapter.name),
			 "gma500 gmbus %s",
			 names[i]);

		bus->adapter.dev.parent = dev->dev;
		bus->adapter.algo_data	= dev_priv;

		bus->adapter.algo = &gmbus_algorithm;
		ret = i2c_add_adapter(&bus->adapter);
		if (ret)
			goto err;

		/* By default use a conservative clock rate */
		bus->reg0 = i | GMBUS_RATE_100KHZ;

		/* XXX force bit banging until GMBUS is fully debugged */
		bus->force_bit = intel_gpio_create(dev_priv, i);
	}

	gma_intel_i2c_reset(&dev_priv->dev);

	return 0;

err:
	/* Unwind the adapters registered before the failure */
	while (i--) {
		struct intel_gmbus *bus = &dev_priv->gmbus[i];
		i2c_del_adapter(&bus->adapter);
	}
	kfree(dev_priv->gmbus);
	dev_priv->gmbus = NULL;
	return ret;
}
/*
 * gma_intel_gmbus_set_speed() - select the GMBUS clock rate for a bus.
 * @adapter: the GMBUS adapter to configure
 * @speed: rate code written into GMBUS0 bits 9:8
 *
 * speed:
 *	0x0 = 100 KHz
 *	0x1 = 50 KHz
 *	0x2 = 400 KHz
 *	0x3 = 1000 KHz
 *
 * Fix: mask @speed to the two rate bits so an out-of-range value can no
 * longer corrupt the pin-select (and other) bits also stored in reg0.
 * Valid inputs (0-3) behave exactly as before.
 */
void gma_intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed)
{
	struct intel_gmbus *bus = to_intel_gmbus(adapter);

	bus->reg0 = (bus->reg0 & ~(0x3 << 8)) | ((speed & 0x3) << 8);
}
/*
 * gma_intel_gmbus_force_bit() - switch a bus between GMBUS and GPIO
 * bit banging.
 * @adapter: the GMBUS adapter to reconfigure
 * @force_bit: true to create/keep a bit-banging fallback, false to
 *	tear it down and return to the hardware engine
 *
 * Idempotent in both directions: enabling twice keeps the existing
 * fallback adapter, disabling twice is a no-op.
 */
void gma_intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit)
{
	struct intel_gmbus *bus = to_intel_gmbus(adapter);

	if (!force_bit) {
		/* Drop the bit-banging fallback, if one was created */
		if (bus->force_bit) {
			i2c_del_adapter(bus->force_bit);
			kfree(bus->force_bit);
			bus->force_bit = NULL;
		}
		return;
	}

	if (bus->force_bit == NULL) {
		struct drm_psb_private *dev_priv = adapter->algo_data;

		bus->force_bit = intel_gpio_create(dev_priv,
						   bus->reg0 & 0xff);
	}
}
/*
 * gma_intel_teardown_gmbus() - remove all GMBUS adapters.
 *
 * Safe to call if setup never ran (gmbus == NULL).  Frees any
 * bit-banging fallback adapters created along the way.
 */
void gma_intel_teardown_gmbus(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	int i;

	if (dev_priv->gmbus == NULL)
		return;

	for (i = 0; i < GMBUS_NUM_PORTS; i++) {
		struct intel_gmbus *bus = &dev_priv->gmbus[i];
		if (bus->force_bit) {
			i2c_del_adapter(bus->force_bit);
			kfree(bus->force_bit);
		}
		i2c_del_adapter(&bus->adapter);
	}

	dev_priv->gmbus_reg = NULL; /* iounmap is done in driver_unload */
	kfree(dev_priv->gmbus);
	dev_priv->gmbus = NULL;
}
| linux-master | drivers/gpu/drm/gma500/intel_gmbus.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright © 2009 Intel Corporation
*/
#include <linux/delay.h>
#include <linux/i2c.h>
#include <linux/pm_runtime.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_modeset_helper_vtables.h>
#include "framebuffer.h"
#include "gem.h"
#include "gma_display.h"
#include "power.h"
#include "psb_drv.h"
#include "psb_intel_drv.h"
#include "psb_intel_reg.h"
/* Indices into mrst_limits[], selected by output type and core clock */
#define MRST_LIMIT_LVDS_100L	0
#define MRST_LIMIT_LVDS_83	1
#define MRST_LIMIT_LVDS_100	2
#define MRST_LIMIT_SDVO		3

/* Dot-clock and divider ranges for the Moorestown LVDS PLL (kHz) */
#define MRST_DOT_MIN		19750
#define MRST_DOT_MAX		120000
#define MRST_M_MIN_100L		20
#define MRST_M_MIN_100		10
#define MRST_M_MIN_83		12
#define MRST_M_MAX_100L		34
#define MRST_M_MAX_100		17
#define MRST_M_MAX_83		20
#define MRST_P1_MIN		2
#define MRST_P1_MAX_0		7
#define MRST_P1_MAX_1		8
static bool mrst_lvds_find_best_pll(const struct gma_limit_t *limit,
				    struct drm_crtc *crtc, int target,
				    int refclk, struct gma_clock_t *best_clock);

static bool mrst_sdvo_find_best_pll(const struct gma_limit_t *limit,
				    struct drm_crtc *crtc, int target,
				    int refclk, struct gma_clock_t *best_clock);

/*
 * PLL constraint tables, one per MRST_LIMIT_* selector.  The three LVDS
 * entries differ only in the M/P1 ranges (they depend on the core
 * clock); SDVO uses a different search routine and parameter set.
 */
static const struct gma_limit_t mrst_limits[] = {
	{			/* MRST_LIMIT_LVDS_100L */
	 .dot = {.min = MRST_DOT_MIN, .max = MRST_DOT_MAX},
	 .m = {.min = MRST_M_MIN_100L, .max = MRST_M_MAX_100L},
	 .p1 = {.min = MRST_P1_MIN, .max = MRST_P1_MAX_1},
	 .find_pll = mrst_lvds_find_best_pll,
	 },
	{			/* MRST_LIMIT_LVDS_83 */
	 .dot = {.min = MRST_DOT_MIN, .max = MRST_DOT_MAX},
	 .m = {.min = MRST_M_MIN_83, .max = MRST_M_MAX_83},
	 .p1 = {.min = MRST_P1_MIN, .max = MRST_P1_MAX_0},
	 .find_pll = mrst_lvds_find_best_pll,
	 },
	{			/* MRST_LIMIT_LVDS_100 */
	 .dot = {.min = MRST_DOT_MIN, .max = MRST_DOT_MAX},
	 .m = {.min = MRST_M_MIN_100, .max = MRST_M_MAX_100},
	 .p1 = {.min = MRST_P1_MIN, .max = MRST_P1_MAX_1},
	 .find_pll = mrst_lvds_find_best_pll,
	 },
	{			/* MRST_LIMIT_SDVO */
	 .vco = {.min = 1400000, .max = 2800000},
	 .n = {.min = 3, .max = 7},
	 .m = {.min = 80, .max = 137},
	 .p1 = {.min = 1, .max = 2},
	 .p2 = {.dot_limit = 200000, .p2_slow = 10, .p2_fast = 10},
	 .find_pll = mrst_sdvo_find_best_pll,
	 },
};
/* Smallest M divisor appearing in any LVDS limit table above */
#define MRST_M_MIN		    10

/*
 * Translation table from an LVDS M divisor (index = m - MRST_M_MIN)
 * to the opaque encoding the hardware expects in the FP register.
 */
static const u32 oaktrail_m_converts[] = {
	0x2B, 0x15, 0x2A, 0x35, 0x1A, 0x0D, 0x26, 0x33, 0x19, 0x2C,
	0x36, 0x3B, 0x1D, 0x2E, 0x37, 0x1B, 0x2D, 0x16, 0x0B, 0x25,
	0x12, 0x09, 0x24, 0x32, 0x39, 0x1c,
};
/*
 * mrst_limit() - pick the PLL constraint table for a CRTC.
 *
 * LVDS/MIPI limits depend on the core clock (100/166/200 MHz); SDVO has
 * a single table.
 *
 * NOTE(review): returns NULL both for unsupported output types and for
 * an LVDS panel with an unrecognised core_freq — callers must check
 * before dereferencing.
 */
static const struct gma_limit_t *mrst_limit(struct drm_crtc *crtc,
					    int refclk)
{
	const struct gma_limit_t *limit = NULL;
	struct drm_device *dev = crtc->dev;
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);

	if (gma_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)
	    || gma_pipe_has_type(crtc, INTEL_OUTPUT_MIPI)) {
		switch (dev_priv->core_freq) {
		case 100:
			limit = &mrst_limits[MRST_LIMIT_LVDS_100L];
			break;
		case 166:
			limit = &mrst_limits[MRST_LIMIT_LVDS_83];
			break;
		case 200:
			limit = &mrst_limits[MRST_LIMIT_LVDS_100];
			break;
		}
	} else if (gma_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) {
		limit = &mrst_limits[MRST_LIMIT_SDVO];
	} else {
		limit = NULL;
		dev_err(dev->dev, "mrst_limit Wrong display type.\n");
	}

	return limit;
}
/** Derive the pixel clock for the given refclk and divisors for 8xx chips. */
/* dot = refclk * m / (14 * p1); 14 is the fixed LVDS pre-divider. */
static void mrst_lvds_clock(int refclk, struct gma_clock_t *clock)
{
	clock->dot = (refclk * clock->m) / (14 * clock->p1);
}
/* Dump a computed PLL configuration to the DRM debug log */
static void mrst_print_pll(struct gma_clock_t *clock)
{
	DRM_DEBUG_DRIVER("dotclock=%d,  m=%d, m1=%d, m2=%d, n=%d, p1=%d, p2=%d\n",
			 clock->dot, clock->m, clock->m1, clock->m2, clock->n,
			 clock->p1, clock->p2);
}
/*
 * mrst_sdvo_find_best_pll() - exhaustive divisor search for SDVO.
 *
 * Scans (m, n, p1) within the limit table, keeping the combination with
 * the smallest relative frequency error (in units of 0.01%).  The inner
 * break/continue pruning relies on target_vco and freq_error being
 * monotonic in p1.  Returns true only on an exact (zero-error) match;
 * an approximate best is still stored in *best_clock either way.
 */
static bool mrst_sdvo_find_best_pll(const struct gma_limit_t *limit,
				    struct drm_crtc *crtc, int target,
				    int refclk, struct gma_clock_t *best_clock)
{
	struct gma_clock_t clock;
	u32 target_vco, actual_freq;
	s32 freq_error, min_error = 100000;

	memset(best_clock, 0, sizeof(*best_clock));
	memset(&clock, 0, sizeof(clock));

	for (clock.m = limit->m.min; clock.m <= limit->m.max; clock.m++) {
		for (clock.n = limit->n.min; clock.n <= limit->n.max;
		     clock.n++) {
			for (clock.p1 = limit->p1.min;
			     clock.p1 <= limit->p1.max; clock.p1++) {
				/* p2 value always stored in p2_slow on SDVO */
				clock.p = clock.p1 * limit->p2.p2_slow;
				target_vco = target * clock.p;

				/* VCO will increase at this point so break */
				if (target_vco > limit->vco.max)
					break;

				if (target_vco < limit->vco.min)
					continue;

				actual_freq = (refclk * clock.m) /
					      (clock.n * clock.p);
				freq_error = 10000 -
					     ((target * 10000) / actual_freq);

				if (freq_error < -min_error) {
					/* freq_error will start to decrease at
					   this point so break */
					break;
				}

				if (freq_error < 0)
					freq_error = -freq_error;
				if (freq_error < min_error) {
					min_error = freq_error;
					*best_clock = clock;
				}
			}
		}
		if (min_error == 0)	/* exact match found; stop early */
			break;
	}

	return min_error == 0;
}
/*
* Returns a set of divisors for the desired target clock with the given refclk,
* or FALSE. Divisor values are the actual divisors for
*/
static bool mrst_lvds_find_best_pll(const struct gma_limit_t *limit,
struct drm_crtc *crtc, int target,
int refclk, struct gma_clock_t *best_clock)
{
struct gma_clock_t clock;
int err = target;
memset(best_clock, 0, sizeof(*best_clock));
memset(&clock, 0, sizeof(clock));
for (clock.m = limit->m.min; clock.m <= limit->m.max; clock.m++) {
for (clock.p1 = limit->p1.min; clock.p1 <= limit->p1.max;
clock.p1++) {
int this_err;
mrst_lvds_clock(refclk, &clock);
this_err = abs(clock.dot - target);
if (this_err < err) {
*best_clock = clock;
err = this_err;
}
}
}
return err != target;
}
/*
 * Sets the power management mode of the pipe and plane.
 *
 * This code should probably grow support for turning the cursor off and
 * back on appropriately at the same time as we're turning the pipe off/on.
 *
 * When the pipe carries an SDVO output, the same sequence is replayed on
 * the auxiliary register bank (need_aux == 1, loop runs twice).  The
 * DPLL/pipe/plane enable and disable ordering below follows the hardware
 * bring-up sequence and must not be reordered.
 */
static void oaktrail_crtc_dpms(struct drm_crtc *crtc, int mode)
{
	struct drm_device *dev = crtc->dev;
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
	int pipe = gma_crtc->pipe;
	const struct psb_offset *map = &dev_priv->regmap[pipe];
	u32 temp;
	int i;
	int need_aux = gma_pipe_has_type(crtc, INTEL_OUTPUT_SDVO) ? 1 : 0;

	/* HDMI has its own DPMS handling */
	if (gma_pipe_has_type(crtc, INTEL_OUTPUT_HDMI)) {
		oaktrail_crtc_hdmi_dpms(crtc, mode);
		return;
	}

	if (!gma_power_begin(dev, true))
		return;

	/* XXX: When our outputs are all unaware of DPMS modes other than off
	 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
	 */
	switch (mode) {
	case DRM_MODE_DPMS_ON:
	case DRM_MODE_DPMS_STANDBY:
	case DRM_MODE_DPMS_SUSPEND:
		for (i = 0; i <= need_aux; i++) {
			/* Enable the DPLL */
			temp = REG_READ_WITH_AUX(map->dpll, i);
			if ((temp & DPLL_VCO_ENABLE) == 0) {
				/* Write without VCO first, then twice with it,
				 * waiting for the clocks to settle each time */
				REG_WRITE_WITH_AUX(map->dpll, temp, i);
				REG_READ_WITH_AUX(map->dpll, i);
				/* Wait for the clocks to stabilize. */
				udelay(150);
				REG_WRITE_WITH_AUX(map->dpll,
						   temp | DPLL_VCO_ENABLE, i);
				REG_READ_WITH_AUX(map->dpll, i);
				/* Wait for the clocks to stabilize. */
				udelay(150);
				REG_WRITE_WITH_AUX(map->dpll,
						   temp | DPLL_VCO_ENABLE, i);
				REG_READ_WITH_AUX(map->dpll, i);
				/* Wait for the clocks to stabilize. */
				udelay(150);
			}

			/* Enable the pipe */
			temp = REG_READ_WITH_AUX(map->conf, i);
			if ((temp & PIPEACONF_ENABLE) == 0) {
				REG_WRITE_WITH_AUX(map->conf,
						   temp | PIPEACONF_ENABLE, i);
			}

			/* Enable the plane */
			temp = REG_READ_WITH_AUX(map->cntr, i);
			if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
				REG_WRITE_WITH_AUX(map->cntr,
						   temp | DISPLAY_PLANE_ENABLE,
						   i);
				/* Flush the plane changes */
				REG_WRITE_WITH_AUX(map->base,
					REG_READ_WITH_AUX(map->base, i), i);
			}
		}
		gma_crtc_load_lut(crtc);

		/* Give the overlay scaler a chance to enable
		   if it's on this pipe */
		/* psb_intel_crtc_dpms_video(crtc, true); TODO */
		break;
	case DRM_MODE_DPMS_OFF:
		/* Give the overlay scaler a chance to disable
		 * if it's on this pipe */
		/* psb_intel_crtc_dpms_video(crtc, FALSE); TODO */

		for (i = 0; i <= need_aux; i++) {
			/* Disable the VGA plane that we never use */
			REG_WRITE_WITH_AUX(VGACNTRL, VGA_DISP_DISABLE, i);
			/* Disable display plane */
			temp = REG_READ_WITH_AUX(map->cntr, i);
			if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
				REG_WRITE_WITH_AUX(map->cntr,
					temp & ~DISPLAY_PLANE_ENABLE, i);
				/* Flush the plane changes */
				/* NOTE(review): the ON path flushes with
				 * REG_READ_WITH_AUX(map->base, i); plain
				 * REG_READ here may miss the aux bank —
				 * confirm this asymmetry is intentional. */
				REG_WRITE_WITH_AUX(map->base,
						   REG_READ(map->base), i);
				REG_READ_WITH_AUX(map->base, i);
			}

			/* Next, disable display pipes */
			temp = REG_READ_WITH_AUX(map->conf, i);
			if ((temp & PIPEACONF_ENABLE) != 0) {
				REG_WRITE_WITH_AUX(map->conf,
						   temp & ~PIPEACONF_ENABLE, i);
				REG_READ_WITH_AUX(map->conf, i);
			}
			/* Wait for the pipe disable to take effect. */
			gma_wait_for_vblank(dev);

			temp = REG_READ_WITH_AUX(map->dpll, i);
			if ((temp & DPLL_VCO_ENABLE) != 0) {
				REG_WRITE_WITH_AUX(map->dpll,
						   temp & ~DPLL_VCO_ENABLE, i);
				REG_READ_WITH_AUX(map->dpll, i);
			}

			/* Wait for the clocks to turn off. */
			udelay(150);
		}
		break;
	}

	/* Set FIFO Watermarks (values taken from EMGD) */
	REG_WRITE(DSPARB, 0x3f80);
	REG_WRITE(DSPFW1, 0x3f8f0404);
	REG_WRITE(DSPFW2, 0x04040f04);
	REG_WRITE(DSPFW3, 0x0);
	REG_WRITE(DSPFW4, 0x04040404);
	REG_WRITE(DSPFW5, 0x04040404);
	REG_WRITE(DSPFW6, 0x78);
	REG_WRITE(DSPCHICKENBIT, REG_READ(DSPCHICKENBIT) | 0xc040);

	gma_power_end(dev);
}
/*
 * Return the pipe currently connected to the panel fitter,
 * or -1 if the panel fitter is not present or not in use
 */
static int oaktrail_panel_fitter_pipe(struct drm_device *dev)
{
	u32 ctl = REG_READ(PFIT_CONTROL);

	/* Fitter disabled: no pipe owns it */
	if (!(ctl & PFIT_ENABLE))
		return -1;

	/* Pipe select field lives in bits 30:29 */
	return (ctl >> 29) & 3;
}
static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
int x, int y,
struct drm_framebuffer *old_fb)
{
struct drm_device *dev = crtc->dev;
struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
int pipe = gma_crtc->pipe;
const struct psb_offset *map = &dev_priv->regmap[pipe];
int refclk = 0;
struct gma_clock_t clock;
const struct gma_limit_t *limit;
u32 dpll = 0, fp = 0, dspcntr, pipeconf;
bool ok, is_sdvo = false;
bool is_lvds = false;
bool is_mipi = false;
struct gma_encoder *gma_encoder = NULL;
uint64_t scalingType = DRM_MODE_SCALE_FULLSCREEN;
struct drm_connector_list_iter conn_iter;
struct drm_connector *connector;
int i;
int need_aux = gma_pipe_has_type(crtc, INTEL_OUTPUT_SDVO) ? 1 : 0;
if (gma_pipe_has_type(crtc, INTEL_OUTPUT_HDMI))
return oaktrail_crtc_hdmi_mode_set(crtc, mode, adjusted_mode, x, y, old_fb);
if (!gma_power_begin(dev, true))
return 0;
drm_mode_copy(&gma_crtc->saved_mode, mode);
drm_mode_copy(&gma_crtc->saved_adjusted_mode, adjusted_mode);
drm_connector_list_iter_begin(dev, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter) {
if (!connector->encoder || connector->encoder->crtc != crtc)
continue;
gma_encoder = gma_attached_encoder(connector);
switch (gma_encoder->type) {
case INTEL_OUTPUT_LVDS:
is_lvds = true;
break;
case INTEL_OUTPUT_SDVO:
is_sdvo = true;
break;
case INTEL_OUTPUT_MIPI:
is_mipi = true;
break;
}
break;
}
if (gma_encoder)
drm_object_property_get_value(&connector->base,
dev->mode_config.scaling_mode_property, &scalingType);
drm_connector_list_iter_end(&conn_iter);
/* Disable the VGA plane that we never use */
for (i = 0; i <= need_aux; i++)
REG_WRITE_WITH_AUX(VGACNTRL, VGA_DISP_DISABLE, i);
/* Disable the panel fitter if it was on our pipe */
if (oaktrail_panel_fitter_pipe(dev) == pipe)
REG_WRITE(PFIT_CONTROL, 0);
for (i = 0; i <= need_aux; i++) {
REG_WRITE_WITH_AUX(map->src, ((mode->crtc_hdisplay - 1) << 16) |
(mode->crtc_vdisplay - 1), i);
}
if (scalingType == DRM_MODE_SCALE_NO_SCALE) {
/* Moorestown doesn't have register support for centering so
* we need to mess with the h/vblank and h/vsync start and
* ends to get centering */
int offsetX = 0, offsetY = 0;
offsetX = (adjusted_mode->crtc_hdisplay -
mode->crtc_hdisplay) / 2;
offsetY = (adjusted_mode->crtc_vdisplay -
mode->crtc_vdisplay) / 2;
for (i = 0; i <= need_aux; i++) {
REG_WRITE_WITH_AUX(map->htotal, (mode->crtc_hdisplay - 1) |
((adjusted_mode->crtc_htotal - 1) << 16), i);
REG_WRITE_WITH_AUX(map->vtotal, (mode->crtc_vdisplay - 1) |
((adjusted_mode->crtc_vtotal - 1) << 16), i);
REG_WRITE_WITH_AUX(map->hblank,
(adjusted_mode->crtc_hblank_start - offsetX - 1) |
((adjusted_mode->crtc_hblank_end - offsetX - 1) << 16), i);
REG_WRITE_WITH_AUX(map->hsync,
(adjusted_mode->crtc_hsync_start - offsetX - 1) |
((adjusted_mode->crtc_hsync_end - offsetX - 1) << 16), i);
REG_WRITE_WITH_AUX(map->vblank,
(adjusted_mode->crtc_vblank_start - offsetY - 1) |
((adjusted_mode->crtc_vblank_end - offsetY - 1) << 16), i);
REG_WRITE_WITH_AUX(map->vsync,
(adjusted_mode->crtc_vsync_start - offsetY - 1) |
((adjusted_mode->crtc_vsync_end - offsetY - 1) << 16), i);
}
} else {
for (i = 0; i <= need_aux; i++) {
REG_WRITE_WITH_AUX(map->htotal, (adjusted_mode->crtc_hdisplay - 1) |
((adjusted_mode->crtc_htotal - 1) << 16), i);
REG_WRITE_WITH_AUX(map->vtotal, (adjusted_mode->crtc_vdisplay - 1) |
((adjusted_mode->crtc_vtotal - 1) << 16), i);
REG_WRITE_WITH_AUX(map->hblank, (adjusted_mode->crtc_hblank_start - 1) |
((adjusted_mode->crtc_hblank_end - 1) << 16), i);
REG_WRITE_WITH_AUX(map->hsync, (adjusted_mode->crtc_hsync_start - 1) |
((adjusted_mode->crtc_hsync_end - 1) << 16), i);
REG_WRITE_WITH_AUX(map->vblank, (adjusted_mode->crtc_vblank_start - 1) |
((adjusted_mode->crtc_vblank_end - 1) << 16), i);
REG_WRITE_WITH_AUX(map->vsync, (adjusted_mode->crtc_vsync_start - 1) |
((adjusted_mode->crtc_vsync_end - 1) << 16), i);
}
}
/* Flush the plane changes */
{
const struct drm_crtc_helper_funcs *crtc_funcs =
crtc->helper_private;
crtc_funcs->mode_set_base(crtc, x, y, old_fb);
}
/* setup pipeconf */
pipeconf = REG_READ(map->conf);
/* Set up the display plane register */
dspcntr = REG_READ(map->cntr);
dspcntr |= DISPPLANE_GAMMA_ENABLE;
if (pipe == 0)
dspcntr |= DISPPLANE_SEL_PIPE_A;
else
dspcntr |= DISPPLANE_SEL_PIPE_B;
if (is_mipi)
goto oaktrail_crtc_mode_set_exit;
dpll = 0; /*BIT16 = 0 for 100MHz reference */
refclk = is_sdvo ? 96000 : dev_priv->core_freq * 1000;
limit = mrst_limit(crtc, refclk);
ok = limit->find_pll(limit, crtc, adjusted_mode->clock,
refclk, &clock);
if (is_sdvo) {
/* Convert calculated values to register values */
clock.p1 = (1L << (clock.p1 - 1));
clock.m -= 2;
clock.n = (1L << (clock.n - 1));
}
if (!ok)
DRM_ERROR("Failed to find proper PLL settings");
mrst_print_pll(&clock);
if (is_sdvo)
fp = clock.n << 16 | clock.m;
else
fp = oaktrail_m_converts[(clock.m - MRST_M_MIN)] << 8;
dpll |= DPLL_VGA_MODE_DIS;
dpll |= DPLL_VCO_ENABLE;
if (is_lvds)
dpll |= DPLLA_MODE_LVDS;
else
dpll |= DPLLB_MODE_DAC_SERIAL;
if (is_sdvo) {
int sdvo_pixel_multiply =
adjusted_mode->clock / mode->clock;
dpll |= DPLL_DVO_HIGH_SPEED;
dpll |=
(sdvo_pixel_multiply -
1) << SDVO_MULTIPLIER_SHIFT_HIRES;
}
/* compute bitmask from p1 value */
if (is_sdvo)
dpll |= clock.p1 << 16; // dpll |= (1 << (clock.p1 - 1)) << 16;
else
dpll |= (1 << (clock.p1 - 2)) << 17;
dpll |= DPLL_VCO_ENABLE;
if (dpll & DPLL_VCO_ENABLE) {
for (i = 0; i <= need_aux; i++) {
REG_WRITE_WITH_AUX(map->fp0, fp, i);
REG_WRITE_WITH_AUX(map->dpll, dpll & ~DPLL_VCO_ENABLE, i);
REG_READ_WITH_AUX(map->dpll, i);
/* Check the DPLLA lock bit PIPEACONF[29] */
udelay(150);
}
}
for (i = 0; i <= need_aux; i++) {
REG_WRITE_WITH_AUX(map->fp0, fp, i);
REG_WRITE_WITH_AUX(map->dpll, dpll, i);
REG_READ_WITH_AUX(map->dpll, i);
/* Wait for the clocks to stabilize. */
udelay(150);
/* write it again -- the BIOS does, after all */
REG_WRITE_WITH_AUX(map->dpll, dpll, i);
REG_READ_WITH_AUX(map->dpll, i);
/* Wait for the clocks to stabilize. */
udelay(150);
REG_WRITE_WITH_AUX(map->conf, pipeconf, i);
REG_READ_WITH_AUX(map->conf, i);
gma_wait_for_vblank(dev);
REG_WRITE_WITH_AUX(map->cntr, dspcntr, i);
gma_wait_for_vblank(dev);
}
oaktrail_crtc_mode_set_exit:
gma_power_end(dev);
return 0;
}
/*
 * oaktrail_pipe_set_base() - point the display plane at a framebuffer.
 *
 * Programs stride, pixel format and the (x, y) panning offset, then
 * latches the new surface address.  Returns 0 on success (including the
 * benign "no fb bound" and "power begin failed" cases) or -EINVAL for
 * an unsupported colour depth.
 */
static int oaktrail_pipe_set_base(struct drm_crtc *crtc,
			    int x, int y, struct drm_framebuffer *old_fb)
{
	struct drm_device *dev = crtc->dev;
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
	struct drm_framebuffer *fb = crtc->primary->fb;
	int pipe = gma_crtc->pipe;
	const struct psb_offset *map = &dev_priv->regmap[pipe];
	unsigned long start, offset;

	u32 dspcntr;
	int ret = 0;

	/* no fb bound */
	if (!fb) {
		dev_dbg(dev->dev, "No FB bound\n");
		return 0;
	}

	if (!gma_power_begin(dev, true))
		return 0;

	/* GTT offset of the bo plus the byte offset of pixel (x, y) */
	start = to_psb_gem_object(fb->obj[0])->offset;
	offset = y * fb->pitches[0] + x * fb->format->cpp[0];

	REG_WRITE(map->stride, fb->pitches[0]);

	dspcntr = REG_READ(map->cntr);
	dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;

	switch (fb->format->cpp[0] * 8) {
	case 8:
		dspcntr |= DISPPLANE_8BPP;
		break;
	case 16:
		if (fb->format->depth == 15)
			dspcntr |= DISPPLANE_15_16BPP;
		else
			dspcntr |= DISPPLANE_16BPP;
		break;
	case 24:
	case 32:
		dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
		break;
	default:
		dev_err(dev->dev, "Unknown color depth\n");
		ret = -EINVAL;
		goto pipe_set_base_exit;
	}
	REG_WRITE(map->cntr, dspcntr);

	/* Write base then surf: the surf write latches the update */
	REG_WRITE(map->base, offset);
	REG_READ(map->base);
	REG_WRITE(map->surf, start);
	REG_READ(map->surf);

pipe_set_base_exit:
	gma_power_end(dev);

	return ret;
}
/* CRTC helper vtable for Oaktrail/Moorestown pipes */
const struct drm_crtc_helper_funcs oaktrail_helper_funcs = {
	.dpms = oaktrail_crtc_dpms,
	.mode_set = oaktrail_crtc_mode_set,
	.mode_set_base = oaktrail_pipe_set_base,
	.prepare = gma_crtc_prepare,
	.commit = gma_crtc_commit,
};
/* Not used yet */
/* Clock-calculation hooks for Moorestown (kept for future wiring) */
const struct gma_clock_funcs mrst_clock_funcs = {
	.clock = mrst_lvds_clock,
	.limit = mrst_limit,
	.pll_is_valid = gma_pll_is_valid,
};
| linux-master | drivers/gpu/drm/gma500/oaktrail_crtc.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright © 2006-2011 Intel Corporation
*
* Authors:
* Eric Anholt <[email protected]>
* Dave Airlie <[email protected]>
* Jesse Barnes <[email protected]>
*/
#include <linux/dmi.h>
#include <linux/i2c.h>
#include <linux/pm_runtime.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_simple_kms_helper.h>
#include "cdv_device.h"
#include "intel_bios.h"
#include "power.h"
#include "psb_drv.h"
#include "psb_intel_drv.h"
#include "psb_intel_reg.h"
/*
 * LVDS I2C backlight control macros
 */
#define BRIGHTNESS_MAX_LEVEL 100
#define BRIGHTNESS_MASK 0xFF
#define BLC_I2C_TYPE	0x01
/* NOTE(review): "TYPT" is a historical typo for "TYPE", kept because
 * other files reference this macro name. */
#define BLC_PWM_TYPT	0x02

#define BLC_POLARITY_NORMAL 0
#define BLC_POLARITY_INVERSE 1

/* Valid range and scaling for the PWM frequency field */
#define PSB_BLC_MAX_PWM_REG_FREQ       (0xFFFE)
#define PSB_BLC_MIN_PWM_REG_FREQ	(0x2)
#define PSB_BLC_PWM_PRECISION_FACTOR	(10)
#define PSB_BACKLIGHT_PWM_CTL_SHIFT	(16)
#define PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR (0xFFFE)
/* Per-encoder private data: a snapshot of the LVDS-related registers. */
struct cdv_intel_lvds_priv {
	/*
	 * Saved LVDS output states
	 */
	uint32_t savePP_ON;
	uint32_t savePP_OFF;
	uint32_t saveLVDS;
	uint32_t savePP_CONTROL;
	uint32_t savePP_CYCLE;
	uint32_t savePFIT_CONTROL;
	uint32_t savePFIT_PGM_RATIOS;
	uint32_t saveBLC_PWM_CTL;
};
/*
 * Returns the maximum level of the backlight duty cycle field.
 */
static u32 cdv_intel_lvds_get_max_backlight(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	u32 retval;

	if (gma_power_begin(dev, false)) {
		/* Device awake: read the live PWM control register. */
		retval = ((REG_READ(BLC_PWM_CTL) &
			  BACKLIGHT_MODULATION_FREQ_MASK) >>
			  BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;

		gma_power_end(dev);
	} else
		/* Device powered down: use the saved register copy instead. */
		retval = ((dev_priv->regs.saveBLC_PWM_CTL &
			  BACKLIGHT_MODULATION_FREQ_MASK) >>
			  BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;

	return retval;
}
/*
 * Sets the backlight level.
 *
 * level backlight level, from 0 to cdv_intel_lvds_get_max_backlight().
 */
static void cdv_intel_lvds_set_backlight(struct drm_device *dev, int level)
{
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	u32 blc_pwm_ctl;

	if (gma_power_begin(dev, false)) {
		/* Device awake: update the duty-cycle field of the live register. */
		blc_pwm_ctl =
			REG_READ(BLC_PWM_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK;
		REG_WRITE(BLC_PWM_CTL,
				(blc_pwm_ctl |
				(level << BACKLIGHT_DUTY_CYCLE_SHIFT)));
		gma_power_end(dev);
	} else {
		/* Device powered down: stage the value in the saved copy so it
		 * takes effect on the next register restore. */
		blc_pwm_ctl = dev_priv->regs.saveBLC_PWM_CTL &
				~BACKLIGHT_DUTY_CYCLE_MASK;
		dev_priv->regs.saveBLC_PWM_CTL = (blc_pwm_ctl |
					(level << BACKLIGHT_DUTY_CYCLE_SHIFT));
	}
}
/*
 * Sets the power state for the panel.
 */
static void cdv_intel_lvds_set_power(struct drm_device *dev,
				     struct drm_encoder *encoder, bool on)
{
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	u32 pp_status;

	if (!gma_power_begin(dev, true))
		return;

	if (on) {
		REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) |
			  POWER_TARGET_ON);
		/* NOTE(review): unbounded poll; assumes the panel-power
		 * sequencer always reaches PP_ON - no timeout here. */
		do {
			pp_status = REG_READ(PP_STATUS);
		} while ((pp_status & PP_ON) == 0);
		/* Panel is up: restore the last requested backlight level. */
		cdv_intel_lvds_set_backlight(dev,
				dev_priv->mode_dev.backlight_duty_cycle);
	} else {
		/* Kill the backlight first, then wait for panel power-off. */
		cdv_intel_lvds_set_backlight(dev, 0);

		REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) &
			  ~POWER_TARGET_ON);
		do {
			pp_status = REG_READ(PP_STATUS);
		} while (pp_status & PP_ON);
	}
	gma_power_end(dev);
}
static void cdv_intel_lvds_encoder_dpms(struct drm_encoder *encoder, int mode)
{
struct drm_device *dev = encoder->dev;
if (mode == DRM_MODE_DPMS_ON)
cdv_intel_lvds_set_power(dev, encoder, true);
else
cdv_intel_lvds_set_power(dev, encoder, false);
/* XXX: We never power down the LVDS pairs. */
}
/* Suspend hook: intentionally empty - nothing extra to save for CDV LVDS. */
static void cdv_intel_lvds_save(struct drm_connector *connector)
{
}
/* Resume hook: intentionally empty - nothing extra to restore for CDV LVDS. */
static void cdv_intel_lvds_restore(struct drm_connector *connector)
{
}
static enum drm_mode_status cdv_intel_lvds_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct drm_device *dev = connector->dev;
struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct drm_display_mode *fixed_mode =
dev_priv->mode_dev.panel_fixed_mode;
/* just in case */
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
return MODE_NO_DBLESCAN;
/* just in case */
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
return MODE_NO_INTERLACE;
if (fixed_mode) {
if (mode->hdisplay > fixed_mode->hdisplay)
return MODE_PANEL;
if (mode->vdisplay > fixed_mode->vdisplay)
return MODE_PANEL;
}
return MODE_OK;
}
/*
 * Fix up the adjusted mode before the modeset: reject pipe sharing and
 * force the panel's fixed timings (the pipe is always driven at the
 * native mode; the panel fitter scales the requested mode into it).
 */
static bool cdv_intel_lvds_mode_fixup(struct drm_encoder *encoder,
				  const struct drm_display_mode *mode,
				  struct drm_display_mode *adjusted_mode)
{
	struct drm_device *dev = encoder->dev;
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
	struct drm_encoder *tmp_encoder;
	struct drm_display_mode *panel_fixed_mode = mode_dev->panel_fixed_mode;

	/* Should never happen!! */
	list_for_each_entry(tmp_encoder, &dev->mode_config.encoder_list,
			    head) {
		if (tmp_encoder != encoder
		    && tmp_encoder->crtc == encoder->crtc) {
			pr_err("Can't enable LVDS and another encoder on the same pipe\n");
			return false;
		}
	}

	/*
	 * If we have timings from the BIOS for the panel, put them in
	 * to the adjusted mode. The CRTC will be set up for this mode,
	 * with the panel scaling set up to source from the H/VDisplay
	 * of the original mode.
	 */
	if (panel_fixed_mode != NULL) {
		adjusted_mode->hdisplay = panel_fixed_mode->hdisplay;
		adjusted_mode->hsync_start = panel_fixed_mode->hsync_start;
		adjusted_mode->hsync_end = panel_fixed_mode->hsync_end;
		adjusted_mode->htotal = panel_fixed_mode->htotal;
		adjusted_mode->vdisplay = panel_fixed_mode->vdisplay;
		adjusted_mode->vsync_start = panel_fixed_mode->vsync_start;
		adjusted_mode->vsync_end = panel_fixed_mode->vsync_end;
		adjusted_mode->vtotal = panel_fixed_mode->vtotal;
		adjusted_mode->clock = panel_fixed_mode->clock;
		/* Recompute the derived CRTC timing fields after the edit. */
		drm_mode_set_crtcinfo(adjusted_mode,
				      CRTC_INTERLACE_HALVE_V);
	}

	/*
	 * XXX: It would be nice to support lower refresh rates on the
	 * panels to reduce power consumption, and perhaps match the
	 * user's requested refresh rate.
	 */

	return true;
}
/*
 * Pre-modeset hook: save the current backlight level, then power the
 * panel down for the duration of the mode change.
 */
static void cdv_intel_lvds_prepare(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;

	if (!gma_power_begin(dev, true))
		return;

	/* Remember the current duty cycle so commit can restore it. */
	mode_dev->saveBLC_PWM_CTL = REG_READ(BLC_PWM_CTL);
	mode_dev->backlight_duty_cycle = (mode_dev->saveBLC_PWM_CTL &
					  BACKLIGHT_DUTY_CYCLE_MASK);

	cdv_intel_lvds_set_power(dev, encoder, false);

	gma_power_end(dev);
}
/*
 * Post-modeset hook: power the panel back up. If no usable backlight
 * level was saved (zero), fall back to the maximum duty cycle.
 */
static void cdv_intel_lvds_commit(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;

	if (mode_dev->backlight_duty_cycle == 0)
		mode_dev->backlight_duty_cycle =
		    cdv_intel_lvds_get_max_backlight(dev);

	cdv_intel_lvds_set_power(dev, encoder, true);
}
/*
 * Program the panel fitter for the new mode: enable auto-scaling only
 * when the requested mode differs from the panel's native resolution.
 */
static void cdv_intel_lvds_mode_set(struct drm_encoder *encoder,
				struct drm_display_mode *mode,
				struct drm_display_mode *adjusted_mode)
{
	struct drm_device *dev = encoder->dev;
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct gma_crtc *gma_crtc = to_gma_crtc(encoder->crtc);
	u32 pfit_control;

	/*
	 * The LVDS pin pair will already have been turned on in the
	 * cdv_intel_crtc_mode_set since it has a large impact on the DPLL
	 * settings.
	 */

	/*
	 * Enable automatic panel scaling so that non-native modes fill the
	 * screen.  Should be enabled before the pipe is enabled, according to
	 * register description and PRM.
	 */
	if (mode->hdisplay != adjusted_mode->hdisplay ||
	    mode->vdisplay != adjusted_mode->vdisplay)
		pfit_control = (PFIT_ENABLE | VERT_AUTO_SCALE |
				HORIZ_AUTO_SCALE | VERT_INTERP_BILINEAR |
				HORIZ_INTERP_BILINEAR);
	else
		pfit_control = 0;

	/* Route the fitter to the pipe this encoder's CRTC uses. */
	pfit_control |= gma_crtc->pipe << PFIT_PIPE_SHIFT;

	if (dev_priv->lvds_dither)
		pfit_control |= PANEL_8TO6_DITHER_ENABLE;

	REG_WRITE(PFIT_CONTROL, pfit_control);
}
/*
 * Return the list of DDC modes if available, or the BIOS fixed mode otherwise.
 *
 * Returns the number of modes added to the connector's probed list.
 */
static int cdv_intel_lvds_get_modes(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
	int ret;

	/* Prefer EDID modes read over the DDC bus. */
	ret = psb_intel_ddc_get_modes(connector, connector->ddc);

	if (ret)
		return ret;

	/* No EDID: fall back to the fixed mode taken from the VBT/BIOS. */
	if (mode_dev->panel_fixed_mode != NULL) {
		struct drm_display_mode *mode =
		    drm_mode_duplicate(dev, mode_dev->panel_fixed_mode);

		/* drm_mode_duplicate() returns NULL on allocation failure;
		 * don't hand a NULL mode to drm_mode_probed_add(). */
		if (!mode)
			return 0;

		drm_mode_probed_add(connector, mode);
		return 1;
	}

	return 0;
}
/*
 * Tear down the LVDS connector: release both I2C buses (DDC and the
 * backlight controller bus) before freeing the connector wrapper.
 */
static void cdv_intel_lvds_destroy(struct drm_connector *connector)
{
	struct gma_connector *gma_connector = to_gma_connector(connector);
	struct gma_encoder *gma_encoder = gma_attached_encoder(connector);

	gma_i2c_destroy(to_gma_i2c_chan(connector->ddc));
	gma_i2c_destroy(gma_encoder->i2c_bus);
	drm_connector_cleanup(connector);
	kfree(gma_connector);
}
/*
 * Handle the "scaling mode", "backlight" and "DPMS" connector properties.
 *
 * Returns 0 on success or a negative errno. (The previous code returned
 * the bare value -1, which userspace would interpret as -EPERM; these
 * failures are invalid-argument conditions, so report -EINVAL.)
 */
static int cdv_intel_lvds_set_property(struct drm_connector *connector,
				       struct drm_property *property,
				       uint64_t value)
{
	struct drm_encoder *encoder = connector->encoder;

	if (!strcmp(property->name, "scaling mode") && encoder) {
		struct gma_crtc *crtc = to_gma_crtc(encoder->crtc);
		uint64_t curValue;

		if (!crtc)
			return -EINVAL;

		/* Only these three scaling modes are supported. */
		switch (value) {
		case DRM_MODE_SCALE_FULLSCREEN:
		case DRM_MODE_SCALE_NO_SCALE:
		case DRM_MODE_SCALE_ASPECT:
			break;
		default:
			return -EINVAL;
		}

		if (drm_object_property_get_value(&connector->base,
						  property,
						  &curValue))
			return -EINVAL;

		/* No change requested - nothing to do. */
		if (curValue == value)
			return 0;

		if (drm_object_property_set_value(&connector->base,
						  property,
						  value))
			return -EINVAL;

		/*
		 * Re-run the modeset with the saved mode so the panel
		 * fitter picks up the new scaling mode.
		 */
		if (crtc->saved_mode.hdisplay != 0 &&
		    crtc->saved_mode.vdisplay != 0) {
			if (!drm_crtc_helper_set_mode(encoder->crtc,
						      &crtc->saved_mode,
						      encoder->crtc->x,
						      encoder->crtc->y,
						      encoder->crtc->primary->fb))
				return -EINVAL;
		}
	} else if (!strcmp(property->name, "backlight") && encoder) {
		if (drm_object_property_set_value(&connector->base,
						  property,
						  value))
			return -EINVAL;
		gma_backlight_set(encoder->dev, value);
	} else if (!strcmp(property->name, "DPMS") && encoder) {
		const struct drm_encoder_helper_funcs *helpers =
					encoder->helper_private;
		helpers->dpms(encoder, value);
	}
	return 0;
}
/* Encoder helper vtable for the CDV LVDS output. */
static const struct drm_encoder_helper_funcs
					cdv_intel_lvds_helper_funcs = {
	.dpms = cdv_intel_lvds_encoder_dpms,
	.mode_fixup = cdv_intel_lvds_mode_fixup,
	.prepare = cdv_intel_lvds_prepare,
	.mode_set = cdv_intel_lvds_mode_set,
	.commit = cdv_intel_lvds_commit,
};
/* Connector helper vtable for the CDV LVDS output. */
static const struct drm_connector_helper_funcs
				cdv_intel_lvds_connector_helper_funcs = {
	.get_modes = cdv_intel_lvds_get_modes,
	.mode_valid = cdv_intel_lvds_mode_valid,
	.best_encoder = gma_best_encoder,
};
/* Connector vtable for the CDV LVDS output. */
static const struct drm_connector_funcs cdv_intel_lvds_connector_funcs = {
	.dpms = drm_helper_connector_dpms,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.set_property = cdv_intel_lvds_set_property,
	.destroy = cdv_intel_lvds_destroy,
};
/*
 * Enumerate the child dev array parsed from VBT to check whether
 * the LVDS is present.
 * If it is present, return true.
 * If it is not present, return false.
 * If no child dev is parsed from VBT, it assumes that the LVDS is present.
 */
static bool lvds_is_present_in_vbt(struct drm_device *dev,
				   u8 *i2c_pin)
{
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	int i;

	/* No VBT child devices parsed: assume a panel is present. */
	if (!dev_priv->child_dev_num)
		return true;

	for (i = 0; i < dev_priv->child_dev_num; i++) {
		struct child_device_config *child = dev_priv->child_dev + i;

		/* If the device type is not LFP, continue.
		 * We have to check both the new identifiers as well as the
		 * old for compatibility with some BIOSes.
		 */
		if (child->device_type != DEVICE_TYPE_INT_LFP &&
		    child->device_type != DEVICE_TYPE_LFP)
			continue;

		/* Report the panel's I2C pin back to the caller if given. */
		if (child->i2c_pin)
			*i2c_pin = child->i2c_pin;

		/* However, we cannot trust the BIOS writers to populate
		 * the VBT correctly.  Since LVDS requires additional
		 * information from AIM blocks, a non-zero addin offset is
		 * a good indicator that the LVDS is actually present.
		 */
		if (child->addin_offset)
			return true;

		/* But even then some BIOS writers perform some black magic
		 * and instantiate the device without reference to any
		 * additional data.  Trust that if the VBT was written into
		 * the OpRegion then they have validated the LVDS's existence.
		 */
		if (dev_priv->opregion.vbt)
			return true;
	}

	return false;
}
/**
 * cdv_intel_lvds_init - setup LVDS connectors on this device
 * @dev: drm device
 * @mode_dev: PSB mode device
 *
 * Create the connector, register the LVDS DDC bus, and try to figure out what
 * modes we can display on the LVDS panel (if present).
 */
void cdv_intel_lvds_init(struct drm_device *dev,
		     struct psb_intel_mode_device *mode_dev)
{
	struct gma_encoder *gma_encoder;
	struct gma_connector *gma_connector;
	struct cdv_intel_lvds_priv *lvds_priv;
	struct drm_connector *connector;
	struct drm_encoder *encoder;
	struct drm_display_mode *scan;
	struct drm_crtc *crtc;
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct gma_i2c_chan *ddc_bus;
	u32 lvds;
	int pipe;
	int ret;
	u8 pin;

	if (!dev_priv->lvds_enabled_in_vbt)
		return;

	/* Bail early unless the VBT says an internal panel exists. */
	pin = GMBUS_PORT_PANEL;
	if (!lvds_is_present_in_vbt(dev, &pin)) {
		DRM_DEBUG_KMS("LVDS is not present in VBT\n");
		return;
	}

	gma_encoder = kzalloc(sizeof(struct gma_encoder),
				    GFP_KERNEL);
	if (!gma_encoder)
		return;

	gma_connector = kzalloc(sizeof(struct gma_connector),
				      GFP_KERNEL);
	if (!gma_connector)
		goto err_free_encoder;

	lvds_priv = kzalloc(sizeof(struct cdv_intel_lvds_priv), GFP_KERNEL);
	if (!lvds_priv)
		goto err_free_connector;

	gma_encoder->dev_priv = lvds_priv;

	connector = &gma_connector->base;
	gma_connector->save = cdv_intel_lvds_save;
	gma_connector->restore = cdv_intel_lvds_restore;
	encoder = &gma_encoder->base;

	/* Set up the DDC bus. */
	ddc_bus = gma_i2c_create(dev, GPIOC, "LVDSDDC_C");
	if (!ddc_bus) {
		dev_printk(KERN_ERR, dev->dev,
			   "DDC bus registration " "failed.\n");
		goto err_free_lvds_priv;
	}

	ret = drm_connector_init_with_ddc(dev, connector,
					  &cdv_intel_lvds_connector_funcs,
					  DRM_MODE_CONNECTOR_LVDS,
					  &ddc_bus->base);
	if (ret)
		goto err_destroy_ddc;

	ret = drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_LVDS);
	if (ret)
		goto err_connector_cleanup;

	gma_connector_attach_encoder(gma_connector, gma_encoder);
	gma_encoder->type = INTEL_OUTPUT_LVDS;

	drm_encoder_helper_add(encoder, &cdv_intel_lvds_helper_funcs);
	drm_connector_helper_add(connector,
				 &cdv_intel_lvds_connector_helper_funcs);
	connector->display_info.subpixel_order = SubPixelHorizontalRGB;
	connector->interlace_allowed = false;
	connector->doublescan_allowed = false;

	/*Attach connector properties*/
	drm_object_attach_property(&connector->base,
				      dev->mode_config.scaling_mode_property,
				      DRM_MODE_SCALE_FULLSCREEN);
	drm_object_attach_property(&connector->base,
				      dev_priv->backlight_property,
				      BRIGHTNESS_MAX_LEVEL);

	/**
	 * Set up I2C bus
	 * FIXME: destroy i2c_bus on exit
	 */
	gma_encoder->i2c_bus = gma_i2c_create(dev, GPIOB, "LVDSBLC_B");
	if (!gma_encoder->i2c_bus) {
		dev_printk(KERN_ERR,
			dev->dev, "I2C bus registration failed.\n");
		goto err_encoder_cleanup;
	}
	gma_encoder->i2c_bus->slave_addr = 0x2C;
	dev_priv->lvds_i2c_bus = gma_encoder->i2c_bus;

	/*
	 * LVDS discovery:
	 * 1) check for EDID on DDC
	 * 2) check for VBT data
	 * 3) check to see if LVDS is already on
	 *    if none of the above, no panel
	 * 4) make sure lid is open
	 *    if closed, act like it's not there for now
	 */

	/*
	 * Attempt to get the fixed panel mode from DDC.  Assume that the
	 * preferred mode is the right one.
	 */
	mutex_lock(&dev->mode_config.mutex);
	psb_intel_ddc_get_modes(connector, &ddc_bus->base);
	list_for_each_entry(scan, &connector->probed_modes, head) {
		if (scan->type & DRM_MODE_TYPE_PREFERRED) {
			mode_dev->panel_fixed_mode =
			    drm_mode_duplicate(dev, scan);
			goto out;	/* FIXME: check for quirks */
		}
	}

	/* Failed to get EDID, what about VBT? do we need this?*/
	if (dev_priv->lfp_lvds_vbt_mode) {
		mode_dev->panel_fixed_mode =
			drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
		if (mode_dev->panel_fixed_mode) {
			mode_dev->panel_fixed_mode->type |=
				DRM_MODE_TYPE_PREFERRED;
			goto out;	/* FIXME: check for quirks */
		}
	}
	/*
	 * If we didn't get EDID, try checking if the panel is already turned
	 * on.	If so, assume that whatever is currently programmed is the
	 * correct mode.
	 */
	lvds = REG_READ(LVDS);
	pipe = (lvds & LVDS_PIPEB_SELECT) ? 1 : 0;
	crtc = psb_intel_get_crtc_from_pipe(dev, pipe);

	if (crtc && (lvds & LVDS_PORT_EN)) {
		mode_dev->panel_fixed_mode =
		    cdv_intel_crtc_mode_get(dev, crtc);
		if (mode_dev->panel_fixed_mode) {
			mode_dev->panel_fixed_mode->type |=
			    DRM_MODE_TYPE_PREFERRED;
			goto out;	/* FIXME: check for quirks */
		}
	}

	/* If we still don't have a mode after all that, give up. */
	if (!mode_dev->panel_fixed_mode) {
		DRM_DEBUG
			("Found no modes on the lvds, ignoring the LVDS\n");
		goto err_unlock;
	}

	/* setup PWM: route the backlight PWM to whichever pipe drives LVDS */
	{
		u32 pwm;

		pwm = REG_READ(BLC_PWM_CTL2);
		if (pipe == 1)
			pwm |= PWM_PIPE_B;
		else
			pwm &= ~PWM_PIPE_B;
		pwm |= PWM_ENABLE;
		REG_WRITE(BLC_PWM_CTL2, pwm);
	}

out:
	mutex_unlock(&dev->mode_config.mutex);
	return;

/* Unwind in reverse order of acquisition. */
err_unlock:
	mutex_unlock(&dev->mode_config.mutex);
	gma_i2c_destroy(gma_encoder->i2c_bus);
err_encoder_cleanup:
	drm_encoder_cleanup(encoder);
err_connector_cleanup:
	drm_connector_cleanup(connector);
err_destroy_ddc:
	gma_i2c_destroy(ddc_bus);
err_free_lvds_priv:
	kfree(lvds_priv);
err_free_connector:
	kfree(gma_connector);
err_free_encoder:
	kfree(gma_encoder);
}
| linux-master | drivers/gpu/drm/gma500/cdv_intel_lvds.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright © 2006-2011 Intel Corporation
*
* Authors:
* Eric Anholt <[email protected]>
*/
#include <linux/delay.h>
#include <linux/i2c.h>
#include <drm/drm_crtc.h>
#include <drm/drm_modeset_helper_vtables.h>
#include "cdv_device.h"
#include "framebuffer.h"
#include "gma_display.h"
#include "power.h"
#include "psb_drv.h"
#include "psb_intel_drv.h"
#include "psb_intel_reg.h"
static bool cdv_intel_find_dp_pll(const struct gma_limit_t *limit,
struct drm_crtc *crtc, int target,
int refclk, struct gma_clock_t *best_clock);
#define CDV_LIMIT_SINGLE_LVDS_96 0
#define CDV_LIMIT_SINGLE_LVDS_100 1
#define CDV_LIMIT_DAC_HDMI_27 2
#define CDV_LIMIT_DAC_HDMI_96 3
#define CDV_LIMIT_DP_27 4
#define CDV_LIMIT_DP_100 5
/*
 * PLL divider limits per output type and reference clock; indexed by the
 * CDV_LIMIT_* constants above. DP entries use a fixed-table PLL search,
 * everything else uses the generic best-PLL search.
 */
static const struct gma_limit_t cdv_intel_limits[] = {
	{			/* CDV_SINGLE_LVDS_96MHz */
	 .dot = {.min = 20000, .max = 115500},
	 .vco = {.min = 1800000, .max = 3600000},
	 .n = {.min = 2, .max = 6},
	 .m = {.min = 60, .max = 160},
	 .m1 = {.min = 0, .max = 0},
	 .m2 = {.min = 58, .max = 158},
	 .p = {.min = 28, .max = 140},
	 .p1 = {.min = 2, .max = 10},
	 .p2 = {.dot_limit = 200000, .p2_slow = 14, .p2_fast = 14},
	 .find_pll = gma_find_best_pll,
	 },
	{			/* CDV_SINGLE_LVDS_100MHz */
	 .dot = {.min = 20000, .max = 115500},
	 .vco = {.min = 1800000, .max = 3600000},
	 .n = {.min = 2, .max = 6},
	 .m = {.min = 60, .max = 160},
	 .m1 = {.min = 0, .max = 0},
	 .m2 = {.min = 58, .max = 158},
	 .p = {.min = 28, .max = 140},
	 .p1 = {.min = 2, .max = 10},
	 /* The single-channel range is 25-112Mhz, and dual-channel
	  * is 80-224Mhz.  Prefer single channel as much as possible.
	  */
	 .p2 = {.dot_limit = 200000, .p2_slow = 14, .p2_fast = 14},
	 .find_pll = gma_find_best_pll,
	 },
	{			/* CDV_DAC_HDMI_27MHz */
	 .dot = {.min = 20000, .max = 400000},
	 .vco = {.min = 1809000, .max = 3564000},
	 .n = {.min = 1, .max = 1},
	 .m = {.min = 67, .max = 132},
	 .m1 = {.min = 0, .max = 0},
	 .m2 = {.min = 65, .max = 130},
	 .p = {.min = 5, .max = 90},
	 .p1 = {.min = 1, .max = 9},
	 .p2 = {.dot_limit = 225000, .p2_slow = 10, .p2_fast = 5},
	 .find_pll = gma_find_best_pll,
	 },
	{			/* CDV_DAC_HDMI_96MHz */
	 .dot = {.min = 20000, .max = 400000},
	 .vco = {.min = 1800000, .max = 3600000},
	 .n = {.min = 2, .max = 6},
	 .m = {.min = 60, .max = 160},
	 .m1 = {.min = 0, .max = 0},
	 .m2 = {.min = 58, .max = 158},
	 .p = {.min = 5, .max = 100},
	 .p1 = {.min = 1, .max = 10},
	 .p2 = {.dot_limit = 225000, .p2_slow = 10, .p2_fast = 5},
	 .find_pll = gma_find_best_pll,
	 },
	{			/* CDV_DP_27MHz */
	 .dot = {.min = 160000, .max = 272000},
	 .vco = {.min = 1809000, .max = 3564000},
	 .n = {.min = 1, .max = 1},
	 .m = {.min = 67, .max = 132},
	 .m1 = {.min = 0, .max = 0},
	 .m2 = {.min = 65, .max = 130},
	 .p = {.min = 5, .max = 90},
	 .p1 = {.min = 1, .max = 9},
	 .p2 = {.dot_limit = 225000, .p2_slow = 10, .p2_fast = 10},
	 .find_pll = cdv_intel_find_dp_pll,
	 },
	{			/* CDV_DP_100MHz */
	 .dot = {.min = 160000, .max = 272000},
	 .vco = {.min = 1800000, .max = 3600000},
	 .n = {.min = 2, .max = 6},
	 .m = {.min = 60, .max = 164},
	 .m1 = {.min = 0, .max = 0},
	 .m2 = {.min = 58, .max = 162},
	 .p = {.min = 5, .max = 100},
	 .p1 = {.min = 1, .max = 10},
	 .p2 = {.dot_limit = 225000, .p2_slow = 10, .p2_fast = 10},
	 .find_pll = cdv_intel_find_dp_pll,
	 }
};
/*
 * Poll until COND becomes true or MS milliseconds elapse, sleeping W ms
 * between polls (busy-spins when W is 0 or when running under the kernel
 * debugger). Evaluates to 0 on success or -ETIMEDOUT on timeout.
 */
#define _wait_for(COND, MS, W) ({ \
	unsigned long timeout__ = jiffies + msecs_to_jiffies(MS);	\
	int ret__ = 0;							\
	while (!(COND)) {						\
		if (time_after(jiffies, timeout__)) {			\
			ret__ = -ETIMEDOUT;				\
			break;						\
		}							\
		if (W && !in_dbg_master())				\
			msleep(W);					\
	}								\
	ret__;								\
})

/* Convenience wrapper: poll COND with a 1 ms sleep between iterations. */
#define wait_for(COND, MS) _wait_for(COND, MS, 1)
/*
 * Read a DPLL register over the sideband bus: wait for the bus to idle,
 * issue the read packet, wait for completion, then fetch the data word.
 * Returns 0 on success or -ETIMEDOUT if the bus never went idle.
 */
int cdv_sb_read(struct drm_device *dev, u32 reg, u32 *val)
{
	int ret;

	ret = wait_for((REG_READ(SB_PCKT) & SB_BUSY) == 0, 1000);
	if (ret) {
		DRM_ERROR("timeout waiting for SB to idle before read\n");
		return ret;
	}

	REG_WRITE(SB_ADDR, reg);
	REG_WRITE(SB_PCKT,
		   SET_FIELD(SB_OPCODE_READ, SB_OPCODE) |
		   SET_FIELD(SB_DEST_DPLL, SB_DEST) |
		   SET_FIELD(0xf, SB_BYTE_ENABLE));

	ret = wait_for((REG_READ(SB_PCKT) & SB_BUSY) == 0, 1000);
	if (ret) {
		DRM_ERROR("timeout waiting for SB to idle after read\n");
		return ret;
	}

	*val = REG_READ(SB_DATA);

	return 0;
}
/*
 * Write a DPLL register over the sideband bus (mirror of cdv_sb_read).
 * With dpio_debug set, the register is read back before and after the
 * write and both values are logged via DRM_DEBUG_KMS.
 * Returns 0 on success or -ETIMEDOUT if the bus never went idle.
 */
int cdv_sb_write(struct drm_device *dev, u32 reg, u32 val)
{
	int ret;
	static bool dpio_debug = true;
	u32 temp;

	/* Debug readback of the current value before the write. */
	if (dpio_debug) {
		if (cdv_sb_read(dev, reg, &temp) == 0)
			DRM_DEBUG_KMS("0x%08x: 0x%08x (before)\n", reg, temp);
		DRM_DEBUG_KMS("0x%08x: 0x%08x\n", reg, val);
	}

	ret = wait_for((REG_READ(SB_PCKT) & SB_BUSY) == 0, 1000);
	if (ret) {
		DRM_ERROR("timeout waiting for SB to idle before write\n");
		return ret;
	}

	REG_WRITE(SB_ADDR, reg);
	REG_WRITE(SB_DATA, val);
	REG_WRITE(SB_PCKT,
		   SET_FIELD(SB_OPCODE_WRITE, SB_OPCODE) |
		   SET_FIELD(SB_DEST_DPLL, SB_DEST) |
		   SET_FIELD(0xf, SB_BYTE_ENABLE));

	ret = wait_for((REG_READ(SB_PCKT) & SB_BUSY) == 0, 1000);
	if (ret) {
		DRM_ERROR("timeout waiting for SB to idle after write\n");
		return ret;
	}

	/* Debug readback to confirm the write took effect. */
	if (dpio_debug) {
		if (cdv_sb_read(dev, reg, &temp) == 0)
			DRM_DEBUG_KMS("0x%08x: 0x%08x (after)\n", reg, temp);
	}

	return 0;
}
/* Reset the DPIO configuration register. The BIOS does this at every
 * mode set.
 */
void cdv_sb_reset(struct drm_device *dev)
{

	REG_WRITE(DPIO_CFG, 0);
	/* Posting read to flush the write before re-enabling. */
	REG_READ(DPIO_CFG);
	REG_WRITE(DPIO_CFG, DPIO_MODE_SELECT_0 | DPIO_CMN_RESET_N);
}
/* Unlike most Intel display engines, on Cedarview the DPLL registers
 * are behind this sideband bus.  They must be programmed while the
 * DPLL reference clock is on in the DPLL control register, but before
 * the DPLL is enabled in the DPLL control register.
 */
static int
cdv_dpll_set_clock_cdv(struct drm_device *dev, struct drm_crtc *crtc,
		       struct gma_clock_t *clock, bool is_lvds, u32 ddi_select)
{
	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
	int pipe = gma_crtc->pipe;
	u32 m, n_vco, p;
	int ret = 0;
	int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
	int ref_sfr = (pipe == 0) ? SB_REF_DPLLA : SB_REF_DPLLB;
	u32 ref_value;
	u32 lane_reg, lane_value;

	cdv_sb_reset(dev);

	/* Turn on the reference clock only; the PLL itself stays disabled
	 * until all sideband programming below is complete. */
	REG_WRITE(dpll_reg, DPLL_SYNCLOCK_ENABLE | DPLL_VGA_MODE_DIS);

	udelay(100);

	/* Follow the BIOS and write the REF/SFR Register. Hardcoded value */
	ref_value = 0x68A701;

	cdv_sb_write(dev, SB_REF_SFR(pipe), ref_value);

	/* We don't know what the other fields of these regs are, so
	 * leave them in place.
	 */
	/*
	 * The BIT 14:13 of 0x8010/0x8030 is used to select the ref clk
	 * for the pipe A/B. Display spec 1.06 has wrong definition.
	 * Correct definition is like below:
	 *
	 * refclka mean use clock from same PLL
	 *
	 * if DPLLA sets 01 and DPLLB sets 01, they use clock from their pll
	 *
	 * if DPLLA sets 01 and DPLLB sets 02, both use clk from DPLLA
	 *
	 */
	ret = cdv_sb_read(dev, ref_sfr, &ref_value);
	if (ret)
		return ret;
	ref_value &= ~(REF_CLK_MASK);

	/* use DPLL_A for pipeB on CRT/HDMI */
	if (pipe == 1 && !is_lvds && !(ddi_select & DP_MASK)) {
		DRM_DEBUG_KMS("use DPLLA for pipe B\n");
		ref_value |= REF_CLK_DPLLA;
	} else {
		DRM_DEBUG_KMS("use their DPLL for pipe A/B\n");
		ref_value |= REF_CLK_DPLL;
	}
	ret = cdv_sb_write(dev, ref_sfr, ref_value);
	if (ret)
		return ret;

	/* Program the M2 divider (M1 is reserved/zero on CDV). */
	ret = cdv_sb_read(dev, SB_M(pipe), &m);
	if (ret)
		return ret;
	m &= ~SB_M_DIVIDER_MASK;
	m |= ((clock->m2) << SB_M_DIVIDER_SHIFT);
	ret = cdv_sb_write(dev, SB_M(pipe), m);
	if (ret)
		return ret;

	ret = cdv_sb_read(dev, SB_N_VCO(pipe), &n_vco);
	if (ret)
		return ret;

	/* Follow the BIOS to program the N_DIVIDER REG */
	n_vco &= 0xFFFF;
	n_vco |= 0x107;
	n_vco &= ~(SB_N_VCO_SEL_MASK |
		   SB_N_DIVIDER_MASK |
		   SB_N_CB_TUNE_MASK);

	n_vco |= ((clock->n) << SB_N_DIVIDER_SHIFT);

	/* Pick the VCO band and charge-pump tune for the target frequency. */
	if (clock->vco < 2250000) {
		n_vco |= (2 << SB_N_CB_TUNE_SHIFT);
		n_vco |= (0 << SB_N_VCO_SEL_SHIFT);
	} else if (clock->vco < 2750000) {
		n_vco |= (1 << SB_N_CB_TUNE_SHIFT);
		n_vco |= (1 << SB_N_VCO_SEL_SHIFT);
	} else if (clock->vco < 3300000) {
		n_vco |= (0 << SB_N_CB_TUNE_SHIFT);
		n_vco |= (2 << SB_N_VCO_SEL_SHIFT);
	} else {
		n_vco |= (0 << SB_N_CB_TUNE_SHIFT);
		n_vco |= (3 << SB_N_VCO_SEL_SHIFT);
	}

	ret = cdv_sb_write(dev, SB_N_VCO(pipe), n_vco);
	if (ret)
		return ret;

	/* Program the P1/P2 post dividers. */
	ret = cdv_sb_read(dev, SB_P(pipe), &p);
	if (ret)
		return ret;
	p &= ~(SB_P2_DIVIDER_MASK | SB_P1_DIVIDER_MASK);
	p |= SET_FIELD(clock->p1, SB_P1_DIVIDER);
	switch (clock->p2) {
	case 5:
		p |= SET_FIELD(SB_P2_5, SB_P2_DIVIDER);
		break;
	case 10:
		p |= SET_FIELD(SB_P2_10, SB_P2_DIVIDER);
		break;
	case 14:
		p |= SET_FIELD(SB_P2_14, SB_P2_DIVIDER);
		break;
	case 7:
		p |= SET_FIELD(SB_P2_7, SB_P2_DIVIDER);
		break;
	default:
		DRM_ERROR("Bad P2 clock: %d\n", clock->p2);
		return -EINVAL;
	}
	ret = cdv_sb_write(dev, SB_P(pipe), p);
	if (ret)
		return ret;

	/* Attach the digital lanes (DDI0 uses lanes 0/1, DDI1 lanes 2/3)
	 * to this pipe's PLL when a digital output is in use. */
	if (ddi_select) {
		if ((ddi_select & DDI_MASK) == DDI0_SELECT) {
			lane_reg = PSB_LANE0;
			cdv_sb_read(dev, lane_reg, &lane_value);
			lane_value &= ~(LANE_PLL_MASK);
			lane_value |= LANE_PLL_ENABLE | LANE_PLL_PIPE(pipe);
			cdv_sb_write(dev, lane_reg, lane_value);

			lane_reg = PSB_LANE1;
			cdv_sb_read(dev, lane_reg, &lane_value);
			lane_value &= ~(LANE_PLL_MASK);
			lane_value |= LANE_PLL_ENABLE | LANE_PLL_PIPE(pipe);
			cdv_sb_write(dev, lane_reg, lane_value);
		} else {
			lane_reg = PSB_LANE2;
			cdv_sb_read(dev, lane_reg, &lane_value);
			lane_value &= ~(LANE_PLL_MASK);
			lane_value |= LANE_PLL_ENABLE | LANE_PLL_PIPE(pipe);
			cdv_sb_write(dev, lane_reg, lane_value);

			lane_reg = PSB_LANE3;
			cdv_sb_read(dev, lane_reg, &lane_value);
			lane_value &= ~(LANE_PLL_MASK);
			lane_value |= LANE_PLL_ENABLE | LANE_PLL_PIPE(pipe);
			cdv_sb_write(dev, lane_reg, lane_value);
		}
	}
	return 0;
}
/*
 * Select the PLL limit table for this CRTC's output type and reference
 * clock. LVDS is single-channel only on CDV; DP/eDP use the fixed DP
 * tables; everything else (CRT/HDMI) uses the DAC/HDMI tables.
 */
static const struct gma_limit_t *cdv_intel_limit(struct drm_crtc *crtc,
						 int refclk)
{
	int idx;

	if (gma_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
		/*
		 * Now only single-channel LVDS is supported on CDV. If it is
		 * incorrect, please add the dual-channel LVDS.
		 */
		idx = (refclk == 96000) ? CDV_LIMIT_SINGLE_LVDS_96
					: CDV_LIMIT_SINGLE_LVDS_100;
	} else if (gma_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
		   gma_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) {
		idx = (refclk == 27000) ? CDV_LIMIT_DP_27 : CDV_LIMIT_DP_100;
	} else {
		idx = (refclk == 27000) ? CDV_LIMIT_DAC_HDMI_27
					: CDV_LIMIT_DAC_HDMI_96;
	}

	return &cdv_intel_limits[idx];
}
/* m1 is reserved as 0 in CDV, n is a ring counter */
static void cdv_intel_clock(int refclk, struct gma_clock_t *clock)
{
	int m = clock->m2 + 2;
	int p = clock->p1 * clock->p2;

	clock->m = m;
	clock->p = p;
	clock->vco = refclk * m / clock->n;
	clock->dot = clock->vco / p;
}
/*
 * PLL search for DP/eDP: no actual search - pick one of the fixed
 * divider sets (taken from the BIOS) based on the reference clock and
 * whether the link runs below or at/above 200 MHz, then derive the
 * composite m/p/vco/dot values via the platform clock callback.
 */
static bool cdv_intel_find_dp_pll(const struct gma_limit_t *limit,
				  struct drm_crtc *crtc, int target,
				  int refclk,
				  struct gma_clock_t *best_clock)
{
	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
	struct gma_clock_t clock;

	memset(&clock, 0, sizeof(clock));

	switch (refclk) {
	case 27000:
		if (target < 200000) {
			clock.p1 = 2;
			clock.p2 = 10;
			clock.n = 1;
			clock.m1 = 0;
			clock.m2 = 118;
		} else {
			clock.p1 = 1;
			clock.p2 = 10;
			clock.n = 1;
			clock.m1 = 0;
			clock.m2 = 98;
		}
		break;

	case 100000:
		if (target < 200000) {
			clock.p1 = 2;
			clock.p2 = 10;
			clock.n = 5;
			clock.m1 = 0;
			clock.m2 = 160;
		} else {
			clock.p1 = 1;
			clock.p2 = 10;
			clock.n = 5;
			clock.m1 = 0;
			clock.m2 = 133;
		}
		break;

	default:
		/* Only 27 MHz and 100 MHz reference clocks are supported. */
		return false;
	}

	gma_crtc->clock_funcs->clock(refclk, &clock);
	memcpy(best_clock, &clock, sizeof(struct gma_clock_t));
	return true;
}
#define FIFO_PIPEA (1 << 0)
#define FIFO_PIPEB (1 << 1)
/*
 * A pipe counts as enabled when its CRTC is active and has a framebuffer
 * bound to the primary plane.
 */
static bool cdv_intel_pipe_enabled(struct drm_device *dev, int pipe)
{
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);

	return crtc->primary->fb != NULL && gma_crtc->active;
}
/*
 * Disable display self-refresh if it is currently enabled, waiting a
 * vblank for the change to settle.
 */
void cdv_disable_sr(struct drm_device *dev)
{
	if (REG_READ(FW_BLC_SELF) & FW_BLC_SELF_EN) {

		/* Disable self-refresh before adjust WM */
		REG_WRITE(FW_BLC_SELF, (REG_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN));
		REG_READ(FW_BLC_SELF);

		gma_wait_for_vblank(dev);

		/* Cedarview workaround to write ovelay plane, which force to leave
		 * MAX_FIFO state.
		 */
		REG_WRITE(OV_OVADD, 0/*dev_priv->ovl_offset*/);
		REG_READ(OV_OVADD);

		gma_wait_for_vblank(dev);
	}

}
/*
 * Reprogram the display FIFO watermarks. With exactly one pipe active,
 * self-refresh watermarks are used and self-refresh is enabled; with
 * both (or neither) pipes active, HW-recommended defaults are written
 * and self-refresh is disabled.
 */
void cdv_update_wm(struct drm_device *dev, struct drm_crtc *crtc)
{
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);

	/* Is only one pipe enabled? */
	if (cdv_intel_pipe_enabled(dev, 0) ^ cdv_intel_pipe_enabled(dev, 1)) {
		u32 fw;

		fw = REG_READ(DSPFW1);
		fw &= ~DSP_FIFO_SR_WM_MASK;
		fw |= (0x7e << DSP_FIFO_SR_WM_SHIFT);
		fw &= ~CURSOR_B_FIFO_WM_MASK;
		fw |= (0x4 << CURSOR_B_FIFO_WM_SHIFT);
		REG_WRITE(DSPFW1, fw);

		fw = REG_READ(DSPFW2);
		fw &= ~CURSOR_A_FIFO_WM_MASK;
		fw |= (0x6 << CURSOR_A_FIFO_WM_SHIFT);
		fw &= ~DSP_PLANE_C_FIFO_WM_MASK;
		fw |= (0x8 << DSP_PLANE_C_FIFO_WM_SHIFT);
		REG_WRITE(DSPFW2, fw);

		REG_WRITE(DSPFW3, 0x36000000);

		/* ignore FW4 */

		/* Is pipe b lvds ? */
		if (gma_crtc->pipe == 1 &&
		    gma_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
			REG_WRITE(DSPFW5, 0x00040330);
		} else {
			fw = (3 << DSP_PLANE_B_FIFO_WM1_SHIFT) |
			     (4 << DSP_PLANE_A_FIFO_WM1_SHIFT) |
			     (3 << CURSOR_B_FIFO_WM1_SHIFT) |
			     (4 << CURSOR_FIFO_SR_WM1_SHIFT);
			REG_WRITE(DSPFW5, fw);
		}

		REG_WRITE(DSPFW6, 0x10);

		gma_wait_for_vblank(dev);

		/* enable self-refresh for single pipe active */
		REG_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
		REG_READ(FW_BLC_SELF);
		gma_wait_for_vblank(dev);

	} else {

		/* HW team suggested values... */
		REG_WRITE(DSPFW1, 0x3f880808);
		REG_WRITE(DSPFW2, 0x0b020202);
		REG_WRITE(DSPFW3, 0x24000000);
		REG_WRITE(DSPFW4, 0x08030202);
		REG_WRITE(DSPFW5, 0x01010101);
		REG_WRITE(DSPFW6, 0x1d0);

		gma_wait_for_vblank(dev);

		dev_priv->ops->disable_sr(dev);
	}
}
/*
 * Return the pipe currently connected to the panel fitter,
 * or -1 if the panel fitter is not present or not in use
 */
static int cdv_intel_panel_fitter_pipe(struct drm_device *dev)
{
	u32 pfit_control;

	pfit_control = REG_READ(PFIT_CONTROL);

	/* See if the panel fitter is in use */
	if ((pfit_control & PFIT_ENABLE) == 0)
		return -1;

	/* Bits 30:29 of PFIT_CONTROL select the pipe the fitter feeds. */
	return (pfit_control >> 29) & 0x3;
}
/*
 * cdv_intel_crtc_mode_set - program a display mode on a Cedarview pipe
 *
 * Picks a reference clock based on the attached output type, finds PLL
 * divisors, then programs the DPLL, pipe timings, pipe source size and
 * display plane for @adjusted_mode. Returns -EBUSY if the DPLL never
 * reports lock. NOTE(review): an invalid output type or a failed PLL
 * search logs an error but still returns 0.
 */
static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
			       struct drm_display_mode *mode,
			       struct drm_display_mode *adjusted_mode,
			       int x, int y,
			       struct drm_framebuffer *old_fb)
{
	struct drm_device *dev = crtc->dev;
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
	int pipe = gma_crtc->pipe;
	const struct psb_offset *map = &dev_priv->regmap[pipe];
	int refclk;
	struct gma_clock_t clock;
	u32 dpll = 0, dspcntr, pipeconf;
	bool ok;
	bool is_lvds = false;
	bool is_dp = false;
	struct drm_connector_list_iter conn_iter;
	struct drm_connector *connector;
	const struct gma_limit_t *limit;
	u32 ddi_select = 0;
	bool is_edp = false;
	/* Classify the (first) output attached to this CRTC */
	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct gma_encoder *gma_encoder =
					gma_attached_encoder(connector);
		if (!connector->encoder
		    || connector->encoder->crtc != crtc)
			continue;
		ddi_select = gma_encoder->ddi_select;
		switch (gma_encoder->type) {
		case INTEL_OUTPUT_LVDS:
			is_lvds = true;
			break;
		case INTEL_OUTPUT_ANALOG:
		case INTEL_OUTPUT_HDMI:
			break;
		case INTEL_OUTPUT_DISPLAYPORT:
			is_dp = true;
			break;
		case INTEL_OUTPUT_EDP:
			is_edp = true;
			break;
		default:
			drm_connector_list_iter_end(&conn_iter);
			DRM_ERROR("invalid output type.\n");
			return 0;
		}
		break;
	}
	drm_connector_list_iter_end(&conn_iter);
	if (dev_priv->dplla_96mhz)
		/* low-end sku, 96/100 mhz */
		refclk = 96000;
	else
		/* high-end sku, 27/100 mhz */
		refclk = 27000;
	if (is_dp || is_edp) {
		/*
		 * Based on the spec the low-end SKU has only CRT/LVDS. So it is
		 * unnecessary to consider it for DP/eDP.
		 * On the high-end SKU, it will use the 27/100M reference clk
		 * for DP/eDP. When using SSC clock, the ref clk is 100MHz.Otherwise
		 * it will be 27MHz. From the VBIOS code it seems that the pipe A choose
		 * 27MHz for DP/eDP while the Pipe B chooses the 100MHz.
		 */
		if (pipe == 0)
			refclk = 27000;
		else
			refclk = 100000;
	}
	if (is_lvds && dev_priv->lvds_use_ssc) {
		refclk = dev_priv->lvds_ssc_freq * 1000;
		DRM_DEBUG_KMS("Use SSC reference clock %d Mhz\n", dev_priv->lvds_ssc_freq);
	}
	drm_mode_debug_printmodeline(adjusted_mode);
	/* Search the platform clock limits for divisors hitting the dotclock */
	limit = gma_crtc->clock_funcs->limit(crtc, refclk);
	ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk,
				 &clock);
	if (!ok) {
		DRM_ERROR("Couldn't find PLL settings for mode! target: %d, actual: %d",
			  adjusted_mode->clock, clock.dot);
		return 0;
	}
	dpll = DPLL_VGA_MODE_DIS;
	if (is_dp || is_edp) {
		cdv_intel_dp_set_m_n(crtc, mode, adjusted_mode);
	} else {
		/* Non-DP outputs: clear the GMCH data / DP link M/N registers */
		REG_WRITE(PIPE_GMCH_DATA_M(pipe), 0);
		REG_WRITE(PIPE_GMCH_DATA_N(pipe), 0);
		REG_WRITE(PIPE_DP_LINK_M(pipe), 0);
		REG_WRITE(PIPE_DP_LINK_N(pipe), 0);
	}
	dpll |= DPLL_SYNCLOCK_ENABLE;
	/* if (is_lvds)
		dpll |= DPLLB_MODE_LVDS;
	else
		dpll |= DPLLB_MODE_DAC_SERIAL; */
	/* dpll |= (2 << 11); */
	/* setup pipeconf */
	pipeconf = REG_READ(map->conf);
	pipeconf &= ~(PIPE_BPC_MASK);
	/* Select pipe bits-per-component from the output's capabilities */
	if (is_edp) {
		switch (dev_priv->edp.bpp) {
		case 24:
			pipeconf |= PIPE_8BPC;
			break;
		case 18:
			pipeconf |= PIPE_6BPC;
			break;
		case 30:
			pipeconf |= PIPE_10BPC;
			break;
		default:
			pipeconf |= PIPE_8BPC;
			break;
		}
	} else if (is_lvds) {
		/* the BPC will be 6 if it is 18-bit LVDS panel */
		if ((REG_READ(LVDS) & LVDS_A3_POWER_MASK) == LVDS_A3_POWER_UP)
			pipeconf |= PIPE_8BPC;
		else
			pipeconf |= PIPE_6BPC;
	} else
		pipeconf |= PIPE_8BPC;
	/* Set up the display plane register */
	dspcntr = DISPPLANE_GAMMA_ENABLE;
	if (pipe == 0)
		dspcntr |= DISPPLANE_SEL_PIPE_A;
	else
		dspcntr |= DISPPLANE_SEL_PIPE_B;
	dspcntr |= DISPLAY_PLANE_ENABLE;
	pipeconf |= PIPEACONF_ENABLE;
	/* Write divisors first with the VCO still disabled */
	REG_WRITE(map->dpll, dpll | DPLL_VGA_MODE_DIS | DPLL_SYNCLOCK_ENABLE);
	REG_READ(map->dpll);
	cdv_dpll_set_clock_cdv(dev, crtc, &clock, is_lvds, ddi_select);
	udelay(150);
	/* The LVDS pin pair needs to be on before the DPLLs are enabled.
	 * This is an exception to the general rule that mode_set doesn't turn
	 * things on.
	 */
	if (is_lvds) {
		u32 lvds = REG_READ(LVDS);
		lvds |=
		    LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP |
		    LVDS_PIPEB_SELECT;
		/* Set the B0-B3 data pairs corresponding to
		 * whether we're going to
		 * set the DPLLs for dual-channel mode or not.
		 */
		if (clock.p2 == 7)
			lvds |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
		else
			lvds &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
		/* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
		 * appropriately here, but we need to look more
		 * thoroughly into how panels behave in the two modes.
		 */
		REG_WRITE(LVDS, lvds);
		REG_READ(LVDS);
	}
	dpll |= DPLL_VCO_ENABLE;
	/* Disable the panel fitter if it was on our pipe */
	if (cdv_intel_panel_fitter_pipe(dev) == pipe)
		REG_WRITE(PFIT_CONTROL, 0);
	DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
	drm_mode_debug_printmodeline(mode);
	/* Enable the VCO and wait for the PLL to report lock */
	REG_WRITE(map->dpll,
		(REG_READ(map->dpll) & ~DPLL_LOCK) | DPLL_VCO_ENABLE);
	REG_READ(map->dpll);
	/* Wait for the clocks to stabilize. */
	udelay(150); /* 42 usec w/o calibration, 110 with.  rounded up. */
	if (!(REG_READ(map->dpll) & DPLL_LOCK)) {
		dev_err(dev->dev, "Failed to get DPLL lock\n");
		return -EBUSY;
	}
	{
		int sdvo_pixel_multiply = adjusted_mode->clock / mode->clock;
		REG_WRITE(map->dpll_md, (0 << DPLL_MD_UDI_DIVIDER_SHIFT) | ((sdvo_pixel_multiply - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT));
	}
	/* Timing registers hold value-minus-one, start in the low half
	 * and end/total in the high half */
	REG_WRITE(map->htotal, (adjusted_mode->crtc_hdisplay - 1) |
		  ((adjusted_mode->crtc_htotal - 1) << 16));
	REG_WRITE(map->hblank, (adjusted_mode->crtc_hblank_start - 1) |
		  ((adjusted_mode->crtc_hblank_end - 1) << 16));
	REG_WRITE(map->hsync, (adjusted_mode->crtc_hsync_start - 1) |
		  ((adjusted_mode->crtc_hsync_end - 1) << 16));
	REG_WRITE(map->vtotal, (adjusted_mode->crtc_vdisplay - 1) |
		  ((adjusted_mode->crtc_vtotal - 1) << 16));
	REG_WRITE(map->vblank, (adjusted_mode->crtc_vblank_start - 1) |
		  ((adjusted_mode->crtc_vblank_end - 1) << 16));
	REG_WRITE(map->vsync, (adjusted_mode->crtc_vsync_start - 1) |
		  ((adjusted_mode->crtc_vsync_end - 1) << 16));
	/* pipesrc and dspsize control the size that is scaled from,
	 * which should always be the user's requested size.
	 */
	REG_WRITE(map->size,
		  ((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1));
	REG_WRITE(map->pos, 0);
	REG_WRITE(map->src,
		  ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
	REG_WRITE(map->conf, pipeconf);
	REG_READ(map->conf);
	gma_wait_for_vblank(dev);
	REG_WRITE(map->cntr, dspcntr);
	/* Flush the plane changes */
	{
		const struct drm_crtc_helper_funcs *crtc_funcs =
							crtc->helper_private;
		crtc_funcs->mode_set_base(crtc, x, y, old_fb);
	}
	gma_wait_for_vblank(dev);
	return 0;
}
/** Derive the pixel clock for the given refclk and divisors for 8xx chips. */
/* FIXME: why are we using this, should it be cdv_ in this tree ? */
static void i8xx_clock(int refclk, struct gma_clock_t *clock)
{
	int m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
	int p = clock->p1 * clock->p2;
	int vco = refclk * m / (clock->n + 2);

	clock->m = m;
	clock->p = p;
	clock->vco = vco;
	clock->dot = vco / p;
}
/* Returns the clock of the currently programmed mode of the given pipe,
 * reconstructed (in kHz) from the DPLL and FP divisor registers. */
static int cdv_intel_crtc_clock_get(struct drm_device *dev,
				struct drm_crtc *crtc)
{
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
	int pipe = gma_crtc->pipe;
	const struct psb_offset *map = &dev_priv->regmap[pipe];
	u32 dpll;
	u32 fp;
	struct gma_clock_t clock;
	bool is_lvds;
	struct psb_pipe *p = &dev_priv->regs.pipe[pipe];
	/* Use live registers when powered, otherwise the saved copies */
	if (gma_power_begin(dev, false)) {
		dpll = REG_READ(map->dpll);
		if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
			fp = REG_READ(map->fp0);
		else
			fp = REG_READ(map->fp1);
		is_lvds = (pipe == 1) && (REG_READ(LVDS) & LVDS_PORT_EN);
		gma_power_end(dev);
	} else {
		dpll = p->dpll;
		if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
			fp = p->fp0;
		else
			fp = p->fp1;
		is_lvds = (pipe == 1) &&
			(dev_priv->regs.psb.saveLVDS & LVDS_PORT_EN);
	}
	/* Decode the M1/M2/N divisor fields out of the FP register */
	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
	clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
	clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
	if (is_lvds) {
		/* P1 is encoded one-hot; ffs() recovers the divisor index */
		clock.p1 =
		    ffs((dpll &
			 DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
			DPLL_FPA01_P1_POST_DIV_SHIFT);
		if (clock.p1 == 0) {
			clock.p1 = 4;
			dev_err(dev->dev, "PLL %d\n", dpll);
		}
		clock.p2 = 14;
		if ((dpll & PLL_REF_INPUT_MASK) ==
		    PLLB_REF_INPUT_SPREADSPECTRUMIN) {
			/* XXX: might not be 66MHz */
			i8xx_clock(66000, &clock);
		} else
			i8xx_clock(48000, &clock);
	} else {
		if (dpll & PLL_P1_DIVIDE_BY_TWO)
			clock.p1 = 2;
		else {
			clock.p1 =
			    ((dpll &
			      DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
			     DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
		}
		if (dpll & PLL_P2_DIVIDE_BY_4)
			clock.p2 = 4;
		else
			clock.p2 = 2;
		i8xx_clock(48000, &clock);
	}
	/* XXX: It would be nice to validate the clocks, but we can't reuse
	 * i830PllIsValid() because it relies on the xf86_config connector
	 * configuration being accurate, which it isn't necessarily.
	 */
	return clock.dot;
}
/** Returns the currently programmed mode of the given pipe as a freshly
 * kzalloc'd drm_display_mode (caller frees), or NULL on allocation failure. */
struct drm_display_mode *cdv_intel_crtc_mode_get(struct drm_device *dev,
					     struct drm_crtc *crtc)
{
	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
	int pipe = gma_crtc->pipe;
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct psb_pipe *p = &dev_priv->regs.pipe[pipe];
	const struct psb_offset *map = &dev_priv->regmap[pipe];
	struct drm_display_mode *mode;
	int htot;
	int hsync;
	int vtot;
	int vsync;
	/* Read timing registers live when powered, else the saved copies */
	if (gma_power_begin(dev, false)) {
		htot = REG_READ(map->htotal);
		hsync = REG_READ(map->hsync);
		vtot = REG_READ(map->vtotal);
		vsync = REG_READ(map->vsync);
		gma_power_end(dev);
	} else {
		htot = p->htotal;
		hsync = p->hsync;
		vtot = p->vtotal;
		vsync = p->vsync;
	}
	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
	if (!mode)
		return NULL;
	mode->clock = cdv_intel_crtc_clock_get(dev, crtc);
	/* Registers hold value-minus-one: start in the low 16 bits,
	 * end/total in the high 16 bits */
	mode->hdisplay = (htot & 0xffff) + 1;
	mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
	mode->hsync_start = (hsync & 0xffff) + 1;
	mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
	mode->vdisplay = (vtot & 0xffff) + 1;
	mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
	mode->vsync_start = (vsync & 0xffff) + 1;
	mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;
	drm_mode_set_name(mode);
	drm_mode_set_crtcinfo(mode, 0);
	return mode;
}
/* CRTC helper vtable for Cedarview: mode_set is CDV-specific, the
 * remaining operations use the shared GMA helpers. */
const struct drm_crtc_helper_funcs cdv_intel_helper_funcs = {
	.dpms = gma_crtc_dpms,
	.mode_set = cdv_intel_crtc_mode_set,
	.mode_set_base = gma_pipe_set_base,
	.prepare = gma_crtc_prepare,
	.commit = gma_crtc_commit,
	.disable = gma_crtc_disable,
};
/* Clock calculation ops for Cedarview; PLL validity check is shared. */
const struct gma_clock_funcs cdv_clock_funcs = {
	.clock = cdv_intel_clock,
	.limit = cdv_intel_limit,
	.pll_is_valid = gma_pll_is_valid,
};
| linux-master | drivers/gpu/drm/gma500/cdv_intel_display.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright © 2006-2009 Intel Corporation
*
* Authors:
* Eric Anholt <[email protected]>
* Dave Airlie <[email protected]>
* Jesse Barnes <[email protected]>
*/
#include <linux/i2c.h>
#include <linux/pm_runtime.h>
#include <asm/intel-mid.h>
#include <drm/drm_edid.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_simple_kms_helper.h>
#include "intel_bios.h"
#include "power.h"
#include "psb_drv.h"
#include "psb_intel_drv.h"
#include "psb_intel_reg.h"
/* The max/min PWM frequency in BPCR[31:17] - */
/* The smallest number is 1 (not 0) that can fit in the
* 15-bit field of the and then*/
/* shifts to the left by one bit to get the actual 16-bit
* value that the 15-bits correspond to.*/
#define MRST_BLC_MAX_PWM_REG_FREQ 0xFFFF
#define BRIGHTNESS_MAX_LEVEL 100
/*
 * Sets the power state for the panel.
 *
 * Toggles POWER_TARGET_ON in PP_CONTROL, busy-waits on PP_STATUS for the
 * panel-power sequencer to settle, and drives the backlight through the
 * platform lvds_bl_power hook (backlight off before panel off, on after
 * panel on). Requires (and takes) a display power reference.
 */
static void oaktrail_lvds_set_power(struct drm_device *dev,
				    struct gma_encoder *gma_encoder,
				    bool on)
{
	u32 pp_status;
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	if (!gma_power_begin(dev, true))
		return;
	if (on) {
		REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) |
			  POWER_TARGET_ON);
		/* Wait until the panel is on (PP_READY alone means not yet) */
		do {
			pp_status = REG_READ(PP_STATUS);
		} while ((pp_status & (PP_ON | PP_READY)) == PP_READY);
		dev_priv->is_lvds_on = true;
		if (dev_priv->ops->lvds_bl_power)
			dev_priv->ops->lvds_bl_power(dev, true);
	} else {
		if (dev_priv->ops->lvds_bl_power)
			dev_priv->ops->lvds_bl_power(dev, false);
		REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) &
			  ~POWER_TARGET_ON);
		/* Wait for the panel to finish powering down */
		do {
			pp_status = REG_READ(PP_STATUS);
		} while (pp_status & PP_ON);
		dev_priv->is_lvds_on = false;
	}
	gma_power_end(dev);
}
/* DPMS hook: anything other than full-on is treated as panel off. */
static void oaktrail_lvds_dpms(struct drm_encoder *encoder, int mode)
{
	struct drm_device *dev = encoder->dev;
	struct gma_encoder *gma_encoder = to_gma_encoder(encoder);

	oaktrail_lvds_set_power(dev, gma_encoder, mode == DRM_MODE_DPMS_ON);

	/* XXX: We never power down the LVDS pairs. */
}
/*
 * oaktrail_lvds_mode_set - program the LVDS port and panel fitter
 *
 * Enables the LVDS port (with dithering if firmware/BIOS asked for it),
 * then configures PFIT_CONTROL according to the connector's scaling-mode
 * property: none, aspect-preserving (pillar/letterbox) or fullscreen.
 */
static void oaktrail_lvds_mode_set(struct drm_encoder *encoder,
				   struct drm_display_mode *mode,
				   struct drm_display_mode *adjusted_mode)
{
	struct drm_device *dev = encoder->dev;
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
	struct drm_connector_list_iter conn_iter;
	struct drm_connector *connector = NULL;
	struct drm_crtc *crtc = encoder->crtc;
	u32 lvds_port;
	uint64_t v = DRM_MODE_SCALE_FULLSCREEN;
	if (!gma_power_begin(dev, true))
		return;
	/*
	 * The LVDS pin pair will already have been turned on in the
	 * psb_intel_crtc_mode_set since it has a large impact on the DPLL
	 * settings.
	 */
	lvds_port = (REG_READ(LVDS) &
		    (~LVDS_PIPEB_SELECT)) |
		    LVDS_PORT_EN |
		    LVDS_BORDER_EN;
	/* If the firmware says dither on Moorestown, or the BIOS does
	   on Oaktrail then enable dithering */
	if (mode_dev->panel_wants_dither || dev_priv->lvds_dither)
		lvds_port |= MRST_PANEL_8TO6_DITHER_ENABLE;
	REG_WRITE(LVDS, lvds_port);
	/* Find the connector we're trying to set up */
	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		if (connector->encoder && connector->encoder->crtc == crtc)
			break;
	}
	/* The iterator leaves connector NULL when it finds no match */
	if (!connector) {
		drm_connector_list_iter_end(&conn_iter);
		DRM_ERROR("Couldn't find connector when setting mode");
		gma_power_end(dev);
		return;
	}
	drm_object_property_get_value( &connector->base,
		dev->mode_config.scaling_mode_property, &v);
	drm_connector_list_iter_end(&conn_iter);
	if (v == DRM_MODE_SCALE_NO_SCALE)
		REG_WRITE(PFIT_CONTROL, 0);
	else if (v == DRM_MODE_SCALE_ASPECT) {
		if ((mode->vdisplay != adjusted_mode->crtc_vdisplay) ||
		    (mode->hdisplay != adjusted_mode->crtc_hdisplay)) {
			/* Same aspect ratio: plain scaling; otherwise pick
			 * pillarbox or letterbox to preserve the ratio */
			if ((adjusted_mode->crtc_hdisplay * mode->vdisplay) ==
			    (mode->hdisplay * adjusted_mode->crtc_vdisplay))
				REG_WRITE(PFIT_CONTROL, PFIT_ENABLE);
			else if ((adjusted_mode->crtc_hdisplay *
				mode->vdisplay) > (mode->hdisplay *
				adjusted_mode->crtc_vdisplay))
				REG_WRITE(PFIT_CONTROL, PFIT_ENABLE |
					  PFIT_SCALING_MODE_PILLARBOX);
			else
				REG_WRITE(PFIT_CONTROL, PFIT_ENABLE |
					  PFIT_SCALING_MODE_LETTERBOX);
		} else
			REG_WRITE(PFIT_CONTROL, PFIT_ENABLE);
	} else /*(v == DRM_MODE_SCALE_FULLSCREEN)*/
		REG_WRITE(PFIT_CONTROL, PFIT_ENABLE);
	gma_power_end(dev);
}
/*
 * oaktrail_lvds_prepare - pre-modeset hook
 *
 * Saves the current backlight PWM control and duty cycle, then turns
 * the panel off while the mode is being changed.
 */
static void oaktrail_lvds_prepare(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct gma_encoder *gma_encoder = to_gma_encoder(encoder);
	struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
	if (!gma_power_begin(dev, true))
		return;
	mode_dev->saveBLC_PWM_CTL = REG_READ(BLC_PWM_CTL);
	mode_dev->backlight_duty_cycle = (mode_dev->saveBLC_PWM_CTL &
					  BACKLIGHT_DUTY_CYCLE_MASK);
	oaktrail_lvds_set_power(dev, gma_encoder, false);
	gma_power_end(dev);
}
/*
 * Return the maximum backlight duty cycle, derived from the PWM
 * modulation frequency field of BLC_PWM_CTL (live register when the
 * device is powered, saved copy otherwise).
 */
static u32 oaktrail_lvds_get_max_backlight(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	u32 pwm_ctl;

	if (gma_power_begin(dev, false)) {
		pwm_ctl = REG_READ(BLC_PWM_CTL);
		gma_power_end(dev);
	} else {
		pwm_ctl = dev_priv->regs.saveBLC_PWM_CTL;
	}

	/* 15-bit frequency field scaled up by one bit */
	return ((pwm_ctl & BACKLIGHT_MODULATION_FREQ_MASK) >>
		BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;
}
/*
 * oaktrail_lvds_commit - post-modeset hook
 *
 * Restores a sane backlight duty cycle (falling back to the maximum if
 * none was saved) and powers the panel back on.
 */
static void oaktrail_lvds_commit(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct gma_encoder *gma_encoder = to_gma_encoder(encoder);
	struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
	if (mode_dev->backlight_duty_cycle == 0)
		mode_dev->backlight_duty_cycle =
					oaktrail_lvds_get_max_backlight(dev);
	oaktrail_lvds_set_power(dev, gma_encoder, true);
}
/* Encoder helper vtable; mode_fixup is shared with the PSB LVDS code. */
static const struct drm_encoder_helper_funcs oaktrail_lvds_helper_funcs = {
	.dpms = oaktrail_lvds_dpms,
	.mode_fixup = psb_intel_lvds_mode_fixup,
	.prepare = oaktrail_lvds_prepare,
	.mode_set = oaktrail_lvds_mode_set,
	.commit = oaktrail_lvds_commit,
};
/* Returns the panel fixed mode from configuration.
 *
 * Tries, in order: the firmware GCT timing descriptor (fields are split
 * into hi/lo bytes and reassembled here), the BIOS VBT mode, then the
 * LVDS VBT mode. Leaves mode_dev->panel_fixed_mode NULL if none found.
 */
static void oaktrail_lvds_get_configuration_mode(struct drm_device *dev,
					struct psb_intel_mode_device *mode_dev)
{
	struct drm_display_mode *mode = NULL;
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct oaktrail_timing_info *ti = &dev_priv->gct_data.DTD;
	mode_dev->panel_fixed_mode = NULL;
	/* Use the firmware provided data on Moorestown */
	if (dev_priv->has_gct) {
		mode = kzalloc(sizeof(*mode), GFP_KERNEL);
		if (!mode)
			return;
		mode->hdisplay = (ti->hactive_hi << 8) | ti->hactive_lo;
		mode->vdisplay = (ti->vactive_hi << 8) | ti->vactive_lo;
		mode->hsync_start = mode->hdisplay + \
				((ti->hsync_offset_hi << 8) | \
				ti->hsync_offset_lo);
		mode->hsync_end = mode->hsync_start + \
				((ti->hsync_pulse_width_hi << 8) | \
				ti->hsync_pulse_width_lo);
		mode->htotal = mode->hdisplay + ((ti->hblank_hi << 8) | \
								ti->hblank_lo);
		/* NOTE(review): vertical sync hi fields are shifted by 4,
		 * matching the 4-bit hi nibbles of the DTD layout */
		mode->vsync_start = \
			mode->vdisplay + ((ti->vsync_offset_hi << 4) | \
					ti->vsync_offset_lo);
		mode->vsync_end = \
			mode->vsync_start + ((ti->vsync_pulse_width_hi << 4) | \
						ti->vsync_pulse_width_lo);
		mode->vtotal = mode->vdisplay + \
				((ti->vblank_hi << 8) | ti->vblank_lo);
		/* GCT stores the pixel clock in 10 kHz units */
		mode->clock = ti->pixel_clock * 10;
#if 0
		pr_info("hdisplay is %d\n", mode->hdisplay);
		pr_info("vdisplay is %d\n", mode->vdisplay);
		pr_info("HSS is %d\n", mode->hsync_start);
		pr_info("HSE is %d\n", mode->hsync_end);
		pr_info("htotal is %d\n", mode->htotal);
		pr_info("VSS is %d\n", mode->vsync_start);
		pr_info("VSE is %d\n", mode->vsync_end);
		pr_info("vtotal is %d\n", mode->vtotal);
		pr_info("clock is %d\n", mode->clock);
#endif
		mode_dev->panel_fixed_mode = mode;
	}
	/* Use the BIOS VBT mode if available */
	if (mode_dev->panel_fixed_mode == NULL && mode_dev->vbt_mode)
		mode_dev->panel_fixed_mode = drm_mode_duplicate(dev,
						mode_dev->vbt_mode);
	/* Then try the LVDS VBT mode */
	if (mode_dev->panel_fixed_mode == NULL)
		if (dev_priv->lfp_lvds_vbt_mode)
			mode_dev->panel_fixed_mode =
				drm_mode_duplicate(dev,
					dev_priv->lfp_lvds_vbt_mode);
	/* If we still got no mode then bail */
	if (mode_dev->panel_fixed_mode == NULL)
		return;
	drm_mode_set_name(mode_dev->panel_fixed_mode);
	drm_mode_set_crtcinfo(mode_dev->panel_fixed_mode, 0);
}
/**
 * oaktrail_lvds_init - setup LVDS connectors on this device
 * @dev: drm device
 * @mode_dev: PSB mode device
 *
 * Create the connector, register the LVDS DDC bus, and try to figure out what
 * modes we can display on the LVDS panel (if present).
 */
void oaktrail_lvds_init(struct drm_device *dev,
		    struct psb_intel_mode_device *mode_dev)
{
	struct gma_encoder *gma_encoder;
	struct gma_connector *gma_connector;
	struct gma_i2c_chan *ddc_bus;
	struct drm_connector *connector;
	struct drm_encoder *encoder;
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct edid *edid;
	struct i2c_adapter *i2c_adap;
	struct drm_display_mode *scan;	/* *modes, *bios_mode; */
	int ret;
	gma_encoder = kzalloc(sizeof(struct gma_encoder), GFP_KERNEL);
	if (!gma_encoder)
		return;
	gma_connector = kzalloc(sizeof(struct gma_connector), GFP_KERNEL);
	if (!gma_connector)
		goto err_free_encoder;
	connector = &gma_connector->base;
	encoder = &gma_encoder->base;
	dev_priv->is_lvds_on = true;
	ret = drm_connector_init(dev, connector,
				 &psb_intel_lvds_connector_funcs,
				 DRM_MODE_CONNECTOR_LVDS);
	if (ret)
		goto err_free_connector;
	ret = drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_LVDS);
	if (ret)
		goto err_connector_cleanup;
	gma_connector_attach_encoder(gma_connector, gma_encoder);
	gma_encoder->type = INTEL_OUTPUT_LVDS;
	drm_encoder_helper_add(encoder, &oaktrail_lvds_helper_funcs);
	drm_connector_helper_add(connector,
				 &psb_intel_lvds_connector_helper_funcs);
	connector->display_info.subpixel_order = SubPixelHorizontalRGB;
	connector->interlace_allowed = false;
	connector->doublescan_allowed = false;
	/* Expose scaling-mode and backlight properties on the connector */
	drm_object_attach_property(&connector->base,
					dev->mode_config.scaling_mode_property,
					DRM_MODE_SCALE_FULLSCREEN);
	drm_object_attach_property(&connector->base,
					dev_priv->backlight_property,
					BRIGHTNESS_MAX_LEVEL);
	mode_dev->panel_wants_dither = false;
	if (dev_priv->has_gct)
		mode_dev->panel_wants_dither = (dev_priv->gct_data.
			Panel_Port_Control & MRST_PANEL_8TO6_DITHER_ENABLE);
	if (dev_priv->lvds_dither)
		mode_dev->panel_wants_dither = 1;
	/*
	 * LVDS discovery:
	 * 1) check for EDID on DDC
	 * 2) check for VBT data
	 * 3) check to see if LVDS is already on
	 *    if none of the above, no panel
	 * 4) make sure lid is open
	 *    if closed, act like it's not there for now
	 */
	edid = NULL;
	mutex_lock(&dev->mode_config.mutex);
	i2c_adap = i2c_get_adapter(dev_priv->ops->i2c_bus);
	if (i2c_adap)
		edid = drm_get_edid(connector, i2c_adap);
	/* Fall back to the dedicated LVDS i2c bus if the SoC bus had no EDID */
	if (edid == NULL && dev_priv->lpc_gpio_base) {
		ddc_bus = oaktrail_lvds_i2c_init(dev);
		if (!IS_ERR(ddc_bus)) {
			i2c_adap = &ddc_bus->base;
			edid = drm_get_edid(connector, i2c_adap);
		}
	}
	/*
	 * Due to the logic in probing for i2c buses above we do not know the
	 * i2c_adap until now. Hence we cannot use drm_connector_init_with_ddc()
	 * but must instead set connector->ddc manually here.
	 */
	connector->ddc = i2c_adap;
	/*
	 * Attempt to get the fixed panel mode from DDC.  Assume that the
	 * preferred mode is the right one.
	 */
	if (edid) {
		drm_connector_update_edid_property(connector, edid);
		drm_add_edid_modes(connector, edid);
		kfree(edid);
		list_for_each_entry(scan, &connector->probed_modes, head) {
			if (scan->type & DRM_MODE_TYPE_PREFERRED) {
				mode_dev->panel_fixed_mode =
				    drm_mode_duplicate(dev, scan);
				goto out;	/* FIXME: check for quirks */
			}
		}
	} else
		dev_err(dev->dev, "No ddc adapter available!\n");
	/*
	 * If we didn't get EDID, try geting panel timing
	 * from configuration data
	 */
	oaktrail_lvds_get_configuration_mode(dev, mode_dev);
	if (mode_dev->panel_fixed_mode) {
		mode_dev->panel_fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
		goto out;	/* FIXME: check for quirks */
	}
	/* If we still don't have a mode after all that, give up. */
	if (!mode_dev->panel_fixed_mode) {
		dev_err(dev->dev, "Found no modes on the lvds, ignoring the LVDS\n");
		goto err_unlock;
	}
out:
	mutex_unlock(&dev->mode_config.mutex);
	return;
err_unlock:
	mutex_unlock(&dev->mode_config.mutex);
	gma_i2c_destroy(to_gma_i2c_chan(connector->ddc));
	drm_encoder_cleanup(encoder);
err_connector_cleanup:
	drm_connector_cleanup(connector);
err_free_connector:
	kfree(gma_connector);
err_free_encoder:
	kfree(gma_encoder);
}
| linux-master | drivers/gpu/drm/gma500/oaktrail_lvds.c |
/**************************************************************************
* Copyright (c) 2009-2011, Intel Corporation.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Authors:
* Benjamin Defnet <[email protected]>
* Rajesh Poornachandran <[email protected]>
* Massively reworked
* Alan Cox <[email protected]>
*/
#include "gem.h"
#include "power.h"
#include "psb_drv.h"
#include "psb_reg.h"
#include "psb_intel_reg.h"
#include "psb_irq.h"
#include <linux/mutex.h>
#include <linux/pm_runtime.h>
/**
 * gma_power_init - initialise power manager
 * @dev: our device
 *
 * Set up for power management tracking of our hardware. Derives the
 * APM/OSPM register bases, runs the platform init_pm hook if present,
 * and takes the (currently permanent) runtime-PM reference.
 */
void gma_power_init(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	/* FIXME: Move APM/OSPM base into relevant device code */
	dev_priv->apm_base = dev_priv->apm_reg & 0xffff;
	dev_priv->ospm_base &= 0xffff;
	if (dev_priv->ops->init_pm)
		dev_priv->ops->init_pm(dev);
	/*
	 * Runtime pm support is broken atm. So for now unconditionally
	 * call pm_runtime_get() here and put it again in psb_driver_unload()
	 *
	 * To fix this we need to call pm_runtime_get() once for each active
	 * pipe at boot and then put() / get() for each pipe disable / enable
	 * so that the device gets runtime suspended when no pipes are active.
	 * Once this is in place the pm_runtime_get() below should be replaced
	 * by a pm_runtime_allow() call to undo the pm_runtime_forbid() from
	 * pci_pm_init().
	 */
	pm_runtime_get(dev->dev);
	dev_priv->pm_initialized = true;
}
/**
 * gma_power_uninit - end power manager
 * @dev: device to end for
 *
 * Undo the effects of gma_power_init: drop the runtime-PM reference
 * taken there. Safe to call even if gma_power_init never completed.
 */
void gma_power_uninit(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	if (!dev_priv->pm_initialized)
		return;
	pm_runtime_put_noidle(dev->dev);
}
/**
 * gma_suspend_display - suspend the display logic
 * @dev: our DRM device
 *
 * Suspend the display logic of the graphics interface: save the display
 * register state, then power the display island down.
 */
static void gma_suspend_display(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	dev_priv->ops->save_regs(dev);
	dev_priv->ops->power_down(dev);
}
/**
 * gma_resume_display - resume display side logic
 * @pdev: PCI device
 *
 * Resume the display hardware restoring state and enabling
 * as necessary: power up the display island, re-enable the GTT,
 * rebuild the GTT/MMU mappings and restore the saved registers.
 */
static void gma_resume_display(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	/* turn on the display power island */
	dev_priv->ops->power_up(dev);
	PSB_WVDC32(dev_priv->pge_ctl | _PSB_PGETBL_ENABLED, PSB_PGETBL_CTL);
	pci_write_config_word(pdev, PSB_GMCH_CTRL,
			dev_priv->gmch_ctrl | _PSB_GMCH_ENABLED);
	/* Rebuild our GTT mappings */
	psb_gtt_resume(dev);
	psb_gem_mm_resume(dev);
	dev_priv->ops->restore_regs(dev);
}
/**
* gma_suspend_pci - suspend PCI side
* @pdev: PCI device
*
* Perform the suspend processing on our PCI device state
*/
static void gma_suspend_pci(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
int bsm, vbt;
pci_save_state(pdev);
pci_read_config_dword(pdev, 0x5C, &bsm);
dev_priv->regs.saveBSM = bsm;
pci_read_config_dword(pdev, 0xFC, &vbt);
dev_priv->regs.saveVBT = vbt;
pci_disable_device(pdev);
pci_set_power_state(pdev, PCI_D3hot);
}
/**
 * gma_resume_pci - resume helper
 * @pdev: our PCI device
 *
 * Perform the resume processing on our PCI device state - rewrite
 * register state and re-enable the PCI device: back to D0, restore
 * config space, rewrite the BSM/VBT dwords saved at suspend, then
 * re-enable. Returns the pci_enable_device() result.
 */
static int gma_resume_pci(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	pci_write_config_dword(pdev, 0x5c, dev_priv->regs.saveBSM);
	pci_write_config_dword(pdev, 0xFC, dev_priv->regs.saveVBT);
	return pci_enable_device(pdev);
}
/**
 * gma_power_suspend - bus callback for suspend
 * @_dev: our device
 *
 * Called back by the PCI layer during a suspend of the system. We
 * perform the necessary shut down steps and save enough state that
 * we can undo this when resume is called: IRQs off first, then the
 * display logic, then the PCI device itself.
 */
int gma_power_suspend(struct device *_dev)
{
	struct pci_dev *pdev = to_pci_dev(_dev);
	struct drm_device *dev = pci_get_drvdata(pdev);
	gma_irq_uninstall(dev);
	gma_suspend_display(dev);
	gma_suspend_pci(pdev);
	return 0;
}
/**
 * gma_power_resume - resume power
 * @_dev: our device
 *
 * Resume the PCI side of the graphics and then the displays,
 * reversing the order used by gma_power_suspend(), and finally
 * re-install interrupts.
 */
int gma_power_resume(struct device *_dev)
{
	struct pci_dev *pdev = to_pci_dev(_dev);
	struct drm_device *dev = pci_get_drvdata(pdev);
	gma_resume_pci(pdev);
	gma_resume_display(pdev);
	gma_irq_install(dev);
	return 0;
}
/**
 * gma_power_begin - begin requiring power
 * @dev: our DRM device
 * @force_on: true to force power on
 *
 * Begin an action that requires the display power island is enabled.
 * We refcount the islands. Returns true if a power reference was
 * obtained (and must later be dropped via gma_power_end()).
 */
bool gma_power_begin(struct drm_device *dev, bool force_on)
{
	/* Forced: resume the device if necessary */
	if (force_on)
		return pm_runtime_resume_and_get(dev->dev) == 0;

	/* Opportunistic: only grab a reference if already powered */
	return pm_runtime_get_if_in_use(dev->dev) == 1;
}
/**
 * gma_power_end - end use of power
 * @dev: Our DRM device
 *
 * Indicate that one of our gma_power_begin() requested periods when
 * the display island power is needed has completed.
 */
void gma_power_end(struct drm_device *dev)
{
	pm_runtime_put(dev->dev);
}
| linux-master | drivers/gpu/drm/gma500/power.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* psb GEM interface
*
* Copyright (c) 2011, Intel Corporation.
*
* Authors: Alan Cox
*
* TODO:
* - we need to work out if the MMU is relevant (eg for
* accelerated operations on a GEM object)
*/
#include <linux/pagemap.h>
#include <asm/set_memory.h>
#include <drm/drm.h>
#include <drm/drm_vma_manager.h>
#include "gem.h"
#include "psb_drv.h"
/*
 * PSB GEM object
 */
/*
 * psb_gem_pin - map a GEM object's pages into the GTT and MMU
 * @pobj: object to pin
 *
 * Takes the object's reservation lock; on the first pin of a non-stolen
 * object it allocates the backing pages, marks them write-combined and
 * inserts them into the GTT and the MMU. Pins are refcounted via
 * pobj->in_gart; stolen objects are treated as already mapped.
 * Returns 0 on success or a negative errno.
 */
int psb_gem_pin(struct psb_gem_object *pobj)
{
	struct drm_gem_object *obj = &pobj->base;
	struct drm_device *dev = obj->dev;
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	u32 gpu_base = dev_priv->gtt.gatt_start;
	struct page **pages;
	unsigned int npages;
	int ret;
	ret = dma_resv_lock(obj->resv, NULL);
	if (drm_WARN_ONCE(dev, ret, "dma_resv_lock() failed, ret=%d\n", ret))
		return ret;
	if (pobj->in_gart || pobj->stolen)
		goto out; /* already mapped */
	pages = drm_gem_get_pages(obj);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto err_dma_resv_unlock;
	}
	npages = obj->size / PAGE_SIZE;
	/* Pages are mapped write-combined for CPU access */
	set_pages_array_wc(pages, npages);
	psb_gtt_insert_pages(dev_priv, &pobj->resource, pages);
	psb_mmu_insert_pages(psb_mmu_get_default_pd(dev_priv->mmu), pages,
			     (gpu_base + pobj->offset), npages, 0, 0,
			     PSB_MMU_CACHED_MEMORY);
	pobj->pages = pages;
out:
	++pobj->in_gart;
	dma_resv_unlock(obj->resv);
	return 0;
err_dma_resv_unlock:
	dma_resv_unlock(obj->resv);
	return ret;
}
/*
 * psb_gem_unpin - drop a pin reference on a GEM object
 * @pobj: object to unpin
 *
 * Decrements pobj->in_gart under the reservation lock. When the last
 * pin of a non-stolen object is dropped, removes the pages from the
 * MMU and GTT, restores write-back caching and releases the pages.
 */
void psb_gem_unpin(struct psb_gem_object *pobj)
{
	struct drm_gem_object *obj = &pobj->base;
	struct drm_device *dev = obj->dev;
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	u32 gpu_base = dev_priv->gtt.gatt_start;
	unsigned long npages;
	int ret;
	ret = dma_resv_lock(obj->resv, NULL);
	if (drm_WARN_ONCE(dev, ret, "dma_resv_lock() failed, ret=%d\n", ret))
		return;
	WARN_ON(!pobj->in_gart);
	--pobj->in_gart;
	if (pobj->in_gart || pobj->stolen)
		goto out;
	npages = obj->size / PAGE_SIZE;
	psb_mmu_remove_pages(psb_mmu_get_default_pd(dev_priv->mmu),
			     (gpu_base + pobj->offset), npages, 0, 0);
	psb_gtt_remove_pages(dev_priv, &pobj->resource);
	/* Reset caching flags */
	set_pages_array_wb(pobj->pages, npages);
	drm_gem_put_pages(obj, pobj->pages, true, false);
	pobj->pages = NULL;
out:
	dma_resv_unlock(obj->resv);
}
static vm_fault_t psb_gem_fault(struct vm_fault *vmf);	/* forward: vm_ops .fault */
/*
 * psb_gem_free_object - GEM object destructor
 *
 * Drops the pin taken by the first mmap fault (if any), releases the
 * GEM core state and the GTT resource, then frees the object.
 */
static void psb_gem_free_object(struct drm_gem_object *obj)
{
	struct psb_gem_object *pobj = to_psb_gem_object(obj);
	/* Undo the mmap pin if we are destroying the object */
	if (pobj->mmapping)
		psb_gem_unpin(pobj);
	drm_gem_object_release(obj);
	WARN_ON(pobj->in_gart && !pobj->stolen);
	release_resource(&pobj->resource);
	kfree(pobj);
}
/* VM ops for mmap'd GEM objects; open/close come from the DRM core. */
static const struct vm_operations_struct psb_gem_vm_ops = {
	.fault = psb_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};
/* GEM object vtable: custom destructor plus the fault-based vm ops. */
static const struct drm_gem_object_funcs psb_gem_object_funcs = {
	.free = psb_gem_free_object,
	.vm_ops = &psb_gem_vm_ops,
};
/*
 * psb_gem_create - allocate a PSB GEM object
 * @dev: our device
 * @size: requested size (rounded up to page size)
 * @name: name for the GTT resource
 * @stolen: allocate from stolen memory (treated as permanently mapped)
 * @align: GTT alignment
 *
 * Allocates the GTT resource and initialises the GEM object (private
 * init for stolen memory, shmem-backed otherwise). Returns the object
 * or an ERR_PTR on failure.
 */
struct psb_gem_object *
psb_gem_create(struct drm_device *dev, u64 size, const char *name, bool stolen, u32 align)
{
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct psb_gem_object *pobj;
	struct drm_gem_object *obj;
	int ret;
	size = roundup(size, PAGE_SIZE);
	pobj = kzalloc(sizeof(*pobj), GFP_KERNEL);
	if (!pobj)
		return ERR_PTR(-ENOMEM);
	obj = &pobj->base;
	/* GTT resource */
	ret = psb_gtt_allocate_resource(dev_priv, &pobj->resource, name, size, align, stolen,
					&pobj->offset);
	if (ret)
		goto err_kfree;
	if (stolen) {
		/* Stolen memory is always resident: mark it pre-pinned */
		pobj->stolen = true;
		pobj->in_gart = 1;
	}
	/* GEM object */
	obj->funcs = &psb_gem_object_funcs;
	if (stolen) {
		drm_gem_private_object_init(dev, obj, size);
	} else {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto err_release_resource;
		/* Limit the object to 32-bit mappings */
		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_KERNEL | __GFP_DMA32);
	}
	return pobj;
err_release_resource:
	release_resource(&pobj->resource);
err_kfree:
	kfree(pobj);
	return ERR_PTR(ret);
}
/**
 * psb_gem_dumb_create - create a dumb buffer
 * @file: our client file
 * @dev: our device
 * @args: the requested arguments copied from userspace
 *
 * Allocate a buffer suitable for use for a frame buffer of the
 * form described by user space. Give userspace a handle by which
 * to reference it.
 */
int psb_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
			struct drm_mode_create_dumb *args)
{
	struct psb_gem_object *pobj;
	size_t stride, bytes;
	u32 handle;
	int err;

	/* Bytes per scanline, padded out to a 64-byte boundary */
	stride = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
	/* Whole-page buffer size; zero means degenerate geometry */
	bytes = roundup(stride * args->height, PAGE_SIZE);
	if (!bytes)
		return -EINVAL;

	pobj = psb_gem_create(dev, bytes, "gem", false, PAGE_SIZE);
	if (IS_ERR(pobj))
		return PTR_ERR(pobj);

	err = drm_gem_handle_create(file, &pobj->base, &handle);
	/* Drop our reference; the handle (if created) keeps the object alive */
	drm_gem_object_put(&pobj->base);
	if (err)
		return err;

	args->pitch = stride;
	args->size = bytes;
	args->handle = handle;

	return 0;
}
/**
 * psb_gem_fault - pagefault handler for GEM objects
 * @vmf: fault detail
 *
 * Invoked when a fault occurs on an mmap of a GEM managed area. GEM
 * does most of the work for us including the actual map/unmap calls
 * but we need to do the actual page work.
 *
 * This code eventually needs to handle faulting objects in and out
 * of the GTT and repacking it when we run out of space. We can put
 * that off for now and for our simple uses
 *
 * The VMA was set up by GEM. In doing so it also ensured that the
 * vma->vm_private_data points to the GEM object that is backing this
 * mapping.
 *
 * Return: a VM_FAULT_* status code.
 */
static vm_fault_t psb_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj;
	struct psb_gem_object *pobj;
	int err;
	vm_fault_t ret;
	unsigned long pfn;
	pgoff_t page_offset;
	struct drm_device *dev;
	struct drm_psb_private *dev_priv;

	obj = vma->vm_private_data;	/* GEM object */
	dev = obj->dev;
	dev_priv = to_drm_psb_private(dev);

	pobj = to_psb_gem_object(obj);

	/* Make sure we don't parallel update on a fault, nor move or remove
	   something from beneath our feet */
	mutex_lock(&dev_priv->mmap_mutex);

	/* For now the mmap pins the object and it stays pinned. As things
	   stand that will do us no harm */
	if (pobj->mmapping == 0) {
		err = psb_gem_pin(pobj);
		if (err < 0) {
			dev_err(dev->dev, "gma500: pin failed: %d\n", err);
			ret = vmf_error(err);
			goto fail;
		}
		pobj->mmapping = 1;
	}

	/* Page relative to the VMA start - we must calculate this ourselves
	   because vmf->pgoff is the fake GEM offset */
	page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	/* CPU view of the page, don't go via the GART for CPU writes */
	if (pobj->stolen)
		pfn = (dev_priv->stolen_base + pobj->offset) >> PAGE_SHIFT;
	else
		pfn = page_to_pfn(pobj->pages[page_offset]);

	/* Insert the PFN straight into the faulting address */
	ret = vmf_insert_pfn(vma, vmf->address, pfn);
fail:
	mutex_unlock(&dev_priv->mmap_mutex);

	return ret;
}
/*
* Memory management
*/
/* Insert vram stolen pages into the GTT. */
static void psb_gem_mm_populate_stolen(struct drm_psb_private *pdev)
{
	struct drm_device *dev = &pdev->dev;
	unsigned int pfn_base;
	unsigned int i, num_pages;
	uint32_t pte;

	pfn_base = pdev->stolen_base >> PAGE_SHIFT;
	num_pages = pdev->vram_stolen_size >> PAGE_SHIFT;

	drm_dbg(dev, "Set up %u stolen pages starting at 0x%08x, GTT offset %dK\n",
		num_pages, pfn_base << PAGE_SHIFT, 0);

	/* Write one PTE per stolen page at the start of the GTT map */
	for (i = 0; i < num_pages; ++i) {
		pte = psb_gtt_mask_pte(pfn_base + i, PSB_MMU_CACHED_MEMORY);
		iowrite32(pte, pdev->gtt_map + i);
	}

	/* Posting read of the last PTE to flush the writes to the device */
	(void)ioread32(pdev->gtt_map + i - 1);
}
/*
 * psb_gem_mm_init - set up the stolen-memory backed memory manager
 * @dev: our DRM device
 *
 * Reads the BIOS-programmed stolen memory base (PSB_BSM), maps the
 * stolen range write-combined for framebuffer use, and populates the
 * GTT with its pages. Returns 0 on success or a negative error code.
 */
int psb_gem_mm_init(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct pci_dev *pdev = to_pci_dev(dev->dev);
	unsigned long stolen_size, vram_stolen_size;
	struct psb_gtt *pg;
	int ret;

	mutex_init(&dev_priv->mmap_mutex);

	pg = &dev_priv->gtt;

	/* BSM config dword holds the base of BIOS-reserved stolen memory */
	pci_read_config_dword(pdev, PSB_BSM, &dev_priv->stolen_base);
	/* Stolen memory runs from its base up to the GTT, minus one page */
	vram_stolen_size = pg->gtt_phys_start - dev_priv->stolen_base - PAGE_SIZE;

	stolen_size = vram_stolen_size;

	dev_dbg(dev->dev, "Stolen memory base 0x%x, size %luK\n",
		dev_priv->stolen_base, vram_stolen_size / 1024);

	pg->stolen_size = stolen_size;
	dev_priv->vram_stolen_size = vram_stolen_size;

	/* Map stolen memory write-combined; it backs the framebuffer */
	dev_priv->vram_addr = ioremap_wc(dev_priv->stolen_base, stolen_size);
	if (!dev_priv->vram_addr) {
		dev_err(dev->dev, "Failure to map stolen base.\n");
		ret = -ENOMEM;
		goto err_mutex_destroy;
	}

	psb_gem_mm_populate_stolen(dev_priv);

	return 0;

err_mutex_destroy:
	mutex_destroy(&dev_priv->mmap_mutex);
	return ret;
}
/* Tear down what psb_gem_mm_init() set up: the stolen mapping and mutex. */
void psb_gem_mm_fini(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);

	iounmap(dev_priv->vram_addr);

	mutex_destroy(&dev_priv->mmap_mutex);
}
/* Re-insert all pinned GEM objects into GTT. */
static void psb_gem_mm_populate_resources(struct drm_psb_private *pdev)
{
	unsigned int restored = 0, total = 0, size = 0;
	struct drm_device *dev = &pdev->dev;
	struct psb_gem_object *pobj;
	struct resource *res;

	/* Walk every child resource of the GTT memory range */
	for (res = pdev->gtt_mem->child; res; res = res->sibling) {
		/*
		 * TODO: GTT restoration needs a refactoring, so that we don't have to touch
		 * struct psb_gem_object here. The type represents a GEM object and is
		 * not related to the GTT itself.
		 */
		pobj = container_of(res, struct psb_gem_object, resource);
		++total;
		if (!pobj->pages)
			continue;
		psb_gtt_insert_pages(pdev, &pobj->resource, pobj->pages);
		size += resource_size(&pobj->resource);
		++restored;
	}

	drm_dbg(dev, "Restored %u of %u gtt ranges (%u KB)", restored, total, (size / 1024));
}
/*
 * psb_gem_mm_resume - restore GTT state after suspend
 * @dev: our DRM device
 *
 * Re-reads the stolen memory base, verifies the stolen size has not
 * changed across suspend, and repopulates the GTT with the stolen
 * pages and all pinned GEM objects. Returns 0 or -EINVAL on mismatch.
 */
int psb_gem_mm_resume(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct pci_dev *pdev = to_pci_dev(dev->dev);
	unsigned long stolen_size, vram_stolen_size;
	struct psb_gtt *pg;

	pg = &dev_priv->gtt;

	pci_read_config_dword(pdev, PSB_BSM, &dev_priv->stolen_base);
	vram_stolen_size = pg->gtt_phys_start - dev_priv->stolen_base - PAGE_SIZE;

	stolen_size = vram_stolen_size;

	dev_dbg(dev->dev, "Stolen memory base 0x%x, size %luK\n", dev_priv->stolen_base,
		vram_stolen_size / 1024);

	/* A size change would invalidate every mapping we saved */
	if (stolen_size != pg->stolen_size) {
		dev_err(dev->dev, "GTT resume error.\n");
		return -EINVAL;
	}

	psb_gem_mm_populate_stolen(dev_priv);
	psb_gem_mm_populate_resources(dev_priv);

	return 0;
}
/* ==== end of drivers/gpu/drm/gma500/gem.c (linux-master) ==== */
// SPDX-License-Identifier: GPL-2.0-only
/**************************************************************************
* Copyright (c) 2007-2011, Intel Corporation.
* All Rights Reserved.
* Copyright (c) 2008, Tungsten Graphics, Inc. Cedar Park, TX., USA.
* All Rights Reserved.
*
**************************************************************************/
#include <linux/aperture.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/pm_runtime.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <asm/set_memory.h>
#include <acpi/video.h>
#include <drm/drm.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_pciids.h>
#include <drm/drm_vblank.h>
#include "framebuffer.h"
#include "gem.h"
#include "intel_bios.h"
#include "mid_bios.h"
#include "power.h"
#include "psb_drv.h"
#include "psb_intel_reg.h"
#include "psb_irq.h"
#include "psb_reg.h"
static const struct drm_driver driver;
static int psb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
/*
* The table below contains a mapping of the PCI vendor ID and the PCI Device ID
* to the different groups of PowerVR 5-series chip designs
*
* 0x8086 = Intel Corporation
*
* PowerVR SGX535 - Poulsbo - Intel GMA 500, Intel Atom Z5xx
* PowerVR SGX535 - Moorestown - Intel GMA 600
* PowerVR SGX535 - Oaktrail - Intel GMA 600, Intel Atom Z6xx, E6xx
* PowerVR SGX545 - Cedartrail - Intel GMA 3600, Intel Atom D2500, N2600
* PowerVR SGX545 - Cedartrail - Intel GMA 3650, Intel Atom D2550, D2700,
* N2800
*/
/* driver_data carries a pointer to the per-chip struct psb_ops table */
static const struct pci_device_id pciidlist[] = {
	/* Poulsbo */
	{ 0x8086, 0x8108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &psb_chip_ops },
	{ 0x8086, 0x8109, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &psb_chip_ops },
	/* Oak Trail */
	{ 0x8086, 0x4100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops },
	{ 0x8086, 0x4101, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops },
	{ 0x8086, 0x4102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops },
	{ 0x8086, 0x4103, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops },
	{ 0x8086, 0x4104, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops },
	{ 0x8086, 0x4105, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops },
	{ 0x8086, 0x4106, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops },
	{ 0x8086, 0x4107, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops },
	{ 0x8086, 0x4108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops },
	/* Cedar Trail */
	{ 0x8086, 0x0be0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops },
	{ 0x8086, 0x0be1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops },
	{ 0x8086, 0x0be2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops },
	{ 0x8086, 0x0be3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops },
	{ 0x8086, 0x0be4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops },
	{ 0x8086, 0x0be5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops },
	{ 0x8086, 0x0be6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops },
	{ 0x8086, 0x0be7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops },
	{ 0x8086, 0x0be8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops },
	{ 0x8086, 0x0be9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops },
	{ 0x8086, 0x0bea, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops },
	{ 0x8086, 0x0beb, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops },
	{ 0x8086, 0x0bec, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops },
	{ 0x8086, 0x0bed, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops },
	{ 0x8086, 0x0bee, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops },
	{ 0x8086, 0x0bef, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, pciidlist);
/*
 * Standard IOCTLs.
 */
static const struct drm_ioctl_desc psb_ioctls[] = {
	/* The driver exposes no device-private ioctls */
};
/**
 * psb_spank - reset the 2D engine
 * @dev_priv: our PSB DRM device
 *
 * Soft reset the graphics engine and then reload the necessary registers.
 */
static void psb_spank(struct drm_psb_private *dev_priv)
{
	/* Assert reset on every SGX sub-unit at once */
	PSB_WSGX32(_PSB_CS_RESET_BIF_RESET | _PSB_CS_RESET_DPM_RESET |
		_PSB_CS_RESET_TA_RESET | _PSB_CS_RESET_USE_RESET |
		_PSB_CS_RESET_ISP_RESET | _PSB_CS_RESET_TSP_RESET |
		_PSB_CS_RESET_TWOD_RESET, PSB_CR_SOFT_RESET);
	PSB_RSGX32(PSB_CR_SOFT_RESET);	/* posting read */

	msleep(1);

	/* Release reset */
	PSB_WSGX32(0, PSB_CR_SOFT_RESET);
	wmb();
	/* Clear any latched bus-interface fault */
	PSB_WSGX32(PSB_RSGX32(PSB_CR_BIF_CTRL) | _PSB_CB_CTRL_CLEAR_FAULT,
		   PSB_CR_BIF_CTRL);
	wmb();
	(void) PSB_RSGX32(PSB_CR_BIF_CTRL);

	msleep(1);
	PSB_WSGX32(PSB_RSGX32(PSB_CR_BIF_CTRL) & ~_PSB_CB_CTRL_CLEAR_FAULT,
		   PSB_CR_BIF_CTRL);
	(void) PSB_RSGX32(PSB_CR_BIF_CTRL);
	/* Repoint the 2D engine at the aperture base lost by the reset */
	PSB_WSGX32(dev_priv->gtt.gatt_start, PSB_CR_BIF_TWOD_REQ_BASE);
}
/*
 * psb_do_init - late hardware initialization
 * @dev: our DRM device
 *
 * Validates GATT alignment, computes the free GATT offset past the
 * space occupied by the stolen-memory page table, and programs the
 * SGX bus interface. Returns 0 or -EINVAL on misaligned GATT.
 */
static int psb_do_init(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct psb_gtt *pg = &dev_priv->gtt;

	uint32_t stolen_gtt;

	if (pg->mmu_gatt_start & 0x0FFFFFFF) {
		dev_err(dev->dev, "Gatt must be 256M aligned. This is a bug.\n");
		return -EINVAL;
	}

	/* PTE bytes needed to map the stolen area, then rounded to pages */
	stolen_gtt = (pg->stolen_size >> PAGE_SHIFT) * 4;
	stolen_gtt = (stolen_gtt + PAGE_SIZE - 1) >> PAGE_SHIFT;
	stolen_gtt = (stolen_gtt < pg->gtt_pages) ? stolen_gtt : pg->gtt_pages;

	/*
	 * NOTE(review): "(stolen_gtt << PAGE_SHIFT) * 1024" multiplies a byte
	 * count by 1024 again - looks suspicious, confirm the intended unit
	 * before relying on gatt_free_offset.
	 */
	dev_priv->gatt_free_offset = pg->mmu_gatt_start +
	    (stolen_gtt << PAGE_SHIFT) * 1024;

	spin_lock_init(&dev_priv->irqmask_lock);

	PSB_WSGX32(0x00000000, PSB_CR_BIF_BANK0);
	PSB_WSGX32(0x00000000, PSB_CR_BIF_BANK1);
	PSB_RSGX32(PSB_CR_BIF_BANK1);

	/* Do not bypass any MMU access, let them pagefault instead */
	PSB_WSGX32((PSB_RSGX32(PSB_CR_BIF_CTRL) & ~_PSB_MMU_ER_MASK),
		   PSB_CR_BIF_CTRL);
	PSB_RSGX32(PSB_CR_BIF_CTRL);

	psb_spank(dev_priv);

	/* mmu_gatt ?? */
	PSB_WSGX32(pg->gatt_start, PSB_CR_BIF_TWOD_REQ_BASE);
	PSB_RSGX32(PSB_CR_BIF_TWOD_REQ_BASE); /* Post */

	return 0;
}
/*
 * psb_driver_unload - tear down everything psb_driver_load() set up
 * @dev: our DRM device
 *
 * Also used as the error path of psb_driver_load(); every step checks
 * for NULL/uninitialized state so partial setups unwind cleanly.
 */
static void psb_driver_unload(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);

	/* TODO: Kill vblank etc here */

	gma_backlight_exit(dev);
	psb_modeset_cleanup(dev);

	gma_irq_uninstall(dev);

	if (dev_priv->ops->chip_teardown)
		dev_priv->ops->chip_teardown(dev);

	psb_intel_opregion_fini(dev);

	if (dev_priv->pf_pd) {
		psb_mmu_free_pagedir(dev_priv->pf_pd);
		dev_priv->pf_pd = NULL;
	}
	if (dev_priv->mmu) {
		struct psb_gtt *pg = &dev_priv->gtt;

		/* Remove the stolen-memory mapping before dropping the MMU */
		psb_mmu_remove_pfn_sequence(
			psb_mmu_get_default_pd
			(dev_priv->mmu),
			pg->mmu_gatt_start,
			dev_priv->vram_stolen_size >> PAGE_SHIFT);
		psb_mmu_driver_takedown(dev_priv->mmu);
		dev_priv->mmu = NULL;
	}
	psb_gem_mm_fini(dev);
	psb_gtt_fini(dev);
	if (dev_priv->scratch_page) {
		/* Restore the page to write-back before freeing it */
		set_pages_wb(dev_priv->scratch_page, 1);
		__free_page(dev_priv->scratch_page);
		dev_priv->scratch_page = NULL;
	}
	if (dev_priv->vdc_reg) {
		iounmap(dev_priv->vdc_reg);
		dev_priv->vdc_reg = NULL;
	}
	if (dev_priv->sgx_reg) {
		iounmap(dev_priv->sgx_reg);
		dev_priv->sgx_reg = NULL;
	}
	if (dev_priv->aux_reg) {
		iounmap(dev_priv->aux_reg);
		dev_priv->aux_reg = NULL;
	}

	/* pci_dev_put() tolerates NULL */
	pci_dev_put(dev_priv->aux_pdev);
	pci_dev_put(dev_priv->lpc_pdev);

	/* Destroy VBT data */
	psb_intel_destroy_bios(dev);

	gma_power_uninit(dev);
}
/* devm action: run the full driver teardown when the device goes away. */
static void psb_device_release(void *data)
{
	struct drm_device *dev = data;

	psb_driver_unload(dev);
}
static int psb_driver_load(struct drm_device *dev, unsigned long flags)
{
struct pci_dev *pdev = to_pci_dev(dev->dev);
struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
unsigned long resource_start, resource_len;
unsigned long irqflags;
struct drm_connector_list_iter conn_iter;
struct drm_connector *connector;
struct gma_encoder *gma_encoder;
struct psb_gtt *pg;
int ret = -ENOMEM;
/* initializing driver private data */
dev_priv->ops = (struct psb_ops *)flags;
pg = &dev_priv->gtt;
pci_set_master(pdev);
dev_priv->num_pipe = dev_priv->ops->pipes;
resource_start = pci_resource_start(pdev, PSB_MMIO_RESOURCE);
dev_priv->vdc_reg =
ioremap(resource_start + PSB_VDC_OFFSET, PSB_VDC_SIZE);
if (!dev_priv->vdc_reg)
goto out_err;
dev_priv->sgx_reg = ioremap(resource_start + dev_priv->ops->sgx_offset,
PSB_SGX_SIZE);
if (!dev_priv->sgx_reg)
goto out_err;
if (IS_MRST(dev)) {
int domain = pci_domain_nr(pdev->bus);
dev_priv->aux_pdev =
pci_get_domain_bus_and_slot(domain, 0,
PCI_DEVFN(3, 0));
if (dev_priv->aux_pdev) {
resource_start = pci_resource_start(dev_priv->aux_pdev,
PSB_AUX_RESOURCE);
resource_len = pci_resource_len(dev_priv->aux_pdev,
PSB_AUX_RESOURCE);
dev_priv->aux_reg = ioremap(resource_start,
resource_len);
if (!dev_priv->aux_reg)
goto out_err;
DRM_DEBUG_KMS("Found aux vdc");
} else {
/* Couldn't find the aux vdc so map to primary vdc */
dev_priv->aux_reg = dev_priv->vdc_reg;
DRM_DEBUG_KMS("Couldn't find aux pci device");
}
dev_priv->gmbus_reg = dev_priv->aux_reg;
dev_priv->lpc_pdev =
pci_get_domain_bus_and_slot(domain, 0,
PCI_DEVFN(31, 0));
if (dev_priv->lpc_pdev) {
pci_read_config_word(dev_priv->lpc_pdev, PSB_LPC_GBA,
&dev_priv->lpc_gpio_base);
pci_write_config_dword(dev_priv->lpc_pdev, PSB_LPC_GBA,
(u32)dev_priv->lpc_gpio_base | (1L<<31));
pci_read_config_word(dev_priv->lpc_pdev, PSB_LPC_GBA,
&dev_priv->lpc_gpio_base);
dev_priv->lpc_gpio_base &= 0xffc0;
if (dev_priv->lpc_gpio_base)
DRM_DEBUG_KMS("Found LPC GPIO at 0x%04x\n",
dev_priv->lpc_gpio_base);
else {
pci_dev_put(dev_priv->lpc_pdev);
dev_priv->lpc_pdev = NULL;
}
}
} else {
dev_priv->gmbus_reg = dev_priv->vdc_reg;
}
psb_intel_opregion_setup(dev);
ret = dev_priv->ops->chip_setup(dev);
if (ret)
goto out_err;
/* Init OSPM support */
gma_power_init(dev);
ret = -ENOMEM;
dev_priv->scratch_page = alloc_page(GFP_DMA32 | __GFP_ZERO);
if (!dev_priv->scratch_page)
goto out_err;
set_pages_uc(dev_priv->scratch_page, 1);
ret = psb_gtt_init(dev);
if (ret)
goto out_err;
ret = psb_gem_mm_init(dev);
if (ret)
goto out_err;
ret = -ENOMEM;
dev_priv->mmu = psb_mmu_driver_init(dev, 1, 0, NULL);
if (!dev_priv->mmu)
goto out_err;
dev_priv->pf_pd = psb_mmu_alloc_pd(dev_priv->mmu, 1, 0);
if (!dev_priv->pf_pd)
goto out_err;
ret = psb_do_init(dev);
if (ret)
return ret;
/* Add stolen memory to SGX MMU */
ret = psb_mmu_insert_pfn_sequence(psb_mmu_get_default_pd(dev_priv->mmu),
dev_priv->stolen_base >> PAGE_SHIFT,
pg->gatt_start,
pg->stolen_size >> PAGE_SHIFT, 0);
psb_mmu_set_pd_context(psb_mmu_get_default_pd(dev_priv->mmu), 0);
psb_mmu_set_pd_context(dev_priv->pf_pd, 1);
PSB_WSGX32(0x20000000, PSB_CR_PDS_EXEC_BASE);
PSB_WSGX32(0x30000000, PSB_CR_BIF_3D_REQ_BASE);
acpi_video_register();
/* Setup vertical blanking handling */
ret = drm_vblank_init(dev, dev_priv->num_pipe);
if (ret)
goto out_err;
/*
* Install interrupt handlers prior to powering off SGX or else we will
* crash.
*/
dev_priv->vdc_irq_mask = 0;
dev_priv->pipestat[0] = 0;
dev_priv->pipestat[1] = 0;
dev_priv->pipestat[2] = 0;
spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
PSB_WVDC32(0x00000000, PSB_INT_ENABLE_R);
PSB_WVDC32(0xFFFFFFFF, PSB_INT_MASK_R);
spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
gma_irq_install(dev);
dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
psb_modeset_init(dev);
drm_kms_helper_poll_init(dev);
/* Only add backlight support if we have LVDS or MIPI output */
drm_connector_list_iter_begin(dev, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter) {
gma_encoder = gma_attached_encoder(connector);
if (gma_encoder->type == INTEL_OUTPUT_LVDS ||
gma_encoder->type == INTEL_OUTPUT_MIPI) {
ret = gma_backlight_init(dev);
if (ret == 0)
acpi_video_register_backlight();
break;
}
}
drm_connector_list_iter_end(&conn_iter);
if (ret)
return ret;
psb_intel_opregion_enable_asle(dev);
return devm_add_action_or_reset(dev->dev, psb_device_release, dev);
out_err:
psb_driver_unload(dev);
return ret;
}
/*
 * Hardware for gma500 is a hybrid device, which both acts as a PCI
 * device (for legacy vga functionality) but also more like an
 * integrated display on a SoC where the framebuffer simply
 * resides in main memory and not in a special PCI bar (that
 * internally redirects to a stolen range of main memory) like all
 * other integrated PCI display devices implement it.
 *
 * To catch all cases we need to remove conflicting firmware devices
 * for the stolen system memory and for the VGA functionality. As we
 * currently cannot easily find the framebuffer's location in stolen
 * memory, we remove all framebuffers here.
 *
 * TODO: Refactor psb_driver_load() to map vdc_reg earlier. Then
 *       we might be able to read the framebuffer range from the
 *       device.
 */
static int gma_remove_conflicting_framebuffers(struct pci_dev *pdev,
					       const struct drm_driver *req_driver)
{
	int err;

	/* Evict anything in the whole 4 GiB range the hardware can address */
	err = aperture_remove_conflicting_devices(0, U32_MAX, req_driver->name);
	if (err)
		return err;

	/* ...and take over legacy VGA from firmware drivers as well */
	return __aperture_remove_legacy_vga_devices(pdev);
}
/*
 * psb_pci_probe - PCI probe callback
 * @pdev: the PCI device being probed
 * @ent: matching entry from pciidlist; driver_data is the chip ops table
 *
 * Evicts firmware framebuffers, allocates the managed DRM device,
 * runs the one-time driver setup and registers with DRM.
 */
static int psb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct drm_psb_private *dev_priv;
	struct drm_device *dev;
	int ret;

	ret = gma_remove_conflicting_framebuffers(pdev, &driver);
	if (ret)
		return ret;

	/* Managed enable: disabled automatically on driver detach */
	ret = pcim_enable_device(pdev);
	if (ret)
		return ret;

	dev_priv = devm_drm_dev_alloc(&pdev->dev, &driver, struct drm_psb_private, dev);
	if (IS_ERR(dev_priv))
		return PTR_ERR(dev_priv);
	dev = &dev_priv->dev;

	pci_set_drvdata(pdev, dev);

	/* driver_data carries the per-chip psb_ops pointer (see pciidlist) */
	ret = psb_driver_load(dev, ent->driver_data);
	if (ret)
		return ret;

	ret = drm_dev_register(dev, ent->driver_data);
	if (ret)
		return ret;

	psb_fbdev_setup(dev_priv);

	return 0;
}
/* PCI remove callback; teardown itself runs via the devm release action. */
static void psb_pci_remove(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	drm_dev_unregister(dev);
}
/* Runtime PM hooks; system sleep reuses the same suspend/resume pair */
static DEFINE_RUNTIME_DEV_PM_OPS(psb_pm_ops, gma_power_suspend, gma_power_resume, NULL);

/* File operations: plain GEM, no driver-private mmap or ioctl paths */
static const struct file_operations psb_gem_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = drm_ioctl,
	.compat_ioctl = drm_compat_ioctl,
	.mmap = drm_gem_mmap,
	.poll = drm_poll,
	.read = drm_read,
};

static const struct drm_driver driver = {
	.driver_features = DRIVER_MODESET | DRIVER_GEM,
	.num_ioctls = ARRAY_SIZE(psb_ioctls),
	.dumb_create = psb_gem_dumb_create,
	.ioctls = psb_ioctls,
	.fops = &psb_gem_fops,
	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL
};

static struct pci_driver psb_pci_driver = {
	.name = DRIVER_NAME,
	.id_table = pciidlist,
	.probe = psb_pci_probe,
	.remove = psb_pci_remove,
	.driver.pm = &psb_pm_ops,
};
/* Module entry point; refuses to bind when only firmware drivers are wanted. */
static int __init psb_init(void)
{
	if (drm_firmware_drivers_only())
		return -ENODEV;

	return pci_register_driver(&psb_pci_driver);
}

static void __exit psb_exit(void)
{
	pci_unregister_driver(&psb_pci_driver);
}

/* late_initcall rather than module_init: see gma500 probe ordering needs */
late_initcall(psb_init);
module_exit(psb_exit);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
/* ==== end of drivers/gpu/drm/gma500/psb_drv.c (linux-master) ==== */
// SPDX-License-Identifier: GPL-2.0-only
/**************************************************************************
* Copyright (c) 2007, Intel Corporation.
*
* Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
**************************************************************************/
#include <linux/spinlock.h>
#include "psb_drv.h"
#include "psb_intel_reg.h"
#include "psb_reg.h"
/*
 * psb_lid_timer_func - periodic lid-switch poll
 * @t: the timer that fired
 *
 * Reads the lid state from the opregion and powers the LVDS panel and
 * backlight up or down when the state changes, then re-arms itself.
 */
static void psb_lid_timer_func(struct timer_list *t)
{
	struct drm_psb_private *dev_priv = from_timer(dev_priv, t, lid_timer);
	struct drm_device *dev = (struct drm_device *)&dev_priv->dev;
	struct timer_list *lid_timer = &dev_priv->lid_timer;
	unsigned long irq_flags;
	u32 __iomem *lid_state = dev_priv->opregion.lid_state;
	u32 pp_status;

	/* No change since last poll - just re-arm */
	if (readl(lid_state) == dev_priv->lid_last_state)
		goto lid_timer_schedule;

	if ((readl(lid_state)) & 0x01) {
		/*lid state is open*/
		REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) | POWER_TARGET_ON);
		/* Wait for the panel power sequencer (no timeout here) */
		do {
			pp_status = REG_READ(PP_STATUS);
		} while ((pp_status & PP_ON) == 0 &&
			 (pp_status & PP_SEQUENCE_MASK) != 0);

		if (REG_READ(PP_STATUS) & PP_ON) {
			/*FIXME: should be backlight level before*/
			psb_intel_lvds_set_brightness(dev, 100);
		} else {
			DRM_DEBUG("LVDS panel never powered up");
			/*
			 * NOTE(review): returning here never re-arms the
			 * timer, so lid monitoring silently stops - confirm
			 * whether that is intended.
			 */
			return;
		}
	} else {
		psb_intel_lvds_set_brightness(dev, 0);

		REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) & ~POWER_TARGET_ON);
		/*
		 * NOTE(review): this loops while PP_ON is *clear* right after
		 * requesting power-off, which looks inverted (and untimed).
		 * Verify against the PP_STATUS sequencing semantics before
		 * changing.
		 */
		do {
			pp_status = REG_READ(PP_STATUS);
		} while ((pp_status & PP_ON) == 0);
	}
	dev_priv->lid_last_state =  readl(lid_state);

lid_timer_schedule:
	spin_lock_irqsave(&dev_priv->lid_lock, irq_flags);
	if (!timer_pending(lid_timer)) {
		lid_timer->expires = jiffies + PSB_LID_DELAY;
		add_timer(lid_timer);
	}
	spin_unlock_irqrestore(&dev_priv->lid_lock, irq_flags);
}
/* Arm the lid-polling timer for the first time; lid_lock guards arming. */
void psb_lid_timer_init(struct drm_psb_private *dev_priv)
{
	struct timer_list *lid_timer = &dev_priv->lid_timer;
	unsigned long irq_flags;

	spin_lock_init(&dev_priv->lid_lock);
	spin_lock_irqsave(&dev_priv->lid_lock, irq_flags);

	timer_setup(lid_timer, psb_lid_timer_func, 0);

	lid_timer->expires = jiffies + PSB_LID_DELAY;

	add_timer(lid_timer);
	spin_unlock_irqrestore(&dev_priv->lid_lock, irq_flags);
}
/* Stop the lid timer and wait for a running callback to finish. */
void psb_lid_timer_takedown(struct drm_psb_private *dev_priv)
{
	del_timer_sync(&dev_priv->lid_timer);
}
/* ==== end of drivers/gpu/drm/gma500/psb_lid.c (linux-master) ==== */
// SPDX-License-Identifier: GPL-2.0-only
/**************************************************************************
* Copyright (c) 2011, Intel Corporation.
* All Rights Reserved.
*
**************************************************************************/
#include <linux/delay.h>
#include <linux/dmi.h>
#include <linux/module.h>
#include <drm/drm.h>
#include "intel_bios.h"
#include "mid_bios.h"
#include "psb_drv.h"
#include "psb_intel_reg.h"
#include "psb_reg.h"
/*
 * oaktrail_output_init - set up the display outputs for Oaktrail
 * @dev: our DRM device
 *
 * Initializes LVDS (when enabled), HDMI (when present) and SDVO port B.
 * DSI panels are not supported by this driver.
 */
static int oaktrail_output_init(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);

	if (dev_priv->iLVDS_enable)
		oaktrail_lvds_init(dev, &dev_priv->mode_dev);
	else
		dev_err(dev->dev, "DSI is not supported\n");

	if (dev_priv->hdmi_priv)
		oaktrail_hdmi_init(dev, &dev_priv->mode_dev);

	psb_intel_sdvo_init(dev, SDVOB);

	return 0;
}
/*
 * Provide the low level interfaces for the Moorestown backlight
 */

#define MRST_BLC_MAX_PWM_REG_FREQ	    0xFFFF	/* max PWM count field value */
#define BLC_PWM_PRECISION_FACTOR 100	/* 10000000 */
#define BLC_PWM_FREQ_CALC_CONSTANT 32
#define MHz 1000000	/* core clock is given in MHz */
#define BLC_ADJUSTMENT_MAX 100	/* blc_adj1/blc_adj2 are percentages */
/*
 * oaktrail_set_brightness - program the backlight PWM duty cycle
 * @dev: our DRM device
 * @level: requested brightness as a percentage (0-100)
 */
static void oaktrail_set_brightness(struct drm_device *dev, int level)
{
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	u32 max_pwm_blc;
	u32 blc_pwm_ctl;

	if (!gma_power_begin(dev, 0))
		return;

	/* The maximum PWM count sits in the high half of BLC_PWM_CTL */
	max_pwm_blc = REG_READ(BLC_PWM_CTL) >> 16;

	/* Scale the percentage into the PWM range */
	blc_pwm_ctl = level * max_pwm_blc / 100;

	/*
	 * Apply the blc_adj1 and blc_adj2 percentage corrections one
	 * after the other; integer division order is deliberate.
	 */
	blc_pwm_ctl = blc_pwm_ctl * dev_priv->blc_adj1;
	blc_pwm_ctl = blc_pwm_ctl / 100;
	blc_pwm_ctl = blc_pwm_ctl * dev_priv->blc_adj2;
	blc_pwm_ctl = blc_pwm_ctl / 100;

	/* force PWM bit on */
	REG_WRITE(BLC_PWM_CTL2, (0x80000000 | REG_READ(BLC_PWM_CTL2)));
	REG_WRITE(BLC_PWM_CTL, (max_pwm_blc << 16) | blc_pwm_ctl);

	gma_power_end(dev);
}
/*
 * oaktrail_backlight_init - set up the backlight PWM frequency
 * @dev: our DRM device
 *
 * Derives the PWM frequency register value from the core clock,
 * programs it and sets full brightness. Returns 0 or -ERANGE when
 * the computed value exceeds the register's capacity.
 */
static int oaktrail_backlight_init(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	unsigned long core_clock;
	u16 bl_max_freq;
	uint32_t value;
	uint32_t blc_pwm_precision_factor;

	dev_priv->blc_adj1 = BLC_ADJUSTMENT_MAX;
	dev_priv->blc_adj2 = BLC_ADJUSTMENT_MAX;
	bl_max_freq = 256;
	/* this needs to be set elsewhere */
	blc_pwm_precision_factor = BLC_PWM_PRECISION_FACTOR;

	core_clock = dev_priv->core_freq;

	value = (core_clock * MHz) / BLC_PWM_FREQ_CALC_CONSTANT;
	/*
	 * NOTE(review): multiplying and then dividing by the precision
	 * factor cancels out in integer math here - possibly a remnant of
	 * an earlier fixed-point scheme; confirm before simplifying.
	 */
	value *= blc_pwm_precision_factor;
	value /= bl_max_freq;
	value /= blc_pwm_precision_factor;

	if (value > (unsigned long long)MRST_BLC_MAX_PWM_REG_FREQ)
			return -ERANGE;

	if (gma_power_begin(dev, false)) {
		/* Force PWM enable and mirror the value into both halves */
		REG_WRITE(BLC_PWM_CTL2, (0x80000000 | REG_READ(BLC_PWM_CTL2)));
		REG_WRITE(BLC_PWM_CTL, value | (value << 16));
		gma_power_end(dev);
	}

	oaktrail_set_brightness(dev, PSB_MAX_BRIGHTNESS);
	return 0;
}
/*
* Provide the Moorestown specific chip logic and low level methods
* for power management
*/
/**
* oaktrail_save_display_registers - save registers lost on suspend
* @dev: our DRM device
*
* Save the state we need in order to be able to restore the interface
* upon resume from suspend
*/
static int oaktrail_save_display_registers(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct psb_save_area *regs = &dev_priv->regs;
	struct psb_pipe *p = &regs->pipe[0];
	int i;
	u32 pp_stat;

	/* Display arbitration control + watermarks */
	regs->psb.saveDSPARB = PSB_RVDC32(DSPARB);
	regs->psb.saveDSPFW1 = PSB_RVDC32(DSPFW1);
	regs->psb.saveDSPFW2 = PSB_RVDC32(DSPFW2);
	regs->psb.saveDSPFW3 = PSB_RVDC32(DSPFW3);
	regs->psb.saveDSPFW4 = PSB_RVDC32(DSPFW4);
	regs->psb.saveDSPFW5 = PSB_RVDC32(DSPFW5);
	regs->psb.saveDSPFW6 = PSB_RVDC32(DSPFW6);
	regs->psb.saveCHICKENBIT = PSB_RVDC32(DSPCHICKENBIT);

	/* Pipe & plane A info */
	p->conf = PSB_RVDC32(PIPEACONF);
	p->src = PSB_RVDC32(PIPEASRC);
	p->fp0 = PSB_RVDC32(MRST_FPA0);
	p->fp1 = PSB_RVDC32(MRST_FPA1);
	p->dpll = PSB_RVDC32(MRST_DPLL_A);
	p->htotal = PSB_RVDC32(HTOTAL_A);
	p->hblank = PSB_RVDC32(HBLANK_A);
	p->hsync = PSB_RVDC32(HSYNC_A);
	p->vtotal = PSB_RVDC32(VTOTAL_A);
	p->vblank = PSB_RVDC32(VBLANK_A);
	p->vsync = PSB_RVDC32(VSYNC_A);
	regs->psb.saveBCLRPAT_A = PSB_RVDC32(BCLRPAT_A);
	p->cntr = PSB_RVDC32(DSPACNTR);
	p->stride = PSB_RVDC32(DSPASTRIDE);
	p->addr = PSB_RVDC32(DSPABASE);
	p->surf = PSB_RVDC32(DSPASURF);
	p->linoff = PSB_RVDC32(DSPALINOFF);
	p->tileoff = PSB_RVDC32(DSPATILEOFF);

	/* Save cursor regs */
	regs->psb.saveDSPACURSOR_CTRL = PSB_RVDC32(CURACNTR);
	regs->psb.saveDSPACURSOR_BASE = PSB_RVDC32(CURABASE);
	regs->psb.saveDSPACURSOR_POS = PSB_RVDC32(CURAPOS);

	/* Save palette (gamma) */
	for (i = 0; i < 256; i++)
		p->palette[i] = PSB_RVDC32(PALETTE_A + (i << 2));

	if (dev_priv->hdmi_priv)
		oaktrail_hdmi_save(dev);

	/* Save performance state */
	regs->psb.savePERF_MODE = PSB_RVDC32(MRST_PERF_MODE);

	/* LVDS state */
	regs->psb.savePP_CONTROL = PSB_RVDC32(PP_CONTROL);
	regs->psb.savePFIT_PGM_RATIOS = PSB_RVDC32(PFIT_PGM_RATIOS);
	regs->psb.savePFIT_AUTO_RATIOS = PSB_RVDC32(PFIT_AUTO_RATIOS);
	regs->saveBLC_PWM_CTL = PSB_RVDC32(BLC_PWM_CTL);
	regs->saveBLC_PWM_CTL2 = PSB_RVDC32(BLC_PWM_CTL2);
	regs->psb.saveLVDS = PSB_RVDC32(LVDS);
	regs->psb.savePFIT_CONTROL = PSB_RVDC32(PFIT_CONTROL);
	regs->psb.savePP_ON_DELAYS = PSB_RVDC32(LVDSPP_ON);
	regs->psb.savePP_OFF_DELAYS = PSB_RVDC32(LVDSPP_OFF);
	regs->psb.savePP_DIVISOR = PSB_RVDC32(PP_CYCLE);

	/* HW overlay */
	regs->psb.saveOV_OVADD = PSB_RVDC32(OV_OVADD);
	regs->psb.saveOV_OGAMC0 = PSB_RVDC32(OV_OGAMC0);
	regs->psb.saveOV_OGAMC1 = PSB_RVDC32(OV_OGAMC1);
	regs->psb.saveOV_OGAMC2 = PSB_RVDC32(OV_OGAMC2);
	regs->psb.saveOV_OGAMC3 = PSB_RVDC32(OV_OGAMC3);
	regs->psb.saveOV_OGAMC4 = PSB_RVDC32(OV_OGAMC4);
	regs->psb.saveOV_OGAMC5 = PSB_RVDC32(OV_OGAMC5);

	/* DPST registers */
	regs->psb.saveHISTOGRAM_INT_CONTROL_REG =
					PSB_RVDC32(HISTOGRAM_INT_CONTROL);
	regs->psb.saveHISTOGRAM_LOGIC_CONTROL_REG =
					PSB_RVDC32(HISTOGRAM_LOGIC_CONTROL);
	regs->psb.savePWM_CONTROL_LOGIC = PSB_RVDC32(PWM_CONTROL_LOGIC);

	if (dev_priv->iLVDS_enable) {
		/* Shut down the panel */
		PSB_WVDC32(0, PP_CONTROL);

		/* NOTE(review): untimed busy-wait - hangs if the panel never
		   reports powered down */
		do {
			pp_stat = PSB_RVDC32(PP_STATUS);
		} while (pp_stat & 0x80000000);

		/* Turn off the plane */
		PSB_WVDC32(0x58000000, DSPACNTR);
		/* Trigger the plane disable */
		PSB_WVDC32(0, DSPASURF);

		/* Wait ~4 ticks */
		msleep(4);

		/* Turn off pipe */
		PSB_WVDC32(0x0, PIPEACONF);

		/* Wait ~8 ticks */
		msleep(8);

		/* Turn off PLLs */
		PSB_WVDC32(0, MRST_DPLL_A);
	}
	return 0;
}
/**
* oaktrail_restore_display_registers - restore lost register state
* @dev: our DRM device
*
* Restore register state that was lost during suspend and resume.
*/
static int oaktrail_restore_display_registers(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct psb_save_area *regs = &dev_priv->regs;
	struct psb_pipe *p = &regs->pipe[0];
	u32 pp_stat;
	int i;

	/* Display arbitration + watermarks */
	PSB_WVDC32(regs->psb.saveDSPARB, DSPARB);
	PSB_WVDC32(regs->psb.saveDSPFW1, DSPFW1);
	PSB_WVDC32(regs->psb.saveDSPFW2, DSPFW2);
	PSB_WVDC32(regs->psb.saveDSPFW3, DSPFW3);
	PSB_WVDC32(regs->psb.saveDSPFW4, DSPFW4);
	PSB_WVDC32(regs->psb.saveDSPFW5, DSPFW5);
	PSB_WVDC32(regs->psb.saveDSPFW6, DSPFW6);
	PSB_WVDC32(regs->psb.saveCHICKENBIT, DSPCHICKENBIT);

	/* Make sure VGA plane is off. it initializes to on after reset!*/
	PSB_WVDC32(0x80000000, VGACNTRL);

	/* set the plls */
	PSB_WVDC32(p->fp0, MRST_FPA0);
	PSB_WVDC32(p->fp1, MRST_FPA1);

	/* Actually enable it */
	PSB_WVDC32(p->dpll, MRST_DPLL_A);
	udelay(150);	/* let the PLL settle before touching the pipe */

	/* Restore mode */
	PSB_WVDC32(p->htotal, HTOTAL_A);
	PSB_WVDC32(p->hblank, HBLANK_A);
	PSB_WVDC32(p->hsync, HSYNC_A);
	PSB_WVDC32(p->vtotal, VTOTAL_A);
	PSB_WVDC32(p->vblank, VBLANK_A);
	PSB_WVDC32(p->vsync, VSYNC_A);
	PSB_WVDC32(p->src, PIPEASRC);
	PSB_WVDC32(regs->psb.saveBCLRPAT_A, BCLRPAT_A);

	/* Restore performance mode*/
	PSB_WVDC32(regs->psb.savePERF_MODE, MRST_PERF_MODE);

	/* Enable the pipe*/
	if (dev_priv->iLVDS_enable)
		PSB_WVDC32(p->conf, PIPEACONF);

	/* Set up the plane*/
	PSB_WVDC32(p->linoff, DSPALINOFF);
	PSB_WVDC32(p->stride, DSPASTRIDE);
	PSB_WVDC32(p->tileoff, DSPATILEOFF);

	/* Enable the plane */
	PSB_WVDC32(p->cntr, DSPACNTR);
	PSB_WVDC32(p->surf, DSPASURF);

	/* Enable Cursor A */
	PSB_WVDC32(regs->psb.saveDSPACURSOR_CTRL, CURACNTR);
	PSB_WVDC32(regs->psb.saveDSPACURSOR_POS, CURAPOS);
	PSB_WVDC32(regs->psb.saveDSPACURSOR_BASE, CURABASE);

	/* Restore palette (gamma) */
	for (i = 0; i < 256; i++)
		PSB_WVDC32(p->palette[i], PALETTE_A + (i << 2));

	if (dev_priv->hdmi_priv)
		oaktrail_hdmi_restore(dev);

	if (dev_priv->iLVDS_enable) {
		PSB_WVDC32(regs->saveBLC_PWM_CTL2, BLC_PWM_CTL2);
		PSB_WVDC32(regs->psb.saveLVDS, LVDS); /*port 61180h*/
		PSB_WVDC32(regs->psb.savePFIT_CONTROL, PFIT_CONTROL);
		PSB_WVDC32(regs->psb.savePFIT_PGM_RATIOS, PFIT_PGM_RATIOS);
		PSB_WVDC32(regs->psb.savePFIT_AUTO_RATIOS, PFIT_AUTO_RATIOS);
		PSB_WVDC32(regs->saveBLC_PWM_CTL, BLC_PWM_CTL);
		PSB_WVDC32(regs->psb.savePP_ON_DELAYS, LVDSPP_ON);
		PSB_WVDC32(regs->psb.savePP_OFF_DELAYS, LVDSPP_OFF);
		PSB_WVDC32(regs->psb.savePP_DIVISOR, PP_CYCLE);
		PSB_WVDC32(regs->psb.savePP_CONTROL, PP_CONTROL);
	}

	/* Wait for cycle delay; NOTE(review): untimed busy-wait */
	do {
		pp_stat = PSB_RVDC32(PP_STATUS);
	} while (pp_stat & 0x08000000);

	/* Wait for panel power up; NOTE(review): untimed busy-wait */
	do {
		pp_stat = PSB_RVDC32(PP_STATUS);
	} while (pp_stat & 0x10000000);

	/* Restore HW overlay */
	PSB_WVDC32(regs->psb.saveOV_OVADD, OV_OVADD);
	PSB_WVDC32(regs->psb.saveOV_OGAMC0, OV_OGAMC0);
	PSB_WVDC32(regs->psb.saveOV_OGAMC1, OV_OGAMC1);
	PSB_WVDC32(regs->psb.saveOV_OGAMC2, OV_OGAMC2);
	PSB_WVDC32(regs->psb.saveOV_OGAMC3, OV_OGAMC3);
	PSB_WVDC32(regs->psb.saveOV_OGAMC4, OV_OGAMC4);
	PSB_WVDC32(regs->psb.saveOV_OGAMC5, OV_OGAMC5);

	/* DPST registers */
	PSB_WVDC32(regs->psb.saveHISTOGRAM_INT_CONTROL_REG,
						HISTOGRAM_INT_CONTROL);
	PSB_WVDC32(regs->psb.saveHISTOGRAM_LOGIC_CONTROL_REG,
						HISTOGRAM_LOGIC_CONTROL);
	PSB_WVDC32(regs->psb.savePWM_CONTROL_LOGIC, PWM_CONTROL_LOGIC);

	return 0;
}
/**
 *	oaktrail_power_down	-	power down the display island
 *	@dev: our DRM device
 *
 *	Request power-gating of the display island and poll the power
 *	status register until the hardware reports the island as gated.
 */
static int oaktrail_power_down(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	const u32 pwr_mask = PSB_PWRGT_DISPLAY_MASK;
	u32 pwr_sts;

	/* Ask the PMU to gate the display island */
	outl(pwr_mask, dev_priv->ospm_base + PSB_PM_SSC);

	/* Spin until every bit in the mask reads back as gated.
	 * NOTE(review): no timeout — a stuck PMU hangs here; confirm
	 * the hardware always completes the transition. */
	for (;;) {
		pwr_sts = inl(dev_priv->ospm_base + PSB_PM_SSS);
		if ((pwr_sts & pwr_mask) == pwr_mask)
			break;
		udelay(10);
	}
	return 0;
}
/*
 *	oaktrail_power_up
 *
 *	Restore power to the specified island(s) (powergating)
 */
static int oaktrail_power_up(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	const u32 pwr_mask = PSB_PWRGT_DISPLAY_MASK;
	u32 pwr_sts, pwr_cnt;

	/* Clear the display gating bits in the power-gate control */
	pwr_cnt = inl(dev_priv->ospm_base + PSB_PM_SSC);
	pwr_cnt &= ~pwr_mask;
	outl(pwr_cnt, (dev_priv->ospm_base + PSB_PM_SSC));

	/* Poll until the island reports powered (all mask bits clear).
	 * NOTE(review): no timeout — matches oaktrail_power_down(). */
	for (;;) {
		pwr_sts = inl(dev_priv->ospm_base + PSB_PM_SSS);
		if (!(pwr_sts & pwr_mask))
			break;
		udelay(10);
	}
	return 0;
}
/* Oaktrail */
/*
 * Per-pipe display register offsets for Oaktrail/Moorestown. Entry 0 is
 * pipe A (note the MRST_-prefixed DPLL/FP/base registers specific to
 * this chip), entry 1 is pipe B which uses the generic register set.
 */
static const struct psb_offset oaktrail_regmap[2] = {
	{
		/* Pipe A — Moorestown-specific PLL and base registers */
		.fp0 = MRST_FPA0,
		.fp1 = MRST_FPA1,
		.cntr = DSPACNTR,
		.conf = PIPEACONF,
		.src = PIPEASRC,
		.dpll = MRST_DPLL_A,
		.htotal = HTOTAL_A,
		.hblank = HBLANK_A,
		.hsync = HSYNC_A,
		.vtotal = VTOTAL_A,
		.vblank = VBLANK_A,
		.vsync = VSYNC_A,
		.stride = DSPASTRIDE,
		.size = DSPASIZE,
		.pos = DSPAPOS,
		.surf = DSPASURF,
		.addr = MRST_DSPABASE,
		.base = MRST_DSPABASE,
		.status = PIPEASTAT,
		.linoff = DSPALINOFF,
		.tileoff = DSPATILEOFF,
		.palette = PALETTE_A,
	},
	{
		/* Pipe B — standard register layout */
		.fp0 = FPB0,
		.fp1 = FPB1,
		.cntr = DSPBCNTR,
		.conf = PIPEBCONF,
		.src = PIPEBSRC,
		.dpll = DPLL_B,
		.htotal = HTOTAL_B,
		.hblank = HBLANK_B,
		.hsync = HSYNC_B,
		.vtotal = VTOTAL_B,
		.vblank = VBLANK_B,
		.vsync = VSYNC_B,
		.stride = DSPBSTRIDE,
		.size = DSPBSIZE,
		.pos = DSPBPOS,
		.surf = DSPBSURF,
		.addr = DSPBBASE,
		.base = DSPBBASE,
		.status = PIPEBSTAT,
		.linoff = DSPBLINOFF,
		.tileoff = DSPBTILEOFF,
		.palette = PALETTE_B,
	},
};
/*
 * oaktrail_chip_setup - one-time chip initialisation for Oaktrail.
 *
 * Installs the Oaktrail register map, runs the common MID setup, and —
 * when no GCT firmware table was found — falls back to the OpRegion and
 * Video BIOS for configuration, before bringing up GMBUS and HDMI.
 */
static int oaktrail_chip_setup(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	int ret;

	/* MSI interrupts are used on this chip */
	dev_priv->use_msi = true;
	dev_priv->regmap = oaktrail_regmap;

	ret = mid_chip_setup(dev);
	if (ret < 0)
		return ret;

	if (!dev_priv->has_gct) {
		/* Now pull the BIOS data */
		psb_intel_opregion_init(dev);
		psb_intel_init_bios(dev);
	}
	gma_intel_setup_gmbus(dev);
	oaktrail_hdmi_setup(dev);
	return 0;
}
/*
 * oaktrail_teardown - undo oaktrail_chip_setup().
 *
 * Tears down GMBUS and HDMI; the VBT data is only destroyed when it was
 * parsed from the BIOS in the !has_gct path of oaktrail_chip_setup().
 */
static void oaktrail_teardown(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);

	gma_intel_teardown_gmbus(dev);
	oaktrail_hdmi_teardown(dev);
	if (!dev_priv->has_gct)
		psb_intel_destroy_bios(dev);
}
/*
 * Chip operations table for Oaktrail. HDMI lives on output bit 1,
 * LVDS on output bit 0 (per the masks below).
 */
const struct psb_ops oaktrail_chip_ops = {
	.name = "Oaktrail",
	.pipes = 2,
	.crtcs = 2,
	.hdmi_mask = (1 << 1),
	.lvds_mask = (1 << 0),
	.sdvo_mask = (1 << 1),
	.cursor_needs_phys = 0,
	.sgx_offset = MRST_SGX_OFFSET,

	.chip_setup = oaktrail_chip_setup,
	.chip_teardown = oaktrail_teardown,
	.crtc_helper = &oaktrail_helper_funcs,

	.output_init = oaktrail_output_init,

	.backlight_init = oaktrail_backlight_init,
	.backlight_set = oaktrail_set_brightness,
	.backlight_name = "oaktrail-bl",

	.save_regs = oaktrail_save_display_registers,
	.restore_regs = oaktrail_restore_display_registers,
	.save_crtc = gma_crtc_save,
	.restore_crtc = gma_crtc_restore,
	.power_down = oaktrail_power_down,
	.power_up = oaktrail_power_up,

	.i2c_bus = 1,
};
| linux-master | drivers/gpu/drm/gma500/oaktrail_device.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2006 Intel Corporation
*
* Authors:
* Eric Anholt <[email protected]>
*/
#include <drm/display/drm_dp_helper.h>
#include <drm/drm.h>
#include "intel_bios.h"
#include "psb_drv.h"
#include "psb_intel_drv.h"
#include "psb_intel_reg.h"
#define SLAVE_ADDR1 0x70
#define SLAVE_ADDR2 0x72
/*
 * find_section - locate a BDB section by id.
 * @bdb: start of the BIOS data block
 * @section_id: BDB_* id of the wanted section
 *
 * Each section is laid out as: u8 id, u16 size (little-endian), then
 * `size` bytes of payload. Returns a pointer to the payload (so the
 * size can be re-read at ptr - 2, see get_blocksize()), or NULL if the
 * section is absent.
 *
 * The loop requires a full 3-byte section header to remain before
 * `total`; the previous `index < total` test could read the u16 size up
 * to two bytes past the end of a truncated/malformed VBT.
 */
static void *find_section(struct bdb_header *bdb, int section_id)
{
	u8 *base = (u8 *)bdb;
	int index = 0;
	u16 total, current_size;
	u8 current_id;

	/* skip to first section */
	index += bdb->header_size;
	total = bdb->bdb_size;

	/* walk the sections looking for section_id */
	while (index + 3 <= total) {
		current_id = *(base + index);
		index++;
		/* NOTE(review): unaligned u16 read; OK on the x86 parts
		 * this driver supports */
		current_size = *((u16 *)(base + index));
		index += 2;
		if (current_id == section_id)
			return base + index;
		index += current_size;
	}

	return NULL;
}
/*
 * parse_edp - extract eDP panel parameters from the VBT eDP block.
 *
 * Fills dev_priv->edp with color depth, power sequencing timings, link
 * rate, lane count and DP training defaults (vswing/pre-emphasis) for
 * the panel type discovered earlier. When the VBT carries no eDP block
 * the color depth defaults to 18bpp and everything else is left alone.
 */
static void
parse_edp(struct drm_psb_private *dev_priv, struct bdb_header *bdb)
{
	struct bdb_edp *edp;
	struct edp_power_seq *edp_pps;
	struct edp_link_params *edp_link_params;
	uint8_t	panel_type;

	edp = find_section(bdb, BDB_EDP);

	dev_priv->edp.bpp = 18;
	if (!edp) {
		if (dev_priv->edp.support) {
			DRM_DEBUG_KMS("No eDP BDB found but eDP panel supported, assume %dbpp panel color depth.\n",
				      dev_priv->edp.bpp);
		}
		return;
	}

	panel_type = dev_priv->panel_type;
	/* Color depth is packed two bits per panel type */
	switch ((edp->color_depth >> (panel_type * 2)) & 3) {
	case EDP_18BPP:
		dev_priv->edp.bpp = 18;
		break;
	case EDP_24BPP:
		dev_priv->edp.bpp = 24;
		break;
	case EDP_30BPP:
		dev_priv->edp.bpp = 30;
		break;
	}

	/* Get the eDP sequencing and link info */
	edp_pps = &edp->power_seqs[panel_type];
	edp_link_params = &edp->link_params[panel_type];

	dev_priv->edp.pps = *edp_pps;

	DRM_DEBUG_KMS("EDP timing in vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
				dev_priv->edp.pps.t1_t3, dev_priv->edp.pps.t8,
				dev_priv->edp.pps.t9, dev_priv->edp.pps.t10,
				dev_priv->edp.pps.t11_t12);

	dev_priv->edp.rate = edp_link_params->rate ? DP_LINK_BW_2_7 :
		DP_LINK_BW_1_62;
	switch (edp_link_params->lanes) {
	case 0:
		dev_priv->edp.lanes = 1;
		break;
	case 1:
		dev_priv->edp.lanes = 2;
		break;
	case 3:
	default:
		dev_priv->edp.lanes = 4;
		break;
	}
	DRM_DEBUG_KMS("VBT reports EDP: Lane_count %d, Lane_rate %d, Bpp %d\n",
		dev_priv->edp.lanes, dev_priv->edp.rate, dev_priv->edp.bpp);

	switch (edp_link_params->preemphasis) {
	case 0:
		dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_0;
		break;
	case 1:
		dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_1;
		break;
	case 2:
		dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_2;
		break;
	case 3:
		dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_3;
		break;
	}
	switch (edp_link_params->vswing) {
	case 0:
		dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_0;
		break;
	case 1:
		dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_1;
		break;
	case 2:
		dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
		break;
	case 3:
		dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
		break;
	}
	DRM_DEBUG_KMS("VBT reports EDP: VSwing  %d, Preemph %d\n",
		dev_priv->edp.vswing, dev_priv->edp.preemphasis);
}
/*
 * get_blocksize - size of a BDB section payload.
 *
 * find_section() returns a pointer just past the 2-byte size field, so
 * the payload size lives immediately before the pointer it hands out.
 */
static u16
get_blocksize(void *p)
{
	u16 *block_ptr = (u16 *)((char *)p - 2);

	return *block_ptr;
}
/*
 * fill_detail_timing_data - convert a VBT DVO timing descriptor into a
 * drm_display_mode.
 *
 * The VBT splits most fields into hi/lo byte pairs; reassemble them and
 * derive the sync/total values relative to the active region. The
 * resulting mode is marked as the preferred mode for the panel.
 */
static void fill_detail_timing_data(struct drm_display_mode *panel_fixed_mode,
			struct lvds_dvo_timing *dvo_timing)
{
	panel_fixed_mode->hdisplay = (dvo_timing->hactive_hi << 8) |
		dvo_timing->hactive_lo;
	panel_fixed_mode->hsync_start = panel_fixed_mode->hdisplay +
		((dvo_timing->hsync_off_hi << 8) | dvo_timing->hsync_off_lo);
	panel_fixed_mode->hsync_end = panel_fixed_mode->hsync_start +
		dvo_timing->hsync_pulse_width;
	panel_fixed_mode->htotal = panel_fixed_mode->hdisplay +
		((dvo_timing->hblank_hi << 8) | dvo_timing->hblank_lo);

	panel_fixed_mode->vdisplay = (dvo_timing->vactive_hi << 8) |
		dvo_timing->vactive_lo;
	panel_fixed_mode->vsync_start = panel_fixed_mode->vdisplay +
		dvo_timing->vsync_off;
	panel_fixed_mode->vsync_end = panel_fixed_mode->vsync_start +
		dvo_timing->vsync_pulse_width;
	panel_fixed_mode->vtotal = panel_fixed_mode->vdisplay +
		((dvo_timing->vblank_hi << 8) | dvo_timing->vblank_lo);
	/* VBT stores the pixel clock in 10kHz units; DRM wants kHz */
	panel_fixed_mode->clock = dvo_timing->clock * 10;
	panel_fixed_mode->type = DRM_MODE_TYPE_PREFERRED;

	if (dvo_timing->hsync_positive)
		panel_fixed_mode->flags |= DRM_MODE_FLAG_PHSYNC;
	else
		panel_fixed_mode->flags |= DRM_MODE_FLAG_NHSYNC;

	if (dvo_timing->vsync_positive)
		panel_fixed_mode->flags |= DRM_MODE_FLAG_PVSYNC;
	else
		panel_fixed_mode->flags |= DRM_MODE_FLAG_NVSYNC;

	/* Some VBTs have bogus h/vtotal values */
	if (panel_fixed_mode->hsync_end > panel_fixed_mode->htotal)
		panel_fixed_mode->htotal = panel_fixed_mode->hsync_end + 1;
	if (panel_fixed_mode->vsync_end > panel_fixed_mode->vtotal)
		panel_fixed_mode->vtotal = panel_fixed_mode->vsync_end + 1;

	drm_mode_set_name(panel_fixed_mode);
}
/*
 * parse_backlight_data - copy the per-panel LVDS backlight entry out of
 * the VBT into dev_priv->lvds_bl.
 *
 * The backlight section begins with a one-byte entry size followed by
 * an array of per-panel entries; the entry matching the panel type from
 * the LVDS options block is duplicated for later use.
 *
 * Fix: find_section() returns NULL when the VBT carries no backlight
 * block — the previous code dereferenced that NULL via bl_start + 1.
 */
static void parse_backlight_data(struct drm_psb_private *dev_priv,
				struct bdb_header *bdb)
{
	struct bdb_lvds_backlight *vbt_lvds_bl = NULL;
	struct bdb_lvds_backlight *lvds_bl;
	u8 p_type = 0;
	void *bl_start = NULL;
	struct bdb_lvds_options *lvds_opts
				= find_section(bdb, BDB_LVDS_OPTIONS);

	dev_priv->lvds_bl = NULL;

	if (lvds_opts)
		p_type = lvds_opts->panel_type;
	else
		return;

	bl_start = find_section(bdb, BDB_LVDS_BACKLIGHT);
	if (!bl_start)		/* no backlight section in this VBT */
		return;

	/* +1 skips the entry-size byte, then index by panel type */
	vbt_lvds_bl = (struct bdb_lvds_backlight *)(bl_start + 1) + p_type;

	lvds_bl = kmemdup(vbt_lvds_bl, sizeof(*vbt_lvds_bl), GFP_KERNEL);
	if (!lvds_bl) {
		dev_err(dev_priv->dev.dev, "out of memory for backlight data\n");
		return;
	}
	dev_priv->lvds_bl = lvds_bl;
}
/* Try to find integrated panel data */
/*
 * parse_lfp_panel_data - extract the integrated (LFP/LVDS) panel's fixed
 * mode from the VBT and stash it in dev_priv->lfp_lvds_vbt_mode.
 *
 * Also records the dither setting and panel type; a panel type of 0xff
 * means "no panel" and aborts the parse. Invalid timings (zero totals)
 * are discarded.
 */
static void parse_lfp_panel_data(struct drm_psb_private *dev_priv,
			    struct bdb_header *bdb)
{
	struct bdb_lvds_options *lvds_options;
	struct bdb_lvds_lfp_data *lvds_lfp_data;
	struct bdb_lvds_lfp_data_entry *entry;
	struct lvds_dvo_timing *dvo_timing;
	struct drm_display_mode *panel_fixed_mode;

	/* Defaults if we can't find VBT info */
	dev_priv->lvds_dither = 0;
	dev_priv->lvds_vbt = 0;

	lvds_options = find_section(bdb, BDB_LVDS_OPTIONS);
	if (!lvds_options)
		return;

	dev_priv->lvds_dither = lvds_options->pixel_dither;
	dev_priv->panel_type = lvds_options->panel_type;

	if (lvds_options->panel_type == 0xff)
		return;

	lvds_lfp_data = find_section(bdb, BDB_LVDS_LFP_DATA);
	if (!lvds_lfp_data)
		return;

	entry = &lvds_lfp_data->data[lvds_options->panel_type];
	dvo_timing = &entry->dvo_timing;

	panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode),
				      GFP_KERNEL);
	if (panel_fixed_mode == NULL) {
		dev_err(dev_priv->dev.dev, "out of memory for fixed panel mode\n");
		return;
	}

	dev_priv->lvds_vbt = 1;
	fill_detail_timing_data(panel_fixed_mode, dvo_timing);

	if (panel_fixed_mode->htotal > 0 && panel_fixed_mode->vtotal > 0) {
		dev_priv->lfp_lvds_vbt_mode = panel_fixed_mode;
		drm_mode_debug_printmodeline(panel_fixed_mode);
	} else {
		dev_dbg(dev_priv->dev.dev, "ignoring invalid LVDS VBT\n");
		dev_priv->lvds_vbt = 0;
		kfree(panel_fixed_mode);
	}
	return;
}
/* Try to find sdvo panel data */
/*
 * parse_sdvo_panel_data - extract the SDVO LVDS panel's fixed mode from
 * the VBT DTD section into dev_priv->sdvo_lvds_vbt_mode.
 *
 * The DTD section holds an array of lvds_dvo_timing entries; the one
 * matching sdvo_lvds_options->panel_type is selected by pointer
 * arithmetic on the section payload.
 */
static void parse_sdvo_panel_data(struct drm_psb_private *dev_priv,
		      struct bdb_header *bdb)
{
	struct bdb_sdvo_lvds_options *sdvo_lvds_options;
	struct lvds_dvo_timing *dvo_timing;
	struct drm_display_mode *panel_fixed_mode;

	dev_priv->sdvo_lvds_vbt_mode = NULL;

	sdvo_lvds_options = find_section(bdb, BDB_SDVO_LVDS_OPTIONS);
	if (!sdvo_lvds_options)
		return;

	dvo_timing = find_section(bdb, BDB_SDVO_PANEL_DTDS);
	if (!dvo_timing)
		return;

	panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL);
	if (!panel_fixed_mode)
		return;

	fill_detail_timing_data(panel_fixed_mode,
			dvo_timing + sdvo_lvds_options->panel_type);

	dev_priv->sdvo_lvds_vbt_mode = panel_fixed_mode;

	return;
}
/*
 * parse_general_features - pull TV/CRT support flags and the SSC clock
 * configuration from the VBT general features block.
 *
 * When the block is missing, TV and CRT support default to enabled and
 * the SSC settings are left untouched.
 */
static void parse_general_features(struct drm_psb_private *dev_priv,
		       struct bdb_header *bdb)
{
	struct bdb_general_features *general;

	/* Set sensible defaults in case we can't find the general block */
	dev_priv->int_tv_support = 1;
	dev_priv->int_crt_support = 1;

	general = find_section(bdb, BDB_GENERAL_FEATURES);
	if (!general)
		return;

	dev_priv->int_tv_support = general->int_tv_support;
	dev_priv->int_crt_support = general->int_crt_support;
	dev_priv->lvds_use_ssc = general->enable_ssc;

	if (dev_priv->lvds_use_ssc)
		dev_priv->lvds_ssc_freq = general->ssc_freq ? 100 : 96;
}
/*
 * parse_sdvo_device_mapping - build dev_priv->sdvo_mappings from the
 * child device list in the VBT general definitions block.
 *
 * Only children with slave address 0x70/0x72 (the SDVO I2C addresses)
 * on DVO port B or C are considered; the first child seen per port wins
 * and later duplicates are only logged.
 */
static void
parse_sdvo_device_mapping(struct drm_psb_private *dev_priv,
			  struct bdb_header *bdb)
{
	struct sdvo_device_mapping *p_mapping;
	struct bdb_general_definitions *p_defs;
	struct child_device_config *p_child;
	int i, child_device_num, count;
	u16	block_size;

	p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS);
	if (!p_defs) {
		DRM_DEBUG_KMS("No general definition block is found, unable to construct sdvo mapping.\n");
		return;
	}
	/* judge whether the size of child device meets the requirements.
	 * If the child device size obtained from general definition block
	 * is different with sizeof(struct child_device_config), skip the
	 * parsing of sdvo device info
	 */
	if (p_defs->child_dev_size != sizeof(*p_child)) {
		/* different child dev size . Ignore it */
		DRM_DEBUG_KMS("different child size is found. Invalid.\n");
		return;
	}
	/* get the block size of general definitions */
	block_size = get_blocksize(p_defs);
	/* get the number of child device */
	child_device_num = (block_size - sizeof(*p_defs)) /
				sizeof(*p_child);
	count = 0;
	for (i = 0; i < child_device_num; i++) {
		p_child = &(p_defs->devices[i]);
		if (!p_child->device_type) {
			/* skip the device block if device type is invalid */
			continue;
		}
		if (p_child->slave_addr != SLAVE_ADDR1 &&
			p_child->slave_addr != SLAVE_ADDR2) {
			/*
			 * If the slave address is neither 0x70 nor 0x72,
			 * it is not a SDVO device. Skip it.
			 */
			continue;
		}
		if (p_child->dvo_port != DEVICE_PORT_DVOB &&
			p_child->dvo_port != DEVICE_PORT_DVOC) {
			/* skip the incorrect SDVO port */
			DRM_DEBUG_KMS("Incorrect SDVO port. Skip it\n");
			continue;
		}
		DRM_DEBUG_KMS("the SDVO device with slave addr %2x is found on"
			      " %s port\n",
			      p_child->slave_addr,
			      (p_child->dvo_port == DEVICE_PORT_DVOB) ?
					"SDVOB" : "SDVOC");
		/* dvo_port is 1-based (DVOB/DVOC), mappings array 0-based */
		p_mapping = &(dev_priv->sdvo_mappings[p_child->dvo_port - 1]);
		if (!p_mapping->initialized) {
			p_mapping->dvo_port = p_child->dvo_port;
			p_mapping->slave_addr = p_child->slave_addr;
			p_mapping->dvo_wiring = p_child->dvo_wiring;
			p_mapping->ddc_pin = p_child->ddc_pin;
			p_mapping->i2c_pin = p_child->i2c_pin;
			p_mapping->initialized = 1;
			DRM_DEBUG_KMS("SDVO device: dvo=%x, addr=%x, wiring=%d, ddc_pin=%d, i2c_pin=%d\n",
				      p_mapping->dvo_port,
				      p_mapping->slave_addr,
				      p_mapping->dvo_wiring,
				      p_mapping->ddc_pin,
				      p_mapping->i2c_pin);
		} else {
			DRM_DEBUG_KMS("Maybe one SDVO port is shared by "
					 "two SDVO device.\n");
		}
		if (p_child->slave2_addr) {
			/* Maybe this is a SDVO device with multiple inputs */
			/* And the mapping info is not added */
			DRM_DEBUG_KMS("there exists the slave2_addr. Maybe this"
				" is a SDVO device with multiple inputs.\n");
		}
		count++;
	}

	if (!count) {
		/* No SDVO device info is found */
		DRM_DEBUG_KMS("No SDVO device info is found in VBT\n");
	}
	return;
}
/*
 * parse_driver_features - read driver feature flags from the VBT.
 *
 * Sets eDP support when the LVDS config selects eDP, records whether
 * LVDS is enabled at all, and latches the 96MHz-DPLL_A flag.
 */
static void
parse_driver_features(struct drm_psb_private *dev_priv,
		      struct bdb_header *bdb)
{
	struct bdb_driver_features *driver;

	driver = find_section(bdb, BDB_DRIVER_FEATURES);
	if (!driver)
		return;

	if (driver->lvds_config == BDB_DRIVER_FEATURE_EDP)
		dev_priv->edp.support = 1;

	dev_priv->lvds_enabled_in_vbt = driver->lvds_config != 0;
	DRM_DEBUG_KMS("LVDS VBT config bits: 0x%x\n", driver->lvds_config);

	/* This bit means to use 96Mhz for DPLL_A or not */
	dev_priv->dplla_96mhz = driver->primary_lfp_id != 0;
}
/*
 * parse_device_mapping - copy all valid child device configs out of the
 * VBT general definitions block into dev_priv->child_dev.
 *
 * Two passes over the child array: the first counts entries with a
 * non-zero device type so the copy buffer can be sized, the second
 * copies them out.
 */
static void
parse_device_mapping(struct drm_psb_private *dev_priv,
		       struct bdb_header *bdb)
{
	struct bdb_general_definitions *p_defs;
	struct child_device_config *p_child, *child_dev_ptr;
	int i, child_device_num, count;
	u16	block_size;

	p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS);
	if (!p_defs) {
		DRM_DEBUG_KMS("No general definition block is found, no devices defined.\n");
		return;
	}
	/* judge whether the size of child device meets the requirements.
	 * If the child device size obtained from general definition block
	 * is different with sizeof(struct child_device_config), skip the
	 * parsing of sdvo device info
	 */
	if (p_defs->child_dev_size != sizeof(*p_child)) {
		/* different child dev size . Ignore it */
		DRM_DEBUG_KMS("different child size is found. Invalid.\n");
		return;
	}
	/* get the block size of general definitions */
	block_size = get_blocksize(p_defs);
	/* get the number of child device */
	child_device_num = (block_size - sizeof(*p_defs)) /
				sizeof(*p_child);
	count = 0;
	/* get the number of child devices that are present */
	for (i = 0; i < child_device_num; i++) {
		p_child = &(p_defs->devices[i]);
		if (!p_child->device_type) {
			/* skip the device block if device type is invalid */
			continue;
		}
		count++;
	}
	if (!count) {
		DRM_DEBUG_KMS("no child dev is parsed from VBT\n");
		return;
	}
	dev_priv->child_dev = kcalloc(count, sizeof(*p_child), GFP_KERNEL);
	if (!dev_priv->child_dev) {
		DRM_DEBUG_KMS("No memory space for child devices\n");
		return;
	}

	dev_priv->child_dev_num = count;
	count = 0;
	for (i = 0; i < child_device_num; i++) {
		p_child = &(p_defs->devices[i]);
		if (!p_child->device_type) {
			/* skip the device block if device type is invalid */
			continue;
		}
		child_dev_ptr = dev_priv->child_dev + count;
		count++;
		memcpy((void *)child_dev_ptr, (void *)p_child,
					sizeof(*p_child));
	}
	return;
}
/**
 * psb_intel_init_bios - initialize VBIOS settings & find VBT
 * @dev: DRM device
 *
 * Loads the Video BIOS and checks that the VBT exists.  Sets scratch registers
 * to appropriate values.
 *
 * VBT existence is a sanity check that is relied on by other i830_bios.c code.
 * Note that it would be better to use a BIOS call to get the VBT, as BIOSes may
 * feed an updated VBT back through that, compared to what we'll fetch using
 * this method of groping around in the BIOS data.
 *
 * Returns 0 on success, nonzero on failure.
 */
int psb_intel_init_bios(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct pci_dev *pdev = to_pci_dev(dev->dev);
	struct vbt_header *vbt = NULL;
	struct bdb_header *bdb = NULL;
	u8 __iomem *bios = NULL;
	size_t size;
	int i;


	dev_priv->panel_type = 0xff;

	/* XXX Should this validation be moved to intel_opregion.c? */
	if (dev_priv->opregion.vbt) {
		/* Reuse the outer vbt — the previous inner declaration
		 * shadowed it (-Wshadow); reset it on a bad signature so
		 * the ROM-scan path below still detects "not found". */
		vbt = dev_priv->opregion.vbt;
		if (memcmp(vbt->signature, "$VBT", 4) == 0) {
			DRM_DEBUG_KMS("Using VBT from OpRegion: %20s\n",
					 vbt->signature);
			bdb = (struct bdb_header *)((char *)vbt + vbt->bdb_offset);
		} else {
			dev_priv->opregion.vbt = NULL;
			vbt = NULL;
		}
	}

	if (bdb == NULL) {
		bios = pci_map_rom(pdev, &size);
		if (!bios)
			return -1;

		/* Scour memory looking for the VBT signature */
		for (i = 0; i + 4 < size; i++) {
			if (!memcmp(bios + i, "$VBT", 4)) {
				vbt = (struct vbt_header *)(bios + i);
				break;
			}
		}

		if (!vbt) {
			dev_err(dev->dev, "VBT signature missing\n");
			pci_unmap_rom(pdev, bios);
			return -1;
		}
		bdb = (struct bdb_header *)(bios + i + vbt->bdb_offset);
	}

	/* Grab useful general definitions */
	parse_general_features(dev_priv, bdb);
	parse_driver_features(dev_priv, bdb);
	parse_lfp_panel_data(dev_priv, bdb);
	parse_sdvo_panel_data(dev_priv, bdb);
	parse_sdvo_device_mapping(dev_priv, bdb);
	parse_device_mapping(dev_priv, bdb);
	parse_backlight_data(dev_priv, bdb);
	parse_edp(dev_priv, bdb);

	if (bios)
		pci_unmap_rom(pdev, bios);

	return 0;
}
/*
* Destroy and free VBT data
*/
void psb_intel_destroy_bios(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
kfree(dev_priv->sdvo_lvds_vbt_mode);
kfree(dev_priv->lfp_lvds_vbt_mode);
kfree(dev_priv->lvds_bl);
}
| linux-master | drivers/gpu/drm/gma500/intel_bios.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright © 2006-2011 Intel Corporation
*
* Authors:
* Eric Anholt <[email protected]>
* Patrik Jakobsson <[email protected]>
*/
#include <linux/delay.h>
#include <linux/highmem.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_vblank.h>
#include "framebuffer.h"
#include "gem.h"
#include "gma_display.h"
#include "psb_irq.h"
#include "psb_intel_drv.h"
#include "psb_intel_reg.h"
/*
 * Returns whether any output on the specified pipe is of the specified type
 */
bool gma_pipe_has_type(struct drm_crtc *crtc, int type)
{
	struct drm_device *dev = crtc->dev;
	struct drm_connector_list_iter conn_iter;
	struct drm_connector *connector;

	/* Walk all connectors; match those whose encoder drives this CRTC */
	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		if (connector->encoder && connector->encoder->crtc == crtc) {
			struct gma_encoder *gma_encoder =
						gma_attached_encoder(connector);
			if (gma_encoder->type == type) {
				drm_connector_list_iter_end(&conn_iter);
				return true;
			}
		}
	}
	drm_connector_list_iter_end(&conn_iter);

	return false;
}
/*
 * gma_wait_for_vblank - crude vblank wait.
 *
 * NOTE(review): this busy-waits a fixed 20ms rather than waiting on a
 * real vblank event; callers rely only on "at least one frame elapsed".
 */
void gma_wait_for_vblank(struct drm_device *dev)
{
	/* Wait for 20ms, i.e. one cycle at 50hz. */
	mdelay(20);
}
/*
 * gma_pipe_set_base - point the display plane at (x, y) within the
 * framebuffer currently attached to the CRTC.
 *
 * Pins the new framebuffer's GEM object into the GTT, programs stride,
 * pixel format and scan-out address for the pipe, and unpins the old
 * framebuffer on success. Returns 0 or a negative error code.
 */
int gma_pipe_set_base(struct drm_crtc *crtc, int x, int y,
		      struct drm_framebuffer *old_fb)
{
	struct drm_device *dev = crtc->dev;
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
	struct drm_framebuffer *fb = crtc->primary->fb;
	struct psb_gem_object *pobj;
	int pipe = gma_crtc->pipe;
	const struct psb_offset *map = &dev_priv->regmap[pipe];
	unsigned long start, offset;
	u32 dspcntr;
	int ret = 0;

	if (!gma_power_begin(dev, true))
		return 0;

	/* no fb bound */
	if (!fb) {
		/* NOTE(review): still unpins old_fb and returns 0 here */
		dev_err(dev->dev, "No FB bound\n");
		goto gma_pipe_cleaner;
	}

	pobj = to_psb_gem_object(fb->obj[0]);

	/* We are displaying this buffer, make sure it is actually loaded
	   into the GTT */
	ret = psb_gem_pin(pobj);
	if (ret < 0)
		goto gma_pipe_set_base_exit;
	start = pobj->offset;
	offset = y * fb->pitches[0] + x * fb->format->cpp[0];

	REG_WRITE(map->stride, fb->pitches[0]);

	/* Program the pixel format bits from the fb's cpp/depth */
	dspcntr = REG_READ(map->cntr);
	dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;

	switch (fb->format->cpp[0] * 8) {
	case 8:
		dspcntr |= DISPPLANE_8BPP;
		break;
	case 16:
		if (fb->format->depth == 15)
			dspcntr |= DISPPLANE_15_16BPP;
		else
			dspcntr |= DISPPLANE_16BPP;
		break;
	case 24:
	case 32:
		dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
		break;
	default:
		dev_err(dev->dev, "Unknown color depth\n");
		ret = -EINVAL;
		goto gma_pipe_set_base_exit;
	}
	REG_WRITE(map->cntr, dspcntr);

	dev_dbg(dev->dev,
		"Writing base %08lX %08lX %d %d\n", start, offset, x, y);

	/* FIXME: Investigate whether this really is the base for psb and why
		  the linear offset is named base for the other chips. map->surf
		  should be the base and map->linoff the offset for all chips */
	if (IS_PSB(dev)) {
		REG_WRITE(map->base, offset + start);
		REG_READ(map->base);
	} else {
		REG_WRITE(map->base, offset);
		REG_READ(map->base);
		REG_WRITE(map->surf, start);
		REG_READ(map->surf);
	}

gma_pipe_cleaner:
	/* If there was a previous display we can now unpin it */
	if (old_fb)
		psb_gem_unpin(to_psb_gem_object(old_fb->obj[0]));

gma_pipe_set_base_exit:
	gma_power_end(dev);

	return ret;
}
/* Loads the palette/gamma unit for the CRTC with the prepared values */
/*
 * Writes crtc->gamma_store (16-bit per channel) into the hardware
 * palette, truncated to 8 bits and offset by the per-entry lut_adj.
 * If the device cannot be powered up, the values are cached in the
 * saved register state instead so a later restore applies them.
 */
void gma_crtc_load_lut(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
	const struct psb_offset *map = &dev_priv->regmap[gma_crtc->pipe];
	int palreg = map->palette;
	u16 *r, *g, *b;
	int i;

	/* The clocks have to be on to load the palette. */
	if (!crtc->enabled)
		return;

	/* gamma_store is laid out as R[256] G[256] B[256] */
	r = crtc->gamma_store;
	g = r + crtc->gamma_size;
	b = g + crtc->gamma_size;

	if (gma_power_begin(dev, false)) {
		for (i = 0; i < 256; i++) {
			REG_WRITE(palreg + 4 * i,
				  (((*r++ >> 8) + gma_crtc->lut_adj[i]) << 16) |
				  (((*g++ >> 8) + gma_crtc->lut_adj[i]) << 8) |
				  ((*b++ >> 8) + gma_crtc->lut_adj[i]));
		}
		gma_power_end(dev);
	} else {
		for (i = 0; i < 256; i++) {
			/* FIXME: Why pipe[0] and not pipe[..._crtc->pipe]? */
			dev_priv->regs.pipe[0].palette[i] =
				(((*r++ >> 8) + gma_crtc->lut_adj[i]) << 16) |
				(((*g++ >> 8) + gma_crtc->lut_adj[i]) << 8) |
				((*b++ >> 8) + gma_crtc->lut_adj[i]);
		}
	}
}
/*
 * gma_crtc_gamma_set - drm_crtc_funcs.gamma_set hook.
 *
 * The red/green/blue/size arguments are unused: the DRM core has
 * already copied the new table into crtc->gamma_store, which
 * gma_crtc_load_lut() reads directly.
 */
static int gma_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
			      u16 *blue, u32 size,
			      struct drm_modeset_acquire_ctx *ctx)
{
	gma_crtc_load_lut(crtc);

	return 0;
}
/*
 * Sets the power management mode of the pipe and plane.
 *
 * This code should probably grow support for turning the cursor off and back
 * on appropriately at the same time as we're turning the pipe off/on.
 */
void gma_crtc_dpms(struct drm_crtc *crtc, int mode)
{
	struct drm_device *dev = crtc->dev;
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
	int pipe = gma_crtc->pipe;
	const struct psb_offset *map = &dev_priv->regmap[pipe];
	u32 temp;

	/* XXX: When our outputs are all unaware of DPMS modes other than off
	 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
	 */

	if (IS_CDV(dev))
		dev_priv->ops->disable_sr(dev);

	switch (mode) {
	case DRM_MODE_DPMS_ON:
	case DRM_MODE_DPMS_STANDBY:
	case DRM_MODE_DPMS_SUSPEND:
		if (gma_crtc->active)
			break;

		gma_crtc->active = true;

		/* Enable the DPLL */
		temp = REG_READ(map->dpll);
		if ((temp & DPLL_VCO_ENABLE) == 0) {
			REG_WRITE(map->dpll, temp);
			REG_READ(map->dpll);
			/* Wait for the clocks to stabilize. */
			udelay(150);
			REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
			REG_READ(map->dpll);
			/* Wait for the clocks to stabilize. */
			udelay(150);
			/* NOTE(review): the enable write is issued twice
			 * with settle delays — presumably a hardware
			 * requirement; do not collapse. */
			REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
			REG_READ(map->dpll);
			/* Wait for the clocks to stabilize. */
			udelay(150);
		}

		/* Enable the plane */
		temp = REG_READ(map->cntr);
		if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
			REG_WRITE(map->cntr,
				  temp | DISPLAY_PLANE_ENABLE);
			/* Flush the plane changes */
			REG_WRITE(map->base, REG_READ(map->base));
		}

		udelay(150);

		/* Enable the pipe */
		temp = REG_READ(map->conf);
		if ((temp & PIPEACONF_ENABLE) == 0)
			REG_WRITE(map->conf, temp | PIPEACONF_ENABLE);

		/* Clear sticky status bits; ack any FIFO underrun */
		temp = REG_READ(map->status);
		temp &= ~(0xFFFF);
		temp |= PIPE_FIFO_UNDERRUN;
		REG_WRITE(map->status, temp);
		REG_READ(map->status);

		gma_crtc_load_lut(crtc);

		/* Give the overlay scaler a chance to enable
		 * if it's on this pipe */
		/* psb_intel_crtc_dpms_video(crtc, true); TODO */

		drm_crtc_vblank_on(crtc);
		break;
	case DRM_MODE_DPMS_OFF:
		if (!gma_crtc->active)
			break;

		gma_crtc->active = false;

		/* Give the overlay scaler a chance to disable
		 * if it's on this pipe */
		/* psb_intel_crtc_dpms_video(crtc, FALSE); TODO */

		/* Disable the VGA plane that we never use */
		REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);

		/* Turn off vblank interrupts */
		drm_crtc_vblank_off(crtc);

		/* Wait for vblank for the disable to take effect */
		gma_wait_for_vblank(dev);

		/* Disable plane */
		temp = REG_READ(map->cntr);
		if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
			REG_WRITE(map->cntr,
				  temp & ~DISPLAY_PLANE_ENABLE);
			/* Flush the plane changes */
			REG_WRITE(map->base, REG_READ(map->base));
			REG_READ(map->base);
		}

		/* Disable pipe */
		temp = REG_READ(map->conf);
		if ((temp & PIPEACONF_ENABLE) != 0) {
			REG_WRITE(map->conf, temp & ~PIPEACONF_ENABLE);
			REG_READ(map->conf);
		}

		/* Wait for vblank for the disable to take effect. */
		gma_wait_for_vblank(dev);

		udelay(150);

		/* Disable DPLL */
		temp = REG_READ(map->dpll);
		if ((temp & DPLL_VCO_ENABLE) != 0) {
			REG_WRITE(map->dpll, temp & ~DPLL_VCO_ENABLE);
			REG_READ(map->dpll);
		}

		/* Wait for the clocks to turn off. */
		udelay(150);
		break;
	}

	if (IS_CDV(dev))
		dev_priv->ops->update_wm(dev, crtc);

	/* Set FIFO watermarks */
	REG_WRITE(DSPARB, 0x3F3E);
}
/*
 * gma_crtc_cursor_set - drm_crtc_funcs.cursor_set hook.
 *
 * handle == 0 disables the cursor and releases the previous GEM object.
 * Otherwise looks up the 64x64 ARGB cursor buffer, pins it into the
 * GTT (or copies it into dedicated cursor memory when the chip needs a
 * physical address) and programs the cursor registers.
 *
 * Fix: on the cursor_needs_phys error path (no hardware cursor memory)
 * the object pinned just above was never unpinned, leaking a GTT pin
 * on every failed call.
 */
static int gma_crtc_cursor_set(struct drm_crtc *crtc,
			       struct drm_file *file_priv, uint32_t handle,
			       uint32_t width, uint32_t height)
{
	struct drm_device *dev = crtc->dev;
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
	int pipe = gma_crtc->pipe;
	uint32_t control = (pipe == 0) ? CURACNTR : CURBCNTR;
	uint32_t base = (pipe == 0) ? CURABASE : CURBBASE;
	uint32_t temp;
	size_t addr = 0;
	struct psb_gem_object *pobj;
	struct psb_gem_object *cursor_pobj = gma_crtc->cursor_pobj;
	struct drm_gem_object *obj;
	void *tmp_dst;
	int ret = 0, i, cursor_pages;

	/* If we didn't get a handle then turn the cursor off */
	if (!handle) {
		temp = CURSOR_MODE_DISABLE;
		if (gma_power_begin(dev, false)) {
			REG_WRITE(control, temp);
			REG_WRITE(base, 0);
			gma_power_end(dev);
		}

		/* Unpin the old GEM object */
		if (gma_crtc->cursor_obj) {
			pobj = to_psb_gem_object(gma_crtc->cursor_obj);
			psb_gem_unpin(pobj);
			drm_gem_object_put(gma_crtc->cursor_obj);
			gma_crtc->cursor_obj = NULL;
		}
		return 0;
	}

	/* Currently we only support 64x64 cursors */
	if (width != 64 || height != 64) {
		dev_dbg(dev->dev, "We currently only support 64x64 cursors\n");
		return -EINVAL;
	}

	obj = drm_gem_object_lookup(file_priv, handle);
	if (!obj) {
		ret = -ENOENT;
		goto unlock;
	}

	if (obj->size < width * height * 4) {
		dev_dbg(dev->dev, "Buffer is too small\n");
		ret = -ENOMEM;
		goto unref_cursor;
	}

	pobj = to_psb_gem_object(obj);

	/* Pin the memory into the GTT */
	ret = psb_gem_pin(pobj);
	if (ret) {
		dev_err(dev->dev, "Can not pin down handle 0x%x\n", handle);
		goto unref_cursor;
	}

	if (dev_priv->ops->cursor_needs_phys) {
		if (!cursor_pobj) {
			dev_err(dev->dev, "No hardware cursor mem available");
			ret = -ENOMEM;
			/* Undo the pin taken above before dropping the ref */
			psb_gem_unpin(pobj);
			goto unref_cursor;
		}

		cursor_pages = obj->size / PAGE_SIZE;
		if (cursor_pages > 4)
			cursor_pages = 4; /* Prevent overflow */

		/* Copy the cursor to cursor mem */
		tmp_dst = dev_priv->vram_addr + cursor_pobj->offset;
		for (i = 0; i < cursor_pages; i++) {
			memcpy_from_page(tmp_dst, pobj->pages[i], 0, PAGE_SIZE);
			tmp_dst += PAGE_SIZE;
		}

		addr = gma_crtc->cursor_addr;
	} else {
		addr = pobj->offset;
		gma_crtc->cursor_addr = addr;
	}

	temp = 0;
	/* set the pipe for the cursor */
	temp |= (pipe << 28);
	temp |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;

	if (gma_power_begin(dev, false)) {
		REG_WRITE(control, temp);
		REG_WRITE(base, addr);
		gma_power_end(dev);
	}

	/* unpin the old bo */
	if (gma_crtc->cursor_obj) {
		pobj = to_psb_gem_object(gma_crtc->cursor_obj);
		psb_gem_unpin(pobj);
		drm_gem_object_put(gma_crtc->cursor_obj);
	}

	gma_crtc->cursor_obj = obj;
unlock:
	return ret;

unref_cursor:
	drm_gem_object_put(obj);
	return ret;
}
/*
 * gma_crtc_cursor_move - drm_crtc_funcs.cursor_move hook.
 *
 * The position registers take magnitude plus a per-axis sign bit, so
 * negative coordinates are encoded as (sign, -value). The base register
 * is rewritten to latch the new position.
 */
static int gma_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
{
	struct drm_device *dev = crtc->dev;
	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
	int pipe = gma_crtc->pipe;
	uint32_t temp = 0;
	uint32_t addr;

	if (x < 0) {
		temp |= (CURSOR_POS_SIGN << CURSOR_X_SHIFT);
		x = -x;
	}
	if (y < 0) {
		temp |= (CURSOR_POS_SIGN << CURSOR_Y_SHIFT);
		y = -y;
	}

	temp |= ((x & CURSOR_POS_MASK) << CURSOR_X_SHIFT);
	temp |= ((y & CURSOR_POS_MASK) << CURSOR_Y_SHIFT);

	addr = gma_crtc->cursor_addr;

	if (gma_power_begin(dev, false)) {
		REG_WRITE((pipe == 0) ? CURAPOS : CURBPOS, temp);
		REG_WRITE((pipe == 0) ? CURABASE : CURBBASE, addr);
		gma_power_end(dev);
	}
	return 0;
}
/* Turn the CRTC off before a mode set (drm_crtc_helper prepare hook) */
void gma_crtc_prepare(struct drm_crtc *crtc)
{
	const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
	crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
}
/* Turn the CRTC back on after a mode set (drm_crtc_helper commit hook) */
void gma_crtc_commit(struct drm_crtc *crtc)
{
	const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
	crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
}
/*
 * Fully disable the CRTC and unpin the framebuffer that was being
 * scanned out (pinned by gma_pipe_set_base()).
 */
void gma_crtc_disable(struct drm_crtc *crtc)
{
	struct psb_gem_object *pobj;
	const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;

	crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);

	if (crtc->primary->fb) {
		pobj = to_psb_gem_object(crtc->primary->fb->obj[0]);
		psb_gem_unpin(pobj);
	}
}
/*
 * Release the CRTC: drop the dedicated cursor memory reference, free
 * the saved register state, and free the gma_crtc wrapper itself.
 */
void gma_crtc_destroy(struct drm_crtc *crtc)
{
	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);

	if (gma_crtc->cursor_pobj)
		drm_gem_object_put(&gma_crtc->cursor_pobj->base);

	kfree(gma_crtc->crtc_state);
	drm_crtc_cleanup(crtc);
	kfree(gma_crtc);
}
/*
 * gma_crtc_page_flip - drm_crtc_funcs.page_flip hook.
 *
 * Implements the flip by pointing the plane at the new framebuffer via
 * the helper's mode_set_base. When an event is requested, it is armed
 * under dev->event_lock before the flip so the vblank IRQ handler can
 * complete it; on failure the event is disarmed and the vblank
 * reference dropped. The previous fb is restored if the flip fails.
 */
int gma_crtc_page_flip(struct drm_crtc *crtc,
		       struct drm_framebuffer *fb,
		       struct drm_pending_vblank_event *event,
		       uint32_t page_flip_flags,
		       struct drm_modeset_acquire_ctx *ctx)
{
	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
	struct drm_framebuffer *current_fb = crtc->primary->fb;
	struct drm_framebuffer *old_fb = crtc->primary->old_fb;
	const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
	struct drm_device *dev = crtc->dev;
	unsigned long flags;
	int ret;

	if (!crtc_funcs->mode_set_base)
		return -EINVAL;

	/* Using mode_set_base requires the new fb to be set already. */
	crtc->primary->fb = fb;

	if (event) {
		spin_lock_irqsave(&dev->event_lock, flags);

		WARN_ON(drm_crtc_vblank_get(crtc) != 0);

		gma_crtc->page_flip_event = event;
		spin_unlock_irqrestore(&dev->event_lock, flags);

		/* Call this locked if we want an event at vblank interrupt. */
		ret = crtc_funcs->mode_set_base(crtc, crtc->x, crtc->y, old_fb);
		if (ret) {
			/* Disarm the event the IRQ handler would have fired */
			spin_lock_irqsave(&dev->event_lock, flags);
			if (gma_crtc->page_flip_event) {
				gma_crtc->page_flip_event = NULL;
				drm_crtc_vblank_put(crtc);
			}
			spin_unlock_irqrestore(&dev->event_lock, flags);
		}
	} else {
		ret = crtc_funcs->mode_set_base(crtc, crtc->x, crtc->y, old_fb);
	}

	/* Restore previous fb in case of failure. */
	if (ret)
		crtc->primary->fb = current_fb;

	return ret;
}
/* CRTC vtable shared by all gma500 chip variants */
const struct drm_crtc_funcs gma_crtc_funcs = {
	.cursor_set = gma_crtc_cursor_set,
	.cursor_move = gma_crtc_cursor_move,
	.gamma_set = gma_crtc_gamma_set,
	.set_config = drm_crtc_helper_set_config,
	.destroy = gma_crtc_destroy,
	.page_flip = gma_crtc_page_flip,
	.enable_vblank = gma_crtc_enable_vblank,
	.disable_vblank = gma_crtc_disable_vblank,
	.get_vblank_counter = gma_crtc_get_vblank_counter,
};
/*
 * Save HW states of given crtc
 */
/*
 * Snapshot all pipe/plane/PLL registers and the 256-entry palette into
 * gma_crtc->crtc_state so gma_crtc_restore() can reprogram them after a
 * power transition.
 */
void gma_crtc_save(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
	struct psb_intel_crtc_state *crtc_state = gma_crtc->crtc_state;
	const struct psb_offset *map = &dev_priv->regmap[gma_crtc->pipe];
	uint32_t palette_reg;
	int i;

	if (!crtc_state) {
		dev_err(dev->dev, "No CRTC state found\n");
		return;
	}

	crtc_state->saveDSPCNTR = REG_READ(map->cntr);
	crtc_state->savePIPECONF = REG_READ(map->conf);
	crtc_state->savePIPESRC = REG_READ(map->src);
	crtc_state->saveFP0 = REG_READ(map->fp0);
	crtc_state->saveFP1 = REG_READ(map->fp1);
	crtc_state->saveDPLL = REG_READ(map->dpll);
	crtc_state->saveHTOTAL = REG_READ(map->htotal);
	crtc_state->saveHBLANK = REG_READ(map->hblank);
	crtc_state->saveHSYNC = REG_READ(map->hsync);
	crtc_state->saveVTOTAL = REG_READ(map->vtotal);
	crtc_state->saveVBLANK = REG_READ(map->vblank);
	crtc_state->saveVSYNC = REG_READ(map->vsync);
	crtc_state->saveDSPSTRIDE = REG_READ(map->stride);

	/* NOTE: DSPSIZE DSPPOS only for psb */
	crtc_state->saveDSPSIZE = REG_READ(map->size);
	crtc_state->saveDSPPOS = REG_READ(map->pos);

	crtc_state->saveDSPBASE = REG_READ(map->base);

	palette_reg = map->palette;
	for (i = 0; i < 256; ++i)
		crtc_state->savePalette[i] = REG_READ(palette_reg + (i << 2));
}
/*
 * Restore HW states of the given crtc from the state saved by
 * gma_crtc_save().  The DPLL is first disabled, the dividers are
 * written, then the DPLL is re-enabled with settle delays before the
 * timing and plane registers are re-programmed.
 */
void gma_crtc_restore(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
	struct psb_intel_crtc_state *crtc_state = gma_crtc->crtc_state;
	const struct psb_offset *map = &dev_priv->regmap[gma_crtc->pipe];
	uint32_t palette_reg;
	int i;

	if (!crtc_state) {
		dev_err(dev->dev, "No crtc state\n");
		return;
	}

	/* Turn the VCO off before touching the dividers. */
	if (crtc_state->saveDPLL & DPLL_VCO_ENABLE) {
		REG_WRITE(map->dpll,
			crtc_state->saveDPLL & ~DPLL_VCO_ENABLE);
		REG_READ(map->dpll);	/* posting read */
		udelay(150);
	}

	REG_WRITE(map->fp0, crtc_state->saveFP0);
	REG_READ(map->fp0);

	REG_WRITE(map->fp1, crtc_state->saveFP1);
	REG_READ(map->fp1);

	/* Re-enable the PLL and let it settle. */
	REG_WRITE(map->dpll, crtc_state->saveDPLL);
	REG_READ(map->dpll);
	udelay(150);

	REG_WRITE(map->htotal, crtc_state->saveHTOTAL);
	REG_WRITE(map->hblank, crtc_state->saveHBLANK);
	REG_WRITE(map->hsync, crtc_state->saveHSYNC);
	REG_WRITE(map->vtotal, crtc_state->saveVTOTAL);
	REG_WRITE(map->vblank, crtc_state->saveVBLANK);
	REG_WRITE(map->vsync, crtc_state->saveVSYNC);
	REG_WRITE(map->stride, crtc_state->saveDSPSTRIDE);

	REG_WRITE(map->size, crtc_state->saveDSPSIZE);
	REG_WRITE(map->pos, crtc_state->saveDSPPOS);

	REG_WRITE(map->src, crtc_state->savePIPESRC);
	REG_WRITE(map->base, crtc_state->saveDSPBASE);
	REG_WRITE(map->conf, crtc_state->savePIPECONF);

	gma_wait_for_vblank(dev);

	/* Plane control last, with the base rewritten to latch it. */
	REG_WRITE(map->cntr, crtc_state->saveDSPCNTR);
	REG_WRITE(map->base, crtc_state->saveDSPBASE);

	gma_wait_for_vblank(dev);

	palette_reg = map->palette;
	for (i = 0; i < 256; ++i)
		REG_WRITE(palette_reg + (i << 2), crtc_state->savePalette[i]);
}
/*
 * Blank the output ahead of a mode set by switching the encoder off.
 * LVDS has its own version of prepare, see psb_intel_lvds_prepare.
 */
void gma_encoder_prepare(struct drm_encoder *encoder)
{
	const struct drm_encoder_helper_funcs *funcs = encoder->helper_private;

	funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
}
/*
 * Re-enable the output after a mode set completed.
 * LVDS has its own version of commit, see psb_intel_lvds_commit.
 */
void gma_encoder_commit(struct drm_encoder *encoder)
{
	const struct drm_encoder_helper_funcs *funcs = encoder->helper_private;

	funcs->dpms(encoder, DRM_MODE_DPMS_ON);
}
/* Unregister the encoder and release the wrapper allocated at init time. */
void gma_encoder_destroy(struct drm_encoder *encoder)
{
	struct gma_encoder *enc = to_gma_encoder(encoder);

	drm_encoder_cleanup(encoder);
	kfree(enc);
}
/* Currently there is only a 1:1 mapping of encoders and connectors */
struct drm_encoder *gma_best_encoder(struct drm_connector *connector)
{
struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
return &gma_encoder->base;
}
/* Record the 1:1 connector/encoder pairing and publish it to the DRM core. */
void gma_connector_attach_encoder(struct gma_connector *connector,
				  struct gma_encoder *encoder)
{
	connector->encoder = encoder;
	drm_connector_attach_encoder(&connector->base, &encoder->base);
}
/*
 * Reject a PLL candidate from gma_pll_is_valid().  The error string is
 * normally discarded; re-enable the DRM_ERROR for ad-hoc debugging.
 */
#define GMA_PLL_INVALID(s) { /* DRM_ERROR(s); */ return false; }
/*
 * Validate a computed PLL configuration against the per-platform
 * @limit table.  Returns true when every divider and the resulting
 * VCO/dot clock fall within range; any failing check returns false
 * via GMA_PLL_INVALID.
 */
bool gma_pll_is_valid(struct drm_crtc *crtc,
		      const struct gma_limit_t *limit,
		      struct gma_clock_t *clock)
{
	if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
		GMA_PLL_INVALID("p1 out of range");
	if (clock->p < limit->p.min || limit->p.max < clock->p)
		GMA_PLL_INVALID("p out of range");
	if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
		GMA_PLL_INVALID("m2 out of range");
	if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
		GMA_PLL_INVALID("m1 out of range");
	/* On CDV m1 is always 0 */
	if (clock->m1 <= clock->m2 && clock->m1 != 0)
		GMA_PLL_INVALID("m1 <= m2 && m1 != 0");
	if (clock->m < limit->m.min || limit->m.max < clock->m)
		GMA_PLL_INVALID("m out of range");
	if (clock->n < limit->n.min || limit->n.max < clock->n)
		GMA_PLL_INVALID("n out of range");
	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
		GMA_PLL_INVALID("vco out of range");
	/* XXX: We may need to be checking "Dot clock"
	 * depending on the multiplier, connector, etc.,
	 * rather than just a single range.
	 */
	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
		GMA_PLL_INVALID("dot out of range");

	return true;
}
/*
 * Exhaustively search the divider space in @limit for the PLL setting
 * whose dot clock is closest to @target (kHz), given @refclk.  The p2
 * post divider is fixed up-front: for an already-powered LVDS panel it
 * follows the panel's current single/dual-channel state, otherwise it
 * is chosen from the dot-clock threshold.
 *
 * The best candidate (if any) is written to *@best_clock.  Returns
 * true when at least one valid configuration improved on the initial
 * error of @target, false when nothing valid was found.
 */
bool gma_find_best_pll(const struct gma_limit_t *limit,
		       struct drm_crtc *crtc, int target, int refclk,
		       struct gma_clock_t *best_clock)
{
	struct drm_device *dev = crtc->dev;
	const struct gma_clock_funcs *clock_funcs =
						to_gma_crtc(crtc)->clock_funcs;
	struct gma_clock_t clock;
	int err = target;

	if (gma_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
	    (REG_READ(LVDS) & LVDS_PORT_EN) != 0) {
		/*
		 * For LVDS, if the panel is on, just rely on its current
		 * settings for dual-channel.  We haven't figured out how to
		 * reliably set up different single/dual channel state, if we
		 * even can.
		 */
		if ((REG_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
		    LVDS_CLKB_POWER_UP)
			clock.p2 = limit->p2.p2_fast;
		else
			clock.p2 = limit->p2.p2_slow;
	} else {
		if (target < limit->p2.dot_limit)
			clock.p2 = limit->p2.p2_slow;
		else
			clock.p2 = limit->p2.p2_fast;
	}

	memset(best_clock, 0, sizeof(*best_clock));

	/* m1 is always 0 on CDV so the outmost loop will run just once */
	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     (clock.m2 < clock.m1 || clock.m1 == 0) &&
		      clock.m2 <= limit->m2.max; clock.m2++) {
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
				     clock.p1 <= limit->p1.max;
				     clock.p1++) {
					int this_err;

					clock_funcs->clock(refclk, &clock);

					if (!clock_funcs->pll_is_valid(crtc,
								limit, &clock))
						continue;

					/* Keep the candidate with the smallest error. */
					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	return err != target;
}
| linux-master | drivers/gpu/drm/gma500/gma_display.c |
/*
* Copyright © 2006-2011 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors:
* jim liu <[email protected]>
*/
#include <linux/pm_runtime.h>
#include <drm/drm.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_simple_kms_helper.h>
#include "cdv_device.h"
#include "psb_drv.h"
#include "psb_intel_drv.h"
#include "psb_intel_reg.h"
/* hdmi control bits */
#define HDMI_NULL_PACKETS_DURING_VSYNC (1 << 9)
#define HDMI_BORDER_ENABLE (1 << 7)
#define HDMI_AUDIO_ENABLE (1 << 6)
#define HDMI_VSYNC_ACTIVE_HIGH (1 << 4)
#define HDMI_HSYNC_ACTIVE_HIGH (1 << 3)
/* hdmi-b control bits */
#define HDMIB_PIPE_B_SELECT (1 << 30)
/* Per-encoder private state for the CDV HDMI port. */
struct mid_intel_hdmi_priv {
	u32 hdmi_reg;		/* MMIO offset of the HDMI port control register */
	u32 save_HDMIB;		/* saved port register for suspend/resume */
	bool has_hdmi_sink;	/* EDID says the sink speaks HDMI (not just DVI) */
	bool has_hdmi_audio;	/* EDID advertises audio support */
	/* Should set this when detect hotplug */
	bool hdmi_device_connected;
	struct drm_device *dev;
};
/*
 * Program the HDMI port control register for the adjusted mode:
 * sync polarities, pipe selection and audio/null-packet enables.
 */
static void cdv_hdmi_mode_set(struct drm_encoder *encoder,
			struct drm_display_mode *mode,
			struct drm_display_mode *adjusted_mode)
{
	struct drm_device *dev = encoder->dev;
	struct gma_encoder *gma_encoder = to_gma_encoder(encoder);
	struct mid_intel_hdmi_priv *hdmi_priv = gma_encoder->dev_priv;
	struct gma_crtc *gma_crtc = to_gma_crtc(encoder->crtc);
	u32 port_ctl = 2 << 10;	/* base value kept from original programming */

	if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
		port_ctl |= HDMI_VSYNC_ACTIVE_HIGH;
	if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
		port_ctl |= HDMI_HSYNC_ACTIVE_HIGH;
	if (gma_crtc->pipe == 1)
		port_ctl |= HDMIB_PIPE_B_SELECT;
	if (hdmi_priv->has_hdmi_audio)
		port_ctl |= HDMI_AUDIO_ENABLE | HDMI_NULL_PACKETS_DURING_VSYNC;

	REG_WRITE(hdmi_priv->hdmi_reg, port_ctl);
	REG_READ(hdmi_priv->hdmi_reg);	/* posting read */
}
/* Gate the HDMI port on/off via its port-enable bit for DPMS. */
static void cdv_hdmi_dpms(struct drm_encoder *encoder, int mode)
{
	struct drm_device *dev = encoder->dev;
	struct gma_encoder *gma_encoder = to_gma_encoder(encoder);
	struct mid_intel_hdmi_priv *hdmi_priv = gma_encoder->dev_priv;
	u32 val = REG_READ(hdmi_priv->hdmi_reg);

	if (mode == DRM_MODE_DPMS_ON)
		val |= HDMIB_PORT_EN;
	else
		val &= ~HDMIB_PORT_EN;

	REG_WRITE(hdmi_priv->hdmi_reg, val);
	REG_READ(hdmi_priv->hdmi_reg);	/* posting read */
}
/* Stash the HDMI port register so cdv_hdmi_restore() can re-program it. */
static void cdv_hdmi_save(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct gma_encoder *enc = gma_attached_encoder(connector);
	struct mid_intel_hdmi_priv *priv = enc->dev_priv;

	priv->save_HDMIB = REG_READ(priv->hdmi_reg);
}
/* Write back the HDMI port register saved by cdv_hdmi_save(). */
static void cdv_hdmi_restore(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct gma_encoder *enc = gma_attached_encoder(connector);
	struct mid_intel_hdmi_priv *priv = enc->dev_priv;

	REG_WRITE(priv->hdmi_reg, priv->save_HDMIB);
	REG_READ(priv->hdmi_reg);	/* posting read */
}
/*
 * Probe the DDC bus for an EDID.  A digital EDID means a sink is
 * connected; the HDMI/audio capability flags are refreshed on every
 * call (and cleared first, so a failed probe leaves them false).
 */
static enum drm_connector_status cdv_hdmi_detect(
				struct drm_connector *connector, bool force)
{
	struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
	struct mid_intel_hdmi_priv *hdmi_priv = gma_encoder->dev_priv;
	enum drm_connector_status status = connector_status_disconnected;
	struct edid *edid;

	hdmi_priv->has_hdmi_sink = false;
	hdmi_priv->has_hdmi_audio = false;

	edid = drm_get_edid(connector, connector->ddc);
	if (!edid)
		return status;

	if (edid->input & DRM_EDID_INPUT_DIGITAL) {
		status = connector_status_connected;
		hdmi_priv->has_hdmi_sink = drm_detect_hdmi_monitor(edid);
		hdmi_priv->has_hdmi_audio = drm_detect_monitor_audio(edid);
	}

	kfree(edid);
	return status;
}
/*
 * Handle the "scaling mode" connector property.  When the scaling
 * mode changes, the saved mode is re-applied: switching to or from
 * centered (NO_SCALE) needs a full mode set, otherwise re-running the
 * encoder's mode_set hook suffices.  Returns 0 on success, -1 on any
 * failure (legacy convention in this driver, not a negative errno).
 */
static int cdv_hdmi_set_property(struct drm_connector *connector,
				       struct drm_property *property,
				       uint64_t value)
{
	struct drm_encoder *encoder = connector->encoder;

	if (!strcmp(property->name, "scaling mode") && encoder) {
		struct gma_crtc *crtc = to_gma_crtc(encoder->crtc);
		bool centre;
		uint64_t curValue;

		if (!crtc)
			return -1;

		switch (value) {
		case DRM_MODE_SCALE_FULLSCREEN:
			break;
		case DRM_MODE_SCALE_NO_SCALE:
			break;
		case DRM_MODE_SCALE_ASPECT:
			break;
		default:
			return -1;
		}

		if (drm_object_property_get_value(&connector->base,
							property, &curValue))
			return -1;

		if (curValue == value)
			return 0;

		if (drm_object_property_set_value(&connector->base,
							property, value))
			return -1;

		/* A transition to/from NO_SCALE needs a full mode set. */
		centre = (curValue == DRM_MODE_SCALE_NO_SCALE) ||
			(value == DRM_MODE_SCALE_NO_SCALE);

		if (crtc->saved_mode.hdisplay != 0 &&
		    crtc->saved_mode.vdisplay != 0) {
			if (centre) {
				if (!drm_crtc_helper_set_mode(encoder->crtc, &crtc->saved_mode,
					    encoder->crtc->x, encoder->crtc->y, encoder->crtc->primary->fb))
					return -1;
			} else {
				const struct drm_encoder_helper_funcs *helpers
						    = encoder->helper_private;
				helpers->mode_set(encoder, &crtc->saved_mode,
					     &crtc->saved_adjusted_mode);
			}
		}
	}
	return 0;
}
/*
 * Return the list of HDMI DDC modes if available.
 * Fetches the EDID, attaches it to the connector and returns the
 * number of modes added (0 when no EDID could be read).
 */
static int cdv_hdmi_get_modes(struct drm_connector *connector)
{
	struct edid *edid = drm_get_edid(connector, connector->ddc);
	int num_modes;

	if (!edid)
		return 0;

	drm_connector_update_edid_property(connector, edid);
	num_modes = drm_add_edid_modes(connector, edid);
	kfree(edid);

	return num_modes;
}
static enum drm_mode_status cdv_hdmi_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
if (mode->clock > 165000)
return MODE_CLOCK_HIGH;
if (mode->clock < 20000)
return MODE_CLOCK_HIGH;
/* just in case */
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
return MODE_NO_DBLESCAN;
/* just in case */
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
return MODE_NO_INTERLACE;
return MODE_OK;
}
/* Tear down the connector: DDC bus first, then the DRM object and wrapper. */
static void cdv_hdmi_destroy(struct drm_connector *connector)
{
	struct gma_connector *gma_connector = to_gma_connector(connector);

	gma_i2c_destroy(to_gma_i2c_chan(connector->ddc));
	drm_connector_cleanup(connector);
	kfree(gma_connector);
}
/* Encoder helpers: DPMS-based blank/unblank around cdv_hdmi_mode_set(). */
static const struct drm_encoder_helper_funcs cdv_hdmi_helper_funcs = {
	.dpms = cdv_hdmi_dpms,
	.prepare = gma_encoder_prepare,
	.mode_set = cdv_hdmi_mode_set,
	.commit = gma_encoder_commit,
};

/* Connector helpers: EDID-driven mode list and validation. */
static const struct drm_connector_helper_funcs
					cdv_hdmi_connector_helper_funcs = {
	.get_modes = cdv_hdmi_get_modes,
	.mode_valid = cdv_hdmi_mode_valid,
	.best_encoder = gma_best_encoder,
};

/* Connector vtable, including save/restore via the gma_connector hooks. */
static const struct drm_connector_funcs cdv_hdmi_connector_funcs = {
	.dpms = drm_helper_connector_dpms,
	.detect = cdv_hdmi_detect,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.set_property = cdv_hdmi_set_property,
	.destroy = cdv_hdmi_destroy,
};
/*
 * Register an HDMI encoder/connector pair for the port at register
 * offset @reg (SDVOB or SDVOC).  Allocates the encoder, connector and
 * private state, creates the matching DDC bus, and wires everything
 * into the DRM core.  All failures unwind silently (void return).
 */
void cdv_hdmi_init(struct drm_device *dev,
		   struct psb_intel_mode_device *mode_dev, int reg)
{
	struct gma_encoder *gma_encoder;
	struct gma_connector *gma_connector;
	struct drm_connector *connector;
	struct mid_intel_hdmi_priv *hdmi_priv;
	struct gma_i2c_chan *ddc_bus;
	int ddc_reg;
	int ret;

	gma_encoder = kzalloc(sizeof(struct gma_encoder), GFP_KERNEL);
	if (!gma_encoder)
		return;

	gma_connector = kzalloc(sizeof(struct gma_connector), GFP_KERNEL);
	if (!gma_connector)
		goto err_free_encoder;

	hdmi_priv = kzalloc(sizeof(struct mid_intel_hdmi_priv), GFP_KERNEL);
	if (!hdmi_priv)
		goto err_free_connector;

	connector = &gma_connector->base;
	connector->polled = DRM_CONNECTOR_POLL_HPD;
	gma_connector->save = cdv_hdmi_save;
	gma_connector->restore = cdv_hdmi_restore;

	/* Each port has its own GPIO-based DDC bus and DDI lane select. */
	switch (reg) {
	case SDVOB:
		ddc_reg = GPIOE;
		gma_encoder->ddi_select = DDI0_SELECT;
		break;
	case SDVOC:
		ddc_reg = GPIOD;
		gma_encoder->ddi_select = DDI1_SELECT;
		break;
	default:
		DRM_ERROR("unknown reg 0x%x for HDMI\n", reg);
		goto err_free_hdmi_priv;
	}

	ddc_bus = gma_i2c_create(dev, ddc_reg,
				 (reg == SDVOB) ? "HDMIB" : "HDMIC");
	if (!ddc_bus) {
		dev_err(dev->dev, "No ddc adapter available!\n");
		goto err_free_hdmi_priv;
	}

	ret = drm_connector_init_with_ddc(dev, connector,
					  &cdv_hdmi_connector_funcs,
					  DRM_MODE_CONNECTOR_DVID,
					  &ddc_bus->base);
	if (ret)
		goto err_ddc_destroy;

	ret = drm_simple_encoder_init(dev, &gma_encoder->base,
				      DRM_MODE_ENCODER_TMDS);
	if (ret)
		goto err_connector_cleanup;

	gma_connector_attach_encoder(gma_connector, gma_encoder);
	gma_encoder->type = INTEL_OUTPUT_HDMI;
	hdmi_priv->hdmi_reg = reg;
	hdmi_priv->has_hdmi_sink = false;
	gma_encoder->dev_priv = hdmi_priv;

	drm_encoder_helper_add(&gma_encoder->base, &cdv_hdmi_helper_funcs);
	drm_connector_helper_add(connector,
				 &cdv_hdmi_connector_helper_funcs);
	connector->display_info.subpixel_order = SubPixelHorizontalRGB;
	connector->interlace_allowed = false;
	connector->doublescan_allowed = false;

	drm_object_attach_property(&connector->base,
				      dev->mode_config.scaling_mode_property,
				      DRM_MODE_SCALE_FULLSCREEN);

	hdmi_priv->dev = dev;
	return;

err_connector_cleanup:
	drm_connector_cleanup(connector);
err_ddc_destroy:
	gma_i2c_destroy(ddc_bus);
err_free_hdmi_priv:
	kfree(hdmi_priv);
err_free_connector:
	kfree(gma_connector);
err_free_encoder:
	kfree(gma_encoder);
}
| linux-master | drivers/gpu/drm/gma500/cdv_intel_hdmi.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2007, Intel Corporation.
* All Rights Reserved.
*
* Authors: Thomas Hellstrom <thomas-at-tungstengraphics.com>
* Alan Cox <[email protected]>
*/
#include "gem.h" /* TODO: for struct psb_gem_object, see psb_gtt_restore() */
#include "psb_drv.h"
/*
* GTT resource allocator - manage page mappings in GTT space
*/
/*
 * Carve a range for @res out of the GTT address space.  Stolen-memory
 * allocations come from the start of the aperture (backed by stolen
 * pages); everything else comes from the remainder (backed by system
 * pages).  On success, *@offset is set to the allocation's offset from
 * the start of the aperture.  Returns 0 or a negative errno.
 */
int psb_gtt_allocate_resource(struct drm_psb_private *pdev, struct resource *res,
			      const char *name, resource_size_t size, resource_size_t align,
			      bool stolen, u32 *offset)
{
	struct resource *root = pdev->gtt_mem;
	resource_size_t stolen_end = root->start + pdev->gtt.stolen_size;
	resource_size_t start = stolen ? root->start : stolen_end;
	resource_size_t end = stolen ? stolen_end - 1 : root->end;
	int ret;

	res->name = name;

	ret = allocate_resource(root, res, size, start, end, align, NULL, NULL);
	if (ret)
		return ret;

	*offset = res->start - root->start;
	return 0;
}
/**
 * psb_gtt_mask_pte - generate GTT pte entry
 * @pfn: page number to encode
 * @type: type of memory in the GTT
 *
 * Builds a 32-bit GTT entry for @pfn with the valid bit set plus the
 * cache/read-only/write-only attribute bits selected by @type.
 */
uint32_t psb_gtt_mask_pte(uint32_t pfn, int type)
{
	uint32_t pte = PSB_PTE_VALID;

	/* Ensure we explode rather than put an invalid low mapping of
	   a high mapping page into the gtt */
	BUG_ON(pfn & ~(0xFFFFFFFF >> PAGE_SHIFT));

	if (type & PSB_MMU_CACHED_MEMORY)
		pte |= PSB_PTE_CACHED;
	if (type & PSB_MMU_RO_MEMORY)
		pte |= PSB_PTE_RO;
	if (type & PSB_MMU_WO_MEMORY)
		pte |= PSB_PTE_WO;

	return (pfn << PAGE_SHIFT) | pte;
}
/* Return the iomapped address of the first GTT slot covering @res. */
static u32 __iomem *psb_gtt_entry(struct drm_psb_private *pdev, const struct resource *res)
{
	unsigned long slot = (res->start - pdev->gtt_mem->start) >> PAGE_SHIFT;

	return pdev->gtt_map + slot;
}
/*
 * Map @pages into the GTT range described by @res (one PTE per page,
 * cached memory type).  Acquires the GTT mutex internally; the final
 * readback flushes the posted writes before returning.
 */
void psb_gtt_insert_pages(struct drm_psb_private *pdev, const struct resource *res,
			  struct page **pages)
{
	resource_size_t npages, i;
	u32 __iomem *gtt_slot;
	u32 pte;

	mutex_lock(&pdev->gtt_mutex);

	/* Write our page entries into the GTT itself */

	npages = resource_size(res) >> PAGE_SHIFT;
	gtt_slot = psb_gtt_entry(pdev, res);

	for (i = 0; i < npages; ++i, ++gtt_slot) {
		pte = psb_gtt_mask_pte(page_to_pfn(pages[i]), PSB_MMU_CACHED_MEMORY);
		iowrite32(pte, gtt_slot);
	}

	/* Make sure all the entries are set before we return */
	ioread32(gtt_slot - 1);

	mutex_unlock(&pdev->gtt_mutex);
}
/*
 * Unmap the GTT range described by @res by pointing every slot at the
 * global scratch page (never left unmapped, to avoid stray accesses).
 * Acquires the GTT mutex internally.
 */
void psb_gtt_remove_pages(struct drm_psb_private *pdev, const struct resource *res)
{
	resource_size_t npages, i;
	u32 __iomem *gtt_slot;
	u32 pte;

	mutex_lock(&pdev->gtt_mutex);

	/* Install scratch page for the resource */

	pte = psb_gtt_mask_pte(page_to_pfn(pdev->scratch_page), PSB_MMU_CACHED_MEMORY);

	npages = resource_size(res) >> PAGE_SHIFT;
	gtt_slot = psb_gtt_entry(pdev, res);

	for (i = 0; i < npages; ++i, ++gtt_slot)
		iowrite32(pte, gtt_slot);

	/* Make sure all the entries are set before we return */
	ioread32(gtt_slot - 1);

	mutex_unlock(&pdev->gtt_mutex);
}
/*
 * Enable the GTT: save the GMCH control word, then set its enable bit
 * and the page-table enable bit in PGETBL_CTL.  The saved values are
 * written back by psb_gtt_disable().  Returns 0 or a negative errno
 * translated from the PCI config accesses.
 */
static int psb_gtt_enable(struct drm_psb_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->dev;
	struct pci_dev *pdev = to_pci_dev(dev->dev);
	int ret;

	ret = pci_read_config_word(pdev, PSB_GMCH_CTRL, &dev_priv->gmch_ctrl);
	if (ret)
		return pcibios_err_to_errno(ret);

	ret = pci_write_config_word(pdev, PSB_GMCH_CTRL, dev_priv->gmch_ctrl | _PSB_GMCH_ENABLED);
	if (ret)
		return pcibios_err_to_errno(ret);

	dev_priv->pge_ctl = PSB_RVDC32(PSB_PGETBL_CTL);
	PSB_WVDC32(dev_priv->pge_ctl | _PSB_PGETBL_ENABLED, PSB_PGETBL_CTL);
	(void)PSB_RVDC32(PSB_PGETBL_CTL);	/* posting read */

	return 0;
}
/* Restore the GMCH control and PGETBL_CTL values saved by psb_gtt_enable(). */
static void psb_gtt_disable(struct drm_psb_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->dev;
	struct pci_dev *pdev = to_pci_dev(dev->dev);

	pci_write_config_word(pdev, PSB_GMCH_CTRL, dev_priv->gmch_ctrl);

	PSB_WVDC32(dev_priv->pge_ctl, PSB_PGETBL_CTL);
	(void)PSB_RVDC32(PSB_PGETBL_CTL);	/* posting read */
}
/* Undo psb_gtt_init(): unmap the GTT, disable it, destroy the mutex. */
void psb_gtt_fini(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);

	iounmap(dev_priv->gtt_map);
	psb_gtt_disable(dev_priv);
	mutex_destroy(&dev_priv->gtt_mutex);
}
/* Clear GTT. Use a scratch page to avoid accidents or scribbles. */
static void psb_gtt_clear(struct drm_psb_private *pdev)
{
	resource_size_t pfn_base;
	unsigned long i;
	uint32_t pte;

	pfn_base = page_to_pfn(pdev->scratch_page);
	pte = psb_gtt_mask_pte(pfn_base, PSB_MMU_CACHED_MEMORY);

	for (i = 0; i < pdev->gtt.gtt_pages; ++i)
		iowrite32(pte, pdev->gtt_map + i);

	/* Read back the last written slot to flush the posted writes. */
	(void)ioread32(pdev->gtt_map + i - 1);
}
/*
 * Discover the GTT and GATT address ranges from the PCI BARs and the
 * page-table control register, with fallbacks for CDV systems that do
 * not report one or both BARs, and store the results in dev_priv.
 */
static void psb_gtt_init_ranges(struct drm_psb_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->dev;
	struct pci_dev *pdev = to_pci_dev(dev->dev);
	struct psb_gtt *pg = &dev_priv->gtt;
	resource_size_t gtt_phys_start, mmu_gatt_start, gtt_start, gtt_pages,
			gatt_start, gatt_pages;
	struct resource *gtt_mem;

	/* The root resource we allocate address space from */
	gtt_phys_start = dev_priv->pge_ctl & PAGE_MASK;

	/*
	 * The video MMU has a HW bug when accessing 0x0d0000000. Make
	 * GATT start at 0x0e0000000. This doesn't actually matter for
	 * us now, but maybe will if the video acceleration ever gets
	 * opened up.
	 */
	mmu_gatt_start = 0xe0000000;

	gtt_start = pci_resource_start(pdev, PSB_GTT_RESOURCE);
	gtt_pages = pci_resource_len(pdev, PSB_GTT_RESOURCE) >> PAGE_SHIFT;

	/* CDV doesn't report this. In which case the system has 64 gtt pages */
	if (!gtt_start || !gtt_pages) {
		dev_dbg(dev->dev, "GTT PCI BAR not initialized.\n");
		gtt_pages = 64;
		gtt_start = dev_priv->pge_ctl;
	}

	gatt_start = pci_resource_start(pdev, PSB_GATT_RESOURCE);
	gatt_pages = pci_resource_len(pdev, PSB_GATT_RESOURCE) >> PAGE_SHIFT;

	if (!gatt_pages || !gatt_start) {
		static struct resource fudge;	/* Preferably peppermint */

		/*
		 * This can occur on CDV systems. Fudge it in this case. We
		 * really don't care what imaginary space is being allocated
		 * at this point.
		 */
		dev_dbg(dev->dev, "GATT PCI BAR not initialized.\n");
		gatt_start = 0x40000000;
		gatt_pages = (128 * 1024 * 1024) >> PAGE_SHIFT;

		/*
		 * This is a little confusing but in fact the GTT is providing
		 * a view from the GPU into memory and not vice versa. As such
		 * this is really allocating space that is not the same as the
		 * CPU address space on CDV.
		 */
		fudge.start = 0x40000000;
		fudge.end = 0x40000000 + 128 * 1024 * 1024 - 1;
		fudge.name = "fudge";
		fudge.flags = IORESOURCE_MEM;

		gtt_mem = &fudge;
	} else {
		gtt_mem = &pdev->resource[PSB_GATT_RESOURCE];
	}

	pg->gtt_phys_start = gtt_phys_start;
	pg->mmu_gatt_start = mmu_gatt_start;
	pg->gtt_start = gtt_start;
	pg->gtt_pages = gtt_pages;
	pg->gatt_start = gatt_start;
	pg->gatt_pages = gatt_pages;
	dev_priv->gtt_mem = gtt_mem;
}
/*
 * One-time GTT setup: enable the hardware, discover the address
 * ranges, iomap the page table and point every entry at the scratch
 * page.  Returns 0 or a negative errno; on failure everything already
 * done is unwound.
 */
int psb_gtt_init(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct psb_gtt *pg = &dev_priv->gtt;
	int ret;

	mutex_init(&dev_priv->gtt_mutex);

	ret = psb_gtt_enable(dev_priv);
	if (ret)
		goto err_mutex_destroy;

	psb_gtt_init_ranges(dev_priv);

	dev_priv->gtt_map = ioremap(pg->gtt_phys_start, pg->gtt_pages << PAGE_SHIFT);
	if (!dev_priv->gtt_map) {
		dev_err(dev->dev, "Failure to map gtt.\n");
		ret = -ENOMEM;
		goto err_psb_gtt_disable;
	}

	psb_gtt_clear(dev_priv);

	return 0;

err_psb_gtt_disable:
	psb_gtt_disable(dev_priv);
err_mutex_destroy:
	mutex_destroy(&dev_priv->gtt_mutex);
	return ret;
}
/*
 * Re-enable and reinitialize the GTT after system resume.  Fails with
 * -ENODEV (and disables the GTT again) if the aperture size reported
 * by the hardware changed across suspend.
 */
int psb_gtt_resume(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct psb_gtt *pg = &dev_priv->gtt;
	unsigned int old_gtt_pages = pg->gtt_pages;
	int ret;

	/* Enable the GTT */
	ret = psb_gtt_enable(dev_priv);
	if (ret)
		return ret;

	psb_gtt_init_ranges(dev_priv);

	if (old_gtt_pages != pg->gtt_pages) {
		dev_err(dev->dev, "GTT resume error.\n");
		ret = -ENODEV;
		goto err_psb_gtt_disable;
	}

	psb_gtt_clear(dev_priv);

	/*
	 * Fix: return here on success.  Previously control fell through
	 * to the error label and disabled the GTT that was just restored.
	 */
	return 0;

err_psb_gtt_disable:
	psb_gtt_disable(dev_priv);
	return ret;
}
| linux-master | drivers/gpu/drm/gma500/gtt.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2007 Intel Corporation
*
* Authers: Jesse Barnes <[email protected]>
*/
#include <linux/i2c.h>
#include <drm/drm_edid.h>
#include "psb_intel_drv.h"
/**
 * psb_intel_ddc_probe
 * @adapter: Associated I2C adaptor
 *
 * Returns true if a DDC slave answers at address 0x50 (one dummy
 * offset write followed by a single-byte read), i.e. a monitor
 * appears to be attached to the bus.
 */
bool psb_intel_ddc_probe(struct i2c_adapter *adapter)
{
	u8 addr_buf[] = { 0x0, 0x0 };
	u8 data;
	struct i2c_msg msgs[] = {
		{
			.addr = 0x50,
			.flags = 0,
			.len = 1,
			.buf = addr_buf,
		},
		{
			.addr = 0x50,
			.flags = I2C_M_RD,
			.len = 1,
			.buf = &data,
		}
	};

	return i2c_transfer(adapter, msgs, 2) == 2;
}
/**
 * psb_intel_ddc_get_modes - get modelist from monitor
 * @connector: DRM connector device to use
 * @adapter: Associated I2C adaptor
 *
 * Fetch the EDID information from @connector using the DDC bus,
 * attach it to the connector and add the advertised modes.  Returns
 * the number of modes added (0 when no EDID could be read).
 */
int psb_intel_ddc_get_modes(struct drm_connector *connector,
			    struct i2c_adapter *adapter)
{
	struct edid *edid = drm_get_edid(connector, adapter);
	int num_modes;

	if (!edid)
		return 0;

	drm_connector_update_edid_property(connector, edid);
	num_modes = drm_add_edid_modes(connector, edid);
	kfree(edid);

	return num_modes;
}
| linux-master | drivers/gpu/drm/gma500/psb_intel_modes.c |
// SPDX-License-Identifier: GPL-2.0-only
/**************************************************************************
* Copyright (c) 2007, Intel Corporation.
*
**************************************************************************/
#include <linux/highmem.h>
#include "mmu.h"
#include "psb_drv.h"
#include "psb_reg.h"
/*
* Code for the SGX MMU:
*/
/*
* clflush on one processor only:
* clflush should apparently flush the cache line on all processors in an
* SMP system.
*/
/*
* kmap atomic:
* The usage of the slots must be completely encapsulated within a spinlock, and
* no other functions that may be using the locks for other purposed may be
* called from within the locked region.
* Since the slots are per processor, this will guarantee that we are the only
* user.
*/
/*
* TODO: Inserting ptes from an interrupt handler:
* This may be desirable for some SGX functionality where the GPU can fault in
* needed pages. For that, we need to make an atomic insert_pages function, that
* may fail.
* If it fails, the caller need to insert the page using a workqueue function,
* but on average it should be fast.
*/
/* Index of @offset's PTE within its page table (10 bits, 1024 entries). */
static inline uint32_t psb_mmu_pt_index(uint32_t offset)
{
	return (offset >> PSB_PTE_SHIFT) & 0x3FF;
}
/* Index of @offset's page-directory entry. */
static inline uint32_t psb_mmu_pd_index(uint32_t offset)
{
	return offset >> PSB_PDE_SHIFT;
}
/* Flush the cache line containing @addr (x86 clflush instruction). */
static inline void psb_clflush(void *addr)
{
	__asm__ __volatile__("clflush (%0)\n" : : "r"(addr) : "memory");
}
/*
 * Flush the cache line containing @addr if the CPU supports clflush,
 * fenced on both sides so the flush is ordered against surrounding
 * page-table updates.  No-op otherwise.
 */
static inline void psb_mmu_clflush(struct psb_mmu_driver *driver, void *addr)
{
	if (!driver->has_clflush)
		return;

	mb();
	psb_clflush(addr);
	mb();
}
/*
 * Invalidate the SGX directory cache when a TLB flush is pending (or
 * when @force).  Also notifies the MSVDX MMU via its invalidation
 * flag.  Caller must hold driver->sem in write mode.
 */
static void psb_mmu_flush_pd_locked(struct psb_mmu_driver *driver, int force)
{
	struct drm_device *dev = driver->dev;
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);

	if (atomic_read(&driver->needs_tlbflush) || force) {
		uint32_t val = PSB_RSGX32(PSB_CR_BIF_CTRL);
		PSB_WSGX32(val | _PSB_CB_CTRL_INVALDC, PSB_CR_BIF_CTRL);

		/* Make sure data cache is turned off before enabling it */
		wmb();
		PSB_WSGX32(val & ~_PSB_CB_CTRL_INVALDC, PSB_CR_BIF_CTRL);
		(void)PSB_RSGX32(PSB_CR_BIF_CTRL);
		if (driver->msvdx_mmu_invaldc)
			atomic_set(driver->msvdx_mmu_invaldc, 1);
	}
	atomic_set(&driver->needs_tlbflush, 0);
}
#if 0
/* Unused locked wrapper around psb_mmu_flush_pd_locked(); kept for reference. */
static void psb_mmu_flush_pd(struct psb_mmu_driver *driver, int force)
{
	down_write(&driver->sem);
	psb_mmu_flush_pd_locked(driver, force);
	up_write(&driver->sem);
}
#endif
/*
 * Flush the SGX MMU: a full directory-cache invalidate when a TLB
 * flush is pending, otherwise just a data-cache flush.  Takes
 * driver->sem in write mode and notifies the MSVDX MMU.
 */
void psb_mmu_flush(struct psb_mmu_driver *driver)
{
	struct drm_device *dev = driver->dev;
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	uint32_t val;

	down_write(&driver->sem);
	val = PSB_RSGX32(PSB_CR_BIF_CTRL);
	if (atomic_read(&driver->needs_tlbflush))
		PSB_WSGX32(val | _PSB_CB_CTRL_INVALDC, PSB_CR_BIF_CTRL);
	else
		PSB_WSGX32(val | _PSB_CB_CTRL_FLUSH, PSB_CR_BIF_CTRL);

	/* Make sure data cache is turned off and MMU is flushed before
	   restoring bank interface control register */
	wmb();
	PSB_WSGX32(val & ~(_PSB_CB_CTRL_FLUSH | _PSB_CB_CTRL_INVALDC),
		   PSB_CR_BIF_CTRL);
	(void)PSB_RSGX32(PSB_CR_BIF_CTRL);

	atomic_set(&driver->needs_tlbflush, 0);

	if (driver->msvdx_mmu_invaldc)
		atomic_set(driver->msvdx_mmu_invaldc, 1);

	up_write(&driver->sem);
}
/*
 * Bind page directory @pd to hardware context @hw_context by writing
 * its physical address to the matching BIF directory-list base
 * register, then force a directory-cache invalidate.
 *
 * NOTE(review): for hw_context > 0 the offset computed is
 * PSB_CR_BIF_DIR_LIST_BASE1 + hw_context * 4, so context 1 lands at
 * BASE1 + 4 rather than BASE1 — looks like it should be
 * (hw_context - 1) * 4; confirm against the SGX register layout.
 */
void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context)
{
	struct drm_device *dev = pd->driver->dev;
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	uint32_t offset = (hw_context == 0) ? PSB_CR_BIF_DIR_LIST_BASE0 :
			  PSB_CR_BIF_DIR_LIST_BASE1 + hw_context * 4;

	down_write(&pd->driver->sem);
	PSB_WSGX32(page_to_pfn(pd->p) << PAGE_SHIFT, offset);
	wmb();
	psb_mmu_flush_pd_locked(pd->driver, 1);
	pd->hw_context = hw_context;
	up_write(&pd->driver->sem);
}
/*
 * Return the end of the page-directory-entry region containing @addr,
 * clamped to @end.  Used to walk an address range one PDE at a time.
 */
static inline unsigned long psb_pd_addr_end(unsigned long addr,
					    unsigned long end)
{
	unsigned long boundary = (addr + PSB_PDE_MASK + 1) & ~PSB_PDE_MASK;

	return boundary < end ? boundary : end;
}
/*
 * Build an MMU PTE for @pfn: valid bit plus the cache/read-only/
 * write-only attribute bits selected by @type.
 */
static inline uint32_t psb_mmu_mask_pte(uint32_t pfn, int type)
{
	uint32_t pte = PSB_PTE_VALID;

	if (type & PSB_MMU_CACHED_MEMORY)
		pte |= PSB_PTE_CACHED;
	if (type & PSB_MMU_RO_MEMORY)
		pte |= PSB_PTE_RO;
	if (type & PSB_MMU_WO_MEMORY)
		pte |= PSB_PTE_WO;

	return (pfn << PAGE_SHIFT) | pte;
}
/*
 * Allocate and initialise a page directory.
 *
 * Unless @trap_pagefaults is set, invalid PDEs/PTEs are pointed at a
 * dummy page table / dummy page so stray accesses hit harmless memory
 * instead of faulting.  Returns NULL on allocation failure; partial
 * allocations are unwound via the goto ladder.
 */
struct psb_mmu_pd *psb_mmu_alloc_pd(struct psb_mmu_driver *driver,
				    int trap_pagefaults, int invalid_type)
{
	struct psb_mmu_pd *pd = kmalloc(sizeof(*pd), GFP_KERNEL);
	uint32_t *v;
	int i;

	if (!pd)
		return NULL;

	pd->p = alloc_page(GFP_DMA32);
	if (!pd->p)
		goto out_err1;
	pd->dummy_pt = alloc_page(GFP_DMA32);
	if (!pd->dummy_pt)
		goto out_err2;
	pd->dummy_page = alloc_page(GFP_DMA32);
	if (!pd->dummy_page)
		goto out_err3;

	if (!trap_pagefaults) {
		pd->invalid_pde = psb_mmu_mask_pte(page_to_pfn(pd->dummy_pt),
						   invalid_type);
		pd->invalid_pte = psb_mmu_mask_pte(page_to_pfn(pd->dummy_page),
						   invalid_type);
	} else {
		pd->invalid_pde = 0;
		pd->invalid_pte = 0;
	}

	/* Point every entry of the dummy page table at the invalid PTE. */
	v = kmap_local_page(pd->dummy_pt);
	for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
		v[i] = pd->invalid_pte;

	kunmap_local(v);

	/* Point every directory entry at the invalid PDE. */
	v = kmap_local_page(pd->p);
	for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
		v[i] = pd->invalid_pde;

	kunmap_local(v);

	/*
	 * Use kmap_local_page() here as well, consistent with the maps
	 * above (the deprecated kmap()/kunmap() pair was used before).
	 */
	v = kmap_local_page(pd->dummy_page);
	clear_page(v);
	kunmap_local(v);

	pd->tables = vmalloc_user(sizeof(struct psb_mmu_pt *) * 1024);
	if (!pd->tables)
		goto out_err4;

	pd->hw_context = -1;
	pd->pd_mask = PSB_PTE_VALID;
	pd->driver = driver;

	return pd;

out_err4:
	__free_page(pd->dummy_page);
out_err3:
	__free_page(pd->dummy_pt);
out_err2:
	__free_page(pd->p);
out_err1:
	kfree(pd);
	return NULL;
}
/* Free a page table: its backing page and the descriptor itself. */
static void psb_mmu_free_pt(struct psb_mmu_pt *pt)
{
	__free_page(pt->p);
	kfree(pt);
}
/*
 * Free a page directory and all of its page tables.  If the directory
 * is bound to a hardware context, its base register is cleared and the
 * directory cache invalidated first.  Holds driver->sem in write mode
 * for the whole teardown.
 */
void psb_mmu_free_pagedir(struct psb_mmu_pd *pd)
{
	struct psb_mmu_driver *driver = pd->driver;
	struct drm_device *dev = driver->dev;
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct psb_mmu_pt *pt;
	int i;

	down_write(&driver->sem);
	if (pd->hw_context != -1) {
		PSB_WSGX32(0, PSB_CR_BIF_DIR_LIST_BASE0 + pd->hw_context * 4);
		psb_mmu_flush_pd_locked(driver, 1);
	}

	/* Should take the spinlock here, but we don't need to do that
	   since we have the semaphore in write mode. */

	for (i = 0; i < 1024; ++i) {
		pt = pd->tables[i];
		if (pt)
			psb_mmu_free_pt(pt);
	}

	vfree(pd->tables);
	__free_page(pd->dummy_page);
	__free_page(pd->dummy_pt);
	__free_page(pd->p);
	kfree(pd);
	up_write(&driver->sem);
}
/*
 * Allocate a page table for @pd and fill every entry with the invalid
 * PTE.  If the directory is live on hardware and the CPU has clflush,
 * the freshly written table is flushed line by line under the driver
 * spinlock.  Returns NULL on allocation failure.
 */
static struct psb_mmu_pt *psb_mmu_alloc_pt(struct psb_mmu_pd *pd)
{
	struct psb_mmu_pt *pt = kmalloc(sizeof(*pt), GFP_KERNEL);
	void *v;
	uint32_t clflush_add = pd->driver->clflush_add >> PAGE_SHIFT;
	uint32_t clflush_count = PAGE_SIZE / clflush_add;
	spinlock_t *lock = &pd->driver->lock;
	uint8_t *clf;
	uint32_t *ptes;
	int i;

	if (!pt)
		return NULL;

	pt->p = alloc_page(GFP_DMA32);
	if (!pt->p) {
		kfree(pt);
		return NULL;
	}

	spin_lock(lock);

	v = kmap_atomic(pt->p);
	clf = (uint8_t *) v;
	ptes = (uint32_t *) v;
	for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
		*ptes++ = pd->invalid_pte;

	if (pd->driver->has_clflush && pd->hw_context != -1) {
		mb();
		for (i = 0; i < clflush_count; ++i) {
			psb_clflush(clf);
			clf += clflush_add;
		}
		mb();
	}
	kunmap_atomic(v);
	spin_unlock(lock);

	pt->count = 0;
	pt->pd = pd;
	pt->index = 0;

	return pt;
}
/*
 * Look up (allocating on demand) the page table covering @addr, map its
 * page with kmap_atomic() and return it with the driver spinlock held.
 * The caller must release both via psb_mmu_pt_unmap_unlock().
 * Returns NULL if a needed page table could not be allocated.
 */
static struct psb_mmu_pt *psb_mmu_pt_alloc_map_lock(struct psb_mmu_pd *pd,
						    unsigned long addr)
{
	uint32_t index = psb_mmu_pd_index(addr);
	struct psb_mmu_pt *pt;
	uint32_t *v;
	spinlock_t *lock = &pd->driver->lock;

	spin_lock(lock);
	pt = pd->tables[index];
	while (!pt) {
		/* Allocation may sleep, so drop the lock around it. */
		spin_unlock(lock);
		pt = psb_mmu_alloc_pt(pd);
		if (!pt)
			return NULL;
		spin_lock(lock);

		if (pd->tables[index]) {
			/* Lost the race: free ours and adopt the winner's. */
			spin_unlock(lock);
			psb_mmu_free_pt(pt);
			spin_lock(lock);
			pt = pd->tables[index];
			continue;
		}

		v = kmap_atomic(pd->p);
		pd->tables[index] = pt;
		v[index] = (page_to_pfn(pt->p) << 12) | pd->pd_mask;
		pt->index = index;

		/*
		 * Flush the new PDE while the directory page is still
		 * mapped. The previous code issued the clflush after
		 * kunmap_atomic(), which touches an unmapped address on
		 * 32-bit highmem configurations. This ordering also
		 * matches psb_mmu_pt_unmap_unlock().
		 */
		if (pd->hw_context != -1) {
			psb_mmu_clflush(pd->driver, (void *)&v[index]);
			atomic_set(&pd->driver->needs_tlbflush, 1);
		}
		kunmap_atomic((void *) v);
	}

	pt->v = kmap_atomic(pt->p);
	return pt;
}
/*
 * Look up the page table covering @addr without allocating. On success the
 * table's page is kmap_atomic()-mapped and the driver spinlock is held;
 * release both with psb_mmu_pt_unmap_unlock(). Returns NULL (with the lock
 * dropped) if no table exists for this address.
 */
static struct psb_mmu_pt *psb_mmu_pt_map_lock(struct psb_mmu_pd *pd,
					      unsigned long addr)
{
	uint32_t index = psb_mmu_pd_index(addr);
	struct psb_mmu_pt *pt;
	spinlock_t *lock = &pd->driver->lock;

	spin_lock(lock);
	pt = pd->tables[index];
	if (!pt) {
		spin_unlock(lock);
		return NULL;
	}

	pt->v = kmap_atomic(pt->p);
	return pt;
}
/*
 * Counterpart to psb_mmu_pt_{alloc_,}map_lock(): unmap the page table and
 * drop the driver spinlock. If the table no longer holds any live PTEs
 * (count == 0), unlink it from the directory, invalidate its PDE and free
 * it entirely.
 */
static void psb_mmu_pt_unmap_unlock(struct psb_mmu_pt *pt)
{
	struct psb_mmu_pd *pd = pt->pd;
	uint32_t *v;

	kunmap_atomic(pt->v);
	if (pt->count == 0) {
		/* Table is empty: retire it and invalidate its PDE. */
		v = kmap_atomic(pd->p);
		v[pt->index] = pd->invalid_pde;
		pd->tables[pt->index] = NULL;

		if (pd->hw_context != -1) {
			/* Make the PDE update visible to the device. */
			psb_mmu_clflush(pd->driver, (void *)&v[pt->index]);
			atomic_set(&pd->driver->needs_tlbflush, 1);
		}
		kunmap_atomic(v);
		spin_unlock(&pd->driver->lock);
		psb_mmu_free_pt(pt);
		return;
	}
	spin_unlock(&pd->driver->lock);
}
/* Write @pte into the slot of @pt that maps @addr (pt->v must be mapped). */
static inline void psb_mmu_set_pte(struct psb_mmu_pt *pt, unsigned long addr,
				   uint32_t pte)
{
	pt->v[psb_mmu_pt_index(addr)] = pte;
}
/* Reset the PTE for @addr to the directory's invalid-entry template. */
static inline void psb_mmu_invalidate_pte(struct psb_mmu_pt *pt,
					  unsigned long addr)
{
	pt->v[psb_mmu_pt_index(addr)] = pt->pd->invalid_pte;
}
/*
 * Return the driver's default page directory. The read-side semaphore only
 * guards the pointer read; the caller owns no lock on return.
 */
struct psb_mmu_pd *psb_mmu_get_default_pd(struct psb_mmu_driver *driver)
{
	struct psb_mmu_pd *pd;

	down_read(&driver->sem);
	pd = driver->default_pd;
	up_read(&driver->sem);

	return pd;
}
/*
 * Shut the MMU driver down: restore the BIF control register saved at init
 * time, free the default page directory and the driver itself.
 */
void psb_mmu_driver_takedown(struct psb_mmu_driver *driver)
{
	struct drm_device *dev = driver->dev;
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);

	PSB_WSGX32(driver->bif_ctrl, PSB_CR_BIF_CTRL);
	psb_mmu_free_pagedir(driver->default_pd);
	kfree(driver);
}
/*
 * Allocate and initialize the MMU driver state: create the default page
 * directory, clear any pending BIF faults, and detect CLFLUSH support so
 * PTE updates can be pushed out of the CPU cache. Returns NULL on failure.
 */
struct psb_mmu_driver *psb_mmu_driver_init(struct drm_device *dev,
					   int trap_pagefaults,
					   int invalid_type,
					   atomic_t *msvdx_mmu_invaldc)
{
	struct psb_mmu_driver *driver;
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);

	driver = kmalloc(sizeof(*driver), GFP_KERNEL);
	if (!driver)
		return NULL;

	driver->dev = dev;
	/*
	 * NOTE(review): the default pd is allocated before driver->lock and
	 * driver->sem are initialized below — this presumably relies on
	 * psb_mmu_alloc_pd() not touching either; confirm if alloc_pd changes.
	 */
	driver->default_pd = psb_mmu_alloc_pd(driver, trap_pagefaults,
					      invalid_type);
	if (!driver->default_pd)
		goto out_err1;

	spin_lock_init(&driver->lock);
	init_rwsem(&driver->sem);
	down_write(&driver->sem);
	atomic_set(&driver->needs_tlbflush, 1);
	driver->msvdx_mmu_invaldc = msvdx_mmu_invaldc;

	/* Pulse the clear-fault bit, preserving the original BIF control. */
	driver->bif_ctrl = PSB_RSGX32(PSB_CR_BIF_CTRL);
	PSB_WSGX32(driver->bif_ctrl | _PSB_CB_CTRL_CLEAR_FAULT,
		   PSB_CR_BIF_CTRL);
	PSB_WSGX32(driver->bif_ctrl & ~_PSB_CB_CTRL_CLEAR_FAULT,
		   PSB_CR_BIF_CTRL);

	driver->has_clflush = 0;

	if (boot_cpu_has(X86_FEATURE_CLFLUSH)) {
		uint32_t tfms, misc, cap0, cap4, clflush_size;

		/*
		 * clflush size is determined at kernel setup for x86_64 but not
		 * for i386. We have to do it here.
		 */

		cpuid(0x00000001, &tfms, &misc, &cap0, &cap4);
		clflush_size = ((misc >> 8) & 0xff) * 8;
		driver->has_clflush = 1;
		driver->clflush_add =
		    PAGE_SIZE * clflush_size / sizeof(uint32_t);
		driver->clflush_mask = driver->clflush_add - 1;
		driver->clflush_mask = ~driver->clflush_mask;
	}

	up_write(&driver->sem);
	return driver;

out_err1:
	kfree(driver);
	return NULL;
}
/*
 * Cache-flush the PTE slots covering a (possibly tiled) range so the GPU
 * sees up-to-date entries. When @hw_tile_stride is zero the range is a
 * single linear run of @num_pages; otherwise it is flushed as
 * @num_pages / @desired_tile_stride rows spaced @hw_tile_stride pages
 * apart. No-op on CPUs without CLFLUSH.
 */
static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd, unsigned long address,
			       uint32_t num_pages, uint32_t desired_tile_stride,
			       uint32_t hw_tile_stride)
{
	struct psb_mmu_pt *pt;
	uint32_t rows = 1;
	uint32_t i;
	unsigned long addr;
	unsigned long end;
	unsigned long next;
	unsigned long add;
	unsigned long row_add;
	unsigned long clflush_add = pd->driver->clflush_add;
	unsigned long clflush_mask = pd->driver->clflush_mask;

	if (!pd->driver->has_clflush)
		return;

	if (hw_tile_stride)
		rows = num_pages / desired_tile_stride;
	else
		desired_tile_stride = num_pages;

	add = desired_tile_stride << PAGE_SHIFT;
	row_add = hw_tile_stride << PAGE_SHIFT;
	mb();
	for (i = 0; i < rows; ++i) {
		addr = address;
		end = addr + add;

		do {
			/* Walk one page-directory span at a time. */
			next = psb_pd_addr_end(addr, end);
			pt = psb_mmu_pt_map_lock(pd, addr);
			if (!pt)
				continue;
			do {
				/* Step by one cache line's worth of PTEs. */
				psb_clflush(&pt->v[psb_mmu_pt_index(addr)]);
			} while (addr += clflush_add,
				 (addr & clflush_mask) < next);

			psb_mmu_pt_unmap_unlock(pt);
		} while (addr = next, next != end);
		address += row_add;
	}
	mb();
}
/*
 * Invalidate the PTEs for @num_pages consecutive pages starting at
 * @address, then flush the changed entries and the device TLB if the
 * directory is bound to a hardware context.
 */
void psb_mmu_remove_pfn_sequence(struct psb_mmu_pd *pd,
				 unsigned long address, uint32_t num_pages)
{
	struct psb_mmu_pt *pt;
	unsigned long addr;
	unsigned long end;
	unsigned long next;
	unsigned long f_address = address;

	down_read(&pd->driver->sem);

	addr = address;
	end = addr + (num_pages << PAGE_SHIFT);

	do {
		next = psb_pd_addr_end(addr, end);
		/*
		 * NOTE(review): uses the allocating lookup even though we are
		 * removing entries — presumably so the count bookkeeping stays
		 * consistent across an unpopulated span; confirm intent.
		 */
		pt = psb_mmu_pt_alloc_map_lock(pd, addr);
		if (!pt)
			goto out;
		do {
			psb_mmu_invalidate_pte(pt, addr);
			--pt->count;
		} while (addr += PAGE_SIZE, addr < next);
		psb_mmu_pt_unmap_unlock(pt);

	} while (addr = next, next != end);

out:
	if (pd->hw_context != -1)
		psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1);

	up_read(&pd->driver->sem);

	if (pd->hw_context != -1)
		psb_mmu_flush(pd->driver);

	return;
}
/*
 * Invalidate the PTEs for a (possibly tiled) range. With a nonzero
 * @hw_tile_stride the range is processed as rows of @desired_tile_stride
 * pages spaced @hw_tile_stride pages apart; otherwise it is one linear run.
 * Flushes caches and the device TLB when bound to a hardware context.
 */
void psb_mmu_remove_pages(struct psb_mmu_pd *pd, unsigned long address,
			  uint32_t num_pages, uint32_t desired_tile_stride,
			  uint32_t hw_tile_stride)
{
	struct psb_mmu_pt *pt;
	uint32_t rows = 1;
	uint32_t i;
	unsigned long addr;
	unsigned long end;
	unsigned long next;
	unsigned long add;
	unsigned long row_add;
	unsigned long f_address = address;

	if (hw_tile_stride)
		rows = num_pages / desired_tile_stride;
	else
		desired_tile_stride = num_pages;

	add = desired_tile_stride << PAGE_SHIFT;
	row_add = hw_tile_stride << PAGE_SHIFT;

	down_read(&pd->driver->sem);

	/* Make sure we only need to flush this processor's cache */

	for (i = 0; i < rows; ++i) {
		addr = address;
		end = addr + add;

		do {
			next = psb_pd_addr_end(addr, end);
			/* Skip spans that never had a page table. */
			pt = psb_mmu_pt_map_lock(pd, addr);
			if (!pt)
				continue;
			do {
				psb_mmu_invalidate_pte(pt, addr);
				--pt->count;

			} while (addr += PAGE_SIZE, addr < next);
			psb_mmu_pt_unmap_unlock(pt);

		} while (addr = next, next != end);
		address += row_add;
	}
	if (pd->hw_context != -1)
		psb_mmu_flush_ptes(pd, f_address, num_pages,
				   desired_tile_stride, hw_tile_stride);

	up_read(&pd->driver->sem);

	if (pd->hw_context != -1)
		psb_mmu_flush(pd->driver);
}
/*
 * Map @num_pages physically-consecutive pages (starting at @start_pfn) at
 * virtual @address with PTE flags derived from @type. Returns 0 on success
 * or -ENOMEM if a page table could not be allocated; already-written
 * entries are still flushed in that case.
 */
int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd, uint32_t start_pfn,
				unsigned long address, uint32_t num_pages,
				int type)
{
	struct psb_mmu_pt *pt;
	uint32_t pte;
	unsigned long addr;
	unsigned long end;
	unsigned long next;
	unsigned long f_address = address;
	int ret = -ENOMEM;

	down_read(&pd->driver->sem);

	addr = address;
	end = addr + (num_pages << PAGE_SHIFT);

	do {
		next = psb_pd_addr_end(addr, end);
		pt = psb_mmu_pt_alloc_map_lock(pd, addr);
		if (!pt) {
			ret = -ENOMEM;
			goto out;
		}
		do {
			pte = psb_mmu_mask_pte(start_pfn++, type);
			psb_mmu_set_pte(pt, addr, pte);
			pt->count++;
		} while (addr += PAGE_SIZE, addr < next);
		psb_mmu_pt_unmap_unlock(pt);

	} while (addr = next, next != end);
	ret = 0;

out:
	if (pd->hw_context != -1)
		psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1);

	up_read(&pd->driver->sem);

	if (pd->hw_context != -1)
		psb_mmu_flush(pd->driver);

	return ret;
}
/*
 * Map an array of struct pages at virtual @address, optionally in a tiled
 * layout (rows of @desired_tile_stride pages, rows spaced @hw_tile_stride
 * pages apart). Returns 0 on success, -EINVAL if @num_pages is not a whole
 * number of tiles, or -ENOMEM if a page table could not be allocated.
 */
int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
			 unsigned long address, uint32_t num_pages,
			 uint32_t desired_tile_stride, uint32_t hw_tile_stride,
			 int type)
{
	struct psb_mmu_pt *pt;
	uint32_t rows = 1;
	uint32_t i;
	uint32_t pte;
	unsigned long addr;
	unsigned long end;
	unsigned long next;
	unsigned long add;
	unsigned long row_add;
	unsigned long f_address = address;
	int ret = -ENOMEM;

	if (hw_tile_stride) {
		if (num_pages % desired_tile_stride != 0)
			return -EINVAL;
		rows = num_pages / desired_tile_stride;
	} else {
		desired_tile_stride = num_pages;
	}

	add = desired_tile_stride << PAGE_SHIFT;
	row_add = hw_tile_stride << PAGE_SHIFT;

	down_read(&pd->driver->sem);

	for (i = 0; i < rows; ++i) {
		addr = address;
		end = addr + add;

		do {
			next = psb_pd_addr_end(addr, end);
			pt = psb_mmu_pt_alloc_map_lock(pd, addr);
			if (!pt)
				goto out;
			do {
				/* One PTE per page, consuming *pages in order. */
				pte = psb_mmu_mask_pte(page_to_pfn(*pages++),
						       type);
				psb_mmu_set_pte(pt, addr, pte);
				pt->count++;
			} while (addr += PAGE_SIZE, addr < next);
			psb_mmu_pt_unmap_unlock(pt);

		} while (addr = next, next != end);

		address += row_add;
	}

	ret = 0;
out:
	if (pd->hw_context != -1)
		psb_mmu_flush_ptes(pd, f_address, num_pages,
				   desired_tile_stride, hw_tile_stride);

	up_read(&pd->driver->sem);

	if (pd->hw_context != -1)
		psb_mmu_flush(pd->driver);

	return ret;
}
/*
 * Translate GPU-virtual address @virtual into the page frame number it maps
 * to, storing it in *@pfn. Returns 0 on success or -EINVAL if no valid
 * mapping exists.
 */
int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual,
			   unsigned long *pfn)
{
	int ret;
	struct psb_mmu_pt *pt;
	uint32_t tmp;
	spinlock_t *lock = &pd->driver->lock;

	down_read(&pd->driver->sem);
	pt = psb_mmu_pt_map_lock(pd, virtual);
	if (!pt) {
		/* No page table: check whether the PDE is the shared
		 * "invalid" entry, which maps to the dummy page. */
		uint32_t *v;

		spin_lock(lock);
		v = kmap_atomic(pd->p);
		tmp = v[psb_mmu_pd_index(virtual)];
		kunmap_atomic(v);
		spin_unlock(lock);

		/*
		 * NOTE(review): this condition rejects the lookup when the PDE
		 * differs from invalid_pde OR the PDE lacks the valid bit —
		 * which can never both hold when invalid_pde has the valid bit
		 * clear. Looks suspicious (possibly meant &&); confirm against
		 * hardware expectations before changing.
		 */
		if (tmp != pd->invalid_pde || !(tmp & PSB_PTE_VALID) ||
		    !(pd->invalid_pte & PSB_PTE_VALID)) {
			ret = -EINVAL;
			goto out;
		}
		ret = 0;
		*pfn = pd->invalid_pte >> PAGE_SHIFT;
		goto out;
	}
	tmp = pt->v[psb_mmu_pt_index(virtual)];
	if (!(tmp & PSB_PTE_VALID)) {
		ret = -EINVAL;
	} else {
		ret = 0;
		*pfn = tmp >> PAGE_SHIFT;
	}
	psb_mmu_pt_unmap_unlock(pt);
out:
	up_read(&pd->driver->sem);
	return ret;
}
| linux-master | drivers/gpu/drm/gma500/mmu.c |
/*
* Copyright © 2010 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors:
* Li Peng <[email protected]>
*/
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include "psb_drv.h"
#define HDMI_READ(reg) readl(hdmi_dev->regs + (reg))
#define HDMI_WRITE(reg, val) writel(val, hdmi_dev->regs + (reg))
#define HDMI_HCR 0x1000
#define HCR_DETECT_HDP (1 << 6)
#define HCR_ENABLE_HDCP (1 << 5)
#define HCR_ENABLE_AUDIO (1 << 2)
#define HCR_ENABLE_PIXEL (1 << 1)
#define HCR_ENABLE_TMDS (1 << 0)
#define HDMI_HICR 0x1004
#define HDMI_INTR_I2C_ERROR (1 << 4)
#define HDMI_INTR_I2C_FULL (1 << 3)
#define HDMI_INTR_I2C_DONE (1 << 2)
#define HDMI_INTR_HPD (1 << 0)
#define HDMI_HSR 0x1008
#define HDMI_HISR 0x100C
#define HDMI_HI2CRDB0 0x1200
#define HDMI_HI2CHCR 0x1240
#define HI2C_HDCP_WRITE (0 << 2)
#define HI2C_HDCP_RI_READ (1 << 2)
#define HI2C_HDCP_READ (2 << 2)
#define HI2C_EDID_READ (3 << 2)
#define HI2C_READ_CONTINUE (1 << 1)
#define HI2C_ENABLE_TRANSACTION (1 << 0)
#define HDMI_ICRH 0x1100
#define HDMI_HI2CTDR0 0x1244
#define HDMI_HI2CTDR1 0x1248
#define I2C_STAT_INIT 0
#define I2C_READ_DONE 1
#define I2C_TRANSACTION_DONE 2
/* Per-adapter state shared between the xfer path and the IRQ handler. */
struct hdmi_i2c_dev {
	struct i2c_adapter *adap;	/* the registered I2C adapter */
	struct mutex i2c_lock;		/* serializes whole transfers */
	struct completion complete;	/* signalled from the IRQ handler */
	int status;			/* I2C_STAT_INIT/READ_DONE/TRANSACTION_DONE */
	struct i2c_msg *msg;		/* message currently being filled */
	int buf_offset;			/* write position inside msg->buf */
};
/* Unmask the I2C error/buffer-full/done interrupt sources in HICR. */
static void hdmi_i2c_irq_enable(struct oaktrail_hdmi_dev *hdmi_dev)
{
	u32 hicr;

	hicr = HDMI_READ(HDMI_HICR);
	hicr |= HDMI_INTR_I2C_ERROR | HDMI_INTR_I2C_FULL | HDMI_INTR_I2C_DONE;
	HDMI_WRITE(HDMI_HICR, hicr);
	HDMI_READ(HDMI_HICR);	/* posting read */
}
/* Mask all HDMI I2C interrupt sources; posting read flushes the write. */
static void hdmi_i2c_irq_disable(struct oaktrail_hdmi_dev *hdmi_dev)
{
	HDMI_WRITE(HDMI_HICR, 0x0);
	HDMI_READ(HDMI_HICR);
}
static int xfer_read(struct i2c_adapter *adap, struct i2c_msg *pmsg)
{
struct oaktrail_hdmi_dev *hdmi_dev = i2c_get_adapdata(adap);
struct hdmi_i2c_dev *i2c_dev = hdmi_dev->i2c_dev;
u32 temp;
i2c_dev->status = I2C_STAT_INIT;
i2c_dev->msg = pmsg;
i2c_dev->buf_offset = 0;
reinit_completion(&i2c_dev->complete);
/* Enable I2C transaction */
temp = ((pmsg->len) << 20) | HI2C_EDID_READ | HI2C_ENABLE_TRANSACTION;
HDMI_WRITE(HDMI_HI2CHCR, temp);
HDMI_READ(HDMI_HI2CHCR);
while (i2c_dev->status != I2C_TRANSACTION_DONE)
wait_for_completion_interruptible_timeout(&i2c_dev->complete,
10 * HZ);
return 0;
}
/*
 * Write transfers are intentionally a no-op: this adapter exists only to
 * probe EDID, and the hardware read path does not need a preceding write.
 */
static int xfer_write(struct i2c_adapter *adap, struct i2c_msg *pmsg)
{
	/*
	 * XXX: i2c write seems isn't useful for EDID probe, don't do anything
	 */
	return 0;
}
/*
 * master_xfer implementation: serialize the batch under i2c_lock, enable
 * the I2C unit and its interrupts, dispatch each message to the read or
 * write path, then mask interrupts again. Always reports all @num messages
 * as processed (per-message errors are not propagated).
 */
static int oaktrail_hdmi_i2c_access(struct i2c_adapter *adap,
				    struct i2c_msg *pmsg,
				    int num)
{
	struct oaktrail_hdmi_dev *hdmi_dev = i2c_get_adapdata(adap);
	struct hdmi_i2c_dev *i2c_dev = hdmi_dev->i2c_dev;
	int i;

	mutex_lock(&i2c_dev->i2c_lock);

	/* Enable i2c unit */
	HDMI_WRITE(HDMI_ICRH, 0x00008760);

	/* Enable irq */
	hdmi_i2c_irq_enable(hdmi_dev);
	for (i = 0; i < num; i++) {
		/* Skip zero-length or buffer-less messages. */
		if (pmsg->len && pmsg->buf) {
			if (pmsg->flags & I2C_M_RD)
				xfer_read(adap, pmsg);
			else
				xfer_write(adap, pmsg);
		}
		pmsg++;		/* next message */
	}

	/* Disable irq */
	hdmi_i2c_irq_disable(hdmi_dev);

	mutex_unlock(&i2c_dev->i2c_lock);

	return i;
}
/* Advertise plain I2C transfers plus 10-bit addressing to the I2C core. */
static u32 oaktrail_hdmi_i2c_func(struct i2c_adapter *adapter)
{
	return I2C_FUNC_I2C | I2C_FUNC_10BIT_ADDR;
}
/* I2C algorithm hooks for the HDMI DDC adapter. */
static const struct i2c_algorithm oaktrail_hdmi_i2c_algorithm = {
	.master_xfer	= oaktrail_hdmi_i2c_access,
	.functionality  = oaktrail_hdmi_i2c_func,
};
/* Single static adapter instance, registered at fixed bus number 3. */
static struct i2c_adapter oaktrail_hdmi_i2c_adapter = {
	.name		= "oaktrail_hdmi_i2c",
	.nr		= 3,
	.owner		= THIS_MODULE,
	.class		= I2C_CLASS_DDC,
	.algo		= &oaktrail_hdmi_i2c_algorithm,
};
/*
 * IRQ-context handler for the "read buffer full" interrupt: drain the 16
 * dword read-data registers into the current message buffer, acknowledge
 * the interrupt, and ask the hardware to continue the read.
 */
static void hdmi_i2c_read(struct oaktrail_hdmi_dev *hdmi_dev)
{
	struct hdmi_i2c_dev *i2c_dev = hdmi_dev->i2c_dev;
	struct i2c_msg *msg = i2c_dev->msg;
	u8 *buf = msg->buf;
	u32 temp;
	int i, offset;

	/*
	 * The FIFO is always 64 bytes, but msg->len need not be a multiple
	 * of 64. The old code unconditionally copied all 64 bytes, writing
	 * past the end of the caller's buffer on the final chunk; clamp
	 * every copy to the space remaining in msg->buf.
	 */
	offset = i2c_dev->buf_offset;
	for (i = 0; i < 0x10; i++) {
		int pos = offset + i * 4;
		u32 chunk;

		if (pos >= msg->len)
			break;
		chunk = msg->len - pos;
		if (chunk > 4)
			chunk = 4;

		temp = HDMI_READ(HDMI_HI2CRDB0 + (i * 4));
		memcpy(buf + pos, &temp, chunk);
	}
	i2c_dev->buf_offset += (0x10 * 4);

	/* clearing read buffer full intr */
	temp = HDMI_READ(HDMI_HISR);
	HDMI_WRITE(HDMI_HISR, temp | HDMI_INTR_I2C_FULL);
	HDMI_READ(HDMI_HISR);

	/* continue read transaction */
	temp = HDMI_READ(HDMI_HI2CHCR);
	HDMI_WRITE(HDMI_HI2CHCR, temp | HI2C_READ_CONTINUE);
	HDMI_READ(HDMI_HI2CHCR);

	i2c_dev->status = I2C_READ_DONE;
}
/*
 * IRQ-context handler for the "transaction done" interrupt: acknowledge
 * it, disable the hardware transaction, and mark the transfer complete so
 * the waiter in xfer_read() can return.
 */
static void hdmi_i2c_transaction_done(struct oaktrail_hdmi_dev *hdmi_dev)
{
	struct hdmi_i2c_dev *i2c_dev = hdmi_dev->i2c_dev;
	u32 temp;

	/* clear transaction done intr */
	temp = HDMI_READ(HDMI_HISR);
	HDMI_WRITE(HDMI_HISR, temp | HDMI_INTR_I2C_DONE);
	HDMI_READ(HDMI_HISR);

	temp = HDMI_READ(HDMI_HI2CHCR);
	HDMI_WRITE(HDMI_HI2CHCR, temp & ~HI2C_ENABLE_TRANSACTION);
	HDMI_READ(HDMI_HI2CHCR);

	i2c_dev->status = I2C_TRANSACTION_DONE;
}
/*
 * Shared interrupt handler: ack hot-plug events, service buffer-full and
 * transaction-done I2C interrupts, and wake the waiting transfer. Always
 * claims the interrupt (IRQ_HANDLED) even for HPD-only events.
 */
static irqreturn_t oaktrail_hdmi_i2c_handler(int this_irq, void *dev)
{
	struct oaktrail_hdmi_dev *hdmi_dev = dev;
	struct hdmi_i2c_dev *i2c_dev = hdmi_dev->i2c_dev;
	u32 stat;

	stat = HDMI_READ(HDMI_HISR);

	if (stat & HDMI_INTR_HPD) {
		/* Just acknowledge hot-plug; nothing else to do here. */
		HDMI_WRITE(HDMI_HISR, stat | HDMI_INTR_HPD);
		HDMI_READ(HDMI_HISR);
	}

	if (stat & HDMI_INTR_I2C_FULL)
		hdmi_i2c_read(hdmi_dev);

	if (stat & HDMI_INTR_I2C_DONE)
		hdmi_i2c_transaction_done(hdmi_dev);

	complete(&i2c_dev->complete);

	return IRQ_HANDLED;
}
/*
 * choose alternate function 2 of GPIO pin 52, 53,
 * which is used by HDMI I2C logic
 *
 * Maps the SoC GPIO block at a fixed physical address and sets the two
 * mux bits (0x00000a00) in the register at offset 0x44. Best-effort: on
 * ioremap failure it only logs an error.
 */
static void oaktrail_hdmi_i2c_gpio_fix(void)
{
	void __iomem *base;
	unsigned int gpio_base = 0xff12c000;	/* fixed SoC GPIO MMIO base */
	int gpio_len = 0x1000;
	u32 temp;

	base = ioremap((resource_size_t)gpio_base, gpio_len);
	if (base == NULL) {
		DRM_ERROR("gpio ioremap fail\n");
		return;
	}

	temp = readl(base + 0x44);
	DRM_DEBUG_DRIVER("old gpio val %x\n", temp);
	writel((temp | 0x00000a00), (base + 0x44));
	temp = readl(base + 0x44);
	DRM_DEBUG_DRIVER("new gpio val %x\n", temp);

	iounmap(base);
}
int oaktrail_hdmi_i2c_init(struct pci_dev *dev)
{
struct oaktrail_hdmi_dev *hdmi_dev;
struct hdmi_i2c_dev *i2c_dev;
int ret;
hdmi_dev = pci_get_drvdata(dev);
i2c_dev = kzalloc(sizeof(struct hdmi_i2c_dev), GFP_KERNEL);
if (!i2c_dev)
return -ENOMEM;
i2c_dev->adap = &oaktrail_hdmi_i2c_adapter;
i2c_dev->status = I2C_STAT_INIT;
init_completion(&i2c_dev->complete);
mutex_init(&i2c_dev->i2c_lock);
i2c_set_adapdata(&oaktrail_hdmi_i2c_adapter, hdmi_dev);
hdmi_dev->i2c_dev = i2c_dev;
/* Enable HDMI I2C function on gpio */
oaktrail_hdmi_i2c_gpio_fix();
/* request irq */
ret = request_irq(dev->irq, oaktrail_hdmi_i2c_handler, IRQF_SHARED,
oaktrail_hdmi_i2c_adapter.name, hdmi_dev);
if (ret) {
DRM_ERROR("Failed to request IRQ for I2C controller\n");
goto free_dev;
}
/* Adapter registration */
ret = i2c_add_numbered_adapter(&oaktrail_hdmi_i2c_adapter);
if (ret) {
DRM_ERROR("Failed to add I2C adapter\n");
goto free_irq;
}
return 0;
free_irq:
free_irq(dev->irq, hdmi_dev);
free_dev:
kfree(i2c_dev);
return ret;
}
/*
 * Undo oaktrail_hdmi_i2c_init(): unregister the adapter, free the
 * per-adapter state, and release the shared interrupt.
 */
void oaktrail_hdmi_i2c_exit(struct pci_dev *dev)
{
	struct oaktrail_hdmi_dev *hdmi_dev;
	struct hdmi_i2c_dev *i2c_dev;

	hdmi_dev = pci_get_drvdata(dev);
	i2c_del_adapter(&oaktrail_hdmi_i2c_adapter);

	i2c_dev = hdmi_dev->i2c_dev;
	kfree(i2c_dev);
	free_irq(dev->irq, hdmi_dev);
}
| linux-master | drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c |
// SPDX-License-Identifier: GPL-2.0-only
/**************************************************************************
* Copyright (c) 2007-2011, Intel Corporation.
* All Rights Reserved.
*
**************************************************************************/
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_modeset_helper.h>
#include "framebuffer.h"
#include "psb_drv.h"
/* Framebuffer ops: defer entirely to the generic GEM fb helpers. */
static const struct drm_framebuffer_funcs psb_fb_funcs = {
	.destroy = drm_gem_fb_destroy,
	.create_handle = drm_gem_fb_create_handle,
};
/**
* psb_framebuffer_init - initialize a framebuffer
* @dev: our DRM device
* @fb: framebuffer to set up
* @mode_cmd: mode description
* @obj: backing object
*
* Configure and fill in the boilerplate for our frame buffer. Return
* 0 on success or an error code if we fail.
*/
/**
 * psb_framebuffer_init - initialize a framebuffer
 * @dev: our DRM device
 * @fb: framebuffer to set up
 * @mode_cmd: mode description
 * @obj: backing object
 *
 * Configure and fill in the boilerplate for our frame buffer. Return
 * 0 on success or an error code (-EINVAL for unsupported formats or
 * misaligned pitch) if we fail.
 */
static int psb_framebuffer_init(struct drm_device *dev,
				struct drm_framebuffer *fb,
				const struct drm_mode_fb_cmd2 *mode_cmd,
				struct drm_gem_object *obj)
{
	const struct drm_format_info *info;
	int ret;

	/*
	 * Reject unknown formats, YUV formats, and formats with more than
	 * 4 bytes per pixel.
	 */
	info = drm_get_format_info(dev, mode_cmd);
	if (!info || !info->depth || info->cpp[0] > 4)
		return -EINVAL;

	/* Hardware requires the scanout pitch to be 64-byte aligned. */
	if (mode_cmd->pitches[0] & 63)
		return -EINVAL;

	drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd);
	fb->obj[0] = obj;
	ret = drm_framebuffer_init(dev, fb, &psb_fb_funcs);
	if (ret) {
		dev_err(dev->dev, "framebuffer init failed: %d\n", ret);
		return ret;
	}
	return 0;
}
/**
* psb_framebuffer_create - create a framebuffer backed by gt
* @dev: our DRM device
* @mode_cmd: the description of the requested mode
* @obj: the backing object
*
* Create a framebuffer object backed by the gt, and fill in the
* boilerplate required
*
* TODO: review object references
*/
/**
 * psb_framebuffer_create - create a framebuffer backed by gt
 * @dev: our DRM device
 * @mode_cmd: the description of the requested mode
 * @obj: the backing object
 *
 * Allocate a framebuffer object and run the common initialization on it.
 * Returns the framebuffer or an ERR_PTR on allocation/init failure.
 */
struct drm_framebuffer *psb_framebuffer_create(struct drm_device *dev,
					       const struct drm_mode_fb_cmd2 *mode_cmd,
					       struct drm_gem_object *obj)
{
	int err;
	struct drm_framebuffer *fb = kzalloc(sizeof(*fb), GFP_KERNEL);

	if (!fb)
		return ERR_PTR(-ENOMEM);

	err = psb_framebuffer_init(dev, fb, mode_cmd, obj);
	if (!err)
		return fb;

	kfree(fb);
	return ERR_PTR(err);
}
/**
* psb_user_framebuffer_create - create framebuffer
* @dev: our DRM device
* @filp: client file
* @cmd: mode request
*
* Create a new framebuffer backed by a userspace GEM object
*/
/**
 * psb_user_framebuffer_create - create framebuffer
 * @dev: our DRM device
 * @filp: client file
 * @cmd: mode request
 *
 * Resolve the userspace GEM handle and build a framebuffer on top of it.
 * On failure the reference taken by the lookup is dropped again.
 */
static struct drm_framebuffer *psb_user_framebuffer_create
			(struct drm_device *dev, struct drm_file *filp,
			 const struct drm_mode_fb_cmd2 *cmd)
{
	struct drm_framebuffer *fb;
	struct drm_gem_object *obj = drm_gem_object_lookup(filp, cmd->handles[0]);

	if (!obj)
		return ERR_PTR(-ENOENT);

	fb = psb_framebuffer_create(dev, cmd, obj);
	if (IS_ERR(fb))
		drm_gem_object_put(obj);	/* undo the lookup reference */

	return fb;
}
/* Mode-config ops: only framebuffer creation is driver-specific. */
static const struct drm_mode_config_funcs psb_mode_funcs = {
	.fb_create = psb_user_framebuffer_create,
};
/*
 * Create the per-chip outputs and then assign each encoder its set of
 * usable CRTCs and allowed clone partners based on the output type.
 */
static void psb_setup_outputs(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct drm_connector_list_iter conn_iter;
	struct drm_connector *connector;

	drm_mode_create_scaling_mode_property(dev);

	/* It is ok for this to fail - we just don't get backlight control */
	if (!dev_priv->backlight_property)
		dev_priv->backlight_property = drm_property_create_range(dev, 0,
							"backlight", 0, 100);
	dev_priv->ops->output_init(dev);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
		struct drm_encoder *encoder = &gma_encoder->base;
		int crtc_mask = 0, clone_mask = 0;

		/* valid crtcs */
		switch (gma_encoder->type) {
		case INTEL_OUTPUT_ANALOG:
			crtc_mask = (1 << 0);
			clone_mask = (1 << INTEL_OUTPUT_ANALOG);
			break;
		case INTEL_OUTPUT_SDVO:
			/* SDVO/LVDS/HDMI CRTC masks are chip-specific. */
			crtc_mask = dev_priv->ops->sdvo_mask;
			clone_mask = 0;
			break;
		case INTEL_OUTPUT_LVDS:
			crtc_mask = dev_priv->ops->lvds_mask;
			clone_mask = 0;
			break;
		case INTEL_OUTPUT_MIPI:
			crtc_mask = (1 << 0);
			clone_mask = 0;
			break;
		case INTEL_OUTPUT_MIPI2:
			crtc_mask = (1 << 2);
			clone_mask = 0;
			break;
		case INTEL_OUTPUT_HDMI:
			crtc_mask = dev_priv->ops->hdmi_mask;
			clone_mask = (1 << INTEL_OUTPUT_HDMI);
			break;
		case INTEL_OUTPUT_DISPLAYPORT:
			crtc_mask = (1 << 0) | (1 << 1);
			clone_mask = 0;
			break;
		case INTEL_OUTPUT_EDP:
			crtc_mask = (1 << 1);
			clone_mask = 0;
		}
		encoder->possible_crtcs = crtc_mask;
		encoder->possible_clones =
		    gma_connector_clones(dev, clone_mask);
	}
	drm_connector_list_iter_end(&conn_iter);
}
/*
 * Initialize KMS: set up mode-config, create one CRTC per pipe, wire the
 * outputs and run chip errata. Sets dev_priv->modeset so cleanup knows
 * initialization completed.
 */
void psb_modeset_init(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
	int i;

	/*
	 * NOTE(review): a drmm_mode_config_init() failure is silently
	 * swallowed here — the caller has no way to know modeset init
	 * did not happen. Confirm whether an error should propagate.
	 */
	if (drmm_mode_config_init(dev))
		return;

	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;

	dev->mode_config.funcs = &psb_mode_funcs;

	/* num pipes is 2 for PSB but 1 for Mrst */
	for (i = 0; i < dev_priv->num_pipe; i++)
		psb_intel_crtc_init(dev, i, mode_dev);

	dev->mode_config.max_width = 4096;
	dev->mode_config.max_height = 4096;

	psb_setup_outputs(dev);

	if (dev_priv->ops->errata)
	        dev_priv->ops->errata(dev);

	dev_priv->modeset = true;
}
/* Tear down KMS state; only acts if psb_modeset_init() ran to completion. */
void psb_modeset_cleanup(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);

	if (dev_priv->modeset) {
		drm_kms_helper_poll_fini(dev);
	}
}
| linux-master | drivers/gpu/drm/gma500/framebuffer.c |
// SPDX-License-Identifier: GPL-2.0-only
/**************************************************************************
* Copyright (c) 2011, Intel Corporation.
* All Rights Reserved.
*
**************************************************************************/
#include <linux/delay.h>
#include <drm/drm.h>
#include <drm/drm_crtc_helper.h>
#include "cdv_device.h"
#include "gma_device.h"
#include "intel_bios.h"
#include "psb_drv.h"
#include "psb_intel_reg.h"
#include "psb_reg.h"
#define VGA_SR_INDEX 0x3c4
#define VGA_SR_DATA 0x3c5
/*
 * Disable legacy VGA output: blank the screen via sequencer register SR01
 * bit 5, wait for the required vsync interval, then disable the VGA plane.
 */
static void cdv_disable_vga(struct drm_device *dev)
{
	u8 sr1;
	u32 vga_reg;

	vga_reg = VGACNTRL;

	outb(1, VGA_SR_INDEX);
	sr1 = inb(VGA_SR_DATA);
	outb(sr1 | 1<<5, VGA_SR_DATA);
	udelay(300);	/* let the screen-off bit take effect */

	REG_WRITE(vga_reg, VGA_DISP_DISABLE);
	REG_READ(vga_reg);	/* posting read */
}
/*
 * Probe and register all Cedarview outputs: CRT and LVDS unconditionally,
 * plus HDMI and DisplayPort on the B and C ports where the hardware
 * reports a detection strap.
 */
static int cdv_output_init(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);

	drm_mode_create_scaling_mode_property(dev);

	cdv_disable_vga(dev);

	cdv_intel_crt_init(dev, &dev_priv->mode_dev);
	cdv_intel_lvds_init(dev, &dev_priv->mode_dev);

	/* These bits indicate HDMI not SDVO on CDV */
	if (REG_READ(SDVOB) & SDVO_DETECTED) {
		cdv_hdmi_init(dev, &dev_priv->mode_dev, SDVOB);
		if (REG_READ(DP_B) & DP_DETECTED)
			cdv_intel_dp_init(dev, &dev_priv->mode_dev, DP_B);
	}

	if (REG_READ(SDVOC) & SDVO_DETECTED) {
		cdv_hdmi_init(dev, &dev_priv->mode_dev, SDVOC);
		if (REG_READ(DP_C) & DP_DETECTED)
			cdv_intel_dp_init(dev, &dev_priv->mode_dev, DP_C);
	}
	return 0;
}
/*
* Cedartrail Backlght Interfaces
*/
/*
 * Nonzero when the panel backlight runs in legacy/combination mode, where
 * brightness combines the PWM duty cycle with the PCI LBPC byte (0xF4).
 */
static int cdv_backlight_combination_mode(struct drm_device *dev)
{
	return REG_READ(BLC_PWM_CTL2) & PWM_LEGACY_MODE;
}
/*
 * Read the maximum backlight value (PWM modulation frequency, upper 16
 * bits of BLC_PWM_CTL), scaled by 0xff in combination mode. Returns 1 when
 * the register reads zero so callers never divide by zero.
 */
static u32 cdv_get_max_backlight(struct drm_device *dev)
{
	u32 max = REG_READ(BLC_PWM_CTL);

	if (max == 0) {
		DRM_DEBUG_KMS("LVDS Panel PWM value is 0!\n");
		/* i915 does this, I believe which means that we should not
		 * smash PWM control as firmware will take control of it. */
		return 1;
	}

	max >>= 16;
	if (cdv_backlight_combination_mode(dev))
		max *= 0xff;
	return max;
}
/*
 * Return the current backlight brightness as a percentage (0-100) of the
 * maximum. In combination mode the PWM duty cycle is scaled by the PCI
 * LBPC byte to recover the combined level.
 */
static int cdv_get_brightness(struct drm_device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev->dev);
	u32 val = REG_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;

	if (cdv_backlight_combination_mode(dev)) {
		u8 lbpc;

		/* Drop bit 0 — presumably reserved in combination mode;
		 * mirrors the shift done in cdv_set_brightness(). */
		val &= ~1;
		pci_read_config_byte(pdev, 0xF4, &lbpc);
		val *= lbpc;
	}
	return (val * 100)/cdv_get_max_backlight(dev);
}
/*
 * Program the backlight to @level percent of maximum. In combination mode
 * the level is split between the PCI LBPC byte and the PWM duty cycle;
 * otherwise it is written directly into BLC_PWM_CTL's duty-cycle field.
 */
static void cdv_set_brightness(struct drm_device *dev, int level)
{
	struct pci_dev *pdev = to_pci_dev(dev->dev);
	u32 blc_pwm_ctl;

	/* Convert the percentage into a raw PWM value. */
	level *= cdv_get_max_backlight(dev);
	level /= 100;

	if (cdv_backlight_combination_mode(dev)) {
		u32 max = cdv_get_max_backlight(dev);
		u8 lbpc;

		/* lbpc is always >= 1, so the division below is safe. */
		lbpc = level * 0xfe / max + 1;
		level /= lbpc;

		pci_write_config_byte(pdev, 0xF4, lbpc);
	}

	blc_pwm_ctl = REG_READ(BLC_PWM_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK;
	REG_WRITE(BLC_PWM_CTL, (blc_pwm_ctl |
				(level << BACKLIGHT_DUTY_CYCLE_SHIFT)));
}
/* Capture the firmware-set brightness and re-apply it as our baseline. */
static int cdv_backlight_init(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);

	dev_priv->backlight_level = cdv_get_brightness(dev);
	cdv_set_brightness(dev, dev_priv->backlight_level);

	return 0;
}
/*
* Provide the Cedarview specific chip logic and low level methods
* for power management
*
* FIXME: we need to implement the apm/ospm base management bits
* for this and the MID devices.
*/
/*
 * Read a 32-bit value from a sideband message port via the bus 0 root
 * device's 0xD0/0xD4 mailbox. Returns 0 if the root device cannot be
 * found (the old code dereferenced the NULL pointer in that case).
 */
static inline u32 CDV_MSG_READ32(int domain, uint port, uint offset)
{
	int mcr = (0x10<<24) | (port << 16) | (offset << 8);
	uint32_t ret_val = 0;
	struct pci_dev *pci_root = pci_get_domain_bus_and_slot(domain, 0, 0);

	if (!pci_root)
		return 0;
	pci_write_config_dword(pci_root, 0xD0, mcr);
	pci_read_config_dword(pci_root, 0xD4, &ret_val);
	pci_dev_put(pci_root);
	return ret_val;
}
/*
 * Write a 32-bit value to a sideband message port via the bus 0 root
 * device's 0xD0/0xD4 mailbox. Silently does nothing if the root device
 * cannot be found (the old code dereferenced the NULL pointer).
 */
static inline void CDV_MSG_WRITE32(int domain, uint port, uint offset,
				   u32 value)
{
	int mcr = (0x11<<24) | (port << 16) | (offset << 8) | 0xF0;
	struct pci_dev *pci_root = pci_get_domain_bus_and_slot(domain, 0, 0);

	if (!pci_root)
		return;
	pci_write_config_dword(pci_root, 0xD4, value);
	pci_write_config_dword(pci_root, 0xD0, mcr);
	pci_dev_put(pci_root);
}
#define PSB_PM_SSC 0x20
#define PSB_PM_SSS 0x30
#define PSB_PWRGT_GFX_ON 0x02
#define PSB_PWRGT_GFX_OFF 0x01
#define PSB_PWRGT_GFX_D0 0x00
#define PSB_PWRGT_GFX_D3 0x03
/*
 * Initialize power management: discover the APM and OSPM I/O base
 * addresses from the punit, power the GPU on, and poll (up to ~50us) for
 * the power-gate status to report the GPU as up.
 */
static void cdv_init_pm(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct pci_dev *pdev = to_pci_dev(dev->dev);
	u32 pwr_cnt;
	int domain = pci_domain_nr(pdev->bus);
	int i;

	dev_priv->apm_base = CDV_MSG_READ32(domain, PSB_PUNIT_PORT,
					    PSB_APMBA) & 0xFFFF;
	dev_priv->ospm_base = CDV_MSG_READ32(domain, PSB_PUNIT_PORT,
					     PSB_OSPMBA) & 0xFFFF;

	/* Power status */
	pwr_cnt = inl(dev_priv->apm_base + PSB_APM_CMD);

	/* Enable the GPU */
	pwr_cnt &= ~PSB_PWRGT_GFX_MASK;
	pwr_cnt |= PSB_PWRGT_GFX_ON;
	outl(pwr_cnt, dev_priv->apm_base + PSB_APM_CMD);

	/* Wait for the GPU power */
	for (i = 0; i < 5; i++) {
		u32 pwr_sts = inl(dev_priv->apm_base + PSB_APM_STS);
		if ((pwr_sts & PSB_PWRGT_GFX_MASK) == 0)
			return;
		udelay(10);
	}
	dev_err(dev->dev, "GPU: power management timed out.\n");
}
/*
 * Apply the Cedarview memory-arbitration erratum workaround by writing the
 * magic value to sideband port 3, offset 0x30.
 */
static void cdv_errata(struct drm_device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev->dev);

	/* Disable bonus launch.
	 *	CPU and GPU competes for memory and display misses updates and
	 *	flickers. Worst with dual core, dual displays.
	 *
	 *	Fixes were done to Win 7 gfx driver to disable a feature called
	 *	Bonus Launch to work around the issue, by degrading
	 *	performance.
	 */
	CDV_MSG_WRITE32(pci_domain_nr(pdev->bus), 3, 0x30, 0x08027108);
}
/**
* cdv_save_display_registers - save registers lost on suspend
* @dev: our DRM device
*
* Save the state we need in order to be able to restore the interface
* upon resume from suspend
*/
static int cdv_save_display_registers(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct pci_dev *pdev = to_pci_dev(dev->dev);
struct psb_save_area *regs = &dev_priv->regs;
struct drm_connector_list_iter conn_iter;
struct drm_connector *connector;
dev_dbg(dev->dev, "Saving GPU registers.\n");
pci_read_config_byte(pdev, 0xF4, ®s->cdv.saveLBB);
regs->cdv.saveDSPCLK_GATE_D = REG_READ(DSPCLK_GATE_D);
regs->cdv.saveRAMCLK_GATE_D = REG_READ(RAMCLK_GATE_D);
regs->cdv.saveDSPARB = REG_READ(DSPARB);
regs->cdv.saveDSPFW[0] = REG_READ(DSPFW1);
regs->cdv.saveDSPFW[1] = REG_READ(DSPFW2);
regs->cdv.saveDSPFW[2] = REG_READ(DSPFW3);
regs->cdv.saveDSPFW[3] = REG_READ(DSPFW4);
regs->cdv.saveDSPFW[4] = REG_READ(DSPFW5);
regs->cdv.saveDSPFW[5] = REG_READ(DSPFW6);
regs->cdv.saveADPA = REG_READ(ADPA);
regs->cdv.savePP_CONTROL = REG_READ(PP_CONTROL);
regs->cdv.savePFIT_PGM_RATIOS = REG_READ(PFIT_PGM_RATIOS);
regs->saveBLC_PWM_CTL = REG_READ(BLC_PWM_CTL);
regs->saveBLC_PWM_CTL2 = REG_READ(BLC_PWM_CTL2);
regs->cdv.saveLVDS = REG_READ(LVDS);
regs->cdv.savePFIT_CONTROL = REG_READ(PFIT_CONTROL);
regs->cdv.savePP_ON_DELAYS = REG_READ(PP_ON_DELAYS);
regs->cdv.savePP_OFF_DELAYS = REG_READ(PP_OFF_DELAYS);
regs->cdv.savePP_CYCLE = REG_READ(PP_CYCLE);
regs->cdv.saveVGACNTRL = REG_READ(VGACNTRL);
regs->cdv.saveIER = REG_READ(PSB_INT_ENABLE_R);
regs->cdv.saveIMR = REG_READ(PSB_INT_MASK_R);
drm_connector_list_iter_begin(dev, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter)
connector->funcs->dpms(connector, DRM_MODE_DPMS_OFF);
drm_connector_list_iter_end(&conn_iter);
return 0;
}
/**
* cdv_restore_display_registers - restore lost register state
* @dev: our DRM device
*
* Restore register state that was lost during suspend and resume.
*
* FIXME: review
*/
/**
 * cdv_restore_display_registers - restore lost register state
 * @dev: our DRM device
 *
 * Restore register state that was lost during suspend and resume:
 * re-enable the DPLL sync-lock, write back all saved display registers,
 * re-apply the arbitration erratum, and force every connector/CRTC back
 * on. Always returns 0.
 *
 * FIXME: review
 */
static int cdv_restore_display_registers(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct pci_dev *pdev = to_pci_dev(dev->dev);
	struct psb_save_area *regs = &dev_priv->regs;
	struct drm_connector_list_iter conn_iter;
	struct drm_connector *connector;
	u32 temp;

	pci_write_config_byte(pdev, 0xF4, regs->cdv.saveLBB);

	REG_WRITE(DSPCLK_GATE_D, regs->cdv.saveDSPCLK_GATE_D);
	REG_WRITE(RAMCLK_GATE_D, regs->cdv.saveRAMCLK_GATE_D);

	/* BIOS does below anyway */
	REG_WRITE(DPIO_CFG, 0);
	REG_WRITE(DPIO_CFG, DPIO_MODE_SELECT_0 | DPIO_CMN_RESET_N);

	/* Re-enable DPLL sync-lock on both pipes before touching timing. */
	temp = REG_READ(DPLL_A);
	if ((temp & DPLL_SYNCLOCK_ENABLE) == 0) {
		REG_WRITE(DPLL_A, temp | DPLL_SYNCLOCK_ENABLE);
		REG_READ(DPLL_A);
	}

	temp = REG_READ(DPLL_B);
	if ((temp & DPLL_SYNCLOCK_ENABLE) == 0) {
		REG_WRITE(DPLL_B, temp | DPLL_SYNCLOCK_ENABLE);
		REG_READ(DPLL_B);
	}

	udelay(500);	/* let the PLLs settle */

	REG_WRITE(DSPFW1, regs->cdv.saveDSPFW[0]);
	REG_WRITE(DSPFW2, regs->cdv.saveDSPFW[1]);
	REG_WRITE(DSPFW3, regs->cdv.saveDSPFW[2]);
	REG_WRITE(DSPFW4, regs->cdv.saveDSPFW[3]);
	REG_WRITE(DSPFW5, regs->cdv.saveDSPFW[4]);
	REG_WRITE(DSPFW6, regs->cdv.saveDSPFW[5]);

	REG_WRITE(DSPARB, regs->cdv.saveDSPARB);
	REG_WRITE(ADPA, regs->cdv.saveADPA);

	REG_WRITE(BLC_PWM_CTL2, regs->saveBLC_PWM_CTL2);
	REG_WRITE(LVDS, regs->cdv.saveLVDS);
	REG_WRITE(PFIT_CONTROL, regs->cdv.savePFIT_CONTROL);
	REG_WRITE(PFIT_PGM_RATIOS, regs->cdv.savePFIT_PGM_RATIOS);
	REG_WRITE(BLC_PWM_CTL, regs->saveBLC_PWM_CTL);
	REG_WRITE(PP_ON_DELAYS, regs->cdv.savePP_ON_DELAYS);
	REG_WRITE(PP_OFF_DELAYS, regs->cdv.savePP_OFF_DELAYS);
	REG_WRITE(PP_CYCLE, regs->cdv.savePP_CYCLE);
	REG_WRITE(PP_CONTROL, regs->cdv.savePP_CONTROL);

	REG_WRITE(VGACNTRL, regs->cdv.saveVGACNTRL);

	REG_WRITE(PSB_INT_ENABLE_R, regs->cdv.saveIER);
	REG_WRITE(PSB_INT_MASK_R, regs->cdv.saveIMR);

	/* Fix arbitration bug */
	cdv_errata(dev);

	drm_mode_config_reset(dev);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter)
		connector->funcs->dpms(connector, DRM_MODE_DPMS_ON);
	drm_connector_list_iter_end(&conn_iter);

	/* Resume the modeset for every activated CRTC */
	drm_helper_resume_force_mode(dev);
	return 0;
}
/*
 * Request GPU power-off through the APM command port and poll briefly
 * for the power-gate status to report D3. As before, the result of the
 * poll is not propagated: the function always returns 0.
 */
static int cdv_power_down(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	u32 cmd, status;
	int attempt;

	cmd = inl(dev_priv->apm_base + PSB_APM_CMD);
	cmd = (cmd & ~PSB_PWRGT_GFX_MASK) | PSB_PWRGT_GFX_OFF;
	outl(cmd, dev_priv->apm_base + PSB_APM_CMD);

	for (attempt = 0; attempt < 5; attempt++) {
		status = inl(dev_priv->apm_base + PSB_APM_STS);
		if ((status & PSB_PWRGT_GFX_MASK) == PSB_PWRGT_GFX_D3)
			return 0;
		udelay(10);
	}
	return 0;
}
/*
 * Request GPU power-on through the APM command port and poll briefly
 * for the power-gate status to report D0. Mirrors cdv_power_down();
 * the poll outcome is ignored and 0 is always returned.
 */
static int cdv_power_up(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	u32 cmd, status;
	int attempt;

	cmd = inl(dev_priv->apm_base + PSB_APM_CMD);
	cmd = (cmd & ~PSB_PWRGT_GFX_MASK) | PSB_PWRGT_GFX_ON;
	outl(cmd, dev_priv->apm_base + PSB_APM_CMD);

	for (attempt = 0; attempt < 5; attempt++) {
		status = inl(dev_priv->apm_base + PSB_APM_STS);
		if ((status & PSB_PWRGT_GFX_MASK) == PSB_PWRGT_GFX_D0)
			return 0;
		udelay(10);
	}
	return 0;
}
/* Deferred hotplug handling: runs in process context off hotplug_work. */
static void cdv_hotplug_work_func(struct work_struct *work)
{
	struct drm_psb_private *dev_priv = container_of(work, struct drm_psb_private,
							hotplug_work);
	struct drm_device *dev = &dev_priv->dev;

	/* Just fire off a uevent and let userspace tell us what to do */
	drm_helper_hpd_irq_event(dev);
}
/*
 * The core driver has received a hotplug IRQ. We are in IRQ context
 * so extract the needed information and kick off queued processing.
 * Returns 1 to indicate the event was consumed.
 */
static int cdv_hotplug_event(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);

	schedule_work(&dev_priv->hotplug_work);
	/* Ack the pending hotplug status bits by writing them back */
	REG_WRITE(PORT_HOTPLUG_STAT, REG_READ(PORT_HOTPLUG_STAT));
	return 1;
}
/* Enable or disable hotplug interrupt generation for HDMI B/C/D and CRT. */
static void cdv_hotplug_enable(struct drm_device *dev, bool on)
{
	u32 mask;

	if (!on) {
		/* Mask everything and ack any pending status bits */
		REG_WRITE(PORT_HOTPLUG_EN, 0);
		REG_WRITE(PORT_HOTPLUG_STAT, REG_READ(PORT_HOTPLUG_STAT));
		return;
	}

	mask = HDMIB_HOTPLUG_INT_EN | HDMIC_HOTPLUG_INT_EN |
	       HDMID_HOTPLUG_INT_EN | CRT_HOTPLUG_INT_EN;
	REG_WRITE(PORT_HOTPLUG_EN, REG_READ(PORT_HOTPLUG_EN) | mask);
}
/*
 * Names for the "audio" connector property. The enum values attached in
 * cdv_intel_attach_force_audio_property() are offset by -1, i.e. they
 * run -1 ("off"), 0 ("auto"), 1 ("on").
 */
static const char *force_audio_names[] = {
	"off",
	"auto",
	"on",
};
/*
 * Attach the "audio" enum property to @connector, lazily creating the
 * property on first use and caching it in dev_priv.
 */
void cdv_intel_attach_force_audio_property(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct drm_property *prop;
	int i;

	prop = dev_priv->force_audio_property;
	if (prop == NULL) {
		prop = drm_property_create(dev, DRM_MODE_PROP_ENUM,
					   "audio",
					   ARRAY_SIZE(force_audio_names));
		if (prop == NULL)
			return;

		/* Enum values are -1 (off), 0 (auto), 1 (on) — hence i - 1 */
		for (i = 0; i < ARRAY_SIZE(force_audio_names); i++)
			drm_property_add_enum(prop, i-1, force_audio_names[i]);

		dev_priv->force_audio_property = prop;
	}
	/* Default property value is 0 ("auto") */
	drm_object_attach_property(&connector->base, prop, 0);
}
static const char *broadcast_rgb_names[] = {
"Full",
"Limited 16:235",
};
/*
 * Attach the "Broadcast RGB" enum property to @connector, creating and
 * caching the property on first use.
 */
void cdv_intel_attach_broadcast_rgb_property(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct drm_property *prop = dev_priv->broadcast_rgb_property;

	if (!prop) {
		int idx;

		prop = drm_property_create(dev, DRM_MODE_PROP_ENUM,
					   "Broadcast RGB",
					   ARRAY_SIZE(broadcast_rgb_names));
		if (!prop)
			return;

		for (idx = 0; idx < ARRAY_SIZE(broadcast_rgb_names); idx++)
			drm_property_add_enum(prop, idx,
					      broadcast_rgb_names[idx]);

		dev_priv->broadcast_rgb_property = prop;
	}

	/* Default property value is 0 ("Full") */
	drm_object_attach_property(&connector->base, prop, 0);
}
/*
 * Cedarview register map: per-pipe register offsets, indexed by pipe
 * number (entry 0 = pipe A, entry 1 = pipe B).
 */
static const struct psb_offset cdv_regmap[2] = {
	{
		/* Pipe A */
		.fp0 = FPA0,
		.fp1 = FPA1,
		.cntr = DSPACNTR,
		.conf = PIPEACONF,
		.src = PIPEASRC,
		.dpll = DPLL_A,
		.dpll_md = DPLL_A_MD,
		.htotal = HTOTAL_A,
		.hblank = HBLANK_A,
		.hsync = HSYNC_A,
		.vtotal = VTOTAL_A,
		.vblank = VBLANK_A,
		.vsync = VSYNC_A,
		.stride = DSPASTRIDE,
		.size = DSPASIZE,
		.pos = DSPAPOS,
		.base = DSPABASE,
		.surf = DSPASURF,
		.addr = DSPABASE,
		.status = PIPEASTAT,
		.linoff = DSPALINOFF,
		.tileoff = DSPATILEOFF,
		.palette = PALETTE_A,
	},
	{
		/* Pipe B */
		.fp0 = FPB0,
		.fp1 = FPB1,
		.cntr = DSPBCNTR,
		.conf = PIPEBCONF,
		.src = PIPEBSRC,
		.dpll = DPLL_B,
		.dpll_md = DPLL_B_MD,
		.htotal = HTOTAL_B,
		.hblank = HBLANK_B,
		.hsync = HSYNC_B,
		.vtotal = VTOTAL_B,
		.vblank = VBLANK_B,
		.vsync = VSYNC_B,
		.stride = DSPBSTRIDE,
		.size = DSPBSIZE,
		.pos = DSPBPOS,
		.base = DSPBBASE,
		.surf = DSPBSURF,
		.addr = DSPBBASE,
		.status = PIPEBSTAT,
		.linoff = DSPBLINOFF,
		.tileoff = DSPBTILEOFF,
		.palette = PALETTE_B,
	}
};
/* One-time chip bring-up for Cedarview; hotplug starts disabled. */
static int cdv_chip_setup(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);

	INIT_WORK(&dev_priv->hotplug_work, cdv_hotplug_work_func);
	dev_priv->use_msi = true;
	dev_priv->regmap = cdv_regmap;
	gma_get_core_freq(dev);
	psb_intel_opregion_init(dev);
	psb_intel_init_bios(dev);
	cdv_hotplug_enable(dev, false);
	return 0;
}
/* CDV is much like Poulsbo but has MID like SGX offsets and PM */
const struct psb_ops cdv_chip_ops = {
	.name = "GMA3600/3650",
	.pipes = 2,
	.crtcs = 2,
	/* Output bitmasks: HDMI on outputs 0 and 1, LVDS on 1, SDVO on 0 */
	.hdmi_mask = (1 << 0) | (1 << 1),
	.lvds_mask = (1 << 1),
	.sdvo_mask = (1 << 0),
	.cursor_needs_phys = 0,
	.sgx_offset = MRST_SGX_OFFSET,
	.chip_setup = cdv_chip_setup,
	.errata = cdv_errata,

	.crtc_helper = &cdv_intel_helper_funcs,
	.clock_funcs = &cdv_clock_funcs,

	.output_init = cdv_output_init,
	.hotplug = cdv_hotplug_event,
	.hotplug_enable = cdv_hotplug_enable,

	/* Backlight control */
	.backlight_init = cdv_backlight_init,
	.backlight_get = cdv_get_brightness,
	.backlight_set = cdv_set_brightness,
	.backlight_name = "psb-bl",

	/* Power management and suspend/resume register handling */
	.init_pm = cdv_init_pm,
	.save_regs = cdv_save_display_registers,
	.restore_regs = cdv_restore_display_registers,
	.save_crtc = gma_crtc_save,
	.restore_crtc = gma_crtc_restore,
	.power_down = cdv_power_down,
	.power_up = cdv_power_up,
	.update_wm = cdv_update_wm,
	.disable_sr = cdv_disable_sr,
};
| linux-master | drivers/gpu/drm/gma500/cdv_device.c |
/*
* Copyright 2015 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <drm/gpu_scheduler.h>
static struct kmem_cache *sched_fence_slab;
/* Create the slab cache backing all scheduler fence allocations. */
static int __init drm_sched_fence_slab_init(void)
{
	sched_fence_slab = kmem_cache_create("drm_sched_fence",
					     sizeof(struct drm_sched_fence),
					     0, SLAB_HWCACHE_ALIGN, NULL);

	return sched_fence_slab ? 0 : -ENOMEM;
}
static void __exit drm_sched_fence_slab_fini(void)
{
	/* Wait for pending RCU callbacks that free fences from the slab */
	rcu_barrier();
	kmem_cache_destroy(sched_fence_slab);
}
/*
 * Record the hardware fence backing this scheduler fence and, if a
 * deadline was already set on the finished fence, forward it to the
 * parent.
 */
static void drm_sched_fence_set_parent(struct drm_sched_fence *s_fence,
				       struct dma_fence *fence)
{
	/*
	 * smp_store_release() to ensure another thread racing us
	 * in drm_sched_fence_set_deadline_finished() sees the
	 * fence's parent set before test_bit()
	 */
	smp_store_release(&s_fence->parent, dma_fence_get(fence));
	if (test_bit(DRM_SCHED_FENCE_FLAG_HAS_DEADLINE_BIT,
		     &s_fence->finished.flags))
		dma_fence_set_deadline(fence, s_fence->deadline);
}
/* Signal the "scheduled" fence, optionally recording the parent first. */
void drm_sched_fence_scheduled(struct drm_sched_fence *fence,
			       struct dma_fence *parent)
{
	/* Set the parent before signaling the scheduled fence, such that,
	 * any waiter expecting the parent to be filled after the job has
	 * been scheduled (which is the case for drivers delegating waits
	 * to some firmware) doesn't have to busy wait for parent to show
	 * up.
	 */
	if (!IS_ERR_OR_NULL(parent))
		drm_sched_fence_set_parent(fence, parent);

	dma_fence_signal(&fence->scheduled);
}
/* Signal the "finished" fence, recording @result as its error if non-zero. */
void drm_sched_fence_finished(struct drm_sched_fence *fence, int result)
{
	if (result)
		dma_fence_set_error(&fence->finished, result);
	dma_fence_signal(&fence->finished);
}
/* dma_fence_ops->get_driver_name callback. */
static const char *drm_sched_fence_get_driver_name(struct dma_fence *fence)
{
	return "drm_sched";
}
/* dma_fence_ops->get_timeline_name callback: the scheduler's name. */
static const char *drm_sched_fence_get_timeline_name(struct dma_fence *f)
{
	struct drm_sched_fence *fence = to_drm_sched_fence(f);

	return (const char *)fence->sched->name;
}
/* RCU callback that returns a fence to the slab after the grace period. */
static void drm_sched_fence_free_rcu(struct rcu_head *rcu)
{
	struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
	struct drm_sched_fence *fence = to_drm_sched_fence(f);

	if (!WARN_ON_ONCE(!fence))
		kmem_cache_free(sched_fence_slab, fence);
}
/**
 * drm_sched_fence_free - free up an uninitialized fence
 *
 * @fence: fence to free
 *
 * Free up the fence memory. Should only be used if drm_sched_fence_init()
 * has not been called yet.
 */
void drm_sched_fence_free(struct drm_sched_fence *fence)
{
	/* This function should not be called if the fence has been initialized. */
	if (!WARN_ON_ONCE(fence->sched))
		kmem_cache_free(sched_fence_slab, fence);
}
/**
 * drm_sched_fence_release_scheduled - callback that fence can be freed
 *
 * @f: fence
 *
 * This function is called when the reference count becomes zero.
 * It just RCU schedules freeing up the fence.
 */
static void drm_sched_fence_release_scheduled(struct dma_fence *f)
{
	struct drm_sched_fence *fence = to_drm_sched_fence(f);

	dma_fence_put(fence->parent);
	call_rcu(&fence->finished.rcu, drm_sched_fence_free_rcu);
}
/**
 * drm_sched_fence_release_finished - drop extra reference
 *
 * @f: fence
 *
 * Drop the extra reference from the scheduled fence to the base fence.
 */
static void drm_sched_fence_release_finished(struct dma_fence *f)
{
	struct drm_sched_fence *fence = to_drm_sched_fence(f);

	dma_fence_put(&fence->scheduled);
}
/*
 * dma_fence_ops->set_deadline callback for the finished fence: remember
 * the earliest requested deadline and propagate it to the parent fence
 * if one has already been set.
 */
static void drm_sched_fence_set_deadline_finished(struct dma_fence *f,
						  ktime_t deadline)
{
	struct drm_sched_fence *fence = to_drm_sched_fence(f);
	struct dma_fence *parent;
	unsigned long flags;

	spin_lock_irqsave(&fence->lock, flags);

	/* If we already have an earlier deadline, keep it: */
	if (test_bit(DRM_SCHED_FENCE_FLAG_HAS_DEADLINE_BIT, &f->flags) &&
	    ktime_before(fence->deadline, deadline)) {
		spin_unlock_irqrestore(&fence->lock, flags);
		return;
	}

	fence->deadline = deadline;
	set_bit(DRM_SCHED_FENCE_FLAG_HAS_DEADLINE_BIT, &f->flags);

	spin_unlock_irqrestore(&fence->lock, flags);

	/*
	 * smp_load_acquire() to ensure that if we are racing another
	 * thread calling drm_sched_fence_set_parent(), that we see
	 * the parent set before it calls test_bit(HAS_DEADLINE_BIT)
	 */
	parent = smp_load_acquire(&fence->parent);
	if (parent)
		dma_fence_set_deadline(parent, deadline);
}
/* Ops for the "scheduled" fence; its release frees the whole object via RCU. */
static const struct dma_fence_ops drm_sched_fence_ops_scheduled = {
	.get_driver_name = drm_sched_fence_get_driver_name,
	.get_timeline_name = drm_sched_fence_get_timeline_name,
	.release = drm_sched_fence_release_scheduled,
};

/* Ops for the "finished" fence; release only drops the scheduled reference. */
static const struct dma_fence_ops drm_sched_fence_ops_finished = {
	.get_driver_name = drm_sched_fence_get_driver_name,
	.get_timeline_name = drm_sched_fence_get_timeline_name,
	.release = drm_sched_fence_release_finished,
	.set_deadline = drm_sched_fence_set_deadline_finished,
};
/*
 * Map a dma_fence back to its containing drm_sched_fence, identifying it
 * by ops table; returns NULL for fences that are not scheduler fences.
 */
struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f)
{
	struct drm_sched_fence *s_fence = NULL;

	if (f->ops == &drm_sched_fence_ops_scheduled)
		s_fence = container_of(f, struct drm_sched_fence, scheduled);
	else if (f->ops == &drm_sched_fence_ops_finished)
		s_fence = container_of(f, struct drm_sched_fence, finished);

	return s_fence;
}
EXPORT_SYMBOL(to_drm_sched_fence);
/*
 * Allocate a zeroed scheduler fence from the slab and record its owner.
 * Full initialization happens later in drm_sched_fence_init().
 */
struct drm_sched_fence *drm_sched_fence_alloc(struct drm_sched_entity *entity,
					      void *owner)
{
	struct drm_sched_fence *fence;

	fence = kmem_cache_zalloc(sched_fence_slab, GFP_KERNEL);
	if (!fence)
		return NULL;

	fence->owner = owner;
	spin_lock_init(&fence->lock);

	return fence;
}
/*
 * Finish initializing a fence from drm_sched_fence_alloc(): bind it to
 * the entity's scheduler and set up both dma_fences on consecutive
 * contexts (scheduled on fence_context, finished on fence_context + 1).
 */
void drm_sched_fence_init(struct drm_sched_fence *fence,
			  struct drm_sched_entity *entity)
{
	unsigned seq;

	fence->sched = entity->rq->sched;
	seq = atomic_inc_return(&entity->fence_seq);
	dma_fence_init(&fence->scheduled, &drm_sched_fence_ops_scheduled,
		       &fence->lock, entity->fence_context, seq);
	dma_fence_init(&fence->finished, &drm_sched_fence_ops_finished,
		       &fence->lock, entity->fence_context + 1, seq);
}
/* Slab cache lifetime is tied to module load/unload. */
module_init(drm_sched_fence_slab_init);
module_exit(drm_sched_fence_slab_fini);

MODULE_DESCRIPTION("DRM GPU scheduler");
MODULE_LICENSE("GPL and additional rights");
| linux-master | drivers/gpu/drm/scheduler/sched_fence.c |
/*
* Copyright 2015 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <drm/drm_print.h>
#include <drm/gpu_scheduler.h>
#include "gpu_scheduler_trace.h"
/* Recover the drm_sched_job from its spsc queue node. */
#define to_drm_sched_job(sched_job)		\
		container_of((sched_job), struct drm_sched_job, queue_node)
/**
 * drm_sched_entity_init - Init a context entity used by scheduler when
 * submit to HW ring.
 *
 * @entity: scheduler entity to init
 * @priority: priority of the entity
 * @sched_list: the list of drm scheds on which jobs from this
 *           entity can be submitted
 * @num_sched_list: number of drm sched in sched_list
 * @guilty: atomic_t set to 1 when a job on this queue
 *          is found to be guilty causing a timeout
 *
 * Note that the &sched_list must have at least one element to schedule the entity.
 *
 * For changing @priority later on at runtime see
 * drm_sched_entity_set_priority(). For changing the set of schedulers
 * @sched_list at runtime see drm_sched_entity_modify_sched().
 *
 * An entity is cleaned up by calling drm_sched_entity_fini(). See also
 * drm_sched_entity_destroy().
 *
 * Returns 0 on success or a negative error code on failure.
 */
int drm_sched_entity_init(struct drm_sched_entity *entity,
			  enum drm_sched_priority priority,
			  struct drm_gpu_scheduler **sched_list,
			  unsigned int num_sched_list,
			  atomic_t *guilty)
{
	if (!(entity && sched_list && (num_sched_list == 0 || sched_list[0])))
		return -EINVAL;

	memset(entity, 0, sizeof(struct drm_sched_entity));
	INIT_LIST_HEAD(&entity->list);
	entity->rq = NULL;
	entity->guilty = guilty;
	entity->num_sched_list = num_sched_list;
	entity->priority = priority;
	/* With a single scheduler there is nothing to pick from later */
	entity->sched_list = num_sched_list > 1 ? sched_list : NULL;
	RCU_INIT_POINTER(entity->last_scheduled, NULL);
	RB_CLEAR_NODE(&entity->rb_tree_node);

	if (num_sched_list)
		entity->rq = &sched_list[0]->sched_rq[entity->priority];

	init_completion(&entity->entity_idle);

	/* We start in an idle state. */
	complete_all(&entity->entity_idle);

	spin_lock_init(&entity->rq_lock);
	spsc_queue_init(&entity->job_queue);

	atomic_set(&entity->fence_seq, 0);
	/* Two contexts: one for scheduled fences, one for finished fences */
	entity->fence_context = dma_fence_context_alloc(2);

	return 0;
}
EXPORT_SYMBOL(drm_sched_entity_init);
/**
 * drm_sched_entity_modify_sched - Modify sched of an entity
 * @entity: scheduler entity to init
 * @sched_list: the list of new drm scheds which will replace
 *		 existing entity->sched_list
 * @num_sched_list: number of drm sched in sched_list
 *
 * Note that this must be called under the same common lock for @entity as
 * drm_sched_job_arm() and drm_sched_entity_push_job(), or the driver needs to
 * guarantee through some other means that this is never called while new jobs
 * can be pushed to @entity.
 */
void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
				   struct drm_gpu_scheduler **sched_list,
				   unsigned int num_sched_list)
{
	WARN_ON(!num_sched_list || !sched_list);

	entity->sched_list = sched_list;
	entity->num_sched_list = num_sched_list;
}
EXPORT_SYMBOL(drm_sched_entity_modify_sched);
/*
 * An entity is idle when it is off the scheduler's list, has an empty
 * job queue, or has been stopped.
 */
static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity)
{
	rmb(); /* for list_empty to work without lock */

	return list_empty(&entity->list) ||
	       spsc_queue_count(&entity->job_queue) == 0 ||
	       entity->stopped;
}
/* Return true if entity could provide a job. */
bool drm_sched_entity_is_ready(struct drm_sched_entity *entity)
{
	/* Nothing queued means nothing to run */
	if (!spsc_queue_peek(&entity->job_queue))
		return false;

	/* A pending dependency blocks the next job */
	return !READ_ONCE(entity->dependency);
}
/**
 * drm_sched_entity_error - return error of last scheduled job
 * @entity: scheduler entity to check
 *
 * Opportunistically return the error of the last scheduled job. Result can
 * change any time when new jobs are pushed to the hw.
 */
int drm_sched_entity_error(struct drm_sched_entity *entity)
{
	struct dma_fence *fence;
	int r;

	/* last_scheduled is RCU-protected; 0 if no job has run yet */
	rcu_read_lock();
	fence = rcu_dereference(entity->last_scheduled);
	r = fence ? fence->error : 0;
	rcu_read_unlock();

	return r;
}
EXPORT_SYMBOL(drm_sched_entity_error);
/* Work item: finish a killed job with -ESRCH and free it. */
static void drm_sched_entity_kill_jobs_work(struct work_struct *wrk)
{
	struct drm_sched_job *job = container_of(wrk, typeof(*job), work);

	drm_sched_fence_finished(job->s_fence, -ESRCH);
	/* A killed job was never handed to hardware, so no parent fence */
	WARN_ON(job->s_fence->parent);
	job->sched->ops->free_job(job);
}
/*
 * Signal the scheduler finished fence when the entity in question is killed.
 *
 * Re-arms itself on the next unsignaled dependency of @job; once all
 * dependencies are drained it punts the final cleanup to a work item
 * (this callback may run in fence-signaling context).
 */
static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
					  struct dma_fence_cb *cb)
{
	struct drm_sched_job *job = container_of(cb, struct drm_sched_job,
						 finish_cb);
	unsigned long index;

	dma_fence_put(f);

	/* Wait for all dependencies to avoid data corruptions */
	xa_for_each(&job->dependencies, index, f) {
		struct drm_sched_fence *s_fence = to_drm_sched_fence(f);

		if (s_fence && f == &s_fence->scheduled) {
			/* The dependencies array had a reference on the scheduled
			 * fence, and the finished fence refcount might have
			 * dropped to zero. Use dma_fence_get_rcu() so we get
			 * a NULL fence in that case.
			 */
			f = dma_fence_get_rcu(&s_fence->finished);

			/* Now that we have a reference on the finished fence,
			 * we can release the reference the dependencies array
			 * had on the scheduled fence.
			 */
			dma_fence_put(&s_fence->scheduled);
		}

		xa_erase(&job->dependencies, index);
		/* Callback installed: it will re-enter here when f signals */
		if (f && !dma_fence_add_callback(f, &job->finish_cb,
						 drm_sched_entity_kill_jobs_cb))
			return;

		/* f was already signaled (or NULL); drop it and keep going */
		dma_fence_put(f);
	}

	INIT_WORK(&job->work, drm_sched_entity_kill_jobs_work);
	schedule_work(&job->work);
}
/* Remove the entity from the scheduler and kill all pending jobs */
static void drm_sched_entity_kill(struct drm_sched_entity *entity)
{
	struct drm_sched_job *job;
	struct dma_fence *prev;

	if (!entity->rq)
		return;

	spin_lock(&entity->rq_lock);
	entity->stopped = true;
	drm_sched_rq_remove_entity(entity->rq, entity);
	spin_unlock(&entity->rq_lock);

	/* Make sure this entity is not used by the scheduler at the moment */
	wait_for_completion(&entity->entity_idle);

	/* The entity is guaranteed to not be used by the scheduler */
	prev = rcu_dereference_check(entity->last_scheduled, true);
	dma_fence_get(prev);

	/*
	 * Chain each queued job's kill callback off the previous fence so
	 * jobs are torn down in submission order.
	 */
	while ((job = to_drm_sched_job(spsc_queue_pop(&entity->job_queue)))) {
		struct drm_sched_fence *s_fence = job->s_fence;

		dma_fence_get(&s_fence->finished);
		/* Run the callback directly if prev is NULL or already signaled */
		if (!prev || dma_fence_add_callback(prev, &job->finish_cb,
					   drm_sched_entity_kill_jobs_cb))
			drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb);

		prev = &s_fence->finished;
	}
	dma_fence_put(prev);
}
/**
 * drm_sched_entity_flush - Flush a context entity
 *
 * @entity: scheduler entity
 * @timeout: time to wait in for Q to become empty in jiffies.
 *
 * Splitting drm_sched_entity_fini() into two functions, The first one does the
 * waiting, removes the entity from the runqueue and returns an error when the
 * process was killed.
 *
 * Returns the remaining time in jiffies left from the input timeout
 */
long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout)
{
	struct drm_gpu_scheduler *sched;
	struct task_struct *last_user;
	long ret = timeout;

	if (!entity->rq)
		return 0;

	sched = entity->rq->sched;
	/*
	 * The client will not queue more IBs during this fini, consume existing
	 * queued IBs or discard them on SIGKILL
	 */
	if (current->flags & PF_EXITING) {
		if (timeout)
			ret = wait_event_timeout(
					sched->job_scheduled,
					drm_sched_entity_is_idle(entity),
					timeout);
	} else {
		wait_event_killable(sched->job_scheduled,
				    drm_sched_entity_is_idle(entity));
	}

	/* For killed process disable any more IBs enqueue right now */
	last_user = cmpxchg(&entity->last_user, current->group_leader, NULL);
	if ((!last_user || last_user == current->group_leader) &&
	    (current->flags & PF_EXITING) && (current->exit_code == SIGKILL))
		drm_sched_entity_kill(entity);

	return ret;
}
EXPORT_SYMBOL(drm_sched_entity_flush);
/**
 * drm_sched_entity_fini - Destroy a context entity
 *
 * @entity: scheduler entity
 *
 * Cleanups up @entity which has been initialized by drm_sched_entity_init().
 *
 * If there are potentially job still in flight or getting newly queued
 * drm_sched_entity_flush() must be called first. This function then goes over
 * the entity and signals all jobs with an error code if the process was killed.
 */
void drm_sched_entity_fini(struct drm_sched_entity *entity)
{
	/*
	 * If consumption of existing IBs wasn't completed. Forcefully remove
	 * them here. Also makes sure that the scheduler won't touch this entity
	 * any more.
	 */
	drm_sched_entity_kill(entity);

	/* Detach and drop any outstanding dependency callback */
	if (entity->dependency) {
		dma_fence_remove_callback(entity->dependency, &entity->cb);
		dma_fence_put(entity->dependency);
		entity->dependency = NULL;
	}

	dma_fence_put(rcu_dereference_check(entity->last_scheduled, true));
	RCU_INIT_POINTER(entity->last_scheduled, NULL);
}
EXPORT_SYMBOL(drm_sched_entity_fini);
/**
 * drm_sched_entity_destroy - Destroy a context entity
 * @entity: scheduler entity
 *
 * Calls drm_sched_entity_flush() and drm_sched_entity_fini() as a
 * convenience wrapper.
 */
void drm_sched_entity_destroy(struct drm_sched_entity *entity)
{
	drm_sched_entity_flush(entity, MAX_WAIT_SCHED_ENTITY_Q_EMPTY);
	drm_sched_entity_fini(entity);
}
EXPORT_SYMBOL(drm_sched_entity_destroy);
/* drm_sched_entity_clear_dep - callback to clear the entities dependency */
static void drm_sched_entity_clear_dep(struct dma_fence *f,
				       struct dma_fence_cb *cb)
{
	struct drm_sched_entity *entity =
		container_of(cb, struct drm_sched_entity, cb);

	entity->dependency = NULL;
	dma_fence_put(f);
}
/*
 * drm_sched_entity_wakeup - callback to clear the entity's dependency and
 * wake up the scheduler
 */
static void drm_sched_entity_wakeup(struct dma_fence *f,
				    struct dma_fence_cb *cb)
{
	struct drm_sched_entity *entity =
		container_of(cb, struct drm_sched_entity, cb);

	drm_sched_entity_clear_dep(f, cb);
	drm_sched_wakeup_if_can_queue(entity->rq->sched);
}
/**
 * drm_sched_entity_set_priority - Sets priority of the entity
 *
 * @entity: scheduler entity
 * @priority: scheduler priority
 *
 * Update the priority of runqueus used for the entity.
 */
void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
				   enum drm_sched_priority priority)
{
	/* rq_lock serializes against runqueue selection */
	spin_lock(&entity->rq_lock);
	entity->priority = priority;
	spin_unlock(&entity->rq_lock);
}
EXPORT_SYMBOL(drm_sched_entity_set_priority);
/*
 * Add a callback to the current dependency of the entity to wake up the
 * scheduler when the entity becomes available.
 *
 * Returns true if a callback was installed (the caller must wait), false
 * if the dependency is already resolved and was dropped.
 */
static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity)
{
	struct drm_gpu_scheduler *sched = entity->rq->sched;
	struct dma_fence *fence = entity->dependency;
	struct drm_sched_fence *s_fence;

	if (fence->context == entity->fence_context ||
	    fence->context == entity->fence_context + 1) {
		/*
		 * Fence is a scheduled/finished fence from a job
		 * which belongs to the same entity, we can ignore
		 * fences from ourself
		 */
		dma_fence_put(entity->dependency);
		return false;
	}

	s_fence = to_drm_sched_fence(fence);
	if (!fence->error && s_fence && s_fence->sched == sched &&
	    !test_bit(DRM_SCHED_FENCE_DONT_PIPELINE, &fence->flags)) {

		/*
		 * Fence is from the same scheduler, only need to wait for
		 * it to be scheduled
		 */
		fence = dma_fence_get(&s_fence->scheduled);
		dma_fence_put(entity->dependency);
		entity->dependency = fence;
		if (!dma_fence_add_callback(fence, &entity->cb,
					    drm_sched_entity_clear_dep))
			return true;

		/* Ignore it when it is already scheduled */
		dma_fence_put(fence);
		return false;
	}

	/* Cross-scheduler dependency: wake us up when it signals */
	if (!dma_fence_add_callback(entity->dependency, &entity->cb,
				    drm_sched_entity_wakeup))
		return true;

	dma_fence_put(entity->dependency);
	return false;
}
/*
 * Return the next unresolved dependency of @job, or NULL when all are
 * consumed. Falls back to the driver's prepare_job hook after the
 * dependencies array is exhausted.
 */
static struct dma_fence *
drm_sched_job_dependency(struct drm_sched_job *job,
			 struct drm_sched_entity *entity)
{
	struct dma_fence *f;

	/* We keep the fence around, so we can iterate over all dependencies
	 * in drm_sched_entity_kill_jobs_cb() to ensure all deps are signaled
	 * before killing the job.
	 */
	f = xa_load(&job->dependencies, job->last_dependency);
	if (f) {
		job->last_dependency++;
		return dma_fence_get(f);
	}

	if (job->sched->ops->prepare_job)
		return job->sched->ops->prepare_job(job, entity);

	return NULL;
}
/*
 * Pop the next runnable job from the entity's queue, or return NULL if
 * the queue is empty or the next job still has pending dependencies.
 */
struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity)
{
	struct drm_sched_job *sched_job;

	sched_job = to_drm_sched_job(spsc_queue_peek(&entity->job_queue));
	if (!sched_job)
		return NULL;

	/* Resolve dependencies one by one; bail if one is still pending */
	while ((entity->dependency =
			drm_sched_job_dependency(sched_job, entity))) {
		trace_drm_sched_job_wait_dep(sched_job, entity->dependency);

		if (drm_sched_entity_add_dependency_cb(entity))
			return NULL;
	}

	/* skip jobs from entity that marked guilty */
	if (entity->guilty && atomic_read(entity->guilty))
		dma_fence_set_error(&sched_job->s_fence->finished, -ECANCELED);

	dma_fence_put(rcu_dereference_check(entity->last_scheduled, true));
	rcu_assign_pointer(entity->last_scheduled,
			   dma_fence_get(&sched_job->s_fence->finished));

	/*
	 * If the queue is empty we allow drm_sched_entity_select_rq() to
	 * locklessly access ->last_scheduled. This only works if we set the
	 * pointer before we dequeue and if we a write barrier here.
	 */
	smp_wmb();

	spsc_queue_pop(&entity->job_queue);

	/*
	 * Update the entity's location in the min heap according to
	 * the timestamp of the next job, if any.
	 */
	if (drm_sched_policy == DRM_SCHED_POLICY_FIFO) {
		struct drm_sched_job *next;

		next = to_drm_sched_job(spsc_queue_peek(&entity->job_queue));
		if (next)
			drm_sched_rq_update_fifo(entity, next->submit_ts);
	}

	/* Jobs and entities might have different lifecycles. Since we're
	 * removing the job from the entities queue, set the jobs entity pointer
	 * to NULL to prevent any future access of the entity through this job.
	 */
	sched_job->entity = NULL;

	return sched_job;
}
/*
 * Pick the best runqueue (load balancing across the entity's scheduler
 * list) for the next job. Only safe when the queue is empty and the
 * previous job has finished.
 */
void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
{
	struct dma_fence *fence;
	struct drm_gpu_scheduler *sched;
	struct drm_sched_rq *rq;

	/* single possible engine and already selected */
	if (!entity->sched_list)
		return;

	/* queue non-empty, stay on the same engine */
	if (spsc_queue_count(&entity->job_queue))
		return;

	/*
	 * Only when the queue is empty are we guaranteed that the scheduler
	 * thread cannot change ->last_scheduled. To enforce ordering we need
	 * a read barrier here. See drm_sched_entity_pop_job() for the other
	 * side.
	 */
	smp_rmb();

	fence = rcu_dereference_check(entity->last_scheduled, true);

	/* stay on the same engine if the previous job hasn't finished */
	if (fence && !dma_fence_is_signaled(fence))
		return;

	spin_lock(&entity->rq_lock);
	sched = drm_sched_pick_best(entity->sched_list, entity->num_sched_list);
	rq = sched ? &sched->sched_rq[entity->priority] : NULL;
	if (rq != entity->rq) {
		drm_sched_rq_remove_entity(entity->rq, entity);
		entity->rq = rq;
	}
	spin_unlock(&entity->rq_lock);

	/* One scheduler left means the choice is now fixed */
	if (entity->num_sched_list == 1)
		entity->sched_list = NULL;
}
/**
 * drm_sched_entity_push_job - Submit a job to the entity's job queue
 * @sched_job: job to submit
 *
 * Note: To guarantee that the order of insertion to queue matches the job's
 * fence sequence number this function should be called with drm_sched_job_arm()
 * under common lock for the struct drm_sched_entity that was set up for
 * @sched_job in drm_sched_job_init().
 */
void drm_sched_entity_push_job(struct drm_sched_job *sched_job)
{
	struct drm_sched_entity *entity = sched_job->entity;
	bool first;
	ktime_t submit_ts;

	trace_drm_sched_job(sched_job, entity);
	atomic_inc(entity->rq->sched->score);
	WRITE_ONCE(entity->last_user, current->group_leader);

	/*
	 * After the sched_job is pushed into the entity queue, it may be
	 * completed and freed up at any time. We can no longer access it.
	 * Make sure to set the submit_ts first, to avoid a race.
	 */
	sched_job->submit_ts = submit_ts = ktime_get();
	first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);

	/* first job wakes up scheduler */
	if (first) {
		/* Add the entity to the run queue */
		spin_lock(&entity->rq_lock);
		if (entity->stopped) {
			spin_unlock(&entity->rq_lock);

			DRM_ERROR("Trying to push to a killed entity\n");
			return;
		}

		drm_sched_rq_add_entity(entity->rq, entity);
		spin_unlock(&entity->rq_lock);

		if (drm_sched_policy == DRM_SCHED_POLICY_FIFO)
			drm_sched_rq_update_fifo(entity, submit_ts);

		drm_sched_wakeup_if_can_queue(entity->rq->sched);
	}
}
EXPORT_SYMBOL(drm_sched_entity_push_job);
| linux-master | drivers/gpu/drm/scheduler/sched_entity.c |
/*
* Copyright 2015 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
/**
* DOC: Overview
*
* The GPU scheduler provides entities which allow userspace to push jobs
* into software queues which are then scheduled on a hardware run queue.
* The software queues have a priority among them. The scheduler selects the entities
* from the run queue using a FIFO. The scheduler provides dependency handling
* features among jobs. The driver is supposed to provide callback functions for
* backend operations to the scheduler like submitting a job to hardware run queue,
* returning the dependencies of a job etc.
*
* The organisation of the scheduler is the following:
*
* 1. Each hw run queue has one scheduler
* 2. Each scheduler has multiple run queues with different priorities
* (e.g., HIGH_HW,HIGH_SW, KERNEL, NORMAL)
* 3. Each scheduler run queue has a queue of entities to schedule
* 4. Entities themselves maintain a queue of jobs that will be scheduled on
* the hardware.
*
 * The jobs in an entity are always scheduled in the order in which they were pushed.
*
* Note that once a job was taken from the entities queue and pushed to the
* hardware, i.e. the pending queue, the entity must not be referenced anymore
* through the jobs entity pointer.
*/
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/completion.h>
#include <linux/dma-resv.h>
#include <uapi/linux/sched/types.h>
#include <drm/drm_print.h>
#include <drm/drm_gem.h>
#include <drm/drm_syncobj.h>
#include <drm/gpu_scheduler.h>
#include <drm/spsc_queue.h>
#define CREATE_TRACE_POINTS
#include "gpu_scheduler_trace.h"
#define to_drm_sched_job(sched_job) \
container_of((sched_job), struct drm_sched_job, queue_node)
int drm_sched_policy = DRM_SCHED_POLICY_FIFO;
/**
* DOC: sched_policy (int)
* Used to override default entities scheduling policy in a run queue.
*/
MODULE_PARM_DESC(sched_policy, "Specify the scheduling policy for entities on a run-queue, " __stringify(DRM_SCHED_POLICY_RR) " = Round Robin, " __stringify(DRM_SCHED_POLICY_FIFO) " = FIFO (default).");
module_param_named(sched_policy, drm_sched_policy, int, 0444);
static __always_inline bool drm_sched_entity_compare_before(struct rb_node *a,
							    const struct rb_node *b)
{
	struct drm_sched_entity *lhs, *rhs;

	/* Order entities by the submit timestamp of their oldest queued job. */
	lhs = rb_entry(a, struct drm_sched_entity, rb_tree_node);
	rhs = rb_entry(b, struct drm_sched_entity, rb_tree_node);

	return ktime_before(lhs->oldest_job_waiting, rhs->oldest_job_waiting);
}
/* Drop @entity from its run queue's FIFO rb-tree; rq->lock must be held. */
static inline void drm_sched_rq_remove_fifo_locked(struct drm_sched_entity *entity)
{
	struct drm_sched_rq *rq = entity->rq;

	/* Nothing to do if the entity is not queued in the tree. */
	if (RB_EMPTY_NODE(&entity->rb_tree_node))
		return;

	rb_erase_cached(&entity->rb_tree_node, &rq->rb_tree_root);
	RB_CLEAR_NODE(&entity->rb_tree_node);
}
/**
 * drm_sched_rq_update_fifo - re-queue an entity in the FIFO rb-tree
 * @entity: scheduler entity to update
 * @ts: submit timestamp used as the entity's sort key
 *
 * Re-inserts @entity into its run queue's rb-tree keyed by @ts so the FIFO
 * policy can pick the entity with the oldest waiting job first.
 */
void drm_sched_rq_update_fifo(struct drm_sched_entity *entity, ktime_t ts)
{
	/*
	 * Both locks need to be grabbed, one to protect from entity->rq change
	 * for entity from within concurrent drm_sched_entity_select_rq and the
	 * other to update the rb tree structure.
	 */
	spin_lock(&entity->rq_lock);
	spin_lock(&entity->rq->lock);

	/* Drop any stale node first; rb_add_cached() expects a fresh insert. */
	drm_sched_rq_remove_fifo_locked(entity);

	entity->oldest_job_waiting = ts;

	rb_add_cached(&entity->rb_tree_node, &entity->rq->rb_tree_root,
		      drm_sched_entity_compare_before);

	spin_unlock(&entity->rq->lock);
	spin_unlock(&entity->rq_lock);
}
/**
 * drm_sched_rq_init - initialize a given run queue struct
 *
 * @sched: scheduler instance to associate with this run queue
 * @rq: scheduler run queue
 *
 * Initializes a scheduler runqueue: empty entity list (round-robin),
 * empty rb-tree (FIFO), no current cursor, back-pointer to @sched.
 */
static void drm_sched_rq_init(struct drm_gpu_scheduler *sched,
			      struct drm_sched_rq *rq)
{
	spin_lock_init(&rq->lock);
	INIT_LIST_HEAD(&rq->entities);
	rq->rb_tree_root = RB_ROOT_CACHED;
	rq->current_entity = NULL;
	rq->sched = sched;
}
/**
 * drm_sched_rq_add_entity - add an entity
 *
 * @rq: scheduler run queue
 * @entity: scheduler entity
 *
 * Adds a scheduler entity to the run queue.
 */
void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
			     struct drm_sched_entity *entity)
{
	/* Already on a run queue; nothing to do. NOTE(review): this check is
	 * done outside rq->lock — presumably callers serialize via
	 * entity->rq_lock (see drm_sched_entity_push_job()); confirm before
	 * relying on it elsewhere.
	 */
	if (!list_empty(&entity->list))
		return;

	spin_lock(&rq->lock);

	atomic_inc(rq->sched->score);
	list_add_tail(&entity->list, &rq->entities);

	spin_unlock(&rq->lock);
}
/**
 * drm_sched_rq_remove_entity - remove an entity
 *
 * @rq: scheduler run queue
 * @entity: scheduler entity
 *
 * Removes a scheduler entity from the run queue.
 */
void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
				struct drm_sched_entity *entity)
{
	/* Not on any run queue; nothing to do. */
	if (list_empty(&entity->list))
		return;

	spin_lock(&rq->lock);

	atomic_dec(rq->sched->score);
	list_del_init(&entity->list);

	/* Don't leave a dangling round-robin cursor behind. */
	if (rq->current_entity == entity)
		rq->current_entity = NULL;

	/* The FIFO policy additionally tracks entities in an rb-tree. */
	if (drm_sched_policy == DRM_SCHED_POLICY_FIFO)
		drm_sched_rq_remove_fifo_locked(entity);

	spin_unlock(&rq->lock);
}
/**
 * drm_sched_rq_select_entity_rr - Select an entity which could provide a job to run
 *
 * @rq: scheduler run queue to check.
 *
 * Try to find a ready entity, returns NULL if none found.
 */
static struct drm_sched_entity *
drm_sched_rq_select_entity_rr(struct drm_sched_rq *rq)
{
	struct drm_sched_entity *entity;

	spin_lock(&rq->lock);

	entity = rq->current_entity;
	if (entity) {
		/* Round-robin: continue the scan after the last pick. */
		list_for_each_entry_continue(entity, &rq->entities, list) {
			if (drm_sched_entity_is_ready(entity)) {
				rq->current_entity = entity;
				/* Mark non-idle; drm_sched_main() completes
				 * entity_idle once the job is handled.
				 */
				reinit_completion(&entity->entity_idle);
				spin_unlock(&rq->lock);
				return entity;
			}
		}
	}

	/* Wrap around: scan from the head up to and including the cursor. */
	list_for_each_entry(entity, &rq->entities, list) {
		if (drm_sched_entity_is_ready(entity)) {
			rq->current_entity = entity;
			reinit_completion(&entity->entity_idle);
			spin_unlock(&rq->lock);
			return entity;
		}

		if (entity == rq->current_entity)
			break;
	}

	spin_unlock(&rq->lock);

	return NULL;
}
/**
 * drm_sched_rq_select_entity_fifo - Select an entity which provides a job to run
 *
 * @rq: scheduler run queue to check.
 *
 * Find oldest waiting ready entity, returns NULL if none found.
 */
static struct drm_sched_entity *
drm_sched_rq_select_entity_fifo(struct drm_sched_rq *rq)
{
	struct rb_node *rb;

	spin_lock(&rq->lock);
	/* The tree is ordered by oldest_job_waiting, so an in-order walk
	 * visits the longest-waiting entities first.
	 */
	for (rb = rb_first_cached(&rq->rb_tree_root); rb; rb = rb_next(rb)) {
		struct drm_sched_entity *entity;

		entity = rb_entry(rb, struct drm_sched_entity, rb_tree_node);
		if (drm_sched_entity_is_ready(entity)) {
			rq->current_entity = entity;
			/* Mark non-idle; drm_sched_main() completes
			 * entity_idle once the job is handled.
			 */
			reinit_completion(&entity->entity_idle);
			break;
		}
	}
	spin_unlock(&rq->lock);

	return rb ? rb_entry(rb, struct drm_sched_entity, rb_tree_node) : NULL;
}
/**
 * drm_sched_job_done - complete a job
 * @s_job: pointer to the job which is done
 *
 * Finish the job's fence and wake up the worker thread.
 */
static void drm_sched_job_done(struct drm_sched_job *s_job, int result)
{
	struct drm_sched_fence *s_fence = s_job->s_fence;
	struct drm_gpu_scheduler *sched = s_fence->sched;

	/* One hardware submission slot is free again. */
	atomic_dec(&sched->hw_rq_count);
	atomic_dec(sched->score);

	trace_drm_sched_process_job(s_fence);

	/* Keep the finished fence alive across signalling it. */
	dma_fence_get(&s_fence->finished);
	drm_sched_fence_finished(s_fence, result);
	dma_fence_put(&s_fence->finished);

	/* The scheduler thread may now be able to run or free more jobs. */
	wake_up_interruptible(&sched->wake_up_worker);
}
/**
 * drm_sched_job_done_cb - the callback for a done job
 * @f: hardware fence that signaled
 * @cb: callback struct embedded in the job
 *
 * Recovers the job from its embedded callback and completes it with the
 * hardware fence's error status.
 */
static void drm_sched_job_done_cb(struct dma_fence *f, struct dma_fence_cb *cb)
{
	struct drm_sched_job *s_job;

	s_job = container_of(cb, struct drm_sched_job, cb);
	drm_sched_job_done(s_job, f->error);
}
/**
 * drm_sched_start_timeout - start timeout for reset worker
 *
 * @sched: scheduler instance to start the worker for
 *
 * (Re)arm the timeout worker, but only if timeout handling is enabled for
 * this scheduler and there are jobs in flight that could time out.
 */
static void drm_sched_start_timeout(struct drm_gpu_scheduler *sched)
{
	/* Timeout handling disabled for this scheduler. */
	if (sched->timeout == MAX_SCHEDULE_TIMEOUT)
		return;

	/* Nothing pending, nothing can time out. */
	if (list_empty(&sched->pending_list))
		return;

	queue_delayed_work(sched->timeout_wq, &sched->work_tdr, sched->timeout);
}
/**
 * drm_sched_fault - immediately start timeout handler
 *
 * @sched: scheduler where the timeout handling should be started.
 *
 * Fire the timeout worker right away when the driver detects a hardware
 * fault, instead of waiting for the normal timeout to elapse.
 */
void drm_sched_fault(struct drm_gpu_scheduler *sched)
{
	/* Schedulers without timeout handling have no timeout workqueue. */
	if (!sched->timeout_wq)
		return;

	mod_delayed_work(sched->timeout_wq, &sched->work_tdr, 0);
}
EXPORT_SYMBOL(drm_sched_fault);
/**
 * drm_sched_suspend_timeout - Suspend scheduler job timeout
 *
 * @sched: scheduler instance for which to suspend the timeout
 *
 * Suspend the delayed work timeout for the scheduler. This is done by
 * modifying the delayed work timeout to an arbitrary large value,
 * MAX_SCHEDULE_TIMEOUT in this case.
 *
 * Returns the timeout remaining
 *
 */
unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched)
{
	unsigned long sched_timeout, now = jiffies;

	/* Snapshot the current expiry before pushing it out. */
	sched_timeout = sched->work_tdr.timer.expires;

	/*
	 * Modify the timeout to an arbitrarily large value. This also prevents
	 * the timeout to be restarted when new submissions arrive
	 */
	if (mod_delayed_work(sched->timeout_wq, &sched->work_tdr, MAX_SCHEDULE_TIMEOUT)
			&& time_after(sched_timeout, now))
		return sched_timeout - now;
	else
		/* Work was idle or already expired: report the full timeout. */
		return sched->timeout;
}
EXPORT_SYMBOL(drm_sched_suspend_timeout);
/**
 * drm_sched_resume_timeout - Resume scheduler job timeout
 *
 * @sched: scheduler instance for which to resume the timeout
 * @remaining: remaining timeout
 *
 * Re-arm the timeout worker with the remaining time if jobs are still
 * pending, otherwise cancel it entirely.
 */
void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
			      unsigned long remaining)
{
	spin_lock(&sched->job_list_lock);

	if (!list_empty(&sched->pending_list))
		mod_delayed_work(sched->timeout_wq, &sched->work_tdr, remaining);
	else
		cancel_delayed_work(&sched->work_tdr);

	spin_unlock(&sched->job_list_lock);
}
EXPORT_SYMBOL(drm_sched_resume_timeout);
/* Add @s_job to the pending list and (re)arm the timeout worker for it. */
static void drm_sched_job_begin(struct drm_sched_job *s_job)
{
	struct drm_gpu_scheduler *sched = s_job->sched;

	spin_lock(&sched->job_list_lock);
	list_add_tail(&s_job->list, &sched->pending_list);
	drm_sched_start_timeout(sched);
	spin_unlock(&sched->job_list_lock);
}
/* Timeout worker: hand the job at the head of the pending list (the one
 * presumed hung) to the driver's timedout_job() callback, then re-arm the
 * timer unless the device is gone.
 */
static void drm_sched_job_timedout(struct work_struct *work)
{
	struct drm_gpu_scheduler *sched;
	struct drm_sched_job *job;
	enum drm_gpu_sched_stat status = DRM_GPU_SCHED_STAT_NOMINAL;

	sched = container_of(work, struct drm_gpu_scheduler, work_tdr.work);

	/* Protects against concurrent deletion in drm_sched_get_cleanup_job */
	spin_lock(&sched->job_list_lock);
	job = list_first_entry_or_null(&sched->pending_list,
				       struct drm_sched_job, list);

	if (job) {
		/*
		 * Remove the bad job so it cannot be freed by concurrent
		 * drm_sched_cleanup_jobs. It will be reinserted back after sched->thread
		 * is parked at which point it's safe.
		 */
		list_del_init(&job->list);
		spin_unlock(&sched->job_list_lock);

		status = job->sched->ops->timedout_job(job);

		/*
		 * Guilty job did complete and hence needs to be manually removed
		 * See drm_sched_stop doc.
		 */
		if (sched->free_guilty) {
			job->sched->ops->free_job(job);
			sched->free_guilty = false;
		}
	} else {
		spin_unlock(&sched->job_list_lock);
	}

	/* Unless the device is gone, keep timeout handling armed. */
	if (status != DRM_GPU_SCHED_STAT_ENODEV) {
		spin_lock(&sched->job_list_lock);
		drm_sched_start_timeout(sched);
		spin_unlock(&sched->job_list_lock);
	}
}
/**
 * drm_sched_stop - stop the scheduler
 *
 * @sched: scheduler instance
 * @bad: job which caused the time out
 *
 * Stop the scheduler and also removes and frees all completed jobs.
 * Note: bad job will not be freed as it might be used later and so it's
 * the caller's responsibility to release it manually if it's not part of the
 * pending list any more.
 *
 */
void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
{
	struct drm_sched_job *s_job, *tmp;

	/* Park the scheduler thread: no new submissions or cleanups. */
	kthread_park(sched->thread);

	/*
	 * Reinsert back the bad job here - now it's safe as
	 * drm_sched_get_cleanup_job cannot race against us and release the
	 * bad job at this point - we parked (waited for) any in progress
	 * (earlier) cleanups and drm_sched_get_cleanup_job will not be called
	 * now until the scheduler thread is unparked.
	 */
	if (bad && bad->sched == sched)
		/*
		 * Add at the head of the queue to reflect it was the earliest
		 * job extracted.
		 */
		list_add(&bad->list, &sched->pending_list);

	/*
	 * Iterate the job list from later to earlier one and either deactivate
	 * their HW callbacks or remove them from pending list if they already
	 * signaled.
	 * This iteration is thread safe as sched thread is stopped.
	 */
	list_for_each_entry_safe_reverse(s_job, tmp, &sched->pending_list,
					 list) {
		if (s_job->s_fence->parent &&
		    dma_fence_remove_callback(s_job->s_fence->parent,
					      &s_job->cb)) {
			/* Callback removed before it ran: the job no longer
			 * counts as an in-flight hardware submission.
			 */
			dma_fence_put(s_job->s_fence->parent);
			s_job->s_fence->parent = NULL;
			atomic_dec(&sched->hw_rq_count);
		} else {
			/*
			 * remove job from pending_list.
			 * Locking here is for concurrent resume timeout
			 */
			spin_lock(&sched->job_list_lock);
			list_del_init(&s_job->list);
			spin_unlock(&sched->job_list_lock);

			/*
			 * Wait for job's HW fence callback to finish using s_job
			 * before releasing it.
			 *
			 * Job is still alive so fence refcount at least 1
			 */
			dma_fence_wait(&s_job->s_fence->finished, false);

			/*
			 * We must keep bad job alive for later use during
			 * recovery by some of the drivers but leave a hint
			 * that the guilty job must be released.
			 */
			if (bad != s_job)
				sched->ops->free_job(s_job);
			else
				sched->free_guilty = true;
		}
	}

	/*
	 * Stop pending timer in flight as we rearm it in drm_sched_start. This
	 * avoids the pending timeout work in progress to fire right away after
	 * this TDR finished and before the newly restarted jobs had a
	 * chance to complete.
	 */
	cancel_delayed_work(&sched->work_tdr);
}
EXPORT_SYMBOL(drm_sched_stop);
/**
 * drm_sched_start - recover jobs after a reset
 *
 * @sched: scheduler instance
 * @full_recovery: proceed with complete sched restart
 *
 */
void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery)
{
	struct drm_sched_job *s_job, *tmp;
	int r;

	/*
	 * Locking the list is not required here as the sched thread is parked
	 * so no new jobs are being inserted or removed. Also concurrent
	 * GPU recovers can't run in parallel.
	 */
	list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) {
		struct dma_fence *fence = s_job->s_fence->parent;

		/* Every pending job occupies a hardware slot again. */
		atomic_inc(&sched->hw_rq_count);

		if (!full_recovery)
			continue;

		if (fence) {
			/* Re-attach the completion callback that was removed
			 * in drm_sched_stop().
			 */
			r = dma_fence_add_callback(fence, &s_job->cb,
						   drm_sched_job_done_cb);
			if (r == -ENOENT)
				/* Fence already signaled; complete directly. */
				drm_sched_job_done(s_job, fence->error);
			else if (r)
				DRM_DEV_ERROR(sched->dev, "fence add callback failed (%d)\n",
					      r);
		} else
			/* No hardware fence attached; cancel the job. */
			drm_sched_job_done(s_job, -ECANCELED);
	}

	if (full_recovery) {
		spin_lock(&sched->job_list_lock);
		drm_sched_start_timeout(sched);
		spin_unlock(&sched->job_list_lock);
	}

	kthread_unpark(sched->thread);
}
EXPORT_SYMBOL(drm_sched_start);
/**
 * drm_sched_resubmit_jobs - Deprecated, don't use in new code!
 *
 * @sched: scheduler instance
 *
 * Re-submitting jobs was a concept AMD came up as cheap way to implement
 * recovery after a job timeout.
 *
 * This turned out to be not working very well. First of all there are many
 * problem with the dma_fence implementation and requirements. Either the
 * implementation is risking deadlocks with core memory management or violating
 * documented implementation details of the dma_fence object.
 *
 * Drivers can still save and restore their state for recovery operations, but
 * we shouldn't make this a general scheduler feature around the dma_fence
 * interface.
 */
void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched)
{
	struct drm_sched_job *s_job, *tmp;
	uint64_t guilty_context;
	bool found_guilty = false;
	struct dma_fence *fence;

	list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) {
		struct drm_sched_fence *s_fence = s_job->s_fence;

		/* The first over-karma job marks its whole fence context as
		 * guilty; all of that context's jobs get cancelled below.
		 */
		if (!found_guilty && atomic_read(&s_job->karma) > sched->hang_limit) {
			found_guilty = true;
			guilty_context = s_job->s_fence->scheduled.context;
		}

		if (found_guilty && s_job->s_fence->scheduled.context == guilty_context)
			dma_fence_set_error(&s_fence->finished, -ECANCELED);

		/* Hand the job back to the driver for re-execution. */
		fence = sched->ops->run_job(s_job);
		if (IS_ERR_OR_NULL(fence)) {
			if (IS_ERR(fence))
				dma_fence_set_error(&s_fence->finished, PTR_ERR(fence));

			s_job->s_fence->parent = NULL;
		} else {

			s_job->s_fence->parent = dma_fence_get(fence);

			/* Drop for original kref_init */
			dma_fence_put(fence);
		}
	}
}
EXPORT_SYMBOL(drm_sched_resubmit_jobs);
/**
 * drm_sched_job_init - init a scheduler job
 * @job: scheduler job to init
 * @entity: scheduler entity to use
 * @owner: job owner for debugging
 *
 * Refer to drm_sched_entity_push_job() documentation
 * for locking considerations.
 *
 * Drivers must make sure to call drm_sched_job_cleanup() if this function
 * returns successfully, even when @job is aborted before drm_sched_job_arm()
 * is called.
 *
 * WARNING: amdgpu abuses &drm_sched.ready to signal when the hardware
 * has died, which can mean that there's no valid runqueue for a @entity.
 * This function returns -ENOENT in this case (which probably should be -EIO as
 * a more meaningful return value).
 *
 * Returns 0 for success, negative error code otherwise.
 */
int drm_sched_job_init(struct drm_sched_job *job,
		       struct drm_sched_entity *entity,
		       void *owner)
{
	if (!entity->rq)
		return -ENOENT;

	job->entity = entity;
	job->s_fence = drm_sched_fence_alloc(entity, owner);
	if (!job->s_fence)
		return -ENOMEM;

	INIT_LIST_HEAD(&job->list);

	/* Dependency fences live in an xarray, see
	 * drm_sched_job_add_dependency().
	 */
	xa_init_flags(&job->dependencies, XA_FLAGS_ALLOC);

	return 0;
}
EXPORT_SYMBOL(drm_sched_job_init);
/**
 * drm_sched_job_arm - arm a scheduler job for execution
 * @job: scheduler job to arm
 *
 * This arms a scheduler job for execution. Specifically it initializes the
 * &drm_sched_job.s_fence of @job, so that it can be attached to struct dma_resv
 * or other places that need to track the completion of this job.
 *
 * Refer to drm_sched_entity_push_job() documentation for locking
 * considerations.
 *
 * This can only be called if drm_sched_job_init() succeeded.
 */
void drm_sched_job_arm(struct drm_sched_job *job)
{
	struct drm_gpu_scheduler *sched;
	struct drm_sched_entity *entity = job->entity;

	BUG_ON(!entity);
	/* Possibly migrate the entity to the least-loaded scheduler. */
	drm_sched_entity_select_rq(entity);
	sched = entity->rq->sched;

	job->sched = sched;
	/* Priority index == offset of the rq within the scheduler's array. */
	job->s_priority = entity->rq - sched->sched_rq;
	job->id = atomic64_inc_return(&sched->job_id_count);

	drm_sched_fence_init(job->s_fence, job->entity);
}
EXPORT_SYMBOL(drm_sched_job_arm);
/**
 * drm_sched_job_add_dependency - adds the fence as a job dependency
 * @job: scheduler job to add the dependencies to
 * @fence: the dma_fence to add to the list of dependencies.
 *
 * Note that @fence is consumed in both the success and error cases.
 *
 * Returns:
 * 0 on success, or an error on failing to expand the array.
 */
int drm_sched_job_add_dependency(struct drm_sched_job *job,
				 struct dma_fence *fence)
{
	struct dma_fence *entry;
	unsigned long index;
	u32 id = 0;
	int ret;

	if (!fence)
		return 0;

	/* Deduplicate if we already depend on a fence from the same context.
	 * This lets the size of the array of deps scale with the number of
	 * engines involved, rather than the number of BOs.
	 */
	xa_for_each(&job->dependencies, index, entry) {
		if (entry->context != fence->context)
			continue;

		if (dma_fence_is_later(fence, entry)) {
			/* The new fence supersedes the stored one. */
			dma_fence_put(entry);
			xa_store(&job->dependencies, index, fence, GFP_KERNEL);
		} else {
			/* Already covered by a later fence; drop the new one. */
			dma_fence_put(fence);
		}
		return 0;
	}

	ret = xa_alloc(&job->dependencies, &id, fence, xa_limit_32b, GFP_KERNEL);
	if (ret != 0)
		/* Keep the "@fence is consumed on error" promise. */
		dma_fence_put(fence);

	return ret;
}
EXPORT_SYMBOL(drm_sched_job_add_dependency);
/**
 * drm_sched_job_add_syncobj_dependency - adds a syncobj's fence as a job dependency
 * @job: scheduler job to add the dependencies to
 * @file: drm file private pointer
 * @handle: syncobj handle to lookup
 * @point: timeline point
 *
 * Looks up the fence behind the given syncobj handle (and timeline point)
 * and records it as a dependency of @job.
 *
 * Returns:
 * 0 on success, or an error on failing to expand the array.
 */
int drm_sched_job_add_syncobj_dependency(struct drm_sched_job *job,
					 struct drm_file *file,
					 u32 handle,
					 u32 point)
{
	struct dma_fence *fence;
	int ret = drm_syncobj_find_fence(file, handle, point, 0, &fence);

	/* On success the looked-up fence reference is handed to the job. */
	return ret ? ret : drm_sched_job_add_dependency(job, fence);
}
EXPORT_SYMBOL(drm_sched_job_add_syncobj_dependency);
/**
 * drm_sched_job_add_resv_dependencies - add all fences from the resv to the job
 * @job: scheduler job to add the dependencies to
 * @resv: the dma_resv object to get the fences from
 * @usage: the dma_resv_usage to use to filter the fences
 *
 * This adds all fences matching the given usage from @resv to @job.
 * Must be called with the @resv lock held.
 *
 * Returns:
 * 0 on success, or an error on failing to expand the array.
 */
int drm_sched_job_add_resv_dependencies(struct drm_sched_job *job,
					struct dma_resv *resv,
					enum dma_resv_usage usage)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;
	int ret;

	dma_resv_assert_held(resv);

	dma_resv_for_each_fence(&cursor, resv, usage, fence) {
		/* Make sure to grab an additional ref on the added fence */
		dma_fence_get(fence);
		ret = drm_sched_job_add_dependency(job, fence);
		if (ret) {
			/* add_dependency() already dropped its ref on error;
			 * drop the iterator's reference as well.
			 */
			dma_fence_put(fence);
			return ret;
		}
	}
	return 0;
}
EXPORT_SYMBOL(drm_sched_job_add_resv_dependencies);
/**
 * drm_sched_job_add_implicit_dependencies - adds implicit dependencies as job
 * dependencies
 * @job: scheduler job to add the dependencies to
 * @obj: the gem object to add new dependencies from.
 * @write: whether the job might write the object (so we need to depend on
 * shared fences in the reservation object).
 *
 * Implicit sync: record the relevant fences already stored in the BO's
 * reservation object as dependencies of @job. Call this after
 * drm_gem_lock_reservations() on your array of GEM objects, but before
 * installing your own fences.
 *
 * Returns:
 * 0 on success, or an error on failing to expand the array.
 */
int drm_sched_job_add_implicit_dependencies(struct drm_sched_job *job,
					    struct drm_gem_object *obj,
					    bool write)
{
	enum dma_resv_usage usage = dma_resv_usage_rw(write);

	return drm_sched_job_add_resv_dependencies(job, obj->resv, usage);
}
EXPORT_SYMBOL(drm_sched_job_add_implicit_dependencies);
/**
 * drm_sched_job_cleanup - clean up scheduler job resources
 * @job: scheduler job to clean up
 *
 * Cleans up the resources allocated with drm_sched_job_init().
 *
 * Drivers should call this from their error unwind code if @job is aborted
 * before drm_sched_job_arm() is called.
 *
 * After that point of no return @job is committed to be executed by the
 * scheduler, and this function should be called from the
 * &drm_sched_backend_ops.free_job callback.
 */
void drm_sched_job_cleanup(struct drm_sched_job *job)
{
	struct dma_fence *fence;
	unsigned long index;

	if (kref_read(&job->s_fence->finished.refcount)) {
		/* drm_sched_job_arm() has been called */
		dma_fence_put(&job->s_fence->finished);
	} else {
		/* aborted job before committing to run it */
		drm_sched_fence_free(job->s_fence);
	}

	job->s_fence = NULL;

	/* Release every dependency fence reference still held by the job. */
	xa_for_each(&job->dependencies, index, fence) {
		dma_fence_put(fence);
	}
	xa_destroy(&job->dependencies);

}
EXPORT_SYMBOL(drm_sched_job_cleanup);
/**
 * drm_sched_can_queue -- Can we queue more to the hardware?
 * @sched: scheduler instance
 *
 * Return true if we can push more jobs to the hw, otherwise false.
 */
static bool drm_sched_can_queue(struct drm_gpu_scheduler *sched)
{
	int in_flight = atomic_read(&sched->hw_rq_count);

	return in_flight < sched->hw_submission_limit;
}
/**
 * drm_sched_wakeup_if_can_queue - Wake up the scheduler
 * @sched: scheduler instance
 *
 * Wake up the scheduler thread, but only if the hardware still has room
 * for more submissions.
 */
void drm_sched_wakeup_if_can_queue(struct drm_gpu_scheduler *sched)
{
	if (!drm_sched_can_queue(sched))
		return;

	wake_up_interruptible(&sched->wake_up_worker);
}
/**
 * drm_sched_select_entity - Select next entity to process
 *
 * @sched: scheduler instance
 *
 * Walk the run queues from highest to lowest priority and return the first
 * entity with a job ready, or NULL if there is nothing to do or no room
 * left on the hardware.
 */
static struct drm_sched_entity *
drm_sched_select_entity(struct drm_gpu_scheduler *sched)
{
	int prio;

	if (!drm_sched_can_queue(sched))
		return NULL;

	/* Kernel run queue has higher priority than normal run queue*/
	for (prio = DRM_SCHED_PRIORITY_COUNT - 1; prio >= DRM_SCHED_PRIORITY_MIN; prio--) {
		struct drm_sched_rq *rq = &sched->sched_rq[prio];
		struct drm_sched_entity *entity;

		if (drm_sched_policy == DRM_SCHED_POLICY_FIFO)
			entity = drm_sched_rq_select_entity_fifo(rq);
		else
			entity = drm_sched_rq_select_entity_rr(rq);

		if (entity)
			return entity;
	}

	return NULL;
}
/**
 * drm_sched_get_cleanup_job - fetch the next finished job to be destroyed
 *
 * @sched: scheduler instance
 *
 * Returns the next finished job from the pending list (if there is one)
 * ready for it to be destroyed.
 */
static struct drm_sched_job *
drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
{
	struct drm_sched_job *job, *next;

	spin_lock(&sched->job_list_lock);

	job = list_first_entry_or_null(&sched->pending_list,
				       struct drm_sched_job, list);

	if (job && dma_fence_is_signaled(&job->s_fence->finished)) {
		/* remove job from pending_list */
		list_del_init(&job->list);

		/* cancel this job's TO timer */
		cancel_delayed_work(&sched->work_tdr);
		/* make the scheduled timestamp more accurate */
		next = list_first_entry_or_null(&sched->pending_list,
						typeof(*next), list);

		if (next) {
			next->s_fence->scheduled.timestamp =
				job->s_fence->finished.timestamp;
			/* start TO timer for next job */
			drm_sched_start_timeout(sched);
		}
	} else {
		/* Head job (if any) is still running; nothing to clean up. */
		job = NULL;
	}

	spin_unlock(&sched->job_list_lock);

	return job;
}
/**
 * drm_sched_pick_best - Get a drm sched from a sched_list with the least load
 * @sched_list: list of drm_gpu_schedulers
 * @num_sched_list: number of drm_gpu_schedulers in the sched_list
 *
 * Returns pointer of the sched with the least load or NULL if none of the
 * drm_gpu_schedulers are ready
 */
struct drm_gpu_scheduler *
drm_sched_pick_best(struct drm_gpu_scheduler **sched_list,
		    unsigned int num_sched_list)
{
	struct drm_gpu_scheduler *best = NULL;
	unsigned int best_score = UINT_MAX;
	unsigned int i;

	for (i = 0; i < num_sched_list; i++) {
		struct drm_gpu_scheduler *candidate = sched_list[i];
		unsigned int score;

		if (!candidate->ready) {
			DRM_WARN("scheduler %s is not ready, skipping",
				 candidate->name);
			continue;
		}

		/* The score counts queued and in-flight work on the sched. */
		score = atomic_read(candidate->score);
		if (score < best_score) {
			best_score = score;
			best = candidate;
		}
	}

	return best;
}
EXPORT_SYMBOL(drm_sched_pick_best);
/**
 * drm_sched_blocked - check if the scheduler is blocked
 *
 * @sched: scheduler instance
 *
 * Honour an outstanding kthread park request, if any. Returns true if the
 * thread was parked (blocked), otherwise false.
 */
static bool drm_sched_blocked(struct drm_gpu_scheduler *sched)
{
	if (!kthread_should_park())
		return false;

	kthread_parkme();
	return true;
}
/**
 * drm_sched_main - main scheduler thread
 *
 * @param: scheduler instance
 *
 * Returns 0.
 */
static int drm_sched_main(void *param)
{
	struct drm_gpu_scheduler *sched = (struct drm_gpu_scheduler *)param;
	int r;

	sched_set_fifo_low(current);

	while (!kthread_should_stop()) {
		struct drm_sched_entity *entity = NULL;
		struct drm_sched_fence *s_fence;
		struct drm_sched_job *sched_job;
		struct dma_fence *fence;
		struct drm_sched_job *cleanup_job = NULL;

		/* Sleep until there is a finished job to free, a ready
		 * entity to run, or a stop/park request.
		 */
		wait_event_interruptible(sched->wake_up_worker,
					 (cleanup_job = drm_sched_get_cleanup_job(sched)) ||
					 (!drm_sched_blocked(sched) &&
					  (entity = drm_sched_select_entity(sched))) ||
					 kthread_should_stop());

		if (cleanup_job)
			sched->ops->free_job(cleanup_job);

		if (!entity)
			continue;

		sched_job = drm_sched_entity_pop_job(entity);

		if (!sched_job) {
			complete_all(&entity->entity_idle);
			continue;
		}

		s_fence = sched_job->s_fence;

		atomic_inc(&sched->hw_rq_count);
		drm_sched_job_begin(sched_job);

		trace_drm_run_job(sched_job, entity);
		/* Hand the job to the driver; fence is the hardware fence,
		 * or NULL/ERR_PTR on failure.
		 */
		fence = sched->ops->run_job(sched_job);
		complete_all(&entity->entity_idle);
		drm_sched_fence_scheduled(s_fence, fence);

		if (!IS_ERR_OR_NULL(fence)) {
			/* Drop for original kref_init of the fence.
			 * NOTE(review): dropping before add_callback looks
			 * safe only because drm_sched_fence_scheduled()
			 * presumably takes its own reference — confirm.
			 */
			dma_fence_put(fence);
			r = dma_fence_add_callback(fence, &sched_job->cb,
						   drm_sched_job_done_cb);
			if (r == -ENOENT)
				/* Already signaled; complete directly. */
				drm_sched_job_done(sched_job, fence->error);
			else if (r)
				DRM_DEV_ERROR(sched->dev, "fence add callback failed (%d)\n",
					      r);
		} else {
			/* run_job failed or produced no fence: finish now. */
			drm_sched_job_done(sched_job, IS_ERR(fence) ?
					   PTR_ERR(fence) : 0);
		}

		wake_up(&sched->job_scheduled);
	}
	return 0;
}
/**
 * drm_sched_init - Init a gpu scheduler instance
 *
 * @sched: scheduler instance
 * @ops: backend operations for this scheduler
 * @hw_submission: number of hw submissions that can be in flight
 * @hang_limit: number of times to allow a job to hang before dropping it
 * @timeout: timeout value in jiffies for the scheduler
 * @timeout_wq: workqueue to use for timeout work. If NULL, the system_wq is
 *		used
 * @score: optional score atomic shared with other schedulers
 * @name: name used for debugging
 * @dev: target &struct device
 *
 * Return 0 on success, otherwise error code.
 */
int drm_sched_init(struct drm_gpu_scheduler *sched,
		   const struct drm_sched_backend_ops *ops,
		   unsigned hw_submission, unsigned hang_limit,
		   long timeout, struct workqueue_struct *timeout_wq,
		   atomic_t *score, const char *name, struct device *dev)
{
	int i, ret;
	sched->ops = ops;
	sched->hw_submission_limit = hw_submission;
	sched->name = name;
	sched->timeout = timeout;
	sched->timeout_wq = timeout_wq ? : system_wq;
	sched->hang_limit = hang_limit;
	sched->score = score ? score : &sched->_score;
	sched->dev = dev;
	for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_COUNT; i++)
		drm_sched_rq_init(sched, &sched->sched_rq[i]);

	init_waitqueue_head(&sched->wake_up_worker);
	init_waitqueue_head(&sched->job_scheduled);
	INIT_LIST_HEAD(&sched->pending_list);
	spin_lock_init(&sched->job_list_lock);
	atomic_set(&sched->hw_rq_count, 0);
	INIT_DELAYED_WORK(&sched->work_tdr, drm_sched_job_timedout);
	atomic_set(&sched->_score, 0);
	atomic64_set(&sched->job_id_count, 0);

	/* Each scheduler will run on a separate kernel thread */
	sched->thread = kthread_run(drm_sched_main, sched, sched->name);
	if (IS_ERR(sched->thread)) {
		ret = PTR_ERR(sched->thread);
		sched->thread = NULL;
		DRM_DEV_ERROR(sched->dev, "Failed to create scheduler for %s.\n", name);
		return ret;
	}

	sched->ready = true;
	return 0;
}
EXPORT_SYMBOL(drm_sched_init);
/**
 * drm_sched_fini - Destroy a gpu scheduler
 *
 * @sched: scheduler instance
 *
 * Tears down and cleans up the scheduler.
 */
void drm_sched_fini(struct drm_gpu_scheduler *sched)
{
	struct drm_sched_entity *s_entity;
	int i;

	if (sched->thread)
		kthread_stop(sched->thread);

	for (i = DRM_SCHED_PRIORITY_COUNT - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
		struct drm_sched_rq *rq = &sched->sched_rq[i];

		spin_lock(&rq->lock);
		list_for_each_entry(s_entity, &rq->entities, list)
			/*
			 * Prevents reinsertion and marks job_queue as idle,
			 * it will be removed from the rq in
			 * drm_sched_entity_fini() eventually
			 */
			s_entity->stopped = true;
		spin_unlock(&rq->lock);

	}

	/* Wakeup everyone stuck in drm_sched_entity_flush for this scheduler */
	wake_up_all(&sched->job_scheduled);

	/* Confirm no work left behind accessing device structures */
	cancel_delayed_work_sync(&sched->work_tdr);

	sched->ready = false;
}
EXPORT_SYMBOL(drm_sched_fini);
/**
 * drm_sched_increase_karma - Update sched_entity guilty flag
 *
 * @bad: The job guilty of time out
 *
 * Increment on every hang caused by the 'bad' job. If this exceeds the hang
 * limit of the scheduler then the respective sched entity is marked guilty and
 * jobs from it will not be scheduled further
 */
void drm_sched_increase_karma(struct drm_sched_job *bad)
{
	int i;
	struct drm_sched_entity *tmp;
	struct drm_sched_entity *entity;
	struct drm_gpu_scheduler *sched = bad->sched;

	/* don't change @bad's karma if it's from KERNEL RQ,
	 * because sometimes GPU hang would cause kernel jobs (like VM updating jobs)
	 * corrupt but keep in mind that kernel jobs always considered good.
	 */
	if (bad->s_priority != DRM_SCHED_PRIORITY_KERNEL) {
		atomic_inc(&bad->karma);

		/* Search the non-kernel run queues for the entity that
		 * submitted @bad and flag it guilty.
		 */
		for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_KERNEL;
		     i++) {
			struct drm_sched_rq *rq = &sched->sched_rq[i];

			spin_lock(&rq->lock);
			list_for_each_entry_safe(entity, tmp, &rq->entities, list) {
				if (bad->s_fence->scheduled.context ==
				    entity->fence_context) {
					if (entity->guilty)
						atomic_set(entity->guilty, 1);
					break;
				}
			}
			spin_unlock(&rq->lock);

			/* An early break above means the entity was found. */
			if (&entity->list != &rq->entities)
				break;
		}
	}
}
EXPORT_SYMBOL(drm_sched_increase_karma);
| linux-master | drivers/gpu/drm/scheduler/sched_main.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2023 Loongson Technology Corporation Limited
*/
#include <linux/delay.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_vblank.h>
#include "lsdc_drv.h"
/*
* After the CRTC soft reset, the vblank counter would be reset to zero.
* But the address and other settings in the CRTC register remain the same
* as before.
*/
/*
 * Pulse the (active-low) soft reset bit of CRTC0, then restore a sane
 * default configuration (XRGB8888, output enabled) and wait roughly one
 * vblank period for the controller to settle.
 */
static void lsdc_crtc0_soft_reset(struct lsdc_crtc *lcrtc)
{
	struct lsdc_device *ldev = lcrtc->ldev;
	u32 val;
	val = lsdc_rreg32(ldev, LSDC_CRTC0_CFG_REG);
	val &= CFG_VALID_BITS_MASK;
	/* Soft reset bit, active low */
	val &= ~CFG_RESET_N;
	val &= ~CFG_PIX_FMT_MASK;
	lsdc_wreg32(ldev, LSDC_CRTC0_CFG_REG, val);
	udelay(1);
	val |= CFG_RESET_N | LSDC_PF_XRGB8888 | CFG_OUTPUT_ENABLE;
	lsdc_wreg32(ldev, LSDC_CRTC0_CFG_REG, val);
	/*
	 * Wait about a vblank time. Use msleep() instead of mdelay():
	 * all callers run in sleepable context and busy-waiting for
	 * 20 ms wastes CPU; this also matches lsdc_crtc1_soft_reset().
	 */
	msleep(20);
}
/*
 * Pulse the (active-low) soft reset bit of CRTC1, then restore a sane
 * default configuration (XRGB8888, output enabled) and wait roughly one
 * vblank period for the controller to settle.
 */
static void lsdc_crtc1_soft_reset(struct lsdc_crtc *lcrtc)
{
	struct lsdc_device *ldev = lcrtc->ldev;
	u32 val;
	val = lsdc_rreg32(ldev, LSDC_CRTC1_CFG_REG);
	val &= CFG_VALID_BITS_MASK;
	/* Soft reset bit, active low */
	val &= ~CFG_RESET_N;
	val &= ~CFG_PIX_FMT_MASK;
	lsdc_wreg32(ldev, LSDC_CRTC1_CFG_REG, val);
	udelay(1);
	/* Deassert reset and restore default pixel format + output enable */
	val |= CFG_RESET_N | LSDC_PF_XRGB8888 | CFG_OUTPUT_ENABLE;
	lsdc_wreg32(ldev, LSDC_CRTC1_CFG_REG, val);
	/* Wait about a vblank time */
	msleep(20);
}
/* Turn on the scanout of CRTC0, recovering it first if it is anchored */
static void lsdc_crtc0_enable(struct lsdc_crtc *lcrtc)
{
	struct lsdc_device *ldev = lcrtc->ldev;
	u32 val = lsdc_rreg32(ldev, LSDC_CRTC0_CFG_REG);

	/*
	 * Anchoring happens only in extremely rare cases, but a soft
	 * reset brings the controller back to normal. Warn here hoping
	 * to catch something when it happens.
	 */
	if (val & CRTC_ANCHORED) {
		drm_warn(&ldev->base, "%s stall\n", lcrtc->base.name);
		lsdc_crtc0_soft_reset(lcrtc);
		return;
	}

	lsdc_wreg32(ldev, LSDC_CRTC0_CFG_REG, val | CFG_OUTPUT_ENABLE);
}
/* Turn off the scanout of CRTC0 by clearing its output enable bit */
static void lsdc_crtc0_disable(struct lsdc_crtc *lcrtc)
{
	struct lsdc_device *ldev = lcrtc->ldev;
	lsdc_ureg32_clr(ldev, LSDC_CRTC0_CFG_REG, CFG_OUTPUT_ENABLE);
	/* Brief settle time after clearing the enable bit */
	udelay(9);
}
/* Turn on the scanout of CRTC1, recovering it first if it is anchored */
static void lsdc_crtc1_enable(struct lsdc_crtc *lcrtc)
{
	struct lsdc_device *ldev = lcrtc->ldev;
	u32 val = lsdc_rreg32(ldev, LSDC_CRTC1_CFG_REG);

	/*
	 * Anchoring happens only in extremely rare cases, but a soft
	 * reset brings the controller back to normal. Warn here hoping
	 * to catch something when it happens.
	 */
	if (val & CRTC_ANCHORED) {
		drm_warn(&ldev->base, "%s stall\n", lcrtc->base.name);
		lsdc_crtc1_soft_reset(lcrtc);
		return;
	}

	lsdc_wreg32(ldev, LSDC_CRTC1_CFG_REG, val | CFG_OUTPUT_ENABLE);
}
/* Turn off the scanout of CRTC1 by clearing its output enable bit */
static void lsdc_crtc1_disable(struct lsdc_crtc *lcrtc)
{
	struct lsdc_device *ldev = lcrtc->ldev;
	lsdc_ureg32_clr(ldev, LSDC_CRTC1_CFG_REG, CFG_OUTPUT_ENABLE);
	/* Brief settle time after clearing the enable bit */
	udelay(9);
}
/* All Loongson display controllers have hardware scanout position recorders */
/* Read the current scanout position of CRTC0: hpos in the high half,
 * vpos in the low half of the scan position register.
 */
static void lsdc_crtc0_scan_pos(struct lsdc_crtc *lcrtc, int *hpos, int *vpos)
{
	struct lsdc_device *ldev = lcrtc->ldev;
	u32 val;
	val = lsdc_rreg32(ldev, LSDC_CRTC0_SCAN_POS_REG);
	*hpos = val >> 16;
	*vpos = val & 0xffff;
}
/* Read the current scanout position of CRTC1: hpos in the high half,
 * vpos in the low half of the scan position register.
 */
static void lsdc_crtc1_scan_pos(struct lsdc_crtc *lcrtc, int *hpos, int *vpos)
{
	struct lsdc_device *ldev = lcrtc->ldev;
	u32 val;
	val = lsdc_rreg32(ldev, LSDC_CRTC1_SCAN_POS_REG);
	*hpos = val >> 16;
	*vpos = val & 0xffff;
}
/* Unmask the vsync interrupt of CRTC0 */
static void lsdc_crtc0_enable_vblank(struct lsdc_crtc *lcrtc)
{
	struct lsdc_device *ldev = lcrtc->ldev;
	lsdc_ureg32_set(ldev, LSDC_INT_REG, INT_CRTC0_VSYNC_EN);
}
/* Mask the vsync interrupt of CRTC0 */
static void lsdc_crtc0_disable_vblank(struct lsdc_crtc *lcrtc)
{
	struct lsdc_device *ldev = lcrtc->ldev;
	lsdc_ureg32_clr(ldev, LSDC_INT_REG, INT_CRTC0_VSYNC_EN);
}
/* Unmask the vsync interrupt of CRTC1 */
static void lsdc_crtc1_enable_vblank(struct lsdc_crtc *lcrtc)
{
	struct lsdc_device *ldev = lcrtc->ldev;
	lsdc_ureg32_set(ldev, LSDC_INT_REG, INT_CRTC1_VSYNC_EN);
}
/* Mask the vsync interrupt of CRTC1 */
static void lsdc_crtc1_disable_vblank(struct lsdc_crtc *lcrtc)
{
	struct lsdc_device *ldev = lcrtc->ldev;
	lsdc_ureg32_clr(ldev, LSDC_INT_REG, INT_CRTC1_VSYNC_EN);
}
/* Trigger a page flip on CRTC0 by setting the flip bit in its config */
static void lsdc_crtc0_flip(struct lsdc_crtc *lcrtc)
{
	struct lsdc_device *ldev = lcrtc->ldev;
	lsdc_ureg32_set(ldev, LSDC_CRTC0_CFG_REG, CFG_PAGE_FLIP);
}
/* Trigger a page flip on CRTC1 by setting the flip bit in its config */
static void lsdc_crtc1_flip(struct lsdc_crtc *lcrtc)
{
	struct lsdc_device *ldev = lcrtc->ldev;
	lsdc_ureg32_set(ldev, LSDC_CRTC1_CFG_REG, CFG_PAGE_FLIP);
}
/*
 * CRTC0 can clone from CRTC1, or CRTC1 from CRTC0, using hardware logic.
 * This may be useful for custom cloning (TWIN) applications, saving
 * memory bandwidth compared with the clone (mirroring) display mode
 * provided by the drm core.
 */
/* Make CRTC0 clone the other CRTC's output via the hardware clone bit */
static void lsdc_crtc0_clone(struct lsdc_crtc *lcrtc)
{
	struct lsdc_device *ldev = lcrtc->ldev;
	lsdc_ureg32_set(ldev, LSDC_CRTC0_CFG_REG, CFG_HW_CLONE);
}
/* Make CRTC1 clone the other CRTC's output via the hardware clone bit */
static void lsdc_crtc1_clone(struct lsdc_crtc *lcrtc)
{
	struct lsdc_device *ldev = lcrtc->ldev;
	lsdc_ureg32_set(ldev, LSDC_CRTC1_CFG_REG, CFG_HW_CLONE);
}
/*
 * Program CRTC0 timing registers from the given display mode.
 * Each register packs two values: the total/end in the high 16 bits and
 * the display/start in the low 16 bits; sync registers also carry the
 * sync enable bit.
 */
static void lsdc_crtc0_set_mode(struct lsdc_crtc *lcrtc,
				const struct drm_display_mode *mode)
{
	struct lsdc_device *ldev = lcrtc->ldev;
	lsdc_wreg32(ldev, LSDC_CRTC0_HDISPLAY_REG,
		    (mode->crtc_htotal << 16) | mode->crtc_hdisplay);
	lsdc_wreg32(ldev, LSDC_CRTC0_VDISPLAY_REG,
		    (mode->crtc_vtotal << 16) | mode->crtc_vdisplay);
	lsdc_wreg32(ldev, LSDC_CRTC0_HSYNC_REG,
		    (mode->crtc_hsync_end << 16) | mode->crtc_hsync_start | HSYNC_EN);
	lsdc_wreg32(ldev, LSDC_CRTC0_VSYNC_REG,
		    (mode->crtc_vsync_end << 16) | mode->crtc_vsync_start | VSYNC_EN);
}
/*
 * Program CRTC1 timing registers from the given display mode.
 * Same packing as lsdc_crtc0_set_mode(): total/end in the high 16 bits,
 * display/start in the low 16 bits, sync enable bits OR-ed in.
 */
static void lsdc_crtc1_set_mode(struct lsdc_crtc *lcrtc,
				const struct drm_display_mode *mode)
{
	struct lsdc_device *ldev = lcrtc->ldev;
	lsdc_wreg32(ldev, LSDC_CRTC1_HDISPLAY_REG,
		    (mode->crtc_htotal << 16) | mode->crtc_hdisplay);
	lsdc_wreg32(ldev, LSDC_CRTC1_VDISPLAY_REG,
		    (mode->crtc_vtotal << 16) | mode->crtc_vdisplay);
	lsdc_wreg32(ldev, LSDC_CRTC1_HSYNC_REG,
		    (mode->crtc_hsync_end << 16) | mode->crtc_hsync_start | HSYNC_EN);
	lsdc_wreg32(ldev, LSDC_CRTC1_VSYNC_REG,
		    (mode->crtc_vsync_end << 16) | mode->crtc_vsync_start | VSYNC_EN);
}
/*
 * This is required for S3 support.
 * After resuming from suspend, LSDC_CRTCx_CFG_REG (x = 0 or 1) is filled
 * with garbage values, which causes the CRTC to hang there.
 *
 * These functions provide minimal settings for the affected registers.
 * They override the firmware's settings on startup, making the CRTC work
 * on its own, similar in function to a GPU POST (Power On Self Test).
 * Only CRTC hardware-related parts are touched.
 */
/* Put CRTC0 config into a known-good minimal state (out of reset, XRGB8888) */
static void lsdc_crtc0_reset(struct lsdc_crtc *lcrtc)
{
	struct lsdc_device *ldev = lcrtc->ldev;
	lsdc_wreg32(ldev, LSDC_CRTC0_CFG_REG, CFG_RESET_N | LSDC_PF_XRGB8888);
}
/* Put CRTC1 config into a known-good minimal state (out of reset, XRGB8888) */
static void lsdc_crtc1_reset(struct lsdc_crtc *lcrtc)
{
	struct lsdc_device *ldev = lcrtc->ldev;
	lsdc_wreg32(ldev, LSDC_CRTC1_CFG_REG, CFG_RESET_N | LSDC_PF_XRGB8888);
}
/*
 * Per-CRTC hardware op tables for LS7A1000. Index 0 is CRTC0, index 1
 * is CRTC1. No set_dma_step/get_vblank_counter here: those features
 * only exist from LS7A2000/LS2K2000 onward.
 */
static const struct lsdc_crtc_hw_ops ls7a1000_crtc_hw_ops[2] = {
	{
		.enable = lsdc_crtc0_enable,
		.disable = lsdc_crtc0_disable,
		.enable_vblank = lsdc_crtc0_enable_vblank,
		.disable_vblank = lsdc_crtc0_disable_vblank,
		.flip = lsdc_crtc0_flip,
		.clone = lsdc_crtc0_clone,
		.set_mode = lsdc_crtc0_set_mode,
		.get_scan_pos = lsdc_crtc0_scan_pos,
		.soft_reset = lsdc_crtc0_soft_reset,
		.reset = lsdc_crtc0_reset,
	},
	{
		.enable = lsdc_crtc1_enable,
		.disable = lsdc_crtc1_disable,
		.enable_vblank = lsdc_crtc1_enable_vblank,
		.disable_vblank = lsdc_crtc1_disable_vblank,
		.flip = lsdc_crtc1_flip,
		.clone = lsdc_crtc1_clone,
		.set_mode = lsdc_crtc1_set_mode,
		.get_scan_pos = lsdc_crtc1_scan_pos,
		.soft_reset = lsdc_crtc1_soft_reset,
		.reset = lsdc_crtc1_reset,
	},
};
/*
 * The 32-bit hardware vblank counter has been available since LS7A2000
 * and LS2K2000. The counter keeps increasing even while the CRTC is
 * disabled; it is reset only when the CRTC undergoes a soft reset.
 * These registers are also readable on LS7A1000, but their values do
 * not change there.
 */
/* Read the free-running hardware vsync counter of CRTC0 */
static u32 lsdc_crtc0_get_vblank_count(struct lsdc_crtc *lcrtc)
{
	struct lsdc_device *ldev = lcrtc->ldev;
	return lsdc_rreg32(ldev, LSDC_CRTC0_VSYNC_COUNTER_REG);
}
/* Read the free-running hardware vsync counter of CRTC1 */
static u32 lsdc_crtc1_get_vblank_count(struct lsdc_crtc *lcrtc)
{
	struct lsdc_device *ldev = lcrtc->ldev;
	return lsdc_rreg32(ldev, LSDC_CRTC1_VSYNC_COUNTER_REG);
}
/*
 * The DMA step bit fields have been available since LS7A2000/LS2K2000,
 * for supporting odd resolutions. A larger DMA step saves memory
 * bandwidth: the larger, the better. The behavior of writing those bits
 * on LS7A1000 or LS2K1000 is undefined.
 */
/* Read-modify-write the DMA step field in the CRTC0 config register */
static void lsdc_crtc0_set_dma_step(struct lsdc_crtc *lcrtc,
				    enum lsdc_dma_steps dma_step)
{
	struct lsdc_device *ldev = lcrtc->ldev;
	u32 val = lsdc_rreg32(ldev, LSDC_CRTC0_CFG_REG);
	val &= ~CFG_DMA_STEP_MASK;
	val |= dma_step << CFG_DMA_STEP_SHIFT;
	lsdc_wreg32(ldev, LSDC_CRTC0_CFG_REG, val);
}
/* Read-modify-write the DMA step field in the CRTC1 config register */
static void lsdc_crtc1_set_dma_step(struct lsdc_crtc *lcrtc,
				    enum lsdc_dma_steps dma_step)
{
	struct lsdc_device *ldev = lcrtc->ldev;
	u32 val = lsdc_rreg32(ldev, LSDC_CRTC1_CFG_REG);
	val &= ~CFG_DMA_STEP_MASK;
	val |= dma_step << CFG_DMA_STEP_SHIFT;
	lsdc_wreg32(ldev, LSDC_CRTC1_CFG_REG, val);
}
/*
 * Per-CRTC hardware op tables for LS7A2000. Compared with LS7A1000
 * these additionally provide set_dma_step and the hardware vblank
 * counter.
 */
static const struct lsdc_crtc_hw_ops ls7a2000_crtc_hw_ops[2] = {
	{
		.enable = lsdc_crtc0_enable,
		.disable = lsdc_crtc0_disable,
		.enable_vblank = lsdc_crtc0_enable_vblank,
		.disable_vblank = lsdc_crtc0_disable_vblank,
		.flip = lsdc_crtc0_flip,
		.clone = lsdc_crtc0_clone,
		.set_mode = lsdc_crtc0_set_mode,
		.soft_reset = lsdc_crtc0_soft_reset,
		.get_scan_pos = lsdc_crtc0_scan_pos,
		.set_dma_step = lsdc_crtc0_set_dma_step,
		.get_vblank_counter = lsdc_crtc0_get_vblank_count,
		.reset = lsdc_crtc0_reset,
	},
	{
		.enable = lsdc_crtc1_enable,
		.disable = lsdc_crtc1_disable,
		.enable_vblank = lsdc_crtc1_enable_vblank,
		.disable_vblank = lsdc_crtc1_disable_vblank,
		.flip = lsdc_crtc1_flip,
		.clone = lsdc_crtc1_clone,
		.set_mode = lsdc_crtc1_set_mode,
		.get_scan_pos = lsdc_crtc1_scan_pos,
		.soft_reset = lsdc_crtc1_soft_reset,
		.set_dma_step = lsdc_crtc1_set_dma_step,
		.get_vblank_counter = lsdc_crtc1_get_vblank_count,
		.reset = lsdc_crtc1_reset,
	},
};
/*
 * drm_crtc_funcs.reset: free any previous software state, install a
 * fresh zeroed lsdc_crtc_state (falling back to a bare reset if the
 * allocation fails) and re-initialize the CRTC hardware.
 */
static void lsdc_crtc_reset(struct drm_crtc *crtc)
{
	struct lsdc_crtc *lcrtc = to_lsdc_crtc(crtc);
	const struct lsdc_crtc_hw_ops *ops = lcrtc->hw_ops;
	struct lsdc_crtc_state *priv_crtc_state;
	if (crtc->state)
		crtc->funcs->atomic_destroy_state(crtc, crtc->state);
	priv_crtc_state = kzalloc(sizeof(*priv_crtc_state), GFP_KERNEL);
	if (!priv_crtc_state)
		__drm_atomic_helper_crtc_reset(crtc, NULL);
	else
		__drm_atomic_helper_crtc_reset(crtc, &priv_crtc_state->base);
	/* Reset the CRTC hardware, this is required for S3 support */
	ops->reset(lcrtc);
}
/* Release a CRTC state: tear down the base state, then free our wrapper */
static void lsdc_crtc_atomic_destroy_state(struct drm_crtc *crtc,
					   struct drm_crtc_state *state)
{
	struct lsdc_crtc_state *priv_state = to_lsdc_crtc_state(state);
	__drm_atomic_helper_crtc_destroy_state(&priv_state->base);
	kfree(priv_state);
}
/*
 * Duplicate the current CRTC state, carrying over the driver-private
 * pixel PLL parameters alongside the base drm state.
 */
static struct drm_crtc_state *
lsdc_crtc_atomic_duplicate_state(struct drm_crtc *crtc)
{
	struct lsdc_crtc_state *cloned;
	struct lsdc_crtc_state *current_state;

	cloned = kzalloc(sizeof(*cloned), GFP_KERNEL);
	if (!cloned)
		return NULL;

	__drm_atomic_helper_crtc_duplicate_state(crtc, &cloned->base);

	/* Struct assignment copies the PLL parameters verbatim */
	current_state = to_lsdc_crtc_state(crtc->state);
	cloned->pparms = current_state->pparms;

	return &cloned->base;
}
/* drm_crtc_funcs.get_vblank_counter: forward to the per-chip hw op */
static u32 lsdc_crtc_get_vblank_counter(struct drm_crtc *crtc)
{
	struct lsdc_crtc *lcrtc = to_lsdc_crtc(crtc);
	/* 32-bit hardware vblank counter */
	return lcrtc->hw_ops->get_vblank_counter(lcrtc);
}
/*
 * drm_crtc_funcs.enable_vblank: unmask the vsync interrupt, or fail if
 * this CRTC was created without vblank support.
 */
static int lsdc_crtc_enable_vblank(struct drm_crtc *crtc)
{
	struct lsdc_crtc *lcrtc = to_lsdc_crtc(crtc);
	if (!lcrtc->has_vblank)
		return -EINVAL;
	lcrtc->hw_ops->enable_vblank(lcrtc);
	return 0;
}
/* drm_crtc_funcs.disable_vblank: mask the vsync interrupt if supported */
static void lsdc_crtc_disable_vblank(struct drm_crtc *crtc)
{
	struct lsdc_crtc *lcrtc = to_lsdc_crtc(crtc);
	if (!lcrtc->has_vblank)
		return;
	lcrtc->hw_ops->disable_vblank(lcrtc);
}
/*
* CRTC related debugfs
* Primary planes and cursor planes belong to the CRTC as well.
* For the sake of convenience, plane-related registers are also add here.
*/
/*
 * Build a lsdc_reg32 entry from a register short name: the stringified
 * full macro name plus its offset value.
 */
#define REG_DEF(reg) { \
	.name = __stringify_1(LSDC_##reg##_REG), \
	.offset = LSDC_##reg##_REG, \
}
/*
 * Registers dumped by the per-CRTC "regs" debugfs file. Index 0 covers
 * CRTC0/cursor0, index 1 covers CRTC1/cursor1.
 */
static const struct lsdc_reg32 lsdc_crtc_regs_array[2][21] = {
	[0] = {
		REG_DEF(CRTC0_CFG),
		REG_DEF(CRTC0_FB_ORIGIN),
		REG_DEF(CRTC0_DVO_CONF),
		REG_DEF(CRTC0_HDISPLAY),
		REG_DEF(CRTC0_HSYNC),
		REG_DEF(CRTC0_VDISPLAY),
		REG_DEF(CRTC0_VSYNC),
		REG_DEF(CRTC0_GAMMA_INDEX),
		REG_DEF(CRTC0_GAMMA_DATA),
		REG_DEF(CRTC0_SYNC_DEVIATION),
		REG_DEF(CRTC0_VSYNC_COUNTER),
		REG_DEF(CRTC0_SCAN_POS),
		REG_DEF(CRTC0_STRIDE),
		REG_DEF(CRTC0_FB1_ADDR_HI),
		REG_DEF(CRTC0_FB1_ADDR_LO),
		REG_DEF(CRTC0_FB0_ADDR_HI),
		REG_DEF(CRTC0_FB0_ADDR_LO),
		REG_DEF(CURSOR0_CFG),
		REG_DEF(CURSOR0_POSITION),
		REG_DEF(CURSOR0_BG_COLOR),
		REG_DEF(CURSOR0_FG_COLOR),
	},
	[1] = {
		REG_DEF(CRTC1_CFG),
		REG_DEF(CRTC1_FB_ORIGIN),
		REG_DEF(CRTC1_DVO_CONF),
		REG_DEF(CRTC1_HDISPLAY),
		REG_DEF(CRTC1_HSYNC),
		REG_DEF(CRTC1_VDISPLAY),
		REG_DEF(CRTC1_VSYNC),
		REG_DEF(CRTC1_GAMMA_INDEX),
		REG_DEF(CRTC1_GAMMA_DATA),
		REG_DEF(CRTC1_SYNC_DEVIATION),
		REG_DEF(CRTC1_VSYNC_COUNTER),
		REG_DEF(CRTC1_SCAN_POS),
		REG_DEF(CRTC1_STRIDE),
		REG_DEF(CRTC1_FB1_ADDR_HI),
		REG_DEF(CRTC1_FB1_ADDR_LO),
		REG_DEF(CRTC1_FB0_ADDR_HI),
		REG_DEF(CRTC1_FB0_ADDR_LO),
		REG_DEF(CURSOR1_CFG),
		REG_DEF(CURSOR1_POSITION),
		REG_DEF(CURSOR1_BG_COLOR),
		REG_DEF(CURSOR1_FG_COLOR),
	},
};
static int lsdc_crtc_show_regs(struct seq_file *m, void *arg)
{
struct drm_info_node *node = (struct drm_info_node *)m->private;
struct lsdc_crtc *lcrtc = (struct lsdc_crtc *)node->info_ent->data;
struct lsdc_device *ldev = lcrtc->ldev;
unsigned int i;
for (i = 0; i < lcrtc->nreg; i++) {
const struct lsdc_reg32 *preg = &lcrtc->preg[i];
u32 offset = preg->offset;
seq_printf(m, "%s (0x%04x): 0x%08x\n",
preg->name, offset, lsdc_rreg32(ldev, offset));
}
return 0;
}
/* debugfs "scanpos" file: print the current hardware scanout position */
static int lsdc_crtc_show_scan_position(struct seq_file *m, void *arg)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct lsdc_crtc *lcrtc = (struct lsdc_crtc *)node->info_ent->data;
	int x, y;
	lcrtc->hw_ops->get_scan_pos(lcrtc, &x, &y);
	seq_printf(m, "Scanout position: x: %08u, y: %08u\n", x, y);
	return 0;
}
/*
 * debugfs "vblanks" file: print the hardware vblank counter; silent on
 * chips (LS7A1000) that provide no get_vblank_counter op.
 */
static int lsdc_crtc_show_vblank_counter(struct seq_file *m, void *arg)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct lsdc_crtc *lcrtc = (struct lsdc_crtc *)node->info_ent->data;
	if (lcrtc->hw_ops->get_vblank_counter)
		seq_printf(m, "%s vblank counter: %08u\n\n", lcrtc->base.name,
			   lcrtc->hw_ops->get_vblank_counter(lcrtc));
	return 0;
}
/*
 * debugfs "pixclk" file: compare the pixel clock requested by the
 * current mode against the frequency the PLL actually produces, and
 * dump the PLL's own state via its print hook.
 */
static int lsdc_pixpll_show_clock(struct seq_file *m, void *arg)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct lsdc_crtc *lcrtc = (struct lsdc_crtc *)node->info_ent->data;
	struct lsdc_pixpll *pixpll = &lcrtc->pixpll;
	const struct lsdc_pixpll_funcs *funcs = pixpll->funcs;
	struct drm_crtc *crtc = &lcrtc->base;
	struct drm_display_mode *mode = &crtc->state->mode;
	struct drm_printer printer = drm_seq_file_printer(m);
	unsigned int out_khz;
	out_khz = funcs->get_rate(pixpll);
	seq_printf(m, "%s: %dx%d@%d\n", crtc->name,
		   mode->hdisplay, mode->vdisplay, drm_mode_vrefresh(mode));
	seq_printf(m, "Pixel clock required: %d kHz\n", mode->clock);
	seq_printf(m, "Actual frequency output: %u kHz\n", out_khz);
	seq_printf(m, "Diff: %d kHz\n", out_khz - mode->clock);
	funcs->print(pixpll, &printer);
	return 0;
}
/*
 * Per-CRTC debugfs entries; the .data fields are filled in at runtime
 * by lsdc_crtc_late_register() with the owning lsdc_crtc, which is why
 * this array cannot be const and is duplicated per pipe.
 */
static struct drm_info_list lsdc_crtc_debugfs_list[2][4] = {
	[0] = {
		{ "regs", lsdc_crtc_show_regs, 0, NULL },
		{ "pixclk", lsdc_pixpll_show_clock, 0, NULL },
		{ "scanpos", lsdc_crtc_show_scan_position, 0, NULL },
		{ "vblanks", lsdc_crtc_show_vblank_counter, 0, NULL },
	},
	[1] = {
		{ "regs", lsdc_crtc_show_regs, 0, NULL },
		{ "pixclk", lsdc_pixpll_show_clock, 0, NULL },
		{ "scanpos", lsdc_crtc_show_scan_position, 0, NULL },
		{ "vblanks", lsdc_crtc_show_vblank_counter, 0, NULL },
	},
};
/* operate manually */
/* Reading the "ops" debugfs file lists the accepted manual commands */
static int lsdc_crtc_man_op_show(struct seq_file *m, void *data)
{
	seq_puts(m, "soft_reset: soft reset this CRTC\n");
	seq_puts(m, "enable: enable this CRTC\n");
	seq_puts(m, "disable: disable this CRTC\n");
	seq_puts(m, "flip: trigger the page flip\n");
	/* Fixed user-visible help text: "clone the another crtc" was ungrammatical */
	seq_puts(m, "clone: clone another CRTC with hardware logic\n");
	return 0;
}
static int lsdc_crtc_man_op_open(struct inode *inode, struct file *file)
{
	/*
	 * inode->i_private carries the struct lsdc_crtc * that was passed
	 * to debugfs_create_file() in lsdc_crtc_late_register(); the write
	 * handler casts seq_file->private back to that type, so declare it
	 * with the correct type here (it was mistyped as struct drm_crtc *).
	 */
	struct lsdc_crtc *lcrtc = inode->i_private;

	return single_open(file, lsdc_crtc_man_op_show, lcrtc);
}
/*
 * Writing a command name ("soft_reset", "enable", "disable", "flip" or
 * "clone") to the "ops" debugfs file invokes the corresponding hardware
 * op on this CRTC. Unknown strings are silently ignored.
 */
static ssize_t lsdc_crtc_man_op_write(struct file *file,
				      const char __user *ubuf,
				      size_t len,
				      loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct lsdc_crtc *lcrtc = m->private;
	const struct lsdc_crtc_hw_ops *ops = lcrtc->hw_ops;
	char buf[16];
	/* Reserve one byte for the NUL terminator */
	if (len > sizeof(buf) - 1)
		return -EINVAL;
	if (copy_from_user(buf, ubuf, len))
		return -EFAULT;
	buf[len] = '\0';
	if (sysfs_streq(buf, "soft_reset"))
		ops->soft_reset(lcrtc);
	else if (sysfs_streq(buf, "enable"))
		ops->enable(lcrtc);
	else if (sysfs_streq(buf, "disable"))
		ops->disable(lcrtc);
	else if (sysfs_streq(buf, "flip"))
		ops->flip(lcrtc);
	else if (sysfs_streq(buf, "clone"))
		ops->clone(lcrtc);
	return len;
}
/* File operations for the manual-operation ("ops") debugfs file */
static const struct file_operations lsdc_crtc_man_op_fops = {
	.owner = THIS_MODULE,
	.open = lsdc_crtc_man_op_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = lsdc_crtc_man_op_write,
};
/*
 * drm_crtc_funcs.late_register: hook up this CRTC's register table and
 * debugfs entries once the DRM device has been registered.
 */
static int lsdc_crtc_late_register(struct drm_crtc *crtc)
{
	struct lsdc_display_pipe *dispipe = crtc_to_display_pipe(crtc);
	struct lsdc_crtc *lcrtc = to_lsdc_crtc(crtc);
	struct drm_minor *minor = crtc->dev->primary;
	unsigned int index = dispipe->index;
	unsigned int i;
	lcrtc->preg = lsdc_crtc_regs_array[index];
	lcrtc->nreg = ARRAY_SIZE(lsdc_crtc_regs_array[index]);
	lcrtc->p_info_list = lsdc_crtc_debugfs_list[index];
	lcrtc->n_info_list = ARRAY_SIZE(lsdc_crtc_debugfs_list[index]);
	/* Point every debugfs entry's .data at this CRTC */
	for (i = 0; i < lcrtc->n_info_list; ++i)
		lcrtc->p_info_list[i].data = lcrtc;
	drm_debugfs_create_files(lcrtc->p_info_list, lcrtc->n_info_list,
				 crtc->debugfs_entry, minor);
	/* Manual operations supported */
	debugfs_create_file("ops", 0644, crtc->debugfs_entry, lcrtc,
			    &lsdc_crtc_man_op_fops);
	return 0;
}
/* Dump the driver-private pixel PLL parameters into an atomic state print */
static void lsdc_crtc_atomic_print_state(struct drm_printer *p,
					 const struct drm_crtc_state *state)
{
	const struct lsdc_crtc_state *priv_state;
	const struct lsdc_pixpll_parms *pparms;
	priv_state = container_of_const(state, struct lsdc_crtc_state, base);
	pparms = &priv_state->pparms;
	drm_printf(p, "\tInput clock divider = %u\n", pparms->div_ref);
	drm_printf(p, "\tMedium clock multiplier = %u\n", pparms->loopc);
	drm_printf(p, "\tOutput clock divider = %u\n", pparms->div_out);
}
/* CRTC funcs for LS7A1000: no hardware vblank counter on this chip */
static const struct drm_crtc_funcs ls7a1000_crtc_funcs = {
	.reset = lsdc_crtc_reset,
	.destroy = drm_crtc_cleanup,
	.set_config = drm_atomic_helper_set_config,
	.page_flip = drm_atomic_helper_page_flip,
	.atomic_duplicate_state = lsdc_crtc_atomic_duplicate_state,
	.atomic_destroy_state = lsdc_crtc_atomic_destroy_state,
	.late_register = lsdc_crtc_late_register,
	.enable_vblank = lsdc_crtc_enable_vblank,
	.disable_vblank = lsdc_crtc_disable_vblank,
	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
	.atomic_print_state = lsdc_crtc_atomic_print_state,
};
/* CRTC funcs for LS7A2000: additionally exposes the hw vblank counter */
static const struct drm_crtc_funcs ls7a2000_crtc_funcs = {
	.reset = lsdc_crtc_reset,
	.destroy = drm_crtc_cleanup,
	.set_config = drm_atomic_helper_set_config,
	.page_flip = drm_atomic_helper_page_flip,
	.atomic_duplicate_state = lsdc_crtc_atomic_duplicate_state,
	.atomic_destroy_state = lsdc_crtc_atomic_destroy_state,
	.late_register = lsdc_crtc_late_register,
	.get_vblank_counter = lsdc_crtc_get_vblank_counter,
	.enable_vblank = lsdc_crtc_enable_vblank,
	.disable_vblank = lsdc_crtc_disable_vblank,
	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
	.atomic_print_state = lsdc_crtc_atomic_print_state,
};
/*
 * Reject modes exceeding the chip's maximum width, height or pixel
 * clock, or whose scanline pitch (assuming 4 bytes/pixel XRGB8888)
 * does not meet the hardware's stride alignment requirement.
 */
static enum drm_mode_status
lsdc_crtc_mode_valid(struct drm_crtc *crtc, const struct drm_display_mode *mode)
{
	struct drm_device *ddev = crtc->dev;
	struct lsdc_device *ldev = to_lsdc(ddev);
	const struct lsdc_desc *descp = ldev->descp;
	unsigned int pitch;
	if (mode->hdisplay > descp->max_width)
		return MODE_BAD_HVALUE;
	if (mode->vdisplay > descp->max_height)
		return MODE_BAD_VVALUE;
	if (mode->clock > descp->max_pixel_clk) {
		drm_dbg_kms(ddev, "mode %dx%d, pixel clock=%d is too high\n",
			    mode->hdisplay, mode->vdisplay, mode->clock);
		return MODE_CLOCK_HIGH;
	}
	/* 4 for DRM_FORMAT_XRGB8888 */
	pitch = mode->hdisplay * 4;
	if (pitch % descp->pitch_align) {
		drm_dbg_kms(ddev, "align to %u bytes is required: %u\n",
			    descp->pitch_align, pitch);
		return MODE_BAD_WIDTH;
	}
	return MODE_OK;
}
/*
 * Compute pixel PLL parameters for the requested mode clock and stash
 * them in the private CRTC state; fail the atomic check if no parameter
 * combination can produce the requested frequency.
 */
static int lsdc_pixpll_atomic_check(struct drm_crtc *crtc,
				    struct drm_crtc_state *state)
{
	struct lsdc_crtc *lcrtc = to_lsdc_crtc(crtc);
	struct lsdc_pixpll *pixpll = &lcrtc->pixpll;
	const struct lsdc_pixpll_funcs *pfuncs = pixpll->funcs;
	struct lsdc_crtc_state *priv_state = to_lsdc_crtc_state(state);
	unsigned int clock = state->mode.clock;
	int ret;
	ret = pfuncs->compute(pixpll, clock, &priv_state->pparms);
	if (ret) {
		drm_warn(crtc->dev, "Failed to find PLL params for %ukHz\n",
			 clock);
		return -EINVAL;
	}
	return 0;
}
/* Validate a new CRTC state: PLL parameters only matter when enabled */
static int lsdc_crtc_helper_atomic_check(struct drm_crtc *crtc,
					 struct drm_atomic_state *state)
{
	struct drm_crtc_state *new_state = drm_atomic_get_new_crtc_state(state, crtc);

	if (new_state->enable)
		return lsdc_pixpll_atomic_check(crtc, new_state);

	return 0;
}
/*
 * Program the pixel PLL with the parameters computed at atomic_check
 * time, choose the widest DMA step the scanline width allows (on chips
 * that support it), and write the CRTC timing registers.
 */
static void lsdc_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
	struct lsdc_crtc *lcrtc = to_lsdc_crtc(crtc);
	const struct lsdc_crtc_hw_ops *crtc_hw_ops = lcrtc->hw_ops;
	struct lsdc_pixpll *pixpll = &lcrtc->pixpll;
	const struct lsdc_pixpll_funcs *pixpll_funcs = pixpll->funcs;
	struct drm_crtc_state *state = crtc->state;
	struct drm_display_mode *mode = &state->mode;
	struct lsdc_crtc_state *priv_state = to_lsdc_crtc_state(state);
	pixpll_funcs->update(pixpll, &priv_state->pparms);
	if (crtc_hw_ops->set_dma_step) {
		/* 4 bytes per pixel (XRGB8888) */
		unsigned int width_in_bytes = mode->hdisplay * 4;
		enum lsdc_dma_steps dma_step;
		/*
		 * Using DMA step as large as possible, for improving
		 * hardware DMA efficiency.
		 */
		if (width_in_bytes % 256 == 0)
			dma_step = LSDC_DMA_STEP_256_BYTES;
		else if (width_in_bytes % 128 == 0)
			dma_step = LSDC_DMA_STEP_128_BYTES;
		else if (width_in_bytes % 64 == 0)
			dma_step = LSDC_DMA_STEP_64_BYTES;
		else /* width_in_bytes % 32 == 0 */
			dma_step = LSDC_DMA_STEP_32_BYTES;
		crtc_hw_ops->set_dma_step(lcrtc, dma_step);
	}
	crtc_hw_ops->set_mode(lcrtc, mode);
}
/*
 * Deliver a pending vblank event manually (used when the CRTC is being
 * disabled and no further hardware vblank will fire to complete it).
 */
static void lsdc_crtc_send_vblank(struct drm_crtc *crtc)
{
	struct drm_device *ddev = crtc->dev;
	unsigned long flags;
	if (!crtc->state || !crtc->state->event)
		return;
	drm_dbg(ddev, "Send vblank manually\n");
	spin_lock_irqsave(&ddev->event_lock, flags);
	drm_crtc_send_vblank_event(crtc, crtc->state->event);
	crtc->state->event = NULL;
	spin_unlock_irqrestore(&ddev->event_lock, flags);
}
/* Helper atomic_enable: turn vblank handling on, then enable scanout */
static void lsdc_crtc_atomic_enable(struct drm_crtc *crtc,
				    struct drm_atomic_state *state)
{
	struct lsdc_crtc *lcrtc = to_lsdc_crtc(crtc);
	if (lcrtc->has_vblank)
		drm_crtc_vblank_on(crtc);
	lcrtc->hw_ops->enable(lcrtc);
}
/* Helper atomic_disable: stop vblanks, disable scanout, flush events */
static void lsdc_crtc_atomic_disable(struct drm_crtc *crtc,
				     struct drm_atomic_state *state)
{
	struct lsdc_crtc *lcrtc = to_lsdc_crtc(crtc);
	if (lcrtc->has_vblank)
		drm_crtc_vblank_off(crtc);
	lcrtc->hw_ops->disable(lcrtc);
	/*
	 * Make sure we issue a vblank event after disabling the CRTC if
	 * someone was waiting it.
	 */
	lsdc_crtc_send_vblank(crtc);
}
/*
 * Helper atomic_flush: arm the pending event on the next vblank, or
 * send it immediately if no vblank reference could be taken.
 */
static void lsdc_crtc_atomic_flush(struct drm_crtc *crtc,
				   struct drm_atomic_state *state)
{
	spin_lock_irq(&crtc->dev->event_lock);
	if (crtc->state->event) {
		if (drm_crtc_vblank_get(crtc) == 0)
			drm_crtc_arm_vblank_event(crtc, crtc->state->event);
		else
			drm_crtc_send_vblank_event(crtc, crtc->state->event);
		crtc->state->event = NULL;
	}
	spin_unlock_irq(&crtc->dev->event_lock);
}
/*
 * Helper get_scanout_position: translate the hardware scan position
 * into the coordinate space expected by the drm vblank timestamping
 * code (vpos negative while in the vertical blanking region, relative
 * to the first active scanline otherwise). hpos is not reported by a
 * finer granularity than the scanline, so it is always 0.
 */
static bool lsdc_crtc_get_scanout_position(struct drm_crtc *crtc,
					   bool in_vblank_irq,
					   int *vpos,
					   int *hpos,
					   ktime_t *stime,
					   ktime_t *etime,
					   const struct drm_display_mode *mode)
{
	struct lsdc_crtc *lcrtc = to_lsdc_crtc(crtc);
	const struct lsdc_crtc_hw_ops *ops = lcrtc->hw_ops;
	int vsw, vbp, vactive_start, vactive_end, vfp_end;
	int x, y;
	/* vertical sync width and back porch, in scanlines */
	vsw = mode->crtc_vsync_end - mode->crtc_vsync_start;
	vbp = mode->crtc_vtotal - mode->crtc_vsync_end;
	vactive_start = vsw + vbp + 1;
	vactive_end = vactive_start + mode->crtc_vdisplay;
	/* last scan line before VSYNC */
	vfp_end = mode->crtc_vtotal;
	if (stime)
		*stime = ktime_get();
	ops->get_scan_pos(lcrtc, &x, &y);
	if (y > vactive_end)
		/* in the front porch: report a negative distance to vactive */
		y = y - vfp_end - vactive_start;
	else
		y -= vactive_start;
	*vpos = y;
	*hpos = 0;
	if (etime)
		*etime = ktime_get();
	return true;
}
/* Atomic helper hooks shared by all chip variants */
static const struct drm_crtc_helper_funcs lsdc_crtc_helper_funcs = {
	.mode_valid = lsdc_crtc_mode_valid,
	.mode_set_nofb = lsdc_crtc_mode_set_nofb,
	.atomic_enable = lsdc_crtc_atomic_enable,
	.atomic_disable = lsdc_crtc_atomic_disable,
	.atomic_check = lsdc_crtc_helper_atomic_check,
	.atomic_flush = lsdc_crtc_atomic_flush,
	.get_scanout_position = lsdc_crtc_get_scanout_position,
};
/*
 * Initialize one LS7A1000 CRTC: set up its pixel PLL, register it with
 * its primary and cursor planes, attach the shared helper funcs and
 * enable a 256-entry gamma LUT.
 *
 * Returns 0 on success or a negative error code.
 */
int ls7a1000_crtc_init(struct drm_device *ddev,
		       struct drm_crtc *crtc,
		       struct drm_plane *primary,
		       struct drm_plane *cursor,
		       unsigned int index,
		       bool has_vblank)
{
	struct lsdc_crtc *lcrtc = to_lsdc_crtc(crtc);
	int ret;
	ret = lsdc_pixpll_init(&lcrtc->pixpll, ddev, index);
	if (ret) {
		drm_err(ddev, "pixel pll init failed: %d\n", ret);
		return ret;
	}
	lcrtc->ldev = to_lsdc(ddev);
	lcrtc->has_vblank = has_vblank;
	lcrtc->hw_ops = &ls7a1000_crtc_hw_ops[index];
	/* %u: index is unsigned, matching ls7a2000_crtc_init() */
	ret = drm_crtc_init_with_planes(ddev, crtc, primary, cursor,
					&ls7a1000_crtc_funcs,
					"LS-CRTC-%u", index);
	if (ret) {
		drm_err(ddev, "crtc init with planes failed: %d\n", ret);
		return ret;
	}
	drm_crtc_helper_add(crtc, &lsdc_crtc_helper_funcs);
	ret = drm_mode_crtc_set_gamma_size(crtc, 256);
	if (ret)
		return ret;
	drm_crtc_enable_color_mgmt(crtc, 0, false, 256);
	return 0;
}
/*
 * Initialize one LS7A2000 CRTC: set up its pixel PLL, register it with
 * its primary and cursor planes, attach the shared helper funcs and
 * enable a 256-entry gamma LUT.
 *
 * Returns 0 on success or a negative error code.
 */
int ls7a2000_crtc_init(struct drm_device *ddev,
		       struct drm_crtc *crtc,
		       struct drm_plane *primary,
		       struct drm_plane *cursor,
		       unsigned int index,
		       bool has_vblank)
{
	struct lsdc_crtc *lcrtc = to_lsdc_crtc(crtc);
	int ret;
	ret = lsdc_pixpll_init(&lcrtc->pixpll, ddev, index);
	if (ret) {
		drm_err(ddev, "crtc init with pll failed: %d\n", ret);
		return ret;
	}
	lcrtc->ldev = to_lsdc(ddev);
	lcrtc->has_vblank = has_vblank;
	lcrtc->hw_ops = &ls7a2000_crtc_hw_ops[index];
	ret = drm_crtc_init_with_planes(ddev, crtc, primary, cursor,
					&ls7a2000_crtc_funcs,
					"LS-CRTC-%u", index);
	if (ret) {
		drm_err(ddev, "crtc init with planes failed: %d\n", ret);
		return ret;
	}
	drm_crtc_helper_add(crtc, &lsdc_crtc_helper_funcs);
	ret = drm_mode_crtc_set_gamma_size(crtc, 256);
	if (ret)
		return ret;
	drm_crtc_enable_color_mgmt(crtc, 0, false, 256);
	return 0;
}
| linux-master | drivers/gpu/drm/loongson/lsdc_crtc.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2023 Loongson Technology Corporation Limited
*/
#include <linux/delay.h>
#include <drm/drm_file.h>
#include <drm/drm_managed.h>
#include <drm/drm_print.h>
#include "lsdc_drv.h"
/*
* GFX PLL is the PLL used by DC, GMC and GPU, the structure of the GFX PLL
* may suffer from change across chip variants.
*
*
* +-------------+ sel_out_dc
* +----| / div_out_0 | _____/ _____ DC
* | +-------------+
* refclk +---------+ +-------+ | +-------------+ sel_out_gmc
* ---+---> | div_ref | ---> | loopc | --+--> | / div_out_1 | _____/ _____ GMC
* | +---------+ +-------+ | +-------------+
* | / * | +-------------+ sel_out_gpu
* | +----| / div_out_2 | _____/ _____ GPU
* | +-------------+
* | ^
* | |
* +--------------------------- bypass ----------------------+
*/
/* Bit-field view of the 64-bit GFX PLL control register (little endian) */
struct loongson_gfxpll_bitmap {
	/* Byte 0 ~ Byte 3 */
	unsigned div_out_dc   : 7;  /*  6 : 0    DC output clock divider */
	unsigned div_out_gmc  : 7;  /* 13 : 7    GMC output clock divider */
	unsigned div_out_gpu  : 7;  /* 20 : 14   GPU output clock divider */
	unsigned loopc        : 9;  /* 29 : 21   clock multiplier */
	unsigned _reserved_1_ : 2;  /* 31 : 30 */
	/* Byte 4 ~ Byte 7 */
	unsigned div_ref      : 7;  /* 38 : 32   Input clock divider */
	unsigned locked       : 1;  /* 39        PLL locked indicator */
	unsigned sel_out_dc   : 1;  /* 40        dc output clk enable */
	unsigned sel_out_gmc  : 1;  /* 41        gmc output clk enable */
	unsigned sel_out_gpu  : 1;  /* 42        gpu output clk enable */
	unsigned set_param    : 1;  /* 43        Trigger the update */
	unsigned bypass       : 1;  /* 44 */
	unsigned powerdown    : 1;  /* 45 */
	unsigned _reserved_2_ : 18; /* 46 : 63   no use */
};
/* Access the GFX PLL register as bit fields, two u32 words, or one u64 */
union loongson_gfxpll_reg_bitmap {
	struct loongson_gfxpll_bitmap bitmap;
	u32 w[2];
	u64 d;
};
/*
 * Read the 64-bit GFX PLL register: a single readq() on 64-bit kernels,
 * two readl() accesses (low word first) on 32-bit builds.
 */
static void __gfxpll_rreg(struct loongson_gfxpll *this,
			  union loongson_gfxpll_reg_bitmap *reg)
{
#if defined(CONFIG_64BIT)
	reg->d = readq(this->mmio);
#else
	reg->w[0] = readl(this->mmio);
	reg->w[1] = readl(this->mmio + 4);
#endif
}
/* Update new parameters to the hardware */
static int loongson_gfxpll_update(struct loongson_gfxpll * const this,
				  struct loongson_gfxpll_parms const *pin)
{
	/* Intentionally a stub for now: reconfiguring the shared GFX PLL
	 * at runtime is not implemented yet (TODO).
	 */
	return 0;
}
/*
 * Read back the current GFX PLL configuration and compute the DC, GMC
 * and GPU output rates in MHz (ref_clock is in kHz, hence the /1000).
 * Any of the output pointers may be NULL if the caller is not
 * interested in that rate.
 */
static void loongson_gfxpll_get_rates(struct loongson_gfxpll * const this,
				      unsigned int *dc,
				      unsigned int *gmc,
				      unsigned int *gpu)
{
	struct loongson_gfxpll_parms *pparms = &this->parms;
	union loongson_gfxpll_reg_bitmap gfxpll_reg;
	unsigned int pre_output;
	unsigned int dc_mhz;
	unsigned int gmc_mhz;
	unsigned int gpu_mhz;
	__gfxpll_rreg(this, &gfxpll_reg);
	/* Cache the live divider/multiplier settings */
	pparms->div_ref = gfxpll_reg.bitmap.div_ref;
	pparms->loopc = gfxpll_reg.bitmap.loopc;
	pparms->div_out_dc = gfxpll_reg.bitmap.div_out_dc;
	pparms->div_out_gmc = gfxpll_reg.bitmap.div_out_gmc;
	pparms->div_out_gpu = gfxpll_reg.bitmap.div_out_gpu;
	/* Common intermediate frequency shared by the three outputs, in kHz */
	pre_output = pparms->ref_clock / pparms->div_ref * pparms->loopc;
	dc_mhz = pre_output / pparms->div_out_dc / 1000;
	gmc_mhz = pre_output / pparms->div_out_gmc / 1000;
	gpu_mhz = pre_output / pparms->div_out_gpu / 1000;
	if (dc)
		*dc = dc_mhz;
	if (gmc)
		*gmc = gmc_mhz;
	if (gpu)
		*gpu = gpu_mhz;
}
/*
 * Print the GFX PLL output rates; with @verbose also dump the raw
 * divider/multiplier parameters.
 */
static void loongson_gfxpll_print(struct loongson_gfxpll * const this,
				  struct drm_printer *p,
				  bool verbose)
{
	struct loongson_gfxpll_parms *parms = &this->parms;
	unsigned int dc, gmc, gpu;
	if (verbose) {
		drm_printf(p, "reference clock: %u\n", parms->ref_clock);
		drm_printf(p, "div_ref = %u\n", parms->div_ref);
		drm_printf(p, "loopc = %u\n", parms->loopc);
		drm_printf(p, "div_out_dc = %u\n", parms->div_out_dc);
		drm_printf(p, "div_out_gmc = %u\n", parms->div_out_gmc);
		drm_printf(p, "div_out_gpu = %u\n", parms->div_out_gpu);
	}
	this->funcs->get_rates(this, &dc, &gmc, &gpu);
	drm_printf(p, "dc: %uMHz, gmc: %uMHz, gpu: %uMHz\n", dc, gmc, gpu);
}
/* GFX (DC, GPU, GMC) PLL initialization and destroy function */
/* drmm action: unmap the PLL registers and free the object */
static void loongson_gfxpll_fini(struct drm_device *ddev, void *data)
{
	struct loongson_gfxpll *this = (struct loongson_gfxpll *)data;
	iounmap(this->mmio);
	kfree(this);
}
/*
 * Map the GFX PLL register window, record the reference clock and log
 * the current output rates. Returns 0 on success, -ENOMEM if the
 * register window cannot be mapped.
 */
static int loongson_gfxpll_init(struct loongson_gfxpll * const this)
{
	struct loongson_gfxpll_parms *pparms = &this->parms;
	struct drm_printer printer = drm_info_printer(this->ddev->dev);
	pparms->ref_clock = LSDC_PLL_REF_CLK_KHZ;
	this->mmio = ioremap(this->reg_base, this->reg_size);
	/* ioremap() returns NULL on failure, never an ERR_PTR() */
	if (!this->mmio)
		return -ENOMEM;
	this->funcs->print(this, &printer, false);
	return 0;
}
/* Op table for the shared DC/GMC/GPU PLL */
static const struct loongson_gfxpll_funcs lsdc_gmc_gpu_funcs = {
	.init = loongson_gfxpll_init,
	.update = loongson_gfxpll_update,
	.get_rates = loongson_gfxpll_get_rates,
	.print = loongson_gfxpll_print,
};
/*
 * Allocate and initialize the GFX PLL object for @ddev, storing it in
 * @ppout. Cleanup is tied to the drm device lifetime via a drmm action.
 * Returns 0 on success or a negative error code.
 */
int loongson_gfxpll_create(struct drm_device *ddev,
			   struct loongson_gfxpll **ppout)
{
	struct lsdc_device *ldev = to_lsdc(ddev);
	const struct loongson_gfx_desc *gfx = to_loongson_gfx(ldev->descp);
	struct loongson_gfxpll *this;
	int ret;
	this = kzalloc(sizeof(*this), GFP_KERNEL);
	/* kzalloc() returns NULL on failure, never an ERR_PTR() */
	if (!this)
		return -ENOMEM;
	this->ddev = ddev;
	this->reg_size = gfx->gfxpll.reg_size;
	this->reg_base = gfx->conf_reg_base + gfx->gfxpll.reg_offset;
	this->funcs = &lsdc_gmc_gpu_funcs;
	ret = this->funcs->init(this);
	if (unlikely(ret)) {
		kfree(this);
		return ret;
	}
	*ppout = this;
	/* On action-registration failure this also runs the fini action */
	return drmm_add_action_or_reset(ddev, loongson_gfxpll_fini, this);
}
| linux-master | drivers/gpu/drm/loongson/lsdc_gfxpll.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2023 Loongson Technology Corporation Limited
*/
#include <linux/delay.h>
#include <drm/drm_managed.h>
#include "lsdc_drv.h"
/*
* The structure of the pixel PLL registers is evolved with times,
* it can be different across different chip also.
*/
/* The register is 64 bits wide; note that all Loongson CPUs are little
 * endian. This structure is the same for ls7a2000, ls7a1000 and ls2k2000.
 */
/* Bit layout of the 64-bit pixel PLL configuration register */
struct lsdc_pixpll_reg {
	/* Byte 0 ~ Byte 3 */
	unsigned div_out       : 7;   /*  6 : 0     Output clock divider  */
	unsigned _reserved_1_  : 14;  /* 20 : 7                           */
	unsigned loopc         : 9;   /* 29 : 21    Clock multiplier      */
	unsigned _reserved_2_  : 2;   /* 31 : 30                          */

	/* Byte 4 ~ Byte 7 */
	unsigned div_ref       : 7;   /* 38 : 32    Input clock divider   */
	unsigned locked        : 1;   /* 39         PLL locked indicator  */
	unsigned sel_out       : 1;   /* 40         output clk selector   */
	unsigned _reserved_3_  : 2;   /* 42 : 41                          */
	unsigned set_param     : 1;   /* 43         Trigger the update    */
	unsigned bypass        : 1;   /* 44                               */
	unsigned powerdown     : 1;   /* 45                               */
	unsigned _reserved_4_  : 18;  /* 46 : 63    no use                */
};

/* Overlay allowing the register to be accessed as bitfields, as two
 * 32-bit words, or as one 64-bit doubleword.
 */
union lsdc_pixpll_reg_bitmap {
	struct lsdc_pixpll_reg bitmap;
	u32 w[2];
	u64 d;
};

/* One row of the precomputed pixel-clock -> PLL parameter table */
struct clk_to_pixpll_parms_lookup_t {
	unsigned int clock;        /* kHz */

	unsigned short width;
	unsigned short height;
	unsigned short vrefresh;

	/* Stores parameters for programming the Hardware PLLs */
	unsigned short div_out;
	unsigned short loopc;
	unsigned short div_ref;
};
/*
 * Precomputed PLL parameters for common pixel clocks; consulted first by
 * lsdc_pixpll_find() so that a modeset does not have to brute-force the
 * divider search every time.
 */
static const struct clk_to_pixpll_parms_lookup_t pixpll_parms_table[] = {
	{148500, 1920, 1080, 60,  11,  49,  3},   /* 1920x1080@60Hz */
	{141750, 1920, 1080, 60,  11,  78,  5},   /* 1920x1080@60Hz */
						  /* 1920x1080@50Hz */
	{174500, 1920, 1080, 75,  17,  89,  3},   /* 1920x1080@75Hz */
	{181250, 2560, 1080, 75,   8,  58,  4},   /* 2560x1080@75Hz */
	/*
	 * NOTE(review): the mode fields below say 2560x1080@30 while the
	 * comment says 3840x2160@30Hz (297 MHz is the standard 4k@30
	 * pixel clock) -- confirm which one is intended.
	 */
	{297000, 2560, 1080, 30,   8,  95,  4},   /* 3840x2160@30Hz */
	{301992, 1920, 1080, 100, 10, 151,  5},   /* 1920x1080@100Hz */
	{146250, 1680, 1050, 60,  16, 117,  5},   /* 1680x1050@60Hz */
	{135000, 1280, 1024, 75,  10,  54,  4},   /* 1280x1024@75Hz */
	{119000, 1680, 1050, 60,  20, 119,  5},   /* 1680x1050@60Hz */
	{108000, 1600, 900,  60,  15,  81,  5},   /* 1600x900@60Hz  */
						  /* 1280x1024@60Hz */
						  /* 1280x960@60Hz */
						  /* 1152x864@75Hz */
	{106500, 1440, 900,  60,  19,  81,  4},   /* 1440x900@60Hz */
	{88750,  1440, 900,  60,  16,  71,  5},   /* 1440x900@60Hz */
	{83500,  1280, 800,  60,  17,  71,  5},   /* 1280x800@60Hz */
	{71000,  1280, 800,  60,  20,  71,  5},   /* 1280x800@60Hz */
	{74250,  1280, 720,  60,  22,  49,  3},   /* 1280x720@60Hz */
						  /* 1280x720@50Hz */
	{78750,  1024, 768,  75,  16,  63,  5},   /* 1024x768@75Hz */
	{75000,  1024, 768,  70,  29,  87,  4},   /* 1024x768@70Hz */
	{65000,  1024, 768,  60,  20,  39,  3},   /* 1024x768@60Hz */
	{51200,  1024, 600,  60,  25,  64,  5},   /* 1024x600@60Hz */
	{57284,  832,  624,  75,  24,  55,  4},   /* 832x624@75Hz */
	{49500,  800,  600,  75,  40,  99,  5},   /* 800x600@75Hz */
	{50000,  800,  600,  72,  44,  88,  4},   /* 800x600@72Hz */
	{40000,  800,  600,  60,  30,  36,  3},   /* 800x600@60Hz */
	{36000,  800,  600,  56,  50,  72,  4},   /* 800x600@56Hz */
	{31500,  640,  480,  75,  40,  63,  5},   /* 640x480@75Hz */
						  /* 640x480@73Hz */
	{30240,  640,  480,  67,  62,  75,  4},   /* 640x480@67Hz */
	{27000,  720,  576,  50,  50,  54,  4},   /* 720x576@50Hz */
	{25175,  640,  480,  60,  85, 107,  5},   /* 640x480@60Hz */
	{25200,  640,  480,  60,  50,  63,  5},   /* 640x480@60Hz */
						  /* 720x480@60Hz */
};
/* drmm release action: unmap the PLL registers and free the private parms */
static void lsdc_pixel_pll_free(struct drm_device *ddev, void *data)
{
	struct lsdc_pixpll *this = (struct lsdc_pixpll *)data;

	iounmap(this->mmio);

	kfree(this->priv);

	drm_dbg(ddev, "pixpll private data freed\n");
}
/*
* ioremap the device dependent PLL registers
*
* @this: point to the object where this function is called from
*/
static int lsdc_pixel_pll_setup(struct lsdc_pixpll * const this)
{
struct lsdc_pixpll_parms *pparms;
this->mmio = ioremap(this->reg_base, this->reg_size);
if (IS_ERR_OR_NULL(this->mmio))
return -ENOMEM;
pparms = kzalloc(sizeof(*pparms), GFP_KERNEL);
if (IS_ERR_OR_NULL(pparms))
return -ENOMEM;
pparms->ref_clock = LSDC_PLL_REF_CLK_KHZ;
this->priv = pparms;
return drmm_add_action_or_reset(this->ddev, lsdc_pixel_pll_free, this);
}
/*
 * Look up a precomputed set of PLL parameters for the given pixel clock,
 * which avoids recomputing the PLL parameters each time a modeset is
 * triggered.
 *
 * @this: point to the object where this function is called from
 * @clock: the desired output pixel clock, the unit is kHz
 * @pout: point to where the parameters to store if found
 *
 * Return 0 if success, return -1 if not found.
 */
static int lsdc_pixpll_find(struct lsdc_pixpll * const this,
			    unsigned int clock,
			    struct lsdc_pixpll_parms *pout)
{
	const struct clk_to_pixpll_parms_lookup_t *entry;
	unsigned int idx;

	for (idx = 0; idx < ARRAY_SIZE(pixpll_parms_table); ++idx) {
		entry = &pixpll_parms_table[idx];

		if (entry->clock != clock)
			continue;

		pout->div_ref = entry->div_ref;
		pout->loopc   = entry->loopc;
		pout->div_out = entry->div_out;

		return 0;
	}

	drm_dbg_kms(this->ddev, "pixel clock %u: miss\n", clock);

	return -1;
}
/*
 * Find a set of pll parameters which have minimal difference with the
 * desired pixel clock frequency. It does that by computing all of the
 * possible combination. Compute the diff and find the combination with
 * minimal diff.
 *
 * clock_out = refclk / div_ref * loopc / div_out
 *
 * refclk is determined by the oscillator mounted on motherboard(100MHz
 * in almost all board)
 *
 * @this: point to the object from where this function is called
 * @clock: the desired output pixel clock, the unit is kHz
 * @pout: point to the out struct of lsdc_pixpll_parms
 *
 * Return 0 if a set of parameter is found, otherwise return the error
 * between clock_kHz we wanted and the most closest candidate with it.
 *
 * NOTE(review): the search records candidates directly in this->priv
 * (pparms), so priv may hold a stale candidate when the function fails
 * with min >= tolerance -- confirm callers never rely on priv after a
 * failed compute.
 */
static int lsdc_pixel_pll_compute(struct lsdc_pixpll * const this,
				  unsigned int clock,
				  struct lsdc_pixpll_parms *pout)
{
	struct lsdc_pixpll_parms *pparms = this->priv;
	unsigned int refclk = pparms->ref_clock;
	const unsigned int tolerance = 1000;
	unsigned int min = tolerance;
	unsigned int div_out, loopc, div_ref;
	unsigned int computed;

	/* Fast path: table hit fills *pout directly */
	if (!lsdc_pixpll_find(this, clock, pout))
		return 0;

	/* Exhaustive search over the legal divider/multiplier ranges */
	for (div_out = 6; div_out < 64; div_out++) {
		for (div_ref = 3; div_ref < 6; div_ref++) {
			for (loopc = 6; loopc < 161; loopc++) {
				unsigned int diff = 0;

				/* VCO constraint: 12 <= loopc/div_ref <= 32 */
				if (loopc < 12 * div_ref)
					continue;
				if (loopc > 32 * div_ref)
					continue;

				computed = refclk / div_ref * loopc / div_out;

				if (clock >= computed)
					diff = clock - computed;
				else
					diff = computed - clock;

				if (diff < min) {
					min = diff;
					pparms->div_ref = div_ref;
					pparms->div_out = div_out;
					pparms->loopc = loopc;

					/* Exact match, stop searching */
					if (diff == 0) {
						*pout = *pparms;
						return 0;
					}
				}
			}
		}
	}

	/* still acceptable */
	if (min < tolerance) {
		*pout = *pparms;
		return 0;
	}

	drm_dbg(this->ddev, "can't find suitable params for %u khz\n", clock);

	return min;
}
/* Pixel pll hardware related ops, per display pipe */

/* Read the whole 64-bit PLL register into @dst (two 32-bit reads on
 * 32-bit kernels).
 */
static void __pixpll_rreg(struct lsdc_pixpll *this,
			  union lsdc_pixpll_reg_bitmap *dst)
{
#if defined(CONFIG_64BIT)
	dst->d = readq(this->mmio);
#else
	dst->w[0] = readl(this->mmio);
	dst->w[1] = readl(this->mmio + 4);
#endif
}

/* Write the whole 64-bit PLL register from @src (two 32-bit writes on
 * 32-bit kernels).
 */
static void __pixpll_wreg(struct lsdc_pixpll *this,
			  union lsdc_pixpll_reg_bitmap *src)
{
#if defined(CONFIG_64BIT)
	writeq(src->d, this->mmio);
#else
	writel(src->w[0], this->mmio);
	writel(src->w[1], this->mmio + 4);
#endif
}
/* Read-modify-write helpers for single control bits of the pixel PLL.
 * Each performs one full 64-bit read, flips one field, and writes the
 * register back.
 */

/* Clear the powerdown bit: power the PLL up */
static void __pixpll_ops_powerup(struct lsdc_pixpll * const this)
{
	union lsdc_pixpll_reg_bitmap pixpll_reg;

	__pixpll_rreg(this, &pixpll_reg);

	pixpll_reg.bitmap.powerdown = 0;

	__pixpll_wreg(this, &pixpll_reg);
}

/* Set the powerdown bit: power the PLL down */
static void __pixpll_ops_powerdown(struct lsdc_pixpll * const this)
{
	union lsdc_pixpll_reg_bitmap pixpll_reg;

	__pixpll_rreg(this, &pixpll_reg);

	pixpll_reg.bitmap.powerdown = 1;

	__pixpll_wreg(this, &pixpll_reg);
}

/* Route the PLL output to the display pipe (sel_out = 1) */
static void __pixpll_ops_on(struct lsdc_pixpll * const this)
{
	union lsdc_pixpll_reg_bitmap pixpll_reg;

	__pixpll_rreg(this, &pixpll_reg);

	pixpll_reg.bitmap.sel_out = 1;

	__pixpll_wreg(this, &pixpll_reg);
}

/* Disconnect the PLL output (sel_out = 0) */
static void __pixpll_ops_off(struct lsdc_pixpll * const this)
{
	union lsdc_pixpll_reg_bitmap pixpll_reg;

	__pixpll_rreg(this, &pixpll_reg);

	pixpll_reg.bitmap.sel_out = 0;

	__pixpll_wreg(this, &pixpll_reg);
}

/* Bypass the PLL (output follows the reference clock directly) */
static void __pixpll_ops_bypass(struct lsdc_pixpll * const this)
{
	union lsdc_pixpll_reg_bitmap pixpll_reg;

	__pixpll_rreg(this, &pixpll_reg);

	pixpll_reg.bitmap.bypass = 1;

	__pixpll_wreg(this, &pixpll_reg);
}

/* Leave bypass mode, use the PLL-generated clock again */
static void __pixpll_ops_unbypass(struct lsdc_pixpll * const this)
{
	union lsdc_pixpll_reg_bitmap pixpll_reg;

	__pixpll_rreg(this, &pixpll_reg);

	pixpll_reg.bitmap.bypass = 0;

	__pixpll_wreg(this, &pixpll_reg);
}
/* De-assert set_param after the new dividers have been latched */
static void __pixpll_ops_untoggle_param(struct lsdc_pixpll * const this)
{
	union lsdc_pixpll_reg_bitmap pixpll_reg;

	__pixpll_rreg(this, &pixpll_reg);

	pixpll_reg.bitmap.set_param = 0;

	__pixpll_wreg(this, &pixpll_reg);
}

/* Program the divider/multiplier fields from @p (does not latch them) */
static void __pixpll_ops_set_param(struct lsdc_pixpll * const this,
				   struct lsdc_pixpll_parms const *p)
{
	union lsdc_pixpll_reg_bitmap pixpll_reg;

	__pixpll_rreg(this, &pixpll_reg);

	pixpll_reg.bitmap.div_ref = p->div_ref;
	pixpll_reg.bitmap.loopc = p->loopc;
	pixpll_reg.bitmap.div_out = p->div_out;

	__pixpll_wreg(this, &pixpll_reg);
}

/* Assert set_param to make the hardware pick up the new dividers */
static void __pixpll_ops_toggle_param(struct lsdc_pixpll * const this)
{
	union lsdc_pixpll_reg_bitmap pixpll_reg;

	__pixpll_rreg(this, &pixpll_reg);

	pixpll_reg.bitmap.set_param = 1;

	__pixpll_wreg(this, &pixpll_reg);
}

/*
 * Busy-poll the locked bit for up to 2000 iterations. There is no
 * timeout error reported: if the PLL never locks we fall through and
 * only leave a debug trace of how long we waited.
 */
static void __pixpll_ops_wait_locked(struct lsdc_pixpll * const this)
{
	union lsdc_pixpll_reg_bitmap pixpll_reg;
	unsigned int counter = 0;

	do {
		__pixpll_rreg(this, &pixpll_reg);

		if (pixpll_reg.bitmap.locked)
			break;

		++counter;
	} while (counter < 2000);

	drm_dbg(this->ddev, "%u loop waited\n", counter);
}
/*
 * Update the PLL parameters to the PLL hardware
 *
 * @this: point to the object from which this function is called
 * @pin: point to the struct of lsdc_pixpll_parms passed in
 *
 * return 0 if successful.
 *
 * The exact sequence matters: the PLL is bypassed and powered down
 * before the dividers are changed, then powered back up and given time
 * to lock before its output is switched back in.
 */
static int lsdc_pixpll_update(struct lsdc_pixpll * const this,
			      struct lsdc_pixpll_parms const *pin)
{
	__pixpll_ops_bypass(this);

	__pixpll_ops_off(this);

	__pixpll_ops_powerdown(this);

	__pixpll_ops_toggle_param(this);

	__pixpll_ops_set_param(this, pin);

	__pixpll_ops_untoggle_param(this);

	__pixpll_ops_powerup(this);

	/* brief settle time before polling the lock bit */
	udelay(2);

	__pixpll_ops_wait_locked(this);

	__pixpll_ops_on(this);

	__pixpll_ops_unbypass(this);

	return 0;
}
/*
 * Read the divider fields back from hardware, cache them in this->priv,
 * and return the resulting output frequency in kHz.
 */
static unsigned int lsdc_pixpll_get_freq(struct lsdc_pixpll * const this)
{
	struct lsdc_pixpll_parms *ppar = this->priv;
	union lsdc_pixpll_reg_bitmap pix_pll_reg;
	unsigned int freq;

	__pixpll_rreg(this, &pix_pll_reg);

	ppar->div_ref = pix_pll_reg.bitmap.div_ref;
	ppar->loopc = pix_pll_reg.bitmap.loopc;
	ppar->div_out = pix_pll_reg.bitmap.div_out;

	freq = ppar->ref_clock / ppar->div_ref * ppar->loopc / ppar->div_out;

	return freq;
}

/* Dump the cached divider settings to @p */
static void lsdc_pixpll_print(struct lsdc_pixpll * const this,
			      struct drm_printer *p)
{
	struct lsdc_pixpll_parms *parms = this->priv;

	drm_printf(p, "div_ref: %u, loopc: %u, div_out: %u\n",
		   parms->div_ref, parms->loopc, parms->div_out);
}
/*
 * LS7A1000, LS7A2000 and ls2k2000's pixel pll setting register is same,
 * we take this as default, create a new instance if a different model is
 * introduced.
 */
static const struct lsdc_pixpll_funcs __pixpll_default_funcs = {
	.setup = lsdc_pixel_pll_setup,
	.compute = lsdc_pixel_pll_compute,
	.update = lsdc_pixpll_update,
	.get_rate = lsdc_pixpll_get_freq,
	.print = lsdc_pixpll_print,
};

/* pixel pll initialization */

/*
 * Initialize the pixel PLL object for display pipe @index.
 *
 * @this: the pixel PLL object embedded in the display pipe
 * @ddev: the drm device
 * @index: display pipe index, selects which PLL register bank to use
 *
 * Return 0 on success, negative errno on failure (from setup()).
 */
int lsdc_pixpll_init(struct lsdc_pixpll * const this,
		     struct drm_device *ddev,
		     unsigned int index)
{
	struct lsdc_device *ldev = to_lsdc(ddev);
	const struct lsdc_desc *descp = ldev->descp;
	const struct loongson_gfx_desc *gfx = to_loongson_gfx(descp);

	this->ddev = ddev;
	this->reg_size = 8;	/* one 64-bit register per pipe */
	this->reg_base = gfx->conf_reg_base + gfx->pixpll[index].reg_offset;
	this->funcs = &__pixpll_default_funcs;

	return this->funcs->setup(this);
}
| linux-master | drivers/gpu/drm/loongson/lsdc_pixpll.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2023 Loongson Technology Corporation Limited
*/
#include <drm/drm_managed.h>
#include "lsdc_drv.h"
#include "lsdc_output.h"
/*
 * __lsdc_gpio_i2c_set - set the state of a gpio pin indicated by mask
 * @mask: gpio pin mask
 * @state: "0" for low, "1" for high
 *
 * Open-drain emulation: "high" is produced by turning the pin into an
 * input and letting the external pull-up raise the line; "low" by
 * driving the pin as an output at 0. Register accesses are serialized
 * with the device register lock.
 */
static void __lsdc_gpio_i2c_set(struct lsdc_i2c * const li2c, int mask, int state)
{
	struct lsdc_device *ldev = to_lsdc(li2c->ddev);
	unsigned long flags;
	u8 val;

	spin_lock_irqsave(&ldev->reglock, flags);

	if (state) {
		/*
		 * Setting this pin as input directly, write 1 for input.
		 * The external pull-up resistor will pull the level up
		 */
		val = readb(li2c->dir_reg);
		val |= mask;
		writeb(val, li2c->dir_reg);
	} else {
		/* First set this pin as output, write 0 for output */
		val = readb(li2c->dir_reg);
		val &= ~mask;
		writeb(val, li2c->dir_reg);

		/* Then, make this pin output 0 */
		val = readb(li2c->dat_reg);
		val &= ~mask;
		writeb(val, li2c->dat_reg);
	}

	spin_unlock_irqrestore(&ldev->reglock, flags);
}

/*
 * __lsdc_gpio_i2c_get - read value back from the gpio pin indicated by mask
 * @mask: gpio pin mask
 * return "0" for low, "1" for high
 *
 * The pin is switched to input mode first so the sampled level reflects
 * the bus, not a previously driven value.
 */
static int __lsdc_gpio_i2c_get(struct lsdc_i2c * const li2c, int mask)
{
	struct lsdc_device *ldev = to_lsdc(li2c->ddev);
	unsigned long flags;
	u8 val;

	spin_lock_irqsave(&ldev->reglock, flags);

	/* First set this pin as input */
	val = readb(li2c->dir_reg);
	val |= mask;
	writeb(val, li2c->dir_reg);

	/* Then get level state from this pin */
	val = readb(li2c->dat_reg);

	spin_unlock_irqrestore(&ldev->reglock, flags);

	return (val & mask) ? 1 : 0;
}
/*
 * i2c-algo-bit setsda hook: drive the SDA pin of this channel.
 *
 * Note: the previous "return <void expression>;" form is a constraint
 * violation in ISO C (only accepted as a compiler extension), so the
 * call is made as a plain statement.
 */
static void lsdc_gpio_i2c_set_sda(void *i2c, int state)
{
	struct lsdc_i2c * const li2c = (struct lsdc_i2c *)i2c;

	/* set state on the li2c->sda pin */
	__lsdc_gpio_i2c_set(li2c, li2c->sda, state);
}

/* i2c-algo-bit setscl hook: drive the SCL pin of this channel */
static void lsdc_gpio_i2c_set_scl(void *i2c, int state)
{
	struct lsdc_i2c * const li2c = (struct lsdc_i2c *)i2c;

	/* set state on the li2c->scl pin */
	__lsdc_gpio_i2c_set(li2c, li2c->scl, state);
}
/* i2c-algo-bit getsda hook: sample the SDA pin of this channel */
static int lsdc_gpio_i2c_get_sda(void *i2c)
{
	struct lsdc_i2c * const li2c = i2c;

	return __lsdc_gpio_i2c_get(li2c, li2c->sda);
}

/* i2c-algo-bit getscl hook: sample the SCL pin of this channel */
static int lsdc_gpio_i2c_get_scl(void *i2c)
{
	struct lsdc_i2c * const li2c = i2c;

	return __lsdc_gpio_i2c_get(li2c, li2c->scl);
}
/* drmm release action: unregister the bit-banged adapter and free it */
static void lsdc_destroy_i2c(struct drm_device *ddev, void *data)
{
	struct lsdc_i2c *li2c = (struct lsdc_i2c *)data;

	if (li2c) {
		i2c_del_adapter(&li2c->adapter);
		kfree(li2c);
	}
}
/*
 * The DC in ls7a1000/ls7a2000/ls2k2000 has builtin gpio hardware
 *
 * @ddev: point to the drm device
 * @dispipe: the display pipe this i2c channel serves
 * @index: output channel index, 0 for PIPE0, 1 for PIPE1
 *
 * Return 0 on success, negative errno on failure.
 */
int lsdc_create_i2c_chan(struct drm_device *ddev,
			 struct lsdc_display_pipe *dispipe,
			 unsigned int index)
{
	struct lsdc_device *ldev = to_lsdc(ddev);
	struct i2c_adapter *adapter;
	struct lsdc_i2c *li2c;
	int ret;

	/*
	 * Only two hardware channels exist. Reject invalid indexes before
	 * allocating anything: the old code leaked li2c (and left a
	 * dangling dispipe->li2c) on this path.
	 */
	if (index > 1)
		return -ENOENT;

	li2c = kzalloc(sizeof(*li2c), GFP_KERNEL);
	if (!li2c)
		return -ENOMEM;

	dispipe->li2c = li2c;

	if (index == 0) {
		li2c->sda = 0x01;  /* pin 0 */
		li2c->scl = 0x02;  /* pin 1 */
	} else {
		li2c->sda = 0x04;  /* pin 2 */
		li2c->scl = 0x08;  /* pin 3 */
	}

	li2c->ddev = ddev;
	/* Both channels share the same direction/data registers */
	li2c->dir_reg = ldev->reg_base + LS7A_DC_GPIO_DIR_REG;
	li2c->dat_reg = ldev->reg_base + LS7A_DC_GPIO_DAT_REG;

	li2c->bit.setsda = lsdc_gpio_i2c_set_sda;
	li2c->bit.setscl = lsdc_gpio_i2c_set_scl;
	li2c->bit.getsda = lsdc_gpio_i2c_get_sda;
	li2c->bit.getscl = lsdc_gpio_i2c_get_scl;
	li2c->bit.udelay = 5;
	li2c->bit.timeout = usecs_to_jiffies(2200);
	li2c->bit.data = li2c;

	adapter = &li2c->adapter;
	adapter->algo_data = &li2c->bit;
	adapter->owner = THIS_MODULE;
	adapter->class = I2C_CLASS_DDC;
	adapter->dev.parent = ddev->dev;
	adapter->nr = -1;

	snprintf(adapter->name, sizeof(adapter->name), "lsdc-i2c%u", index);

	i2c_set_adapdata(adapter, li2c);

	ret = i2c_bit_add_bus(adapter);
	if (ret) {
		/* Don't leave a dangling pointer behind on failure */
		dispipe->li2c = NULL;
		kfree(li2c);
		return ret;
	}

	ret = drmm_add_action_or_reset(ddev, lsdc_destroy_i2c, li2c);
	if (ret)
		return ret;

	drm_info(ddev, "%s(sda pin mask=%u, scl pin mask=%u) created\n",
		 adapter->name, li2c->sda, li2c->scl);

	return 0;
}
| linux-master | drivers/gpu/drm/loongson/lsdc_i2c.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2023 Loongson Technology Corporation Limited
*/
#include <linux/delay.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_plane_helper.h>
#include "lsdc_drv.h"
#include "lsdc_regs.h"
#include "lsdc_ttm.h"
/* Pixel formats accepted by the primary planes */
static const u32 lsdc_primary_formats[] = {
	DRM_FORMAT_XRGB8888,
};

/* Pixel formats accepted by the cursor planes */
static const u32 lsdc_cursor_formats[] = {
	DRM_FORMAT_ARGB8888,
};

/* Only linear buffers are scanned out; list is INVALID-terminated */
static const u64 lsdc_fb_format_modifiers[] = {
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID
};
/*
 * Byte offset of the visible (src_x, src_y) origin inside the
 * framebuffer, relative to the start of the backing object.
 */
static unsigned int lsdc_get_fb_offset(struct drm_framebuffer *fb,
				       struct drm_plane_state *state)
{
	unsigned int x = state->src_x >> 16;
	unsigned int y = state->src_y >> 16;

	return fb->offsets[0] + y * fb->pitches[0] + x * fb->format->cpp[0];
}

/* Physical scanout address of the framebuffer's backing object */
static u64 lsdc_fb_base_addr(struct drm_framebuffer *fb)
{
	struct lsdc_device *ldev = to_lsdc(fb->dev);

	return ldev->vram_base + lsdc_bo_gpu_offset(gem_to_lsdc_bo(fb->obj[0]));
}
/*
 * Atomic check for the primary plane: no scaling supported; the plane
 * may not be visible-clipped away (can_position = false, can be
 * disabled = true).
 */
static int lsdc_primary_atomic_check(struct drm_plane *plane,
				     struct drm_atomic_state *state)
{
	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, plane);
	struct drm_crtc *crtc = new_plane_state->crtc;
	struct drm_crtc_state *new_crtc_state;

	/* Nothing to validate while the plane is not bound to a crtc */
	if (!crtc)
		return 0;

	new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);

	return drm_atomic_helper_check_plane_state(new_plane_state,
						   new_crtc_state,
						   DRM_PLANE_NO_SCALING,
						   DRM_PLANE_NO_SCALING,
						   false, true);
}

/*
 * Program the new scanout address and stride; the format register is
 * only touched when the format actually changed.
 */
static void lsdc_primary_atomic_update(struct drm_plane *plane,
				       struct drm_atomic_state *state)
{
	struct lsdc_primary *primary = to_lsdc_primary(plane);
	const struct lsdc_primary_plane_ops *ops = primary->ops;
	struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state, plane);
	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, plane);
	struct drm_framebuffer *new_fb = new_plane_state->fb;
	struct drm_framebuffer *old_fb = old_plane_state->fb;
	u64 fb_addr = lsdc_fb_base_addr(new_fb);

	/* Account for the visible origin inside the framebuffer */
	fb_addr += lsdc_get_fb_offset(new_fb, new_plane_state);

	ops->update_fb_addr(primary, fb_addr);
	ops->update_fb_stride(primary, new_fb->pitches[0]);

	if (!old_fb || old_fb->format != new_fb->format)
		ops->update_fb_format(primary, new_fb->format);
}

static void lsdc_primary_atomic_disable(struct drm_plane *plane,
					struct drm_atomic_state *state)
{
	/*
	 * Do nothing, just prevent call into atomic_update().
	 * Writing the format as LSDC_PF_NONE can disable the primary,
	 * But it seems not necessary...
	 */
	drm_dbg(plane->dev, "%s disabled\n", plane->name);
}
/*
 * Pin the framebuffer's backing object into VRAM (under the bo
 * reservation) and take a reference that cleanup_fb() drops again.
 */
static int lsdc_plane_prepare_fb(struct drm_plane *plane,
				 struct drm_plane_state *new_state)
{
	struct drm_framebuffer *fb = new_state->fb;
	struct lsdc_bo *lbo;
	u64 gpu_vaddr;
	int ret;

	/* The plane is being disabled, nothing to pin */
	if (!fb)
		return 0;

	lbo = gem_to_lsdc_bo(fb->obj[0]);

	ret = lsdc_bo_reserve(lbo);
	if (unlikely(ret)) {
		drm_err(plane->dev, "bo %p reserve failed\n", lbo);
		return ret;
	}

	ret = lsdc_bo_pin(lbo, LSDC_GEM_DOMAIN_VRAM, &gpu_vaddr);

	lsdc_bo_unreserve(lbo);

	if (unlikely(ret)) {
		drm_err(plane->dev, "bo %p pin failed\n", lbo);
		return ret;
	}

	/* Balanced by lsdc_bo_unref() in lsdc_plane_cleanup_fb() */
	lsdc_bo_ref(lbo);

	/* Cursor updates are frequent; don't spam the debug log for them */
	if (plane->type != DRM_PLANE_TYPE_CURSOR)
		drm_dbg(plane->dev,
			"%s[%p] pin at 0x%llx, bo size: %zu\n",
			plane->name, lbo, gpu_vaddr, lsdc_bo_size(lbo));

	return drm_gem_plane_helper_prepare_fb(plane, new_state);
}

/* Undo lsdc_plane_prepare_fb(): unpin the bo and drop our reference */
static void lsdc_plane_cleanup_fb(struct drm_plane *plane,
				  struct drm_plane_state *old_state)
{
	struct drm_framebuffer *fb = old_state->fb;
	struct lsdc_bo *lbo;
	int ret;

	if (!fb)
		return;

	lbo = gem_to_lsdc_bo(fb->obj[0]);

	ret = lsdc_bo_reserve(lbo);
	if (unlikely(ret)) {
		drm_err(plane->dev, "%p reserve failed\n", lbo);
		return;
	}

	lsdc_bo_unpin(lbo);

	lsdc_bo_unreserve(lbo);

	lsdc_bo_unref(lbo);

	if (plane->type != DRM_PLANE_TYPE_CURSOR)
		drm_dbg(plane->dev, "%s unpin\n", plane->name);
}

/* Helper vtable for the primary planes */
static const struct drm_plane_helper_funcs lsdc_primary_helper_funcs = {
	.prepare_fb = lsdc_plane_prepare_fb,
	.cleanup_fb = lsdc_plane_cleanup_fb,
	.atomic_check = lsdc_primary_atomic_check,
	.atomic_update = lsdc_primary_atomic_update,
	.atomic_disable = lsdc_primary_atomic_disable,
};
/*
 * Async (fast-path) check for cursor moves: only position changes on an
 * already-enabled, already-sized square 32x32 or 64x64 cursor qualify;
 * anything else must go through the full atomic commit.
 */
static int lsdc_cursor_plane_atomic_async_check(struct drm_plane *plane,
						struct drm_atomic_state *state)
{
	struct drm_plane_state *new_state;
	struct drm_crtc_state *crtc_state;

	new_state = drm_atomic_get_new_plane_state(state, plane);

	if (!plane->state || !plane->state->fb) {
		drm_dbg(plane->dev, "%s: state is NULL\n", plane->name);
		return -EINVAL;
	}

	/* Hardware only supports square cursors */
	if (new_state->crtc_w != new_state->crtc_h) {
		drm_dbg(plane->dev, "unsupported cursor size: %ux%u\n",
			new_state->crtc_w, new_state->crtc_h);
		return -EINVAL;
	}

	if (new_state->crtc_w != 64 && new_state->crtc_w != 32) {
		drm_dbg(plane->dev, "unsupported cursor size: %ux%u\n",
			new_state->crtc_w, new_state->crtc_h);
		return -EINVAL;
	}

	crtc_state = drm_atomic_get_existing_crtc_state(state, new_state->crtc);
	if (!crtc_state->active)
		return -EINVAL;

	/* Any geometry or crtc change requires the slow path */
	if (plane->state->crtc != new_state->crtc ||
	    plane->state->src_w != new_state->src_w ||
	    plane->state->src_h != new_state->src_h ||
	    plane->state->crtc_w != new_state->crtc_w ||
	    plane->state->crtc_h != new_state->crtc_h)
		return -EINVAL;

	if (new_state->visible != plane->state->visible)
		return -EINVAL;

	return drm_atomic_helper_check_plane_state(plane->state,
						   crtc_state,
						   DRM_PLANE_NO_SCALING,
						   DRM_PLANE_NO_SCALING,
						   true, true);
}

/*
 * Async cursor update: copy the new geometry into the current plane
 * state, swap in the new fb, and reprogram the hardware cursor.
 */
static void lsdc_cursor_plane_atomic_async_update(struct drm_plane *plane,
						  struct drm_atomic_state *state)
{
	struct lsdc_cursor *cursor = to_lsdc_cursor(plane);
	const struct lsdc_cursor_plane_ops *ops = cursor->ops;
	struct drm_framebuffer *old_fb = plane->state->fb;
	struct drm_framebuffer *new_fb;
	struct drm_plane_state *new_state;

	new_state = drm_atomic_get_new_plane_state(state, plane);

	/*
	 * NOTE(review): new_fb is captured from plane->state->fb *before*
	 * the swap() below, so old_fb == new_fb whenever an fb was
	 * already bound and the "old_fb != new_fb" test never fires;
	 * update_bo_addr() then only runs the first time a cursor fb
	 * appears. Confirm whether new_state->fb was intended here.
	 */
	new_fb = plane->state->fb;

	plane->state->crtc_x = new_state->crtc_x;
	plane->state->crtc_y = new_state->crtc_y;
	plane->state->crtc_h = new_state->crtc_h;
	plane->state->crtc_w = new_state->crtc_w;
	plane->state->src_x = new_state->src_x;
	plane->state->src_y = new_state->src_y;
	plane->state->src_h = new_state->src_h;
	plane->state->src_w = new_state->src_w;
	swap(plane->state->fb, new_state->fb);

	if (new_state->visible) {
		enum lsdc_cursor_size cursor_size;

		switch (new_state->crtc_w) {
		case 64:
			cursor_size = CURSOR_SIZE_64X64;
			break;
		case 32:
			cursor_size = CURSOR_SIZE_32X32;
			break;
		default:
			cursor_size = CURSOR_SIZE_32X32;
			break;
		}

		ops->update_position(cursor, new_state->crtc_x, new_state->crtc_y);

		ops->update_cfg(cursor, cursor_size, CURSOR_FORMAT_ARGB8888);

		if (!old_fb || old_fb != new_fb)
			ops->update_bo_addr(cursor, lsdc_fb_base_addr(new_fb));
	}
}
/* ls7a1000 cursor plane helpers */

/* ls7a1000's cursor hardware only supports a fixed 32x32 ARGB cursor */
static int ls7a1000_cursor_plane_atomic_check(struct drm_plane *plane,
					      struct drm_atomic_state *state)
{
	struct drm_plane_state *new_plane_state;
	struct drm_crtc_state *new_crtc_state;
	struct drm_crtc *crtc;

	new_plane_state = drm_atomic_get_new_plane_state(state, plane);

	crtc = new_plane_state->crtc;
	if (!crtc) {
		drm_dbg(plane->dev, "%s is not bind to a crtc\n", plane->name);
		return 0;
	}

	if (new_plane_state->crtc_w != 32 || new_plane_state->crtc_h != 32) {
		drm_dbg(plane->dev, "unsupported cursor size: %ux%u\n",
			new_plane_state->crtc_w, new_plane_state->crtc_h);
		return -EINVAL;
	}

	new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);

	return drm_atomic_helper_check_plane_state(new_plane_state,
						   new_crtc_state,
						   DRM_PLANE_NO_SCALING,
						   DRM_PLANE_NO_SCALING,
						   true, true);
}

/* Program position, bo address (on fb change) and the fixed 32x32 config */
static void ls7a1000_cursor_plane_atomic_update(struct drm_plane *plane,
						struct drm_atomic_state *state)
{
	struct lsdc_cursor *cursor = to_lsdc_cursor(plane);
	struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state, plane);
	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, plane);
	struct drm_framebuffer *new_fb = new_plane_state->fb;
	struct drm_framebuffer *old_fb = old_plane_state->fb;
	const struct lsdc_cursor_plane_ops *ops = cursor->ops;
	u64 addr = lsdc_fb_base_addr(new_fb);

	if (!new_plane_state->visible)
		return;

	ops->update_position(cursor, new_plane_state->crtc_x, new_plane_state->crtc_y);

	if (!old_fb || old_fb != new_fb)
		ops->update_bo_addr(cursor, addr);

	ops->update_cfg(cursor, CURSOR_SIZE_32X32, CURSOR_FORMAT_ARGB8888);
}

/* Hide the cursor by writing the "disable" format */
static void ls7a1000_cursor_plane_atomic_disable(struct drm_plane *plane,
						 struct drm_atomic_state *state)
{
	struct lsdc_cursor *cursor = to_lsdc_cursor(plane);
	const struct lsdc_cursor_plane_ops *ops = cursor->ops;

	ops->update_cfg(cursor, CURSOR_SIZE_32X32, CURSOR_FORMAT_DISABLE);
}

static const struct drm_plane_helper_funcs ls7a1000_cursor_plane_helper_funcs = {
	.prepare_fb = lsdc_plane_prepare_fb,
	.cleanup_fb = lsdc_plane_cleanup_fb,
	.atomic_check = ls7a1000_cursor_plane_atomic_check,
	.atomic_update = ls7a1000_cursor_plane_atomic_update,
	.atomic_disable = ls7a1000_cursor_plane_atomic_disable,
	.atomic_async_check = lsdc_cursor_plane_atomic_async_check,
	.atomic_async_update = lsdc_cursor_plane_atomic_async_update,
};
/* ls7a2000 cursor plane helpers */

/* ls7a2000's cursor hardware supports square 32x32 and 64x64 cursors */
static int ls7a2000_cursor_plane_atomic_check(struct drm_plane *plane,
					      struct drm_atomic_state *state)
{
	struct drm_plane_state *new_plane_state;
	struct drm_crtc_state *new_crtc_state;
	struct drm_crtc *crtc;

	new_plane_state = drm_atomic_get_new_plane_state(state, plane);

	crtc = new_plane_state->crtc;
	if (!crtc) {
		drm_dbg(plane->dev, "%s is not bind to a crtc\n", plane->name);
		return 0;
	}

	/* Must be square */
	if (new_plane_state->crtc_w != new_plane_state->crtc_h) {
		drm_dbg(plane->dev, "unsupported cursor size: %ux%u\n",
			new_plane_state->crtc_w, new_plane_state->crtc_h);
		return -EINVAL;
	}

	/* Only 32x32 and 64x64 are supported */
	if (new_plane_state->crtc_w != 64 && new_plane_state->crtc_w != 32) {
		drm_dbg(plane->dev, "unsupported cursor size: %ux%u\n",
			new_plane_state->crtc_w, new_plane_state->crtc_h);
		return -EINVAL;
	}

	new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);

	return drm_atomic_helper_check_plane_state(new_plane_state,
						   new_crtc_state,
						   DRM_PLANE_NO_SCALING,
						   DRM_PLANE_NO_SCALING,
						   true, true);
}

/* Update the format, size and location of the cursor */
static void ls7a2000_cursor_plane_atomic_update(struct drm_plane *plane,
						struct drm_atomic_state *state)
{
	struct lsdc_cursor *cursor = to_lsdc_cursor(plane);
	struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state, plane);
	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, plane);
	struct drm_framebuffer *new_fb = new_plane_state->fb;
	struct drm_framebuffer *old_fb = old_plane_state->fb;
	const struct lsdc_cursor_plane_ops *ops = cursor->ops;
	enum lsdc_cursor_size cursor_size;

	if (!new_plane_state->visible)
		return;

	ops->update_position(cursor, new_plane_state->crtc_x, new_plane_state->crtc_y);

	/* The bo address only needs reprogramming when the fb changed */
	if (!old_fb || new_fb != old_fb) {
		u64 addr = lsdc_fb_base_addr(new_fb);

		ops->update_bo_addr(cursor, addr);
	}

	switch (new_plane_state->crtc_w) {
	case 64:
		cursor_size = CURSOR_SIZE_64X64;
		break;
	case 32:
		cursor_size = CURSOR_SIZE_32X32;
		break;
	default:
		cursor_size = CURSOR_SIZE_64X64;
		break;
	}

	ops->update_cfg(cursor, cursor_size, CURSOR_FORMAT_ARGB8888);
}

/* Hide the cursor by writing the "disable" format */
static void ls7a2000_cursor_plane_atomic_disable(struct drm_plane *plane,
						 struct drm_atomic_state *state)
{
	struct lsdc_cursor *cursor = to_lsdc_cursor(plane);
	const struct lsdc_cursor_plane_ops *hw_ops = cursor->ops;

	hw_ops->update_cfg(cursor, CURSOR_SIZE_64X64, CURSOR_FORMAT_DISABLE);
}

static const struct drm_plane_helper_funcs ls7a2000_cursor_plane_helper_funcs = {
	.prepare_fb = lsdc_plane_prepare_fb,
	.cleanup_fb = lsdc_plane_cleanup_fb,
	.atomic_check = ls7a2000_cursor_plane_atomic_check,
	.atomic_update = ls7a2000_cursor_plane_atomic_update,
	.atomic_disable = ls7a2000_cursor_plane_atomic_disable,
	.atomic_async_check = lsdc_cursor_plane_atomic_async_check,
	.atomic_async_update = lsdc_cursor_plane_atomic_async_update,
};
/* Dump the scanout address of the plane's fb into atomic state dumps */
static void lsdc_plane_atomic_print_state(struct drm_printer *p,
					  const struct drm_plane_state *state)
{
	struct drm_framebuffer *fb = state->fb;
	u64 addr;

	if (!fb)
		return;

	addr = lsdc_fb_base_addr(fb);

	drm_printf(p, "\tdma addr=%llx\n", addr);
}

/* Plane funcs shared by the primary and cursor planes */
static const struct drm_plane_funcs lsdc_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = drm_plane_cleanup,
	.reset = drm_atomic_helper_plane_reset,
	.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
	.atomic_print_state = lsdc_plane_atomic_print_state,
};
/* Primary plane 0 hardware related ops */

/*
 * Write the new scanout address into whichever of the two fb address
 * register sets is NOT currently being scanned out, so the flip takes
 * effect atomically.
 */
static void lsdc_primary0_update_fb_addr(struct lsdc_primary *primary, u64 addr)
{
	struct lsdc_device *ldev = primary->ldev;
	u32 status;
	u32 lo, hi;

	/* 40-bit width physical address bus */
	lo = addr & 0xFFFFFFFF;
	hi = (addr >> 32) & 0xFF;

	status = lsdc_rreg32(ldev, LSDC_CRTC0_CFG_REG);
	if (status & FB_REG_IN_USING) {
		lsdc_wreg32(ldev, LSDC_CRTC0_FB1_ADDR_LO_REG, lo);
		lsdc_wreg32(ldev, LSDC_CRTC0_FB1_ADDR_HI_REG, hi);
	} else {
		lsdc_wreg32(ldev, LSDC_CRTC0_FB0_ADDR_LO_REG, lo);
		lsdc_wreg32(ldev, LSDC_CRTC0_FB0_ADDR_HI_REG, hi);
	}
}

/* Program the scanline stride (bytes per line) of pipe 0 */
static void lsdc_primary0_update_fb_stride(struct lsdc_primary *primary, u32 stride)
{
	struct lsdc_device *ldev = primary->ldev;

	lsdc_wreg32(ldev, LSDC_CRTC0_STRIDE_REG, stride);
}

/* Program the pixel format of pipe 0; @format is currently ignored */
static void lsdc_primary0_update_fb_format(struct lsdc_primary *primary,
					   const struct drm_format_info *format)
{
	struct lsdc_device *ldev = primary->ldev;
	u32 status;

	status = lsdc_rreg32(ldev, LSDC_CRTC0_CFG_REG);

	/*
	 * TODO: add RGB565 support, only support XRBG8888 at present
	 */
	status &= ~CFG_PIX_FMT_MASK;
	status |= LSDC_PF_XRGB8888;

	lsdc_wreg32(ldev, LSDC_CRTC0_CFG_REG, status);
}

/* Primary plane 1 hardware related ops */

/* Same double-buffered address update as pipe 0, for pipe 1 */
static void lsdc_primary1_update_fb_addr(struct lsdc_primary *primary, u64 addr)
{
	struct lsdc_device *ldev = primary->ldev;
	u32 status;
	u32 lo, hi;

	/* 40-bit width physical address bus */
	lo = addr & 0xFFFFFFFF;
	hi = (addr >> 32) & 0xFF;

	status = lsdc_rreg32(ldev, LSDC_CRTC1_CFG_REG);
	if (status & FB_REG_IN_USING) {
		lsdc_wreg32(ldev, LSDC_CRTC1_FB1_ADDR_LO_REG, lo);
		lsdc_wreg32(ldev, LSDC_CRTC1_FB1_ADDR_HI_REG, hi);
	} else {
		lsdc_wreg32(ldev, LSDC_CRTC1_FB0_ADDR_LO_REG, lo);
		lsdc_wreg32(ldev, LSDC_CRTC1_FB0_ADDR_HI_REG, hi);
	}
}

/* Program the scanline stride (bytes per line) of pipe 1 */
static void lsdc_primary1_update_fb_stride(struct lsdc_primary *primary, u32 stride)
{
	struct lsdc_device *ldev = primary->ldev;

	lsdc_wreg32(ldev, LSDC_CRTC1_STRIDE_REG, stride);
}

/* Program the pixel format of pipe 1; @format is currently ignored */
static void lsdc_primary1_update_fb_format(struct lsdc_primary *primary,
					   const struct drm_format_info *format)
{
	struct lsdc_device *ldev = primary->ldev;
	u32 status;

	status = lsdc_rreg32(ldev, LSDC_CRTC1_CFG_REG);

	/*
	 * TODO: add RGB565 support, only support XRBG8888 at present
	 */
	status &= ~CFG_PIX_FMT_MASK;
	status |= LSDC_PF_XRGB8888;

	lsdc_wreg32(ldev, LSDC_CRTC1_CFG_REG, status);
}

/* Per-pipe primary plane hardware ops, indexed by pipe */
static const struct lsdc_primary_plane_ops lsdc_primary_plane_hw_ops[2] = {
	{
		.update_fb_addr = lsdc_primary0_update_fb_addr,
		.update_fb_stride = lsdc_primary0_update_fb_stride,
		.update_fb_format = lsdc_primary0_update_fb_format,
	},
	{
		.update_fb_addr = lsdc_primary1_update_fb_addr,
		.update_fb_stride = lsdc_primary1_update_fb_stride,
		.update_fb_format = lsdc_primary1_update_fb_format,
	},
};
/*
* Update location, format, enable and disable state of the cursor,
* For those who have two hardware cursor, let cursor 0 is attach to CRTC-0,
* cursor 1 is attach to CRTC-1. Compositing the primary plane and cursor
* plane is automatically done by hardware, the cursor is alway on the top of
* the primary plane. In other word, z-order is fixed in hardware and cannot
* be changed. For those old DC who has only one hardware cursor, we made it
* shared by the two screen, this works on extend screen mode.
*/
/* cursor plane 0 (for pipe 0) related hardware ops */

/* Program the cursor image buffer address (split into a 32-bit LO and 8-bit HI part) */
static void lsdc_cursor0_update_bo_addr(struct lsdc_cursor *cursor, u64 addr)
{
	struct lsdc_device *ldev = cursor->ldev;

	/* 40-bit width physical address bus */
	lsdc_wreg32(ldev, LSDC_CURSOR0_ADDR_HI_REG, (addr >> 32) & 0xFF);
	lsdc_wreg32(ldev, LSDC_CURSOR0_ADDR_LO_REG, addr);
}

/* Move cursor 0; the hardware takes y in bits 31:16 and x in bits 15:0, both clamped to >= 0 */
static void lsdc_cursor0_update_position(struct lsdc_cursor *cursor, int x, int y)
{
	struct lsdc_device *ldev = cursor->ldev;

	if (x < 0)
		x = 0;

	if (y < 0)
		y = 0;

	lsdc_wreg32(ldev, LSDC_CURSOR0_POSITION_REG, (y << 16) | x);
}

/* Configure cursor 0: size, pixel format, and pin it to CRTC-0 */
static void lsdc_cursor0_update_cfg(struct lsdc_cursor *cursor,
				    enum lsdc_cursor_size cursor_size,
				    enum lsdc_cursor_format fmt)
{
	struct lsdc_device *ldev = cursor->ldev;
	u32 cfg;

	cfg = CURSOR_ON_CRTC0 << CURSOR_LOCATION_SHIFT |
	      cursor_size << CURSOR_SIZE_SHIFT |
	      fmt << CURSOR_FORMAT_SHIFT;

	lsdc_wreg32(ldev, LSDC_CURSOR0_CFG_REG, cfg);
}
/* cursor plane 1 (for pipe 1) related hardware ops */

/* Program the cursor 1 image buffer address (32-bit LO + 8-bit HI part) */
static void lsdc_cursor1_update_bo_addr(struct lsdc_cursor *cursor, u64 addr)
{
	struct lsdc_device *ldev = cursor->ldev;

	/* 40-bit width physical address bus */
	lsdc_wreg32(ldev, LSDC_CURSOR1_ADDR_HI_REG, (addr >> 32) & 0xFF);
	lsdc_wreg32(ldev, LSDC_CURSOR1_ADDR_LO_REG, addr);
}

/* Move cursor 1; y goes into bits 31:16, x into bits 15:0, both clamped to >= 0 */
static void lsdc_cursor1_update_position(struct lsdc_cursor *cursor, int x, int y)
{
	struct lsdc_device *ldev = cursor->ldev;

	if (x < 0)
		x = 0;

	if (y < 0)
		y = 0;

	lsdc_wreg32(ldev, LSDC_CURSOR1_POSITION_REG, (y << 16) | x);
}

/* Configure cursor 1: size, pixel format, and pin it to CRTC-1 */
static void lsdc_cursor1_update_cfg(struct lsdc_cursor *cursor,
				    enum lsdc_cursor_size cursor_size,
				    enum lsdc_cursor_format fmt)
{
	struct lsdc_device *ldev = cursor->ldev;
	u32 cfg;

	cfg = CURSOR_ON_CRTC1 << CURSOR_LOCATION_SHIFT |
	      cursor_size << CURSOR_SIZE_SHIFT |
	      fmt << CURSOR_FORMAT_SHIFT;

	lsdc_wreg32(ldev, LSDC_CURSOR1_CFG_REG, cfg);
}
/* The hardware cursors become normal since ls7a2000/ls2k2000 */
/* One fully independent hardware cursor per display pipe */
static const struct lsdc_cursor_plane_ops ls7a2000_cursor_hw_ops[2] = {
	{
		.update_bo_addr = lsdc_cursor0_update_bo_addr,
		.update_cfg = lsdc_cursor0_update_cfg,
		.update_position = lsdc_cursor0_update_position,
	},
	{
		.update_bo_addr = lsdc_cursor1_update_bo_addr,
		.update_cfg = lsdc_cursor1_update_cfg,
		.update_position = lsdc_cursor1_update_position,
	},
};
/* Quirks for cursor 1, only for old loongson display controller */
/*
 * On the old DC there is only one hardware cursor, so the "cursor 1"
 * callbacks actually reprogram the CURSOR0 registers, merely retargeting
 * the single cursor at CRTC-1 via the location field of the CFG register.
 */
static void lsdc_cursor1_update_bo_addr_quirk(struct lsdc_cursor *cursor, u64 addr)
{
	struct lsdc_device *ldev = cursor->ldev;

	/* 40-bit width physical address bus */
	lsdc_wreg32(ldev, LSDC_CURSOR0_ADDR_HI_REG, (addr >> 32) & 0xFF);
	lsdc_wreg32(ldev, LSDC_CURSOR0_ADDR_LO_REG, addr);
}

static void lsdc_cursor1_update_position_quirk(struct lsdc_cursor *cursor, int x, int y)
{
	struct lsdc_device *ldev = cursor->ldev;

	if (x < 0)
		x = 0;

	if (y < 0)
		y = 0;

	lsdc_wreg32(ldev, LSDC_CURSOR0_POSITION_REG, (y << 16) | x);
}

static void lsdc_cursor1_update_cfg_quirk(struct lsdc_cursor *cursor,
					  enum lsdc_cursor_size cursor_size,
					  enum lsdc_cursor_format fmt)
{
	struct lsdc_device *ldev = cursor->ldev;
	u32 cfg;

	/* the shared cursor is steered to CRTC-1 here */
	cfg = CURSOR_ON_CRTC1 << CURSOR_LOCATION_SHIFT |
	      cursor_size << CURSOR_SIZE_SHIFT |
	      fmt << CURSOR_FORMAT_SHIFT;

	lsdc_wreg32(ldev, LSDC_CURSOR0_CFG_REG, cfg);
}
/*
 * The unforgiving LS7A1000/LS2K1000 has only one hardware cursor plane
 */
static const struct lsdc_cursor_plane_ops ls7a1000_cursor_hw_ops[2] = {
	{
		/* pipe 0 drives the (single) hardware cursor directly */
		.update_bo_addr = lsdc_cursor0_update_bo_addr,
		.update_cfg = lsdc_cursor0_update_cfg,
		.update_position = lsdc_cursor0_update_position,
	},
	{
		/* pipe 1 borrows the same cursor via the quirk callbacks */
		.update_bo_addr = lsdc_cursor1_update_bo_addr_quirk,
		.update_cfg = lsdc_cursor1_update_cfg_quirk,
		.update_position = lsdc_cursor1_update_position_quirk,
	},
};
/*
 * Register the primary plane of display pipe @index with DRM and hook up
 * the per-pipe hardware ops.  Returns 0 on success or a negative errno.
 */
int lsdc_primary_plane_init(struct drm_device *ddev,
			    struct drm_plane *plane,
			    unsigned int index)
{
	struct lsdc_primary *primary = to_lsdc_primary(plane);
	int ret;

	/* 1 << index: the plane is only usable on its own CRTC */
	ret = drm_universal_plane_init(ddev, plane, 1 << index,
				       &lsdc_plane_funcs,
				       lsdc_primary_formats,
				       ARRAY_SIZE(lsdc_primary_formats),
				       lsdc_fb_format_modifiers,
				       DRM_PLANE_TYPE_PRIMARY,
				       "ls-primary-plane-%u", index);
	if (ret)
		return ret;

	drm_plane_helper_add(plane, &lsdc_primary_helper_funcs);

	primary->ldev = to_lsdc(ddev);
	primary->ops = &lsdc_primary_plane_hw_ops[index];

	return 0;
}
/*
 * Register the cursor plane of display pipe @index for LS7A1000-class
 * hardware (single shared hardware cursor; see ls7a1000_cursor_hw_ops).
 * Returns 0 on success or a negative errno.
 */
int ls7a1000_cursor_plane_init(struct drm_device *ddev,
			       struct drm_plane *plane,
			       unsigned int index)
{
	struct lsdc_cursor *cursor = to_lsdc_cursor(plane);
	int ret;

	ret = drm_universal_plane_init(ddev, plane, 1 << index,
				       &lsdc_plane_funcs,
				       lsdc_cursor_formats,
				       ARRAY_SIZE(lsdc_cursor_formats),
				       lsdc_fb_format_modifiers,
				       DRM_PLANE_TYPE_CURSOR,
				       "ls-cursor-plane-%u", index);
	if (ret)
		return ret;

	cursor->ldev = to_lsdc(ddev);
	cursor->ops = &ls7a1000_cursor_hw_ops[index];

	drm_plane_helper_add(plane, &ls7a1000_cursor_plane_helper_funcs);

	return 0;
}
/*
 * Register the cursor plane of display pipe @index for LS7A2000-class
 * hardware (one independent hardware cursor per pipe).
 * Returns 0 on success or a negative errno.
 */
int ls7a2000_cursor_plane_init(struct drm_device *ddev,
			       struct drm_plane *plane,
			       unsigned int index)
{
	struct lsdc_cursor *cursor = to_lsdc_cursor(plane);
	int ret;

	ret = drm_universal_plane_init(ddev, plane, 1 << index,
				       &lsdc_plane_funcs,
				       lsdc_cursor_formats,
				       ARRAY_SIZE(lsdc_cursor_formats),
				       lsdc_fb_format_modifiers,
				       DRM_PLANE_TYPE_CURSOR,
				       "ls-cursor-plane-%u", index);
	if (ret)
		return ret;

	cursor->ldev = to_lsdc(ddev);
	cursor->ops = &ls7a2000_cursor_hw_ops[index];

	drm_plane_helper_add(plane, &ls7a2000_cursor_plane_helper_funcs);

	return 0;
}
| linux-master | drivers/gpu/drm/loongson/lsdc_plane.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2023 Loongson Technology Corporation Limited
*/
#include "lsdc_drv.h"
#include "lsdc_probe.h"
/*
* Processor ID (implementation) values for bits 15:8 of the PRID register.
*/
#define LOONGSON_CPU_IMP_MASK 0xff00
#define LOONGSON_CPU_IMP_SHIFT 8
#define LOONGARCH_CPU_IMP_LS2K1000 0xa0
#define LOONGARCH_CPU_IMP_LS2K2000 0xb0
#define LOONGARCH_CPU_IMP_LS3A5000 0xc0
#define LOONGSON_CPU_MIPS_IMP_LS2K 0x61 /* Loongson 2K Mips series SoC */
/*
* Particular Revision values for bits 7:0 of the PRID register.
*/
#define LOONGSON_CPU_REV_MASK 0x00ff
#define LOONGARCH_CPUCFG_PRID_REG 0x0
/*
* We can achieve fine-grained control with the information about the host.
*/
/*
 * Read the processor ID (PRID) of the host CPU.
 *
 * On LoongArch the PRID comes from CPUCFG word 0; on MIPS it comes from
 * CP0 register 15 (PRId).  On any other architecture the PRID reads as 0.
 *
 * @imp: optional out parameter, receives the implementation field (bits 15:8)
 * @rev: optional out parameter, receives the revision field (bits 7:0)
 *
 * Returns the raw PRID value.
 */
unsigned int loongson_cpu_get_prid(u8 *imp, u8 *rev)
{
	unsigned int prid = 0;

#if defined(__loongarch__)
	__asm__ volatile("cpucfg %0, %1\n\t"
			: "=&r"(prid)
			: "r"(LOONGARCH_CPUCFG_PRID_REG)
			);
#endif

#if defined(__mips__)
	__asm__ volatile("mfc0\t%0, $15\n\t"
			: "=r" (prid)
			);
#endif

	if (imp)
		*imp = (prid & LOONGSON_CPU_IMP_MASK) >> LOONGSON_CPU_IMP_SHIFT;

	if (rev)
		*rev = prid & LOONGSON_CPU_REV_MASK;

	return prid;
}
| linux-master | drivers/gpu/drm/loongson/lsdc_probe.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2023 Loongson Technology Corporation Limited
*/
#include <drm/drm_vblank.h>
#include "lsdc_irq.h"
/*
 * For the DC in LS7A2000, the interrupt status is cleared by writing "1"
 * to LSDC_INT_REG.
 *
 * For the DC in LS7A1000, the interrupt status is cleared by writing "0"
 * to LSDC_INT_REG.
 *
 * Two different hardware engineers modified it as they saw fit.
 */
/*
 * Interrupt handler for the LS7A2000 DC: acknowledge by writing the status
 * bits back as "1", then forward vblank events to the DRM core.
 */
irqreturn_t ls7a2000_dc_irq_handler(int irq, void *arg)
{
	struct drm_device *ddev = arg;
	struct lsdc_device *ldev = to_lsdc(ddev);
	u32 val;

	/* Read the interrupt status */
	val = lsdc_rreg32(ldev, LSDC_INT_REG);
	if ((val & INT_STATUS_MASK) == 0) {
		/* shared line or spurious trigger: not ours */
		drm_warn(ddev, "no interrupt occurs\n");
		return IRQ_NONE;
	}

	ldev->irq_status = val;

	/* write "1" to clear the interrupt status */
	lsdc_wreg32(ldev, LSDC_INT_REG, val);

	if (ldev->irq_status & INT_CRTC0_VSYNC)
		drm_handle_vblank(ddev, 0);

	if (ldev->irq_status & INT_CRTC1_VSYNC)
		drm_handle_vblank(ddev, 1);

	return IRQ_HANDLED;
}
/* For the DC in LS7A1000 and LS2K1000 */
/*
 * Same flow as the LS7A2000 handler, but this generation acknowledges an
 * interrupt by writing "0" to the corresponding status bits instead of "1".
 */
irqreturn_t ls7a1000_dc_irq_handler(int irq, void *arg)
{
	struct drm_device *ddev = arg;
	struct lsdc_device *ldev = to_lsdc(ddev);
	u32 val;

	/* Read the interrupt status */
	val = lsdc_rreg32(ldev, LSDC_INT_REG);
	if ((val & INT_STATUS_MASK) == 0) {
		drm_warn(ddev, "no interrupt occurs\n");
		return IRQ_NONE;
	}

	ldev->irq_status = val;

	/* write "0" to clear the interrupt status */
	val &= ~(INT_CRTC0_VSYNC | INT_CRTC1_VSYNC);
	lsdc_wreg32(ldev, LSDC_INT_REG, val);

	if (ldev->irq_status & INT_CRTC0_VSYNC)
		drm_handle_vblank(ddev, 0);

	if (ldev->irq_status & INT_CRTC1_VSYNC)
		drm_handle_vblank(ddev, 1);

	return IRQ_HANDLED;
}
| linux-master | drivers/gpu/drm/loongson/lsdc_irq.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2023 Loongson Technology Corporation Limited
*/
#include <linux/delay.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_edid.h>
#include <drm/drm_probe_helper.h>
#include "lsdc_drv.h"
#include "lsdc_output.h"
/*
* The display controller in LS7A2000 has two display pipes
* Display pipe 0 is attached with a built-in transparent VGA encoder and
* a built-in HDMI encoder.
* Display pipe 1 has only one built-in HDMI encoder connected.
* ______________________ _____________
* | +-----+ | | |
* | CRTC0 -+--> | VGA | ----> VGA Connector ---> | VGA Monitor |<---+
* | | +-----+ | |_____________| |
* | | | ______________ |
* | | +------+ | | | |
* | +--> | HDMI | ----> HDMI Connector --> | HDMI Monitor |<--+
* | +------+ | |______________| |
* | +------+ | |
* | | i2c6 | <-------------------------------------------+
* | +------+ |
* | |
* | DC in LS7A2000 |
* | |
* | +------+ |
* | | i2c7 | <--------------------------------+
* | +------+ | |
* | | ______|_______
* | +------+ | | |
* | CRTC1 ---> | HDMI | ----> HDMI Connector ---> | HDMI Monitor |
* | +------+ | |______________|
* |______________________|
*/
/*
 * .get_modes hook: probe EDID over DDC when an i2c adapter is available,
 * otherwise fall back to standard modes up to 1920x1200, preferring 1024x768.
 * Returns the number of modes added.
 */
static int ls7a2000_connector_get_modes(struct drm_connector *connector)
{
	unsigned int num = 0;
	struct edid *edid;

	if (connector->ddc) {
		edid = drm_get_edid(connector, connector->ddc);
		if (edid) {
			drm_connector_update_edid_property(connector, edid);
			num = drm_add_edid_modes(connector, edid);
			kfree(edid);
		}

		return num;
	}

	num = drm_add_modes_noedid(connector, 1920, 1200);

	drm_set_preferred_mode(connector, 1024, 768);

	return num;
}
/*
 * .atomic_best_encoder hook: each lsdc output has exactly one built-in
 * encoder, so simply hand back that one.
 */
static struct drm_encoder *
ls7a2000_connector_get_best_encoder(struct drm_connector *connector,
				    struct drm_atomic_state *state)
{
	struct lsdc_output *out = connector_to_lsdc_output(connector);

	return &out->encoder;
}
/* Connector helper vtable shared by both display pipes */
static const struct drm_connector_helper_funcs ls7a2000_connector_helpers = {
	.atomic_best_encoder = ls7a2000_connector_get_best_encoder,
	.get_modes = ls7a2000_connector_get_modes,
};
/* debugfs */

/* Build a {name, offset} entry for HDMI encoder register i */
#define LSDC_HDMI_REG(i, reg) { \
	.name = __stringify_1(LSDC_HDMI##i##_##reg##_REG), \
	.offset = LSDC_HDMI##i##_##reg##_REG, \
}

/* Registers of the HDMI encoder on display pipe 0, dumped via debugfs */
static const struct lsdc_reg32 ls7a2000_hdmi0_encoder_regs[] = {
	LSDC_HDMI_REG(0, ZONE),
	LSDC_HDMI_REG(0, INTF_CTRL),
	LSDC_HDMI_REG(0, PHY_CTRL),
	LSDC_HDMI_REG(0, PHY_PLL),
	LSDC_HDMI_REG(0, AVI_INFO_CRTL),
	LSDC_HDMI_REG(0, PHY_CAL),
	LSDC_HDMI_REG(0, AUDIO_PLL_LO),
	LSDC_HDMI_REG(0, AUDIO_PLL_HI),
	{NULL, 0},  /* MUST be {NULL, 0} terminated */
};

/* Registers of the HDMI encoder on display pipe 1, dumped via debugfs */
static const struct lsdc_reg32 ls7a2000_hdmi1_encoder_regs[] = {
	LSDC_HDMI_REG(1, ZONE),
	LSDC_HDMI_REG(1, INTF_CTRL),
	LSDC_HDMI_REG(1, PHY_CTRL),
	LSDC_HDMI_REG(1, PHY_PLL),
	LSDC_HDMI_REG(1, AVI_INFO_CRTL),
	LSDC_HDMI_REG(1, PHY_CAL),
	LSDC_HDMI_REG(1, AUDIO_PLL_LO),
	LSDC_HDMI_REG(1, AUDIO_PLL_HI),
	{NULL, 0},  /* MUST be {NULL, 0} terminated */
};
/*
 * debugfs show callback: walk the {NULL, 0}-terminated register table passed
 * via info_ent->data and print each register's name, offset and live value.
 */
static int ls7a2000_hdmi_encoder_regs_show(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *ddev = node->minor->dev;
	struct lsdc_device *ldev = to_lsdc(ddev);
	const struct lsdc_reg32 *preg;

	preg = (const struct lsdc_reg32 *)node->info_ent->data;

	while (preg->name) {
		u32 offset = preg->offset;

		seq_printf(m, "%s (0x%04x): 0x%08x\n",
			   preg->name, offset, lsdc_rreg32(ldev, offset));
		++preg;
	}

	return 0;
}
/* debugfs file descriptors: a single "regs" dump per HDMI encoder */
static const struct drm_info_list ls7a2000_hdmi0_debugfs_files[] = {
	{ "regs", ls7a2000_hdmi_encoder_regs_show, 0, (void *)ls7a2000_hdmi0_encoder_regs },
};

static const struct drm_info_list ls7a2000_hdmi1_debugfs_files[] = {
	{ "regs", ls7a2000_hdmi_encoder_regs_show, 0, (void *)ls7a2000_hdmi1_encoder_regs },
};
/* .debugfs_init hook for the pipe-0 connector: expose the HDMI0 register dump */
static void ls7a2000_hdmi0_late_register(struct drm_connector *connector,
					 struct dentry *root)
{
	struct drm_device *ddev = connector->dev;
	struct drm_minor *minor = ddev->primary;

	drm_debugfs_create_files(ls7a2000_hdmi0_debugfs_files,
				 ARRAY_SIZE(ls7a2000_hdmi0_debugfs_files),
				 root, minor);
}

/* .debugfs_init hook for the pipe-1 connector: expose the HDMI1 register dump */
static void ls7a2000_hdmi1_late_register(struct drm_connector *connector,
					 struct dentry *root)
{
	struct drm_device *ddev = connector->dev;
	struct drm_minor *minor = ddev->primary;

	drm_debugfs_create_files(ls7a2000_hdmi1_debugfs_files,
				 ARRAY_SIZE(ls7a2000_hdmi1_debugfs_files),
				 root, minor);
}
/* monitor present detection */

/*
 * Detect hook for the pipe-0 connector (HDMI and/or VGA): first consult the
 * HDMI hot-plug status register; if the HDMI HPD flag is clear, fall back to
 * probing the DDC bus (covers the VGA monitor case, which has no HPD line).
 */
static enum drm_connector_status
ls7a2000_hdmi0_vga_connector_detect(struct drm_connector *connector, bool force)
{
	struct drm_device *ddev = connector->dev;
	struct lsdc_device *ldev = to_lsdc(ddev);
	u32 val;

	val = lsdc_rreg32(ldev, LSDC_HDMI_HPD_STATUS_REG);

	if (val & HDMI0_HPD_FLAG)
		return connector_status_connected;

	if (connector->ddc) {
		if (drm_probe_ddc(connector->ddc))
			return connector_status_connected;

		return connector_status_disconnected;
	}

	/* no HPD and no DDC bus: we cannot tell */
	return connector_status_unknown;
}

/* Detect hook for the pipe-1 connector: HDMI hot-plug status bit only */
static enum drm_connector_status
ls7a2000_hdmi1_connector_detect(struct drm_connector *connector, bool force)
{
	struct lsdc_device *ldev = to_lsdc(connector->dev);
	u32 val;

	val = lsdc_rreg32(ldev, LSDC_HDMI_HPD_STATUS_REG);

	if (val & HDMI1_HPD_FLAG)
		return connector_status_connected;

	return connector_status_disconnected;
}
/* Per-pipe connector vtables; they differ only in detect and debugfs hooks */
static const struct drm_connector_funcs ls7a2000_hdmi_connector_funcs[2] = {
	{
		.detect = ls7a2000_hdmi0_vga_connector_detect,
		.fill_modes = drm_helper_probe_single_connector_modes,
		.destroy = drm_connector_cleanup,
		.reset = drm_atomic_helper_connector_reset,
		.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
		.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
		.debugfs_init = ls7a2000_hdmi0_late_register,
	},
	{
		.detect = ls7a2000_hdmi1_connector_detect,
		.fill_modes = drm_helper_probe_single_connector_modes,
		.destroy = drm_connector_cleanup,
		.reset = drm_atomic_helper_connector_reset,
		.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
		.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
		.debugfs_init = ls7a2000_hdmi1_late_register,
	},
};
/* Even though some boards have only one HDMI port, on display pipe 1,
 * we still need to hook lsdc_encoder_funcs up on display pipe 0.
 * This is because we need its reset() callback to be called, in order
 * to set the LSDC_HDMIx_CTRL_REG to use software (GPIO-emulated) i2c.
 * Otherwise, the firmware may set LSDC_HDMIx_CTRL_REG blindly.
 */
/* Encoder .reset for pipe 0: configure the DVO pins, select software i2c
 * and release the HDMI phy from reset.
 */
static void ls7a2000_hdmi0_encoder_reset(struct drm_encoder *encoder)
{
	struct drm_device *ddev = encoder->dev;
	struct lsdc_device *ldev = to_lsdc(ddev);
	u32 val;

	val = PHY_CLOCK_POL | PHY_CLOCK_EN | PHY_DATA_EN;
	lsdc_wreg32(ldev, LSDC_CRTC0_DVO_CONF_REG, val);

	/* using software gpio emulated i2c */
	val = lsdc_rreg32(ldev, LSDC_HDMI0_INTF_CTRL_REG);
	val &= ~HW_I2C_EN;
	lsdc_wreg32(ldev, LSDC_HDMI0_INTF_CTRL_REG, val);

	/* help the hdmi phy to get out of reset state */
	lsdc_wreg32(ldev, LSDC_HDMI0_PHY_CTRL_REG, HDMI_PHY_RESET_N);

	/* give the phy time to settle after deasserting reset */
	mdelay(20);

	drm_dbg(ddev, "HDMI-0 Reset\n");
}

/* Encoder .reset for pipe 1: same sequence as pipe 0 on the HDMI1 registers */
static void ls7a2000_hdmi1_encoder_reset(struct drm_encoder *encoder)
{
	struct drm_device *ddev = encoder->dev;
	struct lsdc_device *ldev = to_lsdc(ddev);
	u32 val;

	val = PHY_CLOCK_POL | PHY_CLOCK_EN | PHY_DATA_EN;
	lsdc_wreg32(ldev, LSDC_CRTC1_DVO_CONF_REG, val);

	/* using software gpio emulated i2c */
	val = lsdc_rreg32(ldev, LSDC_HDMI1_INTF_CTRL_REG);
	val &= ~HW_I2C_EN;
	lsdc_wreg32(ldev, LSDC_HDMI1_INTF_CTRL_REG, val);

	/* help the hdmi phy to get out of reset state */
	lsdc_wreg32(ldev, LSDC_HDMI1_PHY_CTRL_REG, HDMI_PHY_RESET_N);

	mdelay(20);

	drm_dbg(ddev, "HDMI-1 Reset\n");
}
/* Per-pipe encoder vtables; only the reset callback differs */
static const struct drm_encoder_funcs ls7a2000_encoder_funcs[2] = {
	{
		.reset = ls7a2000_hdmi0_encoder_reset,
		.destroy = drm_encoder_cleanup,
	},
	{
		.reset = ls7a2000_hdmi1_encoder_reset,
		.destroy = drm_encoder_cleanup,
	},
};
/*
 * Build an AVI infoframe for @mode, pack it, and write the packed bytes
 * into the per-pipe AVI content registers, then trigger the hardware to
 * transmit the updated packet.  Returns 0 on success, negative errno on
 * packing failure.
 */
static int ls7a2000_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
					   struct drm_display_mode *mode)
{
	struct lsdc_output *output = encoder_to_lsdc_output(encoder);
	struct lsdc_display_pipe *dispipe = output_to_display_pipe(output);
	unsigned int index = dispipe->index;
	struct drm_device *ddev = encoder->dev;
	struct lsdc_device *ldev = to_lsdc(ddev);
	struct hdmi_avi_infoframe infoframe;
	u8 buffer[HDMI_INFOFRAME_SIZE(AVI)];
	/* skip the infoframe header: the registers want payload bytes only */
	unsigned char *ptr = &buffer[HDMI_INFOFRAME_HEADER_SIZE];
	unsigned int content0, content1, content2, content3;
	int err;

	err = drm_hdmi_avi_infoframe_from_display_mode(&infoframe,
						       &output->connector,
						       mode);
	if (err < 0) {
		drm_err(ddev, "failed to setup AVI infoframe: %d\n", err);
		return err;
	}

	/* Fixed infoframe configuration not linked to the mode */
	infoframe.colorspace = HDMI_COLORSPACE_RGB;
	infoframe.quantization_range = HDMI_QUANTIZATION_RANGE_DEFAULT;
	infoframe.colorimetry = HDMI_COLORIMETRY_NONE;

	err = hdmi_avi_infoframe_pack(&infoframe, buffer, sizeof(buffer));
	if (err < 0) {
		drm_err(ddev, "failed to pack AVI infoframe: %d\n", err);
		return err;
	}

	/*
	 * NOTE(review): the packed bytes are split 4/1/4/4 across the four
	 * content registers via unaligned byte-pointer casts; content1 loads
	 * only a single byte — presumably matching the register layout, but
	 * worth confirming against the DC documentation.
	 */
	content0 = *(unsigned int *)ptr;
	content1 = *(ptr + 4);
	content2 = *(unsigned int *)(ptr + 5);
	content3 = *(unsigned int *)(ptr + 9);

	lsdc_pipe_wreg32(ldev, LSDC_HDMI0_AVI_CONTENT0, index, content0);
	lsdc_pipe_wreg32(ldev, LSDC_HDMI0_AVI_CONTENT1, index, content1);
	lsdc_pipe_wreg32(ldev, LSDC_HDMI0_AVI_CONTENT2, index, content2);
	lsdc_pipe_wreg32(ldev, LSDC_HDMI0_AVI_CONTENT3, index, content3);

	/* kick the hardware to send the refreshed packet */
	lsdc_pipe_wreg32(ldev, LSDC_HDMI0_AVI_INFO_CRTL_REG, index,
			 AVI_PKT_ENABLE | AVI_PKT_UPDATE);

	drm_dbg(ddev, "Update HDMI-%u avi infoframe\n", index);

	return 0;
}
/* Encoder .atomic_disable: power down the HDMI phy and the HDMI interface */
static void ls7a2000_hdmi_atomic_disable(struct drm_encoder *encoder,
					 struct drm_atomic_state *state)
{
	struct lsdc_output *output = encoder_to_lsdc_output(encoder);
	struct lsdc_display_pipe *dispipe = output_to_display_pipe(output);
	unsigned int index = dispipe->index;
	struct drm_device *ddev = encoder->dev;
	struct lsdc_device *ldev = to_lsdc(ddev);
	u32 val;

	/* Disable the hdmi phy */
	val = lsdc_pipe_rreg32(ldev, LSDC_HDMI0_PHY_CTRL_REG, index);
	val &= ~HDMI_PHY_EN;
	lsdc_pipe_wreg32(ldev, LSDC_HDMI0_PHY_CTRL_REG, index, val);

	/* Disable the hdmi interface */
	val = lsdc_pipe_rreg32(ldev, LSDC_HDMI0_INTF_CTRL_REG, index);
	val &= ~HDMI_INTERFACE_EN;
	lsdc_pipe_wreg32(ldev, LSDC_HDMI0_INTF_CTRL_REG, index, val);

	drm_dbg(ddev, "HDMI-%u disabled\n", index);
}
/*
 * Encoder .atomic_enable: configure the idle zone, power up the HDMI phy
 * (with termination detection) and enable the HDMI interface with audio
 * and packet transmission.
 */
static void ls7a2000_hdmi_atomic_enable(struct drm_encoder *encoder,
					struct drm_atomic_state *state)
{
	struct drm_device *ddev = encoder->dev;
	struct lsdc_device *ldev = to_lsdc(ddev);
	struct lsdc_output *output = encoder_to_lsdc_output(encoder);
	struct lsdc_display_pipe *dispipe = output_to_display_pipe(output);
	unsigned int index = dispipe->index;
	u32 val;

	/* datasheet say it should larger than 48 */
	val = 64 << HDMI_H_ZONE_IDLE_SHIFT | 64 << HDMI_V_ZONE_IDLE_SHIFT;
	lsdc_pipe_wreg32(ldev, LSDC_HDMI0_ZONE_REG, index, val);

	val = HDMI_PHY_TERM_STATUS |
	      HDMI_PHY_TERM_DET_EN |
	      HDMI_PHY_TERM_H_EN |
	      HDMI_PHY_TERM_L_EN |
	      HDMI_PHY_RESET_N |
	      HDMI_PHY_EN;
	lsdc_pipe_wreg32(ldev, LSDC_HDMI0_PHY_CTRL_REG, index, val);

	/* brief settle time after enabling the phy */
	udelay(2);

	val = HDMI_CTL_PERIOD_MODE |
	      HDMI_AUDIO_EN |
	      HDMI_PACKET_EN |
	      HDMI_INTERFACE_EN |
	      (8 << HDMI_VIDEO_PREAMBLE_SHIFT);
	lsdc_pipe_wreg32(ldev, LSDC_HDMI0_INTF_CTRL_REG, index, val);

	drm_dbg(ddev, "HDMI-%u enabled\n", index);
}
/*
* Fout = M * Fin
*
* M = (4 * LF) / (IDF * ODF)
*
* IDF: Input Division Factor
* ODF: Output Division Factor
* LF: Loop Factor
* M: Required Mult
*
* +--------------------------------------------------------+
* | Fin (kHZ) | M | IDF | LF | ODF | Fout(Mhz) |
* |-------------------+----+-----+----+-----+--------------|
* | 170000 ~ 340000 | 10 | 16 | 40 | 1 | 1700 ~ 3400 |
* | 85000 ~ 170000 | 10 | 8 | 40 | 2 | 850 ~ 1700 |
* | 42500 ~ 85000 | 10 | 4 | 40 | 4 | 425 ~ 850 |
* | 21250 ~ 42500 | 10 | 2 | 40 | 8 | 212.5 ~ 425 |
* | 20000 ~ 21250 | 10 | 1 | 40 | 16 | 200 ~ 212.5 |
* +--------------------------------------------------------+
*/
/*
 * Program the HDMI phy PLL for the pixel clock @fin (in kHz) of pipe @index,
 * choosing IDF/LF/ODF per the frequency table above, then busy-poll for lock.
 */
static void ls7a2000_hdmi_phy_pll_config(struct lsdc_device *ldev,
					 int fin,
					 unsigned int index)
{
	struct drm_device *ddev = &ldev->base;
	int count = 0;
	u32 val;

	/* Firstly, disable phy pll */
	lsdc_pipe_wreg32(ldev, LSDC_HDMI0_PHY_PLL_REG, index, 0x0);

	/*
	 * Most of time, loongson HDMI require M = 10
	 * for example, 10 = (4 * 40) / (8 * 2)
	 * here, write "1" to the ODF will get "2"
	 */
	if (fin >= 170000)
		val = (16 << HDMI_PLL_IDF_SHIFT) |
		      (40 << HDMI_PLL_LF_SHIFT) |
		      (0 << HDMI_PLL_ODF_SHIFT);
	else if (fin >= 85000)
		val = (8 << HDMI_PLL_IDF_SHIFT) |
		      (40 << HDMI_PLL_LF_SHIFT) |
		      (1 << HDMI_PLL_ODF_SHIFT);
	else if (fin >= 42500)
		val = (4 << HDMI_PLL_IDF_SHIFT) |
		      (40 << HDMI_PLL_LF_SHIFT) |
		      (2 << HDMI_PLL_ODF_SHIFT);
	else if (fin >= 21250)
		val = (2 << HDMI_PLL_IDF_SHIFT) |
		      (40 << HDMI_PLL_LF_SHIFT) |
		      (3 << HDMI_PLL_ODF_SHIFT);
	else
		val = (1 << HDMI_PLL_IDF_SHIFT) |
		      (40 << HDMI_PLL_LF_SHIFT) |
		      (4 << HDMI_PLL_ODF_SHIFT);

	/* write the dividers first, then set the enable bit separately */
	lsdc_pipe_wreg32(ldev, LSDC_HDMI0_PHY_PLL_REG, index, val);

	val |= HDMI_PLL_ENABLE;

	lsdc_pipe_wreg32(ldev, LSDC_HDMI0_PHY_PLL_REG, index, val);

	udelay(2);

	drm_dbg(ddev, "Fin of HDMI-%u: %d kHz\n", index, fin);

	/* Wait hdmi phy pll lock */
	do {
		val = lsdc_pipe_rreg32(ldev, LSDC_HDMI0_PHY_PLL_REG, index);

		if (val & HDMI_PLL_LOCKED) {
			drm_dbg(ddev, "Setting HDMI-%u PLL take %d cycles\n",
				index, count);
			break;
		}
		++count;
	} while (count < 1000);

	lsdc_pipe_wreg32(ldev, LSDC_HDMI0_PHY_CAL_REG, index, 0x0f000ff0);

	if (count >= 1000)
		drm_err(ddev, "Setting HDMI-%u PLL failed\n", index);
}
/* Encoder .atomic_mode_set: program the phy PLL from the pixel clock and
 * refresh the AVI infoframe for the new mode.
 */
static void ls7a2000_hdmi_atomic_mode_set(struct drm_encoder *encoder,
					  struct drm_crtc_state *crtc_state,
					  struct drm_connector_state *conn_state)
{
	struct lsdc_output *output = encoder_to_lsdc_output(encoder);
	struct lsdc_display_pipe *dispipe = output_to_display_pipe(output);
	unsigned int index = dispipe->index;
	struct drm_device *ddev = encoder->dev;
	struct lsdc_device *ldev = to_lsdc(ddev);
	struct drm_display_mode *mode = &crtc_state->mode;

	ls7a2000_hdmi_phy_pll_config(ldev, mode->clock, index);

	ls7a2000_hdmi_set_avi_infoframe(encoder, mode);

	drm_dbg(ddev, "%s modeset finished\n", encoder->name);
}
/* Encoder helper vtable shared by both HDMI encoders */
static const struct drm_encoder_helper_funcs ls7a2000_encoder_helper_funcs = {
	.atomic_disable = ls7a2000_hdmi_atomic_disable,
	.atomic_enable = ls7a2000_hdmi_atomic_enable,
	.atomic_mode_set = ls7a2000_hdmi_atomic_mode_set,
};
/*
* For LS7A2000:
*
* 1) Most of board export one vga + hdmi output interface.
* 2) Yet, Some boards export double hdmi output interface.
* 3) Still have boards export three output(2 hdmi + 1 vga).
*
* So let's hook hdmi helper funcs to all display pipe, don't miss.
* writing hdmi register do no harms.
*/
/*
 * Create the encoder + connector pair for display pipe @pipe, wiring up the
 * per-pipe vtables and the optional DDC adapter.  Returns 0 on success or a
 * negative errno from the DRM init helpers.
 */
int ls7a2000_output_init(struct drm_device *ddev,
			 struct lsdc_display_pipe *dispipe,
			 struct i2c_adapter *ddc,
			 unsigned int pipe)
{
	struct lsdc_output *output = &dispipe->output;
	struct drm_encoder *encoder = &output->encoder;
	struct drm_connector *connector = &output->connector;
	int ret;

	ret = drm_encoder_init(ddev, encoder, &ls7a2000_encoder_funcs[pipe],
			       DRM_MODE_ENCODER_TMDS, "encoder-%u", pipe);
	if (ret)
		return ret;

	/* the encoder is hard-wired to its own CRTC */
	encoder->possible_crtcs = BIT(pipe);

	drm_encoder_helper_add(encoder, &ls7a2000_encoder_helper_funcs);

	ret = drm_connector_init_with_ddc(ddev, connector,
					  &ls7a2000_hdmi_connector_funcs[pipe],
					  DRM_MODE_CONNECTOR_HDMIA, ddc);
	if (ret)
		return ret;

	drm_info(ddev, "display pipe-%u has HDMI %s\n", pipe, pipe ? "" : "and/or VGA");

	drm_connector_helper_add(connector, &ls7a2000_connector_helpers);

	drm_connector_attach_encoder(connector, encoder);

	/* poll for connect and disconnect, as not every output has HPD */
	connector->polled = DRM_CONNECTOR_POLL_CONNECT |
			    DRM_CONNECTOR_POLL_DISCONNECT;

	connector->interlace_allowed = 0;
	connector->doublescan_allowed = 0;

	return 0;
}
| linux-master | drivers/gpu/drm/loongson/lsdc_output_7a2000.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2023 Loongson Technology Corporation Limited
*/
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_managed.h>
#include <drm/drm_prime.h>
#include "lsdc_drv.h"
#include "lsdc_ttm.h"
/* Map a TTM memory type to a human-readable name for logging */
const char *lsdc_mem_type_to_str(uint32_t mem_type)
{
	if (mem_type == TTM_PL_VRAM)
		return "VRAM";

	if (mem_type == TTM_PL_TT)
		return "GTT";

	if (mem_type == TTM_PL_SYSTEM)
		return "SYSTEM";

	return "Unknown";
}
/* Map a lsdc GEM domain flag to a human-readable name for logging */
const char *lsdc_domain_to_str(u32 domain)
{
	if (domain == LSDC_GEM_DOMAIN_VRAM)
		return "VRAM";

	if (domain == LSDC_GEM_DOMAIN_GTT)
		return "GTT";

	if (domain == LSDC_GEM_DOMAIN_SYSTEM)
		return "SYSTEM";

	return "Unknown";
}
/*
 * Translate the GEM @domain mask into a TTM placement list on @lbo.
 * Falls back to SYSTEM when no domain bit is set.
 */
static void lsdc_bo_set_placement(struct lsdc_bo *lbo, u32 domain)
{
	u32 c = 0;
	u32 pflags = 0;
	u32 i;

	/* small BOs go top-down to reduce fragmentation of the VRAM heap */
	if (lbo->tbo.base.size <= PAGE_SIZE)
		pflags |= TTM_PL_FLAG_TOPDOWN;

	lbo->placement.placement = lbo->placements;
	lbo->placement.busy_placement = lbo->placements;

	if (domain & LSDC_GEM_DOMAIN_VRAM) {
		lbo->placements[c].mem_type = TTM_PL_VRAM;
		lbo->placements[c++].flags = pflags;
	}

	if (domain & LSDC_GEM_DOMAIN_GTT) {
		lbo->placements[c].mem_type = TTM_PL_TT;
		lbo->placements[c++].flags = pflags;
	}

	if (domain & LSDC_GEM_DOMAIN_SYSTEM) {
		lbo->placements[c].mem_type = TTM_PL_SYSTEM;
		lbo->placements[c++].flags = 0;
	}

	if (!c) {
		/* no domain requested: default to SYSTEM */
		lbo->placements[c].mem_type = TTM_PL_SYSTEM;
		lbo->placements[c++].flags = 0;
	}

	lbo->placement.num_placement = c;
	lbo->placement.num_busy_placement = c;

	for (i = 0; i < c; ++i) {
		/* no pfn range restriction */
		lbo->placements[i].fpfn = 0;
		lbo->placements[i].lpfn = 0;
	}
}
/* TTM backend: free a ttm_tt previously allocated by lsdc_ttm_tt_create() */
static void lsdc_ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *tt)
{
	ttm_tt_fini(tt);
	kfree(tt);
}

/* TTM backend: allocate and initialize a cached, sg-capable ttm_tt for @tbo */
static struct ttm_tt *
lsdc_ttm_tt_create(struct ttm_buffer_object *tbo, uint32_t page_flags)
{
	struct ttm_tt *tt;
	int ret;

	tt = kzalloc(sizeof(*tt), GFP_KERNEL);
	if (!tt)
		return NULL;

	ret = ttm_sg_tt_init(tt, tbo, page_flags, ttm_cached);
	if (ret < 0) {
		kfree(tt);
		return NULL;
	}

	return tt;
}
/*
 * TTM backend: back the ttm_tt with pages.  For imported (external/slave)
 * BOs with an sg table the DMA addresses are taken from the sg list;
 * otherwise pages come from the TTM pool.
 */
static int lsdc_ttm_tt_populate(struct ttm_device *bdev,
				struct ttm_tt *ttm,
				struct ttm_operation_ctx *ctx)
{
	bool slave = !!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL);

	if (slave && ttm->sg) {
		drm_prime_sg_to_dma_addr_array(ttm->sg,
					       ttm->dma_address,
					       ttm->num_pages);

		return 0;
	}

	return ttm_pool_alloc(&bdev->pool, ttm, ctx);
}

/* TTM backend: release pool pages; imported BOs own nothing to free here */
static void lsdc_ttm_tt_unpopulate(struct ttm_device *bdev,
				   struct ttm_tt *ttm)
{
	bool slave = !!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL);

	if (slave)
		return;

	return ttm_pool_free(&bdev->pool, ttm);
}
/*
 * TTM backend: choose where an evicted BO should go.
 * VRAM contents are demoted to GTT; everything else falls back to SYSTEM.
 */
static void lsdc_bo_evict_flags(struct ttm_buffer_object *tbo,
				struct ttm_placement *tplacement)
{
	struct ttm_resource *resource = tbo->resource;
	struct lsdc_bo *lbo = to_lsdc_bo(tbo);

	switch (resource->mem_type) {
	case TTM_PL_VRAM:
		lsdc_bo_set_placement(lbo, LSDC_GEM_DOMAIN_GTT);
		break;
	case TTM_PL_TT:
	default:
		lsdc_bo_set_placement(lbo, LSDC_GEM_DOMAIN_SYSTEM);
		break;
	}

	*tplacement = lbo->placement;
}
/*
 * TTM backend: move a BO between memory types.
 *
 * Pinned BOs must not move.  Moves which do not require copying
 * (fresh resource, SYSTEM<->GTT transitions) are handled with
 * ttm_bo_move_null()/ttm_bo_assign_mem(); everything else falls
 * back to a memcpy-based move.
 */
static int lsdc_bo_move(struct ttm_buffer_object *tbo,
			bool evict,
			struct ttm_operation_ctx *ctx,
			struct ttm_resource *new_mem,
			struct ttm_place *hop)
{
	struct drm_device *ddev = tbo->base.dev;
	struct ttm_resource *old_mem = tbo->resource;
	struct lsdc_bo *lbo = to_lsdc_bo(tbo);
	int ret;

	if (unlikely(tbo->pin_count > 0)) {
		drm_warn(ddev, "Can't move a pinned BO\n");
		return -EINVAL;
	}

	/* wait for outstanding GPU/CPU access before touching the backing store */
	ret = ttm_bo_wait_ctx(tbo, ctx);
	if (ret)
		return ret;

	if (!old_mem) {
		/* freshly created BO: just adopt the new resource */
		drm_dbg(ddev, "bo[%p] move: NULL to %s, size: %zu\n",
			lbo, lsdc_mem_type_to_str(new_mem->mem_type),
			lsdc_bo_size(lbo));
		ttm_bo_move_null(tbo, new_mem);
		return 0;
	}

	if (old_mem->mem_type == TTM_PL_SYSTEM && !tbo->ttm) {
		ttm_bo_move_null(tbo, new_mem);
		drm_dbg(ddev, "bo[%p] move: SYSTEM to NULL, size: %zu\n",
			lbo, lsdc_bo_size(lbo));
		return 0;
	}

	if (old_mem->mem_type == TTM_PL_SYSTEM &&
	    new_mem->mem_type == TTM_PL_TT) {
		drm_dbg(ddev, "bo[%p] move: SYSTEM to GTT, size: %zu\n",
			lbo, lsdc_bo_size(lbo));
		/* pages stay in place; only the bookkeeping changes */
		ttm_bo_move_null(tbo, new_mem);
		return 0;
	}

	if (old_mem->mem_type == TTM_PL_TT &&
	    new_mem->mem_type == TTM_PL_SYSTEM) {
		drm_dbg(ddev, "bo[%p] move: GTT to SYSTEM, size: %zu\n",
			lbo, lsdc_bo_size(lbo));
		ttm_resource_free(tbo, &tbo->resource);
		ttm_bo_assign_mem(tbo, new_mem);
		return 0;
	}

	drm_dbg(ddev, "bo[%p] move: %s to %s, size: %zu\n",
		lbo,
		lsdc_mem_type_to_str(old_mem->mem_type),
		lsdc_mem_type_to_str(new_mem->mem_type),
		lsdc_bo_size(lbo));

	/* e.g. VRAM <-> GTT: the data must actually be copied */
	return ttm_bo_move_memcpy(tbo, ctx, new_mem);
}
/*
 * TTM backend: fill in the io-mapping info for CPU access to a resource.
 * Only VRAM is iomem (write-combined, offset from the VRAM aperture base);
 * SYSTEM and GTT need no setup.
 */
static int lsdc_bo_reserve_io_mem(struct ttm_device *bdev,
				  struct ttm_resource *mem)
{
	struct lsdc_device *ldev = tdev_to_ldev(bdev);

	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
		break;
	case TTM_PL_TT:
		break;
	case TTM_PL_VRAM:
		mem->bus.offset = (mem->start << PAGE_SHIFT) + ldev->vram_base;
		mem->bus.is_iomem = true;
		mem->bus.caching = ttm_write_combined;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
/* TTM device backend vtable for the lsdc driver */
static struct ttm_device_funcs lsdc_bo_driver = {
	.ttm_tt_create = lsdc_ttm_tt_create,
	.ttm_tt_populate = lsdc_ttm_tt_populate,
	.ttm_tt_unpopulate = lsdc_ttm_tt_unpopulate,
	.ttm_tt_destroy = lsdc_ttm_tt_destroy,
	.eviction_valuable = ttm_bo_eviction_valuable,
	.evict_flags = lsdc_bo_evict_flags,
	.move = lsdc_bo_move,
	.io_mem_reserve = lsdc_bo_reserve_io_mem,
};
/*
 * Return the GPU-visible offset of a pinned BO, or 0 when the BO is
 * unpinned or resides in SYSTEM memory (neither has a stable GPU address).
 */
u64 lsdc_bo_gpu_offset(struct lsdc_bo *lbo)
{
	struct ttm_buffer_object *tbo = &lbo->tbo;
	struct drm_device *ddev = tbo->base.dev;
	struct ttm_resource *resource = tbo->resource;

	if (unlikely(!tbo->pin_count)) {
		drm_err(ddev, "unpinned bo, gpu virtual address is invalid\n");
		return 0;
	}

	if (unlikely(resource->mem_type == TTM_PL_SYSTEM))
		return 0;

	return resource->start << PAGE_SHIFT;
}
/* Size in bytes of the BO's backing GEM object */
size_t lsdc_bo_size(struct lsdc_bo *lbo)
{
	return lbo->tbo.base.size;
}
/* Lock the BO (interruptible, no tryreserve); returns 0 or a negative errno */
int lsdc_bo_reserve(struct lsdc_bo *lbo)
{
	struct ttm_buffer_object *tbo = &lbo->tbo;

	return ttm_bo_reserve(tbo, true, false, NULL);
}

/* Unlock a BO previously locked with lsdc_bo_reserve() */
void lsdc_bo_unreserve(struct lsdc_bo *lbo)
{
	struct ttm_buffer_object *tbo = &lbo->tbo;

	ttm_bo_unreserve(tbo);
}
/*
 * Pin @lbo into @domain, optionally returning its GPU address via @gpu_addr.
 * An already-pinned BO only gets its pin count bumped.  Pinning a shared
 * (exported) BO into VRAM is refused, since importers expect it to stay
 * accessible.  The caller must hold the BO reservation.
 */
int lsdc_bo_pin(struct lsdc_bo *lbo, u32 domain, u64 *gpu_addr)
{
	struct ttm_operation_ctx ctx = { false, false };
	struct ttm_buffer_object *tbo = &lbo->tbo;
	struct lsdc_device *ldev = tdev_to_ldev(tbo->bdev);
	int ret;

	if (tbo->pin_count)
		goto bo_pinned;

	if (lbo->sharing_count && domain == LSDC_GEM_DOMAIN_VRAM)
		return -EINVAL;

	if (domain)
		lsdc_bo_set_placement(lbo, domain);

	/* migrate the BO into the requested placement */
	ret = ttm_bo_validate(tbo, &lbo->placement, &ctx);
	if (unlikely(ret)) {
		drm_err(&ldev->base, "%p validate failed: %d\n", lbo, ret);
		return ret;
	}

	/* track pinned memory for reporting purposes */
	if (domain == LSDC_GEM_DOMAIN_VRAM)
		ldev->vram_pinned_size += lsdc_bo_size(lbo);
	else if (domain == LSDC_GEM_DOMAIN_GTT)
		ldev->gtt_pinned_size += lsdc_bo_size(lbo);

bo_pinned:
	ttm_bo_pin(tbo);

	if (gpu_addr)
		*gpu_addr = lsdc_bo_gpu_offset(lbo);

	return 0;
}
/*
 * Drop one pin reference on @lbo; when the last pin goes away, update the
 * pinned-size accounting.  The caller must hold the BO reservation.
 */
void lsdc_bo_unpin(struct lsdc_bo *lbo)
{
	struct ttm_buffer_object *tbo = &lbo->tbo;
	struct lsdc_device *ldev = tdev_to_ldev(tbo->bdev);

	if (unlikely(!tbo->pin_count)) {
		drm_dbg(&ldev->base, "%p unpin is not necessary\n", lbo);
		return;
	}

	ttm_bo_unpin(tbo);

	if (!tbo->pin_count) {
		if (tbo->resource->mem_type == TTM_PL_VRAM)
			ldev->vram_pinned_size -= lsdc_bo_size(lbo);
		else if (tbo->resource->mem_type == TTM_PL_TT)
			ldev->gtt_pinned_size -= lsdc_bo_size(lbo);
	}
}
/* Take an additional reference on the BO */
void lsdc_bo_ref(struct lsdc_bo *lbo)
{
	struct ttm_buffer_object *tbo = &lbo->tbo;

	ttm_bo_get(tbo);
}

/* Drop a reference on the BO; frees it via lsdc_bo_destroy() when last */
void lsdc_bo_unref(struct lsdc_bo *lbo)
{
	struct ttm_buffer_object *tbo = &lbo->tbo;

	ttm_bo_put(tbo);
}
/*
 * Map the whole BO into kernel address space, caching the pointer in
 * lbo->kptr (no-op if already mapped).  Waits for pending kernel fences
 * first.  Returns 0 on success or a negative errno.
 */
int lsdc_bo_kmap(struct lsdc_bo *lbo)
{
	struct ttm_buffer_object *tbo = &lbo->tbo;
	struct drm_gem_object *gem = &tbo->base;
	struct drm_device *ddev = gem->dev;
	long ret;
	int err;

	ret = dma_resv_wait_timeout(gem->resv, DMA_RESV_USAGE_KERNEL, false,
				    MAX_SCHEDULE_TIMEOUT);
	if (ret < 0) {
		drm_warn(ddev, "wait fence timeout\n");
		return ret;
	}

	if (lbo->kptr)
		return 0;

	err = ttm_bo_kmap(tbo, 0, PFN_UP(lsdc_bo_size(lbo)), &lbo->kmap);
	if (err) {
		drm_err(ddev, "kmap %p failed: %d\n", lbo, err);
		return err;
	}

	/* also records whether the mapping is iomem (VRAM) or plain memory */
	lbo->kptr = ttm_kmap_obj_virtual(&lbo->kmap, &lbo->is_iomem);

	return 0;
}

/* Undo lsdc_bo_kmap(); harmless if the BO is not currently mapped */
void lsdc_bo_kunmap(struct lsdc_bo *lbo)
{
	if (!lbo->kptr)
		return;

	lbo->kptr = NULL;
	ttm_bo_kunmap(&lbo->kmap);
}
/*
 * Zero the entire BO through a temporary kernel mapping, using memset_io
 * for iomem (VRAM) backings and plain memset otherwise.
 */
void lsdc_bo_clear(struct lsdc_bo *lbo)
{
	lsdc_bo_kmap(lbo);

	if (lbo->is_iomem)
		memset_io((void __iomem *)lbo->kptr, 0, lbo->size);
	else
		memset(lbo->kptr, 0, lbo->size);

	lsdc_bo_kunmap(lbo);
}
/* Evict every BO out of VRAM (e.g. before suspend); 0 on success/no manager */
int lsdc_bo_evict_vram(struct drm_device *ddev)
{
	struct lsdc_device *ldev = to_lsdc(ddev);
	struct ttm_device *bdev = &ldev->bdev;
	struct ttm_resource_manager *man;

	man = ttm_manager_type(bdev, TTM_PL_VRAM);
	if (unlikely(!man))
		return 0;

	return ttm_resource_manager_evict_all(bdev, man);
}
/*
 * TTM destroy callback, invoked when the last reference on the BO is
 * dropped: unlink the BO from the device-wide GEM list, release the
 * GEM object and free the wrapper structure.
 */
static void lsdc_bo_destroy(struct ttm_buffer_object *tbo)
{
struct lsdc_device *ldev = tdev_to_ldev(tbo->bdev);
struct lsdc_bo *lbo = to_lsdc_bo(tbo);
mutex_lock(&ldev->gem.mutex);
list_del_init(&lbo->list);
mutex_unlock(&ldev->gem.mutex);
drm_gem_object_release(&tbo->base);
kfree(lbo);
}
/*
 * lsdc_bo_create - create a TTM-backed GEM buffer object.
 * @ddev: the drm device
 * @domain: initial placement mask, LSDC_GEM_DOMAIN_* bits
 * @size: requested size in bytes, rounded up to page granularity
 * @kernel: true for a kernel-internal BO (ttm_bo_type_kernel)
 * @sg: optional scatter/gather table for imported buffers
 * @resv: optional shared reservation object
 *
 * Return: the new BO on success, ERR_PTR() on failure.
 */
struct lsdc_bo *lsdc_bo_create(struct drm_device *ddev,
			       u32 domain,
			       size_t size,
			       bool kernel,
			       struct sg_table *sg,
			       struct dma_resv *resv)
{
	struct lsdc_device *ldev = to_lsdc(ddev);
	struct ttm_device *bdev = &ldev->bdev;
	struct ttm_buffer_object *tbo;
	struct lsdc_bo *lbo;
	enum ttm_bo_type bo_type;
	int ret;

	lbo = kzalloc(sizeof(*lbo), GFP_KERNEL);
	if (!lbo)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&lbo->list);

	lbo->initial_domain = domain & (LSDC_GEM_DOMAIN_VRAM |
					LSDC_GEM_DOMAIN_GTT |
					LSDC_GEM_DOMAIN_SYSTEM);

	tbo = &lbo->tbo;

	size = ALIGN(size, PAGE_SIZE);

	ret = drm_gem_object_init(ddev, &tbo->base, size);
	if (ret) {
		kfree(lbo);
		return ERR_PTR(ret);
	}

	tbo->bdev = bdev;

	if (kernel)
		bo_type = ttm_bo_type_kernel;
	else if (sg)
		bo_type = ttm_bo_type_sg;
	else
		bo_type = ttm_bo_type_device;

	lsdc_bo_set_placement(lbo, domain);
	lbo->size = size;

	ret = ttm_bo_init_validate(bdev, tbo, bo_type, &lbo->placement, 0,
				   false, sg, resv, lsdc_bo_destroy);
	if (ret) {
		/*
		 * On failure ttm_bo_init_validate() drops its reference,
		 * which invokes lsdc_bo_destroy() and frees @lbo; freeing
		 * it again here would be a double free.
		 */
		return ERR_PTR(ret);
	}

	return lbo;
}
/*
 * lsdc_bo_create_kernel_pinned - create a kernel BO and pin it in @domain.
 *
 * Convenience helper for driver-internal allocations; the returned BO
 * is already pinned.  Undo with lsdc_bo_free_kernel_pinned().
 *
 * Return: the BO on success, ERR_PTR() on failure.
 */
struct lsdc_bo *lsdc_bo_create_kernel_pinned(struct drm_device *ddev,
u32 domain,
size_t size)
{
struct lsdc_bo *lbo;
int ret;
lbo = lsdc_bo_create(ddev, domain, size, true, NULL, NULL);
if (IS_ERR(lbo))
return ERR_CAST(lbo);
/* Pinning requires the reservation to be held. */
ret = lsdc_bo_reserve(lbo);
if (unlikely(ret)) {
lsdc_bo_unref(lbo);
return ERR_PTR(ret);
}
ret = lsdc_bo_pin(lbo, domain, NULL);
lsdc_bo_unreserve(lbo);
if (unlikely(ret)) {
lsdc_bo_unref(lbo);
return ERR_PTR(ret);
}
return lbo;
}
/*
 * lsdc_bo_free_kernel_pinned - unpin and release a BO created with
 * lsdc_bo_create_kernel_pinned().
 */
void lsdc_bo_free_kernel_pinned(struct lsdc_bo *lbo)
{
	if (unlikely(lsdc_bo_reserve(lbo)))
		return;

	lsdc_bo_unpin(lbo);
	lsdc_bo_unreserve(lbo);
	lsdc_bo_unref(lbo);
}
/* drmm teardown action: release the range managers and the TTM device. */
static void lsdc_ttm_fini(struct drm_device *ddev, void *data)
{
	struct lsdc_device *ldev = data;

	ttm_range_man_fini(&ldev->bdev, TTM_PL_VRAM);
	ttm_range_man_fini(&ldev->bdev, TTM_PL_TT);

	ttm_device_fini(&ldev->bdev);

	drm_dbg(ddev, "ttm finished\n");
}
/*
 * lsdc_ttm_init - bring up the TTM device plus the VRAM and GTT range
 * managers.
 *
 * The GTT aperture is fixed at 512 MiB.  Teardown is registered as a
 * drm-managed action (lsdc_ttm_fini), so no explicit cleanup call is
 * needed by the caller.
 *
 * Return: 0 on success, negative error code on failure.
 */
int lsdc_ttm_init(struct lsdc_device *ldev)
{
struct drm_device *ddev = &ldev->base;
unsigned long num_vram_pages;
unsigned long num_gtt_pages;
int ret;
ret = ttm_device_init(&ldev->bdev, &lsdc_bo_driver, ddev->dev,
ddev->anon_inode->i_mapping,
ddev->vma_offset_manager, false, true);
if (ret)
return ret;
num_vram_pages = ldev->vram_size >> PAGE_SHIFT;
ret = ttm_range_man_init(&ldev->bdev, TTM_PL_VRAM, false, num_vram_pages);
if (unlikely(ret))
return ret;
drm_info(ddev, "VRAM: %lu pages ready\n", num_vram_pages);
/* 512M is far enough for us now */
ldev->gtt_size = 512 << 20;
num_gtt_pages = ldev->gtt_size >> PAGE_SHIFT;
ret = ttm_range_man_init(&ldev->bdev, TTM_PL_TT, true, num_gtt_pages);
if (unlikely(ret))
return ret;
drm_info(ddev, "GTT: %lu pages ready\n", num_gtt_pages);
return drmm_add_action_or_reset(ddev, lsdc_ttm_fini, ldev);
}
/* Expose the VRAM and GTT resource managers under debugfs. */
void lsdc_ttm_debugfs_init(struct lsdc_device *ldev)
{
	struct ttm_device *bdev = &ldev->bdev;
	struct drm_minor *minor = ldev->base.primary;
	struct dentry *root = minor->debugfs_root;

	ttm_resource_manager_create_debugfs(ttm_manager_type(bdev, TTM_PL_VRAM),
					    root, "vram_mm");
	ttm_resource_manager_create_debugfs(ttm_manager_type(bdev, TTM_PL_TT),
					    root, "gtt_mm");
}
| linux-master | drivers/gpu/drm/loongson/lsdc_ttm.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2023 Loongson Technology Corporation Limited
*/
#include <linux/pci.h>
#include <video/nomodeset.h>
#include "loongson_module.h"
/* modeset: -1 = auto (default), 0 = disabled, anything else = enabled. */
static int loongson_modeset = -1;
MODULE_PARM_DESC(modeset, "Disable/Enable modesetting");
module_param_named(modeset, loongson_modeset, int, 0400);
/* Hardware vblank support is on by default; shared with the rest of the driver. */
int loongson_vblank = 1;
MODULE_PARM_DESC(vblank, "Disable/Enable hw vblank support");
module_param_named(vblank, loongson_vblank, int, 0400);
/*
 * Module entry point: refuse to load when modesetting was disabled on
 * the command line or the kernel was told to use firmware drivers only.
 */
static int __init loongson_module_init(void)
{
if (!loongson_modeset || video_firmware_drivers_only())
return -ENODEV;
return pci_register_driver(&lsdc_pci_driver);
}
module_init(loongson_module_init);
/* Module exit point: unregister the PCI driver registered at init. */
static void __exit loongson_module_exit(void)
{
pci_unregister_driver(&lsdc_pci_driver);
}
module_exit(loongson_module_exit);
| linux-master | drivers/gpu/drm/loongson/loongson_module.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2023 Loongson Technology Corporation Limited
*/
#include <linux/pci.h>
#include "lsdc_drv.h"
/* KMS callbacks for the LS7A1000 display controller. */
static const struct lsdc_kms_funcs ls7a1000_kms_funcs = {
.create_i2c = lsdc_create_i2c_chan,
.irq_handler = ls7a1000_dc_irq_handler,
.output_init = ls7a1000_output_init,
.cursor_plane_init = ls7a1000_cursor_plane_init,
.primary_plane_init = lsdc_primary_plane_init,
.crtc_init = ls7a1000_crtc_init,
};
/* KMS callbacks for the LS7A2000 display controller. */
static const struct lsdc_kms_funcs ls7a2000_kms_funcs = {
.create_i2c = lsdc_create_i2c_chan,
.irq_handler = ls7a2000_dc_irq_handler,
.output_init = ls7a2000_output_init,
.cursor_plane_init = ls7a2000_cursor_plane_init,
.primary_plane_init = lsdc_primary_plane_init,
.crtc_init = ls7a2000_crtc_init,
};
/*
 * LS7A1000 graphics description: two CRTCs, a single 32x32 hardware
 * cursor, 256-byte pitch alignment and no hardware vblank counter.
 */
static const struct loongson_gfx_desc ls7a1000_gfx = {
.dc = {
.num_of_crtc = 2,
.max_pixel_clk = 200000,
.max_width = 2048,
.max_height = 2048,
.num_of_hw_cursor = 1,
.hw_cursor_w = 32,
.hw_cursor_h = 32,
.pitch_align = 256,
.has_vblank_counter = false,
.funcs = &ls7a1000_kms_funcs,
},
.conf_reg_base = LS7A1000_CONF_REG_BASE,
.gfxpll = {
.reg_offset = LS7A1000_PLL_GFX_REG,
.reg_size = 8,
},
.pixpll = {
[0] = {
.reg_offset = LS7A1000_PIXPLL0_REG,
.reg_size = 8,
},
[1] = {
.reg_offset = LS7A1000_PIXPLL1_REG,
.reg_size = 8,
},
},
.chip_id = CHIP_LS7A1000,
.model = "LS7A1000 bridge chipset",
};
/*
 * LS7A2000 graphics description: two CRTCs, per-pipe 64x64 hardware
 * cursors, relaxed 64-byte pitch alignment and a hardware vblank counter.
 */
static const struct loongson_gfx_desc ls7a2000_gfx = {
.dc = {
.num_of_crtc = 2,
.max_pixel_clk = 350000,
.max_width = 4096,
.max_height = 4096,
.num_of_hw_cursor = 2,
.hw_cursor_w = 64,
.hw_cursor_h = 64,
.pitch_align = 64,
.has_vblank_counter = true,
.funcs = &ls7a2000_kms_funcs,
},
.conf_reg_base = LS7A2000_CONF_REG_BASE,
.gfxpll = {
.reg_offset = LS7A2000_PLL_GFX_REG,
.reg_size = 8,
},
.pixpll = {
[0] = {
.reg_offset = LS7A2000_PIXPLL0_REG,
.reg_size = 8,
},
[1] = {
.reg_offset = LS7A2000_PIXPLL1_REG,
.reg_size = 8,
},
},
.chip_id = CHIP_LS7A2000,
.model = "LS7A2000 bridge chipset",
};
/* Maps a loongson_chip_id to its display controller descriptor. */
static const struct lsdc_desc *__chip_id_desc_table[] = {
[CHIP_LS7A1000] = &ls7a1000_gfx.dc,
[CHIP_LS7A2000] = &ls7a2000_gfx.dc,
[CHIP_LS_LAST] = NULL,
};
const struct lsdc_desc *
lsdc_device_probe(struct pci_dev *pdev, enum loongson_chip_id chip_id)
{
return __chip_id_desc_table[chip_id];
}
| linux-master | drivers/gpu/drm/loongson/loongson_device.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2023 Loongson Technology Corporation Limited
*/
#include <drm/drm_debugfs.h>
#include "lsdc_benchmark.h"
#include "lsdc_drv.h"
#include "lsdc_gem.h"
#include "lsdc_ttm.h"
/*
 * A benchmark copy routine: copies @size bytes from @src_bo to @dst_bo,
 * repeated @n times.
 */
typedef void (*lsdc_copy_proc_t)(struct lsdc_bo *src_bo,
struct lsdc_bo *dst_bo,
unsigned int size,
int n);
/*
 * CPU copy from GTT (system RAM) to VRAM: the destination mapping is
 * I/O memory, hence memcpy_toio().
 * NOTE(review): the lsdc_bo_kmap() return values are ignored here —
 * presumably acceptable for a debugfs benchmark; verify on low-memory paths.
 */
static void lsdc_copy_gtt_to_vram_cpu(struct lsdc_bo *src_bo,
struct lsdc_bo *dst_bo,
unsigned int size,
int n)
{
lsdc_bo_kmap(src_bo);
lsdc_bo_kmap(dst_bo);
while (n--)
memcpy_toio(dst_bo->kptr, src_bo->kptr, size);
lsdc_bo_kunmap(src_bo);
lsdc_bo_kunmap(dst_bo);
}
/*
 * CPU copy from VRAM to GTT (system RAM): the source mapping is I/O
 * memory, hence memcpy_fromio().
 */
static void lsdc_copy_vram_to_gtt_cpu(struct lsdc_bo *src_bo,
struct lsdc_bo *dst_bo,
unsigned int size,
int n)
{
lsdc_bo_kmap(src_bo);
lsdc_bo_kmap(dst_bo);
while (n--)
memcpy_fromio(dst_bo->kptr, src_bo->kptr, size);
lsdc_bo_kunmap(src_bo);
lsdc_bo_kunmap(dst_bo);
}
/*
 * CPU copy between two GTT (system RAM) buffers; both sides are normal
 * kernel memory, so a plain memcpy() is sufficient.
 */
static void lsdc_copy_gtt_to_gtt_cpu(struct lsdc_bo *src_bo,
struct lsdc_bo *dst_bo,
unsigned int size,
int n)
{
lsdc_bo_kmap(src_bo);
lsdc_bo_kmap(dst_bo);
while (n--)
memcpy(dst_bo->kptr, src_bo->kptr, size);
lsdc_bo_kunmap(src_bo);
lsdc_bo_kunmap(dst_bo);
}
/*
 * lsdc_benchmark_copy - time @n copies of a @size-byte buffer between
 * @src_domain and @dst_domain using @copy_proc, and print the resulting
 * throughput to @p.
 */
static void lsdc_benchmark_copy(struct lsdc_device *ldev,
				unsigned int size,
				unsigned int n,
				u32 src_domain,
				u32 dst_domain,
				lsdc_copy_proc_t copy_proc,
				struct drm_printer *p)
{
	struct drm_device *ddev = &ldev->base;
	struct lsdc_bo *src_bo;
	struct lsdc_bo *dst_bo;
	unsigned long start_jiffies;
	unsigned long end_jiffies;
	unsigned int throughput;
	unsigned int time;

	/* BO creation can fail; don't dereference ERR_PTR values below. */
	src_bo = lsdc_bo_create_kernel_pinned(ddev, src_domain, size);
	if (IS_ERR(src_bo)) {
		drm_printf(p, "failed to create source bo\n");
		return;
	}

	dst_bo = lsdc_bo_create_kernel_pinned(ddev, dst_domain, size);
	if (IS_ERR(dst_bo)) {
		lsdc_bo_free_kernel_pinned(src_bo);
		drm_printf(p, "failed to create destination bo\n");
		return;
	}

	start_jiffies = jiffies;

	copy_proc(src_bo, dst_bo, size, n);

	end_jiffies = jiffies;

	lsdc_bo_free_kernel_pinned(src_bo);
	lsdc_bo_free_kernel_pinned(dst_bo);

	time = jiffies_to_msecs(end_jiffies - start_jiffies);
	/* Avoid a division by zero when the copies finish within one ms. */
	if (!time)
		time = 1;

	throughput = (n * (size >> 10)) / time;

	drm_printf(p,
		   "Copy bo of %uKiB %u times from %s to %s in %ums: %uMB/s\n",
		   size >> 10, n,
		   lsdc_domain_to_str(src_domain),
		   lsdc_domain_to_str(dst_domain),
		   time, throughput);
}
/*
 * Run the three CPU copy benchmarks (GTT->GTT, GTT->VRAM, VRAM->GTT)
 * on a 1920x1080 XRGB-sized buffer, 60 iterations each, printing the
 * results to @p.  Triggered from debugfs.  Always returns 0.
 */
int lsdc_show_benchmark_copy(struct lsdc_device *ldev, struct drm_printer *p)
{
unsigned int buffer_size = 1920 * 1080 * 4;
unsigned int iteration = 60;
lsdc_benchmark_copy(ldev,
buffer_size,
iteration,
LSDC_GEM_DOMAIN_GTT,
LSDC_GEM_DOMAIN_GTT,
lsdc_copy_gtt_to_gtt_cpu,
p);
lsdc_benchmark_copy(ldev,
buffer_size,
iteration,
LSDC_GEM_DOMAIN_GTT,
LSDC_GEM_DOMAIN_VRAM,
lsdc_copy_gtt_to_vram_cpu,
p);
lsdc_benchmark_copy(ldev,
buffer_size,
iteration,
LSDC_GEM_DOMAIN_VRAM,
LSDC_GEM_DOMAIN_GTT,
lsdc_copy_vram_to_gtt_cpu,
p);
return 0;
}
| linux-master | drivers/gpu/drm/loongson/lsdc_benchmark.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2023 Loongson Technology Corporation Limited
*/
#include <drm/drm_atomic_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_probe_helper.h>
#include "lsdc_drv.h"
#include "lsdc_output.h"
/*
* The display controller in the LS7A1000 exports two DVO interfaces, thus
* external encoder is required, except connected to the DPI panel directly.
*
* ___________________ _________
* | -------| | |
* | CRTC0 --> | DVO0 ----> Encoder0 ---> Connector0 ---> | Display |
* | _ _ -------| ^ ^ |_________|
* | | | | | +------+ | | |
* | |_| |_| | i2c6 | <--------+-------------+
* | +------+ |
* | |
* | DC in LS7A1000 |
* | |
* | _ _ +------+ |
* | | | | | | i2c7 | <--------+-------------+
* | |_| |_| +------+ | | | _________
* | -------| | | | |
* | CRTC1 --> | DVO1 ----> Encoder1 ---> Connector1 ---> | Panel |
* | -------| |_________|
* |___________________|
*
* Currently, we assume the external encoders connected to the DVO are
* transparent. Loongson's DVO interface can directly drive RGB888 panels.
*
* TODO: Add support for non-transparent encoders
*/
/*
 * Fill the connector's mode list: from EDID when a DDC channel is
 * present, otherwise fall back to standard modes up to 1920x1200 with
 * 1024x768 preferred.  Returns the number of modes added.
 */
static int ls7a1000_dpi_connector_get_modes(struct drm_connector *conn)
{
unsigned int num = 0;
struct edid *edid;
if (conn->ddc) {
edid = drm_get_edid(conn, conn->ddc);
if (edid) {
drm_connector_update_edid_property(conn, edid);
num = drm_add_edid_modes(conn, edid);
kfree(edid);
}
return num;
}
/* No DDC: synthesize common modes for a dumb DPI panel. */
num = drm_add_modes_noedid(conn, 1920, 1200);
drm_set_preferred_mode(conn, 1024, 768);
return num;
}
static struct drm_encoder *
ls7a1000_dpi_connector_get_best_encoder(struct drm_connector *connector,
struct drm_atomic_state *state)
{
struct lsdc_output *output = connector_to_lsdc_output(connector);
return &output->encoder;
}
/* Connector helper callbacks for the LS7A1000 DPI outputs. */
static const struct drm_connector_helper_funcs
ls7a1000_dpi_connector_helpers = {
.atomic_best_encoder = ls7a1000_dpi_connector_get_best_encoder,
.get_modes = ls7a1000_dpi_connector_get_modes,
};
/*
 * Probe the DDC bus to decide whether a display is attached; without a
 * DDC channel the state cannot be determined.
 */
static enum drm_connector_status
ls7a1000_dpi_connector_detect(struct drm_connector *connector, bool force)
{
struct i2c_adapter *ddc = connector->ddc;
if (ddc) {
if (drm_probe_ddc(ddc))
return connector_status_connected;
return connector_status_disconnected;
}
return connector_status_unknown;
}
/* Core connector callbacks for the LS7A1000 DPI outputs. */
static const struct drm_connector_funcs ls7a1000_dpi_connector_funcs = {
.detect = ls7a1000_dpi_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = drm_connector_cleanup,
.reset = drm_atomic_helper_connector_reset,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state
};
/* Re-program the DVO0 interface configuration register on reset. */
static void ls7a1000_pipe0_encoder_reset(struct drm_encoder *encoder)
{
struct drm_device *ddev = encoder->dev;
struct lsdc_device *ldev = to_lsdc(ddev);
/*
 * We need this for S3 support, screen will not lightup if don't set
 * this register correctly.
 */
lsdc_wreg32(ldev, LSDC_CRTC0_DVO_CONF_REG,
PHY_CLOCK_POL | PHY_CLOCK_EN | PHY_DATA_EN);
}
/* Re-program the DVO1 interface configuration register on reset. */
static void ls7a1000_pipe1_encoder_reset(struct drm_encoder *encoder)
{
struct drm_device *ddev = encoder->dev;
struct lsdc_device *ldev = to_lsdc(ddev);
/*
 * We need this for S3 support, screen will not lightup if don't set
 * this register correctly.
 */
/* DVO */
/* NOTE(review): BIT(31) here is undocumented in SOURCE — confirm its meaning. */
lsdc_wreg32(ldev, LSDC_CRTC1_DVO_CONF_REG,
BIT(31) | PHY_CLOCK_POL | PHY_CLOCK_EN | PHY_DATA_EN);
}
/* Per-pipe encoder callbacks; index matches the display pipe. */
static const struct drm_encoder_funcs ls7a1000_encoder_funcs[2] = {
{
.reset = ls7a1000_pipe0_encoder_reset,
.destroy = drm_encoder_cleanup,
},
{
.reset = ls7a1000_pipe1_encoder_reset,
.destroy = drm_encoder_cleanup,
},
};
/*
 * ls7a1000_output_init - create the encoder + DPI connector pair for
 * display pipe @index, wiring the given DDC adapter to the connector.
 *
 * Return: 0 on success, negative error code on failure.
 */
int ls7a1000_output_init(struct drm_device *ddev,
struct lsdc_display_pipe *dispipe,
struct i2c_adapter *ddc,
unsigned int index)
{
struct lsdc_output *output = &dispipe->output;
struct drm_encoder *encoder = &output->encoder;
struct drm_connector *connector = &output->connector;
int ret;
ret = drm_encoder_init(ddev, encoder, &ls7a1000_encoder_funcs[index],
DRM_MODE_ENCODER_TMDS, "encoder-%u", index);
if (ret)
return ret;
/* Each encoder is hardwired to its own CRTC. */
encoder->possible_crtcs = BIT(index);
ret = drm_connector_init_with_ddc(ddev, connector,
&ls7a1000_dpi_connector_funcs,
DRM_MODE_CONNECTOR_DPI, ddc);
if (ret)
return ret;
drm_info(ddev, "display pipe-%u has a DVO\n", index);
drm_connector_helper_add(connector, &ls7a1000_dpi_connector_helpers);
drm_connector_attach_encoder(connector, encoder);
/* No hotplug interrupt: rely on periodic polling. */
connector->polled = DRM_CONNECTOR_POLL_CONNECT |
DRM_CONNECTOR_POLL_DISCONNECT;
connector->interlace_allowed = 0;
connector->doublescan_allowed = 0;
return 0;
}
| linux-master | drivers/gpu/drm/loongson/lsdc_output_7a1000.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2023 Loongson Technology Corporation Limited
*/
#include <drm/drm_debugfs.h>
#include "lsdc_benchmark.h"
#include "lsdc_drv.h"
#include "lsdc_gem.h"
#include "lsdc_probe.h"
#include "lsdc_ttm.h"
/* device level debugfs */
/* debugfs: report the host CPU PRID and the containing bridge chipset. */
static int lsdc_identify(struct seq_file *m, void *arg)
{
struct drm_info_node *node = (struct drm_info_node *)m->private;
struct lsdc_device *ldev = (struct lsdc_device *)node->info_ent->data;
const struct loongson_gfx_desc *gfx = to_loongson_gfx(ldev->descp);
u8 impl, rev;
loongson_cpu_get_prid(&impl, &rev);
seq_printf(m, "Running on cpu 0x%x, cpu revision: 0x%x\n",
impl, rev);
seq_printf(m, "Contained in: %s\n", gfx->model);
return 0;
}
/* debugfs: dump the drm_mm managing the device's mmap offset space. */
static int lsdc_show_mm(struct seq_file *m, void *arg)
{
	struct drm_info_node *node = m->private;
	struct drm_device *ddev = node->minor->dev;
	struct drm_printer p = drm_seq_file_printer(m);

	drm_mm_print(&ddev->vma_offset_manager->vm_addr_space_mm, &p);

	return 0;
}
/* debugfs: print the verbose GFX PLL clock state via the pll backend. */
static int lsdc_show_gfxpll_clock(struct seq_file *m, void *arg)
{
struct drm_info_node *node = (struct drm_info_node *)m->private;
struct lsdc_device *ldev = (struct lsdc_device *)node->info_ent->data;
struct drm_printer printer = drm_seq_file_printer(m);
struct loongson_gfxpll *gfxpll = ldev->gfxpll;
gfxpll->funcs->print(gfxpll, &printer, true);
return 0;
}
/* debugfs: run the CPU copy benchmarks and print their results. */
static int lsdc_show_benchmark(struct seq_file *m, void *arg)
{
struct drm_info_node *node = (struct drm_info_node *)m->private;
struct lsdc_device *ldev = (struct lsdc_device *)node->info_ent->data;
struct drm_printer printer = drm_seq_file_printer(m);
lsdc_show_benchmark_copy(ldev, &printer);
return 0;
}
/*
 * debugfs: force-enable PCI memory and I/O decoding on the display
 * controller, printing PCI_COMMAND before and after for inspection.
 * Debug aid for firmware that leaves the device disabled.
 */
static int lsdc_pdev_enable_io_mem(struct seq_file *m, void *arg)
{
struct drm_info_node *node = (struct drm_info_node *)m->private;
struct lsdc_device *ldev = (struct lsdc_device *)node->info_ent->data;
u16 cmd;
pci_read_config_word(ldev->dc, PCI_COMMAND, &cmd);
seq_printf(m, "PCI_COMMAND: 0x%x\n", cmd);
cmd |= PCI_COMMAND_MEMORY | PCI_COMMAND_IO;
pci_write_config_word(ldev->dc, PCI_COMMAND, cmd);
pci_read_config_word(ldev->dc, PCI_COMMAND, &cmd);
seq_printf(m, "PCI_COMMAND: 0x%x\n", cmd);
return 0;
}
/*
 * Device-level debugfs entries; the .data fields are filled in with the
 * lsdc_device pointer by lsdc_debugfs_init() before registration.
 */
static struct drm_info_list lsdc_debugfs_list[] = {
{ "benchmark", lsdc_show_benchmark, 0, NULL },
{ "bos", lsdc_show_buffer_object, 0, NULL },
{ "chips", lsdc_identify, 0, NULL },
{ "clocks", lsdc_show_gfxpll_clock, 0, NULL },
{ "dc_enable", lsdc_pdev_enable_io_mem, 0, NULL },
{ "mm", lsdc_show_mm, 0, NULL },
};
/*
 * Register the device-level debugfs files and the TTM resource manager
 * entries for this drm minor.
 */
void lsdc_debugfs_init(struct drm_minor *minor)
{
struct drm_device *ddev = minor->dev;
struct lsdc_device *ldev = to_lsdc(ddev);
unsigned int n = ARRAY_SIZE(lsdc_debugfs_list);
unsigned int i;
/* Bind the device pointer into each entry before registering. */
for (i = 0; i < n; ++i)
lsdc_debugfs_list[i].data = ldev;
drm_debugfs_create_files(lsdc_debugfs_list, n, minor->debugfs_root, minor);
lsdc_ttm_debugfs_init(ldev);
}
| linux-master | drivers/gpu/drm/loongson/lsdc_debugfs.c |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.